grsecurity-2.9.1-3.10.9-201308282054.patch
1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index b89a739..79768fb 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -2,9 +2,11 @@
6 *.aux
7 *.bin
8 *.bz2
9+*.c.[012]*.*
10 *.cis
11 *.cpio
12 *.csp
13+*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17@@ -14,6 +16,7 @@
18 *.gcov
19 *.gen.S
20 *.gif
21+*.gmo
22 *.grep
23 *.grp
24 *.gz
25@@ -48,14 +51,17 @@
26 *.tab.h
27 *.tex
28 *.ver
29+*.vim
30 *.xml
31 *.xz
32 *_MODULES
33+*_reg_safe.h
34 *_vga16.c
35 *~
36 \#*#
37 *.9
38-.*
39+.[^g]*
40+.gen*
41 .*.d
42 .mm
43 53c700_d.h
44@@ -69,9 +75,11 @@ Image
45 Module.markers
46 Module.symvers
47 PENDING
48+PERF*
49 SCCS
50 System.map*
51 TAGS
52+TRACEEVENT-CFLAGS
53 aconf
54 af_names.h
55 aic7*reg.h*
56@@ -80,6 +88,7 @@ aic7*seq.h*
57 aicasm
58 aicdb.h*
59 altivec*.c
60+ashldi3.S
61 asm-offsets.h
62 asm_offsets.h
63 autoconf.h*
64@@ -92,19 +101,24 @@ bounds.h
65 bsetup
66 btfixupprep
67 build
68+builtin-policy.h
69 bvmlinux
70 bzImage*
71 capability_names.h
72 capflags.c
73 classlist.h*
74+clut_vga16.c
75+common-cmds.h
76 comp*.log
77 compile.h*
78 conf
79 config
80 config-*
81 config_data.h*
82+config.c
83 config.mak
84 config.mak.autogen
85+config.tmp
86 conmakehash
87 consolemap_deftbl.c*
88 cpustr.h
89@@ -115,9 +129,11 @@ devlist.h*
90 dnotify_test
91 docproc
92 dslm
93+dtc-lexer.lex.c
94 elf2ecoff
95 elfconfig.h*
96 evergreen_reg_safe.h
97+exception_policy.conf
98 fixdep
99 flask.h
100 fore200e_mkfirm
101@@ -125,12 +141,15 @@ fore200e_pca_fw.c*
102 gconf
103 gconf.glade.h
104 gen-devlist
105+gen-kdb_cmds.c
106 gen_crc32table
107 gen_init_cpio
108 generated
109 genheaders
110 genksyms
111 *_gray256.c
112+hash
113+hid-example
114 hpet_example
115 hugepage-mmap
116 hugepage-shm
117@@ -145,14 +164,14 @@ int32.c
118 int4.c
119 int8.c
120 kallsyms
121-kconfig
122+kern_constants.h
123 keywords.c
124 ksym.c*
125 ksym.h*
126 kxgettext
127 lex.c
128 lex.*.c
129-linux
130+lib1funcs.S
131 logo_*.c
132 logo_*_clut224.c
133 logo_*_mono.c
134@@ -162,14 +181,15 @@ mach-types.h
135 machtypes.h
136 map
137 map_hugetlb
138-media
139 mconf
140+mdp
141 miboot*
142 mk_elfconfig
143 mkboot
144 mkbugboot
145 mkcpustr
146 mkdep
147+mkpiggy
148 mkprep
149 mkregtable
150 mktables
151@@ -185,6 +205,8 @@ oui.c*
152 page-types
153 parse.c
154 parse.h
155+parse-events*
156+pasyms.h
157 patches*
158 pca200e.bin
159 pca200e_ecd.bin2
160@@ -194,6 +216,7 @@ perf-archive
161 piggyback
162 piggy.gzip
163 piggy.S
164+pmu-*
165 pnmtologo
166 ppc_defs.h*
167 pss_boot.h
168@@ -203,7 +226,10 @@ r200_reg_safe.h
169 r300_reg_safe.h
170 r420_reg_safe.h
171 r600_reg_safe.h
172+realmode.lds
173+realmode.relocs
174 recordmcount
175+regdb.c
176 relocs
177 rlim_names.h
178 rn50_reg_safe.h
179@@ -213,8 +239,12 @@ series
180 setup
181 setup.bin
182 setup.elf
183+signing_key*
184+size_overflow_hash.h
185 sImage
186+slabinfo
187 sm_tbl*
188+sortextable
189 split-include
190 syscalltab.h
191 tables.c
192@@ -224,6 +254,7 @@ tftpboot.img
193 timeconst.h
194 times.h*
195 trix_boot.h
196+user_constants.h
197 utsrelease.h*
198 vdso-syms.lds
199 vdso.lds
200@@ -235,13 +266,17 @@ vdso32.lds
201 vdso32.so.dbg
202 vdso64.lds
203 vdso64.so.dbg
204+vdsox32.lds
205+vdsox32-syms.lds
206 version.h*
207 vmImage
208 vmlinux
209 vmlinux-*
210 vmlinux.aout
211 vmlinux.bin.all
212+vmlinux.bin.bz2
213 vmlinux.lds
214+vmlinux.relocs
215 vmlinuz
216 voffset.h
217 vsyscall.lds
218@@ -249,9 +284,12 @@ vsyscall_32.lds
219 wanxlfw.inc
220 uImage
221 unifdef
222+utsrelease.h
223 wakeup.bin
224 wakeup.elf
225 wakeup.lds
226+x509*
227 zImage*
228 zconf.hash.c
229+zconf.lex.c
230 zoffset.h
231diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
232index 2fe6e76..889ee23 100644
233--- a/Documentation/kernel-parameters.txt
234+++ b/Documentation/kernel-parameters.txt
235@@ -976,6 +976,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
236 Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
237 Default: 1024
238
239+	grsec_proc_gid=	[GRKERNSEC_PROC_USERGROUP] Chooses the GID that is
240+			exempt from grsecurity's /proc restrictions
241+
242+
243 hashdist= [KNL,NUMA] Large hashes allocated during boot
244 are distributed across NUMA nodes. Defaults on
245 for 64-bit NUMA, off otherwise.
246@@ -1928,6 +1932,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
247 noexec=on: enable non-executable mappings (default)
248 noexec=off: disable non-executable mappings
249
250+ nopcid [X86-64]
251+ Disable PCID (Process-Context IDentifier) even if it
252+ is supported by the processor.
253+
254 nosmap [X86]
255 Disable SMAP (Supervisor Mode Access Prevention)
256 even if it is supported by processor.
257@@ -2195,6 +2203,25 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
258 the specified number of seconds. This is to be used if
259 your oopses keep scrolling off the screen.
260
261+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
262+ virtualization environments that don't cope well with the
263+ expand down segment used by UDEREF on X86-32 or the frequent
264+ page table updates on X86-64.
265+
266+ pax_sanitize_slab=
267+ 0/1 to disable/enable slab object sanitization (enabled by
268+ default).
269+
270+	pax_softmode=	0/1 to disable/enable PaX softmode already at boot.
271+
272+ pax_extra_latent_entropy
273+ Enable a very simple form of latent entropy extraction
274+ from the first 4GB of memory as the bootmem allocator
275+ passes the memory pages to the buddy allocator.
276+
277+ pax_weakuderef [X86-64] enables the weaker but faster form of UDEREF
278+ when the processor supports PCID.
279+
280 pcbit= [HW,ISDN]
281
282 pcd. [PARIDE]
283diff --git a/Makefile b/Makefile
284index 4b31d62..ac99d49 100644
285--- a/Makefile
286+++ b/Makefile
287@@ -241,8 +241,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
288
289 HOSTCC = gcc
290 HOSTCXX = g++
291-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
292-HOSTCXXFLAGS = -O2
293+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
294+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
295+HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
296
297 # Decide whether to build built-in, modular, or both.
298 # Normally, just do built-in.
299@@ -414,8 +415,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
300 # Rules shared between *config targets and build targets
301
302 # Basic helpers built in scripts/
303-PHONY += scripts_basic
304-scripts_basic:
305+PHONY += scripts_basic gcc-plugins
306+scripts_basic: gcc-plugins
307 $(Q)$(MAKE) $(build)=scripts/basic
308 $(Q)rm -f .tmp_quiet_recordmcount
309
310@@ -576,6 +577,65 @@ else
311 KBUILD_CFLAGS += -O2
312 endif
313
314+ifndef DISABLE_PAX_PLUGINS
315+ifeq ($(call cc-ifversion, -ge, 0408, y), y)
316+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
317+else
318+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
319+endif
320+ifneq ($(PLUGINCC),)
321+ifdef CONFIG_PAX_CONSTIFY_PLUGIN
322+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
323+endif
324+ifdef CONFIG_PAX_MEMORY_STACKLEAK
325+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
326+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
327+endif
328+ifdef CONFIG_KALLOCSTAT_PLUGIN
329+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
330+endif
331+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
332+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
333+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
334+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
335+endif
336+ifdef CONFIG_CHECKER_PLUGIN
337+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
338+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
339+endif
340+endif
341+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
342+ifdef CONFIG_PAX_SIZE_OVERFLOW
343+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
344+endif
345+ifdef CONFIG_PAX_LATENT_ENTROPY
346+LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
347+endif
348+ifdef CONFIG_PAX_MEMORY_STRUCTLEAK
349+STRUCTLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/structleak_plugin.so -DSTRUCTLEAK_PLUGIN
350+endif
351+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
352+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
353+GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS) $(STRUCTLEAK_PLUGIN_CFLAGS)
354+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
355+export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN
356+ifeq ($(KBUILD_EXTMOD),)
357+gcc-plugins:
358+ $(Q)$(MAKE) $(build)=tools/gcc
359+else
360+gcc-plugins: ;
361+endif
362+else
363+gcc-plugins:
364+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
365+	$(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.)
366+else
367+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
368+endif
369+ $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure. PAX_SIZE_OVERFLOW will not be active."
370+endif
371+endif
372+
373 include $(srctree)/arch/$(SRCARCH)/Makefile
374
375 ifdef CONFIG_READABLE_ASM
376@@ -733,7 +793,7 @@ export mod_sign_cmd
377
378
379 ifeq ($(KBUILD_EXTMOD),)
380-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
381+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
382
383 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
384 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
385@@ -782,6 +842,8 @@ endif
386
387 # The actual objects are generated when descending,
388 # make sure no implicit rule kicks in
389+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
390+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
391 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
392
393 # Handle descending into subdirectories listed in $(vmlinux-dirs)
394@@ -791,7 +853,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
395 # Error messages still appears in the original language
396
397 PHONY += $(vmlinux-dirs)
398-$(vmlinux-dirs): prepare scripts
399+$(vmlinux-dirs): gcc-plugins prepare scripts
400 $(Q)$(MAKE) $(build)=$@
401
402 # Store (new) KERNELRELEASE string in include/config/kernel.release
403@@ -835,6 +897,7 @@ prepare0: archprepare FORCE
404 $(Q)$(MAKE) $(build)=.
405
406 # All the preparing..
407+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
408 prepare: prepare0
409
410 # Generate some files
411@@ -942,6 +1005,8 @@ all: modules
412 # using awk while concatenating to the final file.
413
414 PHONY += modules
415+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
416+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
417 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
418 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
419 @$(kecho) ' Building modules, stage 2.';
420@@ -957,7 +1022,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
421
422 # Target to prepare building external modules
423 PHONY += modules_prepare
424-modules_prepare: prepare scripts
425+modules_prepare: gcc-plugins prepare scripts
426
427 # Target to install modules
428 PHONY += modules_install
429@@ -1023,7 +1088,7 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
430 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
431 signing_key.priv signing_key.x509 x509.genkey \
432 extra_certificates signing_key.x509.keyid \
433- signing_key.x509.signer
434+ signing_key.x509.signer tools/gcc/size_overflow_hash.h
435
436 # clean - Delete most, but leave enough to build external modules
437 #
438@@ -1063,6 +1128,7 @@ distclean: mrproper
439 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
440 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
441 -o -name '.*.rej' \
442+ -o -name '.*.rej' -o -name '*.so' \
443 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
444 -type f -print | xargs rm -f
445
446@@ -1223,6 +1289,8 @@ PHONY += $(module-dirs) modules
447 $(module-dirs): crmodverdir $(objtree)/Module.symvers
448 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
449
450+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
451+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
452 modules: $(module-dirs)
453 @$(kecho) ' Building modules, stage 2.';
454 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
455@@ -1359,17 +1427,21 @@ else
456 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
457 endif
458
459-%.s: %.c prepare scripts FORCE
460+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
461+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
462+%.s: %.c gcc-plugins prepare scripts FORCE
463 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
464 %.i: %.c prepare scripts FORCE
465 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
466-%.o: %.c prepare scripts FORCE
467+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
468+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
469+%.o: %.c gcc-plugins prepare scripts FORCE
470 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
471 %.lst: %.c prepare scripts FORCE
472 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
473-%.s: %.S prepare scripts FORCE
474+%.s: %.S gcc-plugins prepare scripts FORCE
475 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
476-%.o: %.S prepare scripts FORCE
477+%.o: %.S gcc-plugins prepare scripts FORCE
478 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
479 %.symtypes: %.c prepare scripts FORCE
480 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
481@@ -1379,11 +1451,15 @@ endif
482 $(cmd_crmodverdir)
483 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
484 $(build)=$(build-dir)
485-%/: prepare scripts FORCE
486+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
487+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
488+%/: gcc-plugins prepare scripts FORCE
489 $(cmd_crmodverdir)
490 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
491 $(build)=$(build-dir)
492-%.ko: prepare scripts FORCE
493+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
494+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
495+%.ko: gcc-plugins prepare scripts FORCE
496 $(cmd_crmodverdir)
497 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
498 $(build)=$(build-dir) $(@:.ko=.o)
499diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
500index c2cbe4f..f7264b4 100644
501--- a/arch/alpha/include/asm/atomic.h
502+++ b/arch/alpha/include/asm/atomic.h
503@@ -250,6 +250,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
504 #define atomic_dec(v) atomic_sub(1,(v))
505 #define atomic64_dec(v) atomic64_sub(1,(v))
506
507+#define atomic64_read_unchecked(v) atomic64_read(v)
508+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
509+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
510+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
511+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
512+#define atomic64_inc_unchecked(v) atomic64_inc(v)
513+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
514+#define atomic64_dec_unchecked(v) atomic64_dec(v)
515+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
516+
517 #define smp_mb__before_atomic_dec() smp_mb()
518 #define smp_mb__after_atomic_dec() smp_mb()
519 #define smp_mb__before_atomic_inc() smp_mb()
520diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
521index ad368a9..fbe0f25 100644
522--- a/arch/alpha/include/asm/cache.h
523+++ b/arch/alpha/include/asm/cache.h
524@@ -4,19 +4,19 @@
525 #ifndef __ARCH_ALPHA_CACHE_H
526 #define __ARCH_ALPHA_CACHE_H
527
528+#include <linux/const.h>
529
530 /* Bytes per L1 (data) cache line. */
531 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
532-# define L1_CACHE_BYTES 64
533 # define L1_CACHE_SHIFT 6
534 #else
535 /* Both EV4 and EV5 are write-through, read-allocate,
536 direct-mapped, physical.
537 */
538-# define L1_CACHE_BYTES 32
539 # define L1_CACHE_SHIFT 5
540 #endif
541
542+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
543 #define SMP_CACHE_BYTES L1_CACHE_BYTES
544
545 #endif
546diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
547index 968d999..d36b2df 100644
548--- a/arch/alpha/include/asm/elf.h
549+++ b/arch/alpha/include/asm/elf.h
550@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
551
552 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
553
554+#ifdef CONFIG_PAX_ASLR
555+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
556+
557+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
558+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
559+#endif
560+
561 /* $0 is set by ld.so to a pointer to a function which might be
562 registered using atexit. This provides a mean for the dynamic
563 linker to call DT_FINI functions for shared libraries that have
564diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
565index bc2a0da..8ad11ee 100644
566--- a/arch/alpha/include/asm/pgalloc.h
567+++ b/arch/alpha/include/asm/pgalloc.h
568@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
569 pgd_set(pgd, pmd);
570 }
571
572+static inline void
573+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
574+{
575+ pgd_populate(mm, pgd, pmd);
576+}
577+
578 extern pgd_t *pgd_alloc(struct mm_struct *mm);
579
580 static inline void
581diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
582index 81a4342..348b927 100644
583--- a/arch/alpha/include/asm/pgtable.h
584+++ b/arch/alpha/include/asm/pgtable.h
585@@ -102,6 +102,17 @@ struct vm_area_struct;
586 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
587 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
588 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
589+
590+#ifdef CONFIG_PAX_PAGEEXEC
591+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
592+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
593+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
594+#else
595+# define PAGE_SHARED_NOEXEC PAGE_SHARED
596+# define PAGE_COPY_NOEXEC PAGE_COPY
597+# define PAGE_READONLY_NOEXEC PAGE_READONLY
598+#endif
599+
600 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
601
602 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
603diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
604index 2fd00b7..cfd5069 100644
605--- a/arch/alpha/kernel/module.c
606+++ b/arch/alpha/kernel/module.c
607@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
608
609 /* The small sections were sorted to the end of the segment.
610 The following should definitely cover them. */
611- gp = (u64)me->module_core + me->core_size - 0x8000;
612+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
613 got = sechdrs[me->arch.gotsecindex].sh_addr;
614
615 for (i = 0; i < n; i++) {
616diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
617index b9e37ad..44c24e7 100644
618--- a/arch/alpha/kernel/osf_sys.c
619+++ b/arch/alpha/kernel/osf_sys.c
620@@ -1297,10 +1297,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
621 generic version except that we know how to honor ADDR_LIMIT_32BIT. */
622
623 static unsigned long
624-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
625- unsigned long limit)
626+arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
627+ unsigned long limit, unsigned long flags)
628 {
629 struct vm_unmapped_area_info info;
630+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
631
632 info.flags = 0;
633 info.length = len;
634@@ -1308,6 +1309,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
635 info.high_limit = limit;
636 info.align_mask = 0;
637 info.align_offset = 0;
638+ info.threadstack_offset = offset;
639 return vm_unmapped_area(&info);
640 }
641
642@@ -1340,20 +1342,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
643 merely specific addresses, but regions of memory -- perhaps
644 this feature should be incorporated into all ports? */
645
646+#ifdef CONFIG_PAX_RANDMMAP
647+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
648+#endif
649+
650 if (addr) {
651- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
652+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
653 if (addr != (unsigned long) -ENOMEM)
654 return addr;
655 }
656
657 /* Next, try allocating at TASK_UNMAPPED_BASE. */
658- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
659- len, limit);
660+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
661+
662 if (addr != (unsigned long) -ENOMEM)
663 return addr;
664
665 /* Finally, try allocating in low memory. */
666- addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
667+ addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
668
669 return addr;
670 }
671diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
672index 0c4132d..88f0d53 100644
673--- a/arch/alpha/mm/fault.c
674+++ b/arch/alpha/mm/fault.c
675@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
676 __reload_thread(pcb);
677 }
678
679+#ifdef CONFIG_PAX_PAGEEXEC
680+/*
681+ * PaX: decide what to do with offenders (regs->pc = fault address)
682+ *
683+ * returns 1 when task should be killed
684+ * 2 when patched PLT trampoline was detected
685+ * 3 when unpatched PLT trampoline was detected
686+ */
687+static int pax_handle_fetch_fault(struct pt_regs *regs)
688+{
689+
690+#ifdef CONFIG_PAX_EMUPLT
691+ int err;
692+
693+ do { /* PaX: patched PLT emulation #1 */
694+ unsigned int ldah, ldq, jmp;
695+
696+ err = get_user(ldah, (unsigned int *)regs->pc);
697+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
698+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
699+
700+ if (err)
701+ break;
702+
703+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
704+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
705+ jmp == 0x6BFB0000U)
706+ {
707+ unsigned long r27, addr;
708+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
709+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
710+
711+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
712+ err = get_user(r27, (unsigned long *)addr);
713+ if (err)
714+ break;
715+
716+ regs->r27 = r27;
717+ regs->pc = r27;
718+ return 2;
719+ }
720+ } while (0);
721+
722+ do { /* PaX: patched PLT emulation #2 */
723+ unsigned int ldah, lda, br;
724+
725+ err = get_user(ldah, (unsigned int *)regs->pc);
726+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
727+ err |= get_user(br, (unsigned int *)(regs->pc+8));
728+
729+ if (err)
730+ break;
731+
732+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
733+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
734+ (br & 0xFFE00000U) == 0xC3E00000U)
735+ {
736+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
737+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
738+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
739+
740+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
741+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
742+ return 2;
743+ }
744+ } while (0);
745+
746+ do { /* PaX: unpatched PLT emulation */
747+ unsigned int br;
748+
749+ err = get_user(br, (unsigned int *)regs->pc);
750+
751+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
752+ unsigned int br2, ldq, nop, jmp;
753+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
754+
755+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
756+ err = get_user(br2, (unsigned int *)addr);
757+ err |= get_user(ldq, (unsigned int *)(addr+4));
758+ err |= get_user(nop, (unsigned int *)(addr+8));
759+ err |= get_user(jmp, (unsigned int *)(addr+12));
760+ err |= get_user(resolver, (unsigned long *)(addr+16));
761+
762+ if (err)
763+ break;
764+
765+ if (br2 == 0xC3600000U &&
766+ ldq == 0xA77B000CU &&
767+ nop == 0x47FF041FU &&
768+ jmp == 0x6B7B0000U)
769+ {
770+ regs->r28 = regs->pc+4;
771+ regs->r27 = addr+16;
772+ regs->pc = resolver;
773+ return 3;
774+ }
775+ }
776+ } while (0);
777+#endif
778+
779+ return 1;
780+}
781+
782+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
783+{
784+ unsigned long i;
785+
786+ printk(KERN_ERR "PAX: bytes at PC: ");
787+ for (i = 0; i < 5; i++) {
788+ unsigned int c;
789+ if (get_user(c, (unsigned int *)pc+i))
790+ printk(KERN_CONT "???????? ");
791+ else
792+ printk(KERN_CONT "%08x ", c);
793+ }
794+ printk("\n");
795+}
796+#endif
797
798 /*
799 * This routine handles page faults. It determines the address,
800@@ -133,8 +251,29 @@ retry:
801 good_area:
802 si_code = SEGV_ACCERR;
803 if (cause < 0) {
804- if (!(vma->vm_flags & VM_EXEC))
805+ if (!(vma->vm_flags & VM_EXEC)) {
806+
807+#ifdef CONFIG_PAX_PAGEEXEC
808+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
809+ goto bad_area;
810+
811+ up_read(&mm->mmap_sem);
812+ switch (pax_handle_fetch_fault(regs)) {
813+
814+#ifdef CONFIG_PAX_EMUPLT
815+ case 2:
816+ case 3:
817+ return;
818+#endif
819+
820+ }
821+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
822+ do_group_exit(SIGKILL);
823+#else
824 goto bad_area;
825+#endif
826+
827+ }
828 } else if (!cause) {
829 /* Allow reads even for write-only mappings */
830 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
831diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
832index 18a9f5e..ca910b7 100644
833--- a/arch/arm/Kconfig
834+++ b/arch/arm/Kconfig
835@@ -1766,7 +1766,7 @@ config ALIGNMENT_TRAP
836
837 config UACCESS_WITH_MEMCPY
838 bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
839- depends on MMU
840+ depends on MMU && !PAX_MEMORY_UDEREF
841 default y if CPU_FEROCEON
842 help
843 Implement faster copy_to_user and clear_user methods for CPU
844diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
845index da1c77d..2ee6056 100644
846--- a/arch/arm/include/asm/atomic.h
847+++ b/arch/arm/include/asm/atomic.h
848@@ -17,17 +17,35 @@
849 #include <asm/barrier.h>
850 #include <asm/cmpxchg.h>
851
852+#ifdef CONFIG_GENERIC_ATOMIC64
853+#include <asm-generic/atomic64.h>
854+#endif
855+
856 #define ATOMIC_INIT(i) { (i) }
857
858 #ifdef __KERNEL__
859
860+#define _ASM_EXTABLE(from, to) \
861+" .pushsection __ex_table,\"a\"\n"\
862+" .align 3\n" \
863+" .long " #from ", " #to"\n" \
864+" .popsection"
865+
866 /*
867 * On ARM, ordinary assignment (str instruction) doesn't clear the local
868 * strex/ldrex monitor on some implementations. The reason we can use it for
869 * atomic_set() is the clrex or dummy strex done on every exception return.
870 */
871 #define atomic_read(v) (*(volatile int *)&(v)->counter)
872+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
873+{
874+ return v->counter;
875+}
876 #define atomic_set(v,i) (((v)->counter) = (i))
877+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
878+{
879+ v->counter = i;
880+}
881
882 #if __LINUX_ARM_ARCH__ >= 6
883
884@@ -42,6 +60,35 @@ static inline void atomic_add(int i, atomic_t *v)
885 int result;
886
887 __asm__ __volatile__("@ atomic_add\n"
888+"1: ldrex %1, [%3]\n"
889+" adds %0, %1, %4\n"
890+
891+#ifdef CONFIG_PAX_REFCOUNT
892+" bvc 3f\n"
893+"2: bkpt 0xf103\n"
894+"3:\n"
895+#endif
896+
897+" strex %1, %0, [%3]\n"
898+" teq %1, #0\n"
899+" bne 1b"
900+
901+#ifdef CONFIG_PAX_REFCOUNT
902+"\n4:\n"
903+ _ASM_EXTABLE(2b, 4b)
904+#endif
905+
906+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
907+ : "r" (&v->counter), "Ir" (i)
908+ : "cc");
909+}
910+
911+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
912+{
913+ unsigned long tmp;
914+ int result;
915+
916+ __asm__ __volatile__("@ atomic_add_unchecked\n"
917 "1: ldrex %0, [%3]\n"
918 " add %0, %0, %4\n"
919 " strex %1, %0, [%3]\n"
920@@ -60,6 +107,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
921 smp_mb();
922
923 __asm__ __volatile__("@ atomic_add_return\n"
924+"1: ldrex %1, [%3]\n"
925+" adds %0, %1, %4\n"
926+
927+#ifdef CONFIG_PAX_REFCOUNT
928+" bvc 3f\n"
929+" mov %0, %1\n"
930+"2: bkpt 0xf103\n"
931+"3:\n"
932+#endif
933+
934+" strex %1, %0, [%3]\n"
935+" teq %1, #0\n"
936+" bne 1b"
937+
938+#ifdef CONFIG_PAX_REFCOUNT
939+"\n4:\n"
940+ _ASM_EXTABLE(2b, 4b)
941+#endif
942+
943+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
944+ : "r" (&v->counter), "Ir" (i)
945+ : "cc");
946+
947+ smp_mb();
948+
949+ return result;
950+}
951+
952+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
953+{
954+ unsigned long tmp;
955+ int result;
956+
957+ smp_mb();
958+
959+ __asm__ __volatile__("@ atomic_add_return_unchecked\n"
960 "1: ldrex %0, [%3]\n"
961 " add %0, %0, %4\n"
962 " strex %1, %0, [%3]\n"
963@@ -80,6 +163,35 @@ static inline void atomic_sub(int i, atomic_t *v)
964 int result;
965
966 __asm__ __volatile__("@ atomic_sub\n"
967+"1: ldrex %1, [%3]\n"
968+" subs %0, %1, %4\n"
969+
970+#ifdef CONFIG_PAX_REFCOUNT
971+" bvc 3f\n"
972+"2: bkpt 0xf103\n"
973+"3:\n"
974+#endif
975+
976+" strex %1, %0, [%3]\n"
977+" teq %1, #0\n"
978+" bne 1b"
979+
980+#ifdef CONFIG_PAX_REFCOUNT
981+"\n4:\n"
982+ _ASM_EXTABLE(2b, 4b)
983+#endif
984+
985+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
986+ : "r" (&v->counter), "Ir" (i)
987+ : "cc");
988+}
989+
990+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
991+{
992+ unsigned long tmp;
993+ int result;
994+
995+ __asm__ __volatile__("@ atomic_sub_unchecked\n"
996 "1: ldrex %0, [%3]\n"
997 " sub %0, %0, %4\n"
998 " strex %1, %0, [%3]\n"
999@@ -98,11 +210,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
1000 smp_mb();
1001
1002 __asm__ __volatile__("@ atomic_sub_return\n"
1003-"1: ldrex %0, [%3]\n"
1004-" sub %0, %0, %4\n"
1005+"1: ldrex %1, [%3]\n"
1006+" subs %0, %1, %4\n"
1007+
1008+#ifdef CONFIG_PAX_REFCOUNT
1009+" bvc 3f\n"
1010+" mov %0, %1\n"
1011+"2: bkpt 0xf103\n"
1012+"3:\n"
1013+#endif
1014+
1015 " strex %1, %0, [%3]\n"
1016 " teq %1, #0\n"
1017 " bne 1b"
1018+
1019+#ifdef CONFIG_PAX_REFCOUNT
1020+"\n4:\n"
1021+ _ASM_EXTABLE(2b, 4b)
1022+#endif
1023+
1024 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1025 : "r" (&v->counter), "Ir" (i)
1026 : "cc");
1027@@ -134,6 +260,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
1028 return oldval;
1029 }
1030
1031+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
1032+{
1033+ unsigned long oldval, res;
1034+
1035+ smp_mb();
1036+
1037+ do {
1038+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
1039+ "ldrex %1, [%3]\n"
1040+ "mov %0, #0\n"
1041+ "teq %1, %4\n"
1042+ "strexeq %0, %5, [%3]\n"
1043+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1044+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
1045+ : "cc");
1046+ } while (res);
1047+
1048+ smp_mb();
1049+
1050+ return oldval;
1051+}
1052+
1053 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1054 {
1055 unsigned long tmp, tmp2;
1056@@ -167,7 +315,17 @@ static inline int atomic_add_return(int i, atomic_t *v)
1057
1058 return val;
1059 }
1060+
1061+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
1062+{
1063+ return atomic_add_return(i, v);
1064+}
1065+
1066 #define atomic_add(i, v) (void) atomic_add_return(i, v)
1067+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
1068+{
1069+ (void) atomic_add_return(i, v);
1070+}
1071
1072 static inline int atomic_sub_return(int i, atomic_t *v)
1073 {
1074@@ -182,6 +340,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
1075 return val;
1076 }
1077 #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
1078+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
1079+{
1080+ (void) atomic_sub_return(i, v);
1081+}
1082
1083 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1084 {
1085@@ -197,6 +359,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1086 return ret;
1087 }
1088
1089+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
1090+{
1091+ return atomic_cmpxchg(v, old, new);
1092+}
1093+
1094 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1095 {
1096 unsigned long flags;
1097@@ -209,6 +376,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1098 #endif /* __LINUX_ARM_ARCH__ */
1099
1100 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
1101+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
1102+{
1103+ return xchg(&v->counter, new);
1104+}
1105
1106 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1107 {
1108@@ -221,11 +392,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1109 }
1110
1111 #define atomic_inc(v) atomic_add(1, v)
1112+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
1113+{
1114+ atomic_add_unchecked(1, v);
1115+}
1116 #define atomic_dec(v) atomic_sub(1, v)
1117+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
1118+{
1119+ atomic_sub_unchecked(1, v);
1120+}
1121
1122 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
1123+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
1124+{
1125+ return atomic_add_return_unchecked(1, v) == 0;
1126+}
1127 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
1128 #define atomic_inc_return(v) (atomic_add_return(1, v))
1129+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
1130+{
1131+ return atomic_add_return_unchecked(1, v);
1132+}
1133 #define atomic_dec_return(v) (atomic_sub_return(1, v))
1134 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
1135
1136@@ -241,6 +428,14 @@ typedef struct {
1137 u64 __aligned(8) counter;
1138 } atomic64_t;
1139
1140+#ifdef CONFIG_PAX_REFCOUNT
1141+typedef struct {
1142+ u64 __aligned(8) counter;
1143+} atomic64_unchecked_t;
1144+#else
1145+typedef atomic64_t atomic64_unchecked_t;
1146+#endif
1147+
1148 #define ATOMIC64_INIT(i) { (i) }
1149
1150 #ifdef CONFIG_ARM_LPAE
1151@@ -257,6 +452,19 @@ static inline u64 atomic64_read(const atomic64_t *v)
1152 return result;
1153 }
1154
1155+static inline u64 atomic64_read_unchecked(const atomic64_unchecked_t *v)
1156+{
1157+ u64 result;
1158+
1159+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1160+" ldrd %0, %H0, [%1]"
1161+ : "=&r" (result)
1162+ : "r" (&v->counter), "Qo" (v->counter)
1163+ );
1164+
1165+ return result;
1166+}
1167+
1168 static inline void atomic64_set(atomic64_t *v, u64 i)
1169 {
1170 __asm__ __volatile__("@ atomic64_set\n"
1171@@ -265,6 +473,15 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
1172 : "r" (&v->counter), "r" (i)
1173 );
1174 }
1175+
1176+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
1177+{
1178+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1179+" strd %2, %H2, [%1]"
1180+ : "=Qo" (v->counter)
1181+ : "r" (&v->counter), "r" (i)
1182+ );
1183+}
1184 #else
1185 static inline u64 atomic64_read(const atomic64_t *v)
1186 {
1187@@ -279,6 +496,19 @@ static inline u64 atomic64_read(const atomic64_t *v)
1188 return result;
1189 }
1190
1191+static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *v)
1192+{
1193+ u64 result;
1194+
1195+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1196+" ldrexd %0, %H0, [%1]"
1197+ : "=&r" (result)
1198+ : "r" (&v->counter), "Qo" (v->counter)
1199+ );
1200+
1201+ return result;
1202+}
1203+
1204 static inline void atomic64_set(atomic64_t *v, u64 i)
1205 {
1206 u64 tmp;
1207@@ -292,6 +522,21 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
1208 : "r" (&v->counter), "r" (i)
1209 : "cc");
1210 }
1211+
1212+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
1213+{
1214+ u64 tmp;
1215+
1216+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1217+"1: ldrexd %0, %H0, [%2]\n"
1218+" strexd %0, %3, %H3, [%2]\n"
1219+" teq %0, #0\n"
1220+" bne 1b"
1221+ : "=&r" (tmp), "=Qo" (v->counter)
1222+ : "r" (&v->counter), "r" (i)
1223+ : "cc");
1224+}
1225+
1226 #endif
1227
1228 static inline void atomic64_add(u64 i, atomic64_t *v)
1229@@ -302,6 +547,36 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1230 __asm__ __volatile__("@ atomic64_add\n"
1231 "1: ldrexd %0, %H0, [%3]\n"
1232 " adds %0, %0, %4\n"
1233+" adcs %H0, %H0, %H4\n"
1234+
1235+#ifdef CONFIG_PAX_REFCOUNT
1236+" bvc 3f\n"
1237+"2: bkpt 0xf103\n"
1238+"3:\n"
1239+#endif
1240+
1241+" strexd %1, %0, %H0, [%3]\n"
1242+" teq %1, #0\n"
1243+" bne 1b"
1244+
1245+#ifdef CONFIG_PAX_REFCOUNT
1246+"\n4:\n"
1247+ _ASM_EXTABLE(2b, 4b)
1248+#endif
1249+
1250+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1251+ : "r" (&v->counter), "r" (i)
1252+ : "cc");
1253+}
1254+
1255+static inline void atomic64_add_unchecked(u64 i, atomic64_unchecked_t *v)
1256+{
1257+ u64 result;
1258+ unsigned long tmp;
1259+
1260+ __asm__ __volatile__("@ atomic64_add_unchecked\n"
1261+"1: ldrexd %0, %H0, [%3]\n"
1262+" adds %0, %0, %4\n"
1263 " adc %H0, %H0, %H4\n"
1264 " strexd %1, %0, %H0, [%3]\n"
1265 " teq %1, #0\n"
1266@@ -313,12 +588,49 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1267
1268 static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
1269 {
1270- u64 result;
1271- unsigned long tmp;
1272+ u64 result, tmp;
1273
1274 smp_mb();
1275
1276 __asm__ __volatile__("@ atomic64_add_return\n"
1277+"1: ldrexd %1, %H1, [%3]\n"
1278+" adds %0, %1, %4\n"
1279+" adcs %H0, %H1, %H4\n"
1280+
1281+#ifdef CONFIG_PAX_REFCOUNT
1282+" bvc 3f\n"
1283+" mov %0, %1\n"
1284+" mov %H0, %H1\n"
1285+"2: bkpt 0xf103\n"
1286+"3:\n"
1287+#endif
1288+
1289+" strexd %1, %0, %H0, [%3]\n"
1290+" teq %1, #0\n"
1291+" bne 1b"
1292+
1293+#ifdef CONFIG_PAX_REFCOUNT
1294+"\n4:\n"
1295+ _ASM_EXTABLE(2b, 4b)
1296+#endif
1297+
1298+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1299+ : "r" (&v->counter), "r" (i)
1300+ : "cc");
1301+
1302+ smp_mb();
1303+
1304+ return result;
1305+}
1306+
1307+static inline u64 atomic64_add_return_unchecked(u64 i, atomic64_unchecked_t *v)
1308+{
1309+ u64 result;
1310+ unsigned long tmp;
1311+
1312+ smp_mb();
1313+
1314+ __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
1315 "1: ldrexd %0, %H0, [%3]\n"
1316 " adds %0, %0, %4\n"
1317 " adc %H0, %H0, %H4\n"
1318@@ -342,6 +654,36 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1319 __asm__ __volatile__("@ atomic64_sub\n"
1320 "1: ldrexd %0, %H0, [%3]\n"
1321 " subs %0, %0, %4\n"
1322+" sbcs %H0, %H0, %H4\n"
1323+
1324+#ifdef CONFIG_PAX_REFCOUNT
1325+" bvc 3f\n"
1326+"2: bkpt 0xf103\n"
1327+"3:\n"
1328+#endif
1329+
1330+" strexd %1, %0, %H0, [%3]\n"
1331+" teq %1, #0\n"
1332+" bne 1b"
1333+
1334+#ifdef CONFIG_PAX_REFCOUNT
1335+"\n4:\n"
1336+ _ASM_EXTABLE(2b, 4b)
1337+#endif
1338+
1339+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1340+ : "r" (&v->counter), "r" (i)
1341+ : "cc");
1342+}
1343+
1344+static inline void atomic64_sub_unchecked(u64 i, atomic64_unchecked_t *v)
1345+{
1346+ u64 result;
1347+ unsigned long tmp;
1348+
1349+ __asm__ __volatile__("@ atomic64_sub_unchecked\n"
1350+"1: ldrexd %0, %H0, [%3]\n"
1351+" subs %0, %0, %4\n"
1352 " sbc %H0, %H0, %H4\n"
1353 " strexd %1, %0, %H0, [%3]\n"
1354 " teq %1, #0\n"
1355@@ -353,18 +695,32 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1356
1357 static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
1358 {
1359- u64 result;
1360- unsigned long tmp;
1361+ u64 result, tmp;
1362
1363 smp_mb();
1364
1365 __asm__ __volatile__("@ atomic64_sub_return\n"
1366-"1: ldrexd %0, %H0, [%3]\n"
1367-" subs %0, %0, %4\n"
1368-" sbc %H0, %H0, %H4\n"
1369+"1: ldrexd %1, %H1, [%3]\n"
1370+" subs %0, %1, %4\n"
1371+" sbcs %H0, %H1, %H4\n"
1372+
1373+#ifdef CONFIG_PAX_REFCOUNT
1374+" bvc 3f\n"
1375+" mov %0, %1\n"
1376+" mov %H0, %H1\n"
1377+"2: bkpt 0xf103\n"
1378+"3:\n"
1379+#endif
1380+
1381 " strexd %1, %0, %H0, [%3]\n"
1382 " teq %1, #0\n"
1383 " bne 1b"
1384+
1385+#ifdef CONFIG_PAX_REFCOUNT
1386+"\n4:\n"
1387+ _ASM_EXTABLE(2b, 4b)
1388+#endif
1389+
1390 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1391 : "r" (&v->counter), "r" (i)
1392 : "cc");
1393@@ -398,6 +754,30 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
1394 return oldval;
1395 }
1396
1397+static inline u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old, u64 new)
1398+{
1399+ u64 oldval;
1400+ unsigned long res;
1401+
1402+ smp_mb();
1403+
1404+ do {
1405+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1406+ "ldrexd %1, %H1, [%3]\n"
1407+ "mov %0, #0\n"
1408+ "teq %1, %4\n"
1409+ "teqeq %H1, %H4\n"
1410+ "strexdeq %0, %5, %H5, [%3]"
1411+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1412+ : "r" (&ptr->counter), "r" (old), "r" (new)
1413+ : "cc");
1414+ } while (res);
1415+
1416+ smp_mb();
1417+
1418+ return oldval;
1419+}
1420+
1421 static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1422 {
1423 u64 result;
1424@@ -421,21 +801,34 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1425
1426 static inline u64 atomic64_dec_if_positive(atomic64_t *v)
1427 {
1428- u64 result;
1429- unsigned long tmp;
1430+ u64 result, tmp;
1431
1432 smp_mb();
1433
1434 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1435-"1: ldrexd %0, %H0, [%3]\n"
1436-" subs %0, %0, #1\n"
1437-" sbc %H0, %H0, #0\n"
1438+"1: ldrexd %1, %H1, [%3]\n"
1439+" subs %0, %1, #1\n"
1440+" sbcs %H0, %H1, #0\n"
1441+
1442+#ifdef CONFIG_PAX_REFCOUNT
1443+" bvc 3f\n"
1444+" mov %0, %1\n"
1445+" mov %H0, %H1\n"
1446+"2: bkpt 0xf103\n"
1447+"3:\n"
1448+#endif
1449+
1450 " teq %H0, #0\n"
1451-" bmi 2f\n"
1452+" bmi 4f\n"
1453 " strexd %1, %0, %H0, [%3]\n"
1454 " teq %1, #0\n"
1455 " bne 1b\n"
1456-"2:"
1457+"4:\n"
1458+
1459+#ifdef CONFIG_PAX_REFCOUNT
1460+ _ASM_EXTABLE(2b, 4b)
1461+#endif
1462+
1463 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1464 : "r" (&v->counter)
1465 : "cc");
1466@@ -458,13 +851,25 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1467 " teq %0, %5\n"
1468 " teqeq %H0, %H5\n"
1469 " moveq %1, #0\n"
1470-" beq 2f\n"
1471+" beq 4f\n"
1472 " adds %0, %0, %6\n"
1473-" adc %H0, %H0, %H6\n"
1474+" adcs %H0, %H0, %H6\n"
1475+
1476+#ifdef CONFIG_PAX_REFCOUNT
1477+" bvc 3f\n"
1478+"2: bkpt 0xf103\n"
1479+"3:\n"
1480+#endif
1481+
1482 " strexd %2, %0, %H0, [%4]\n"
1483 " teq %2, #0\n"
1484 " bne 1b\n"
1485-"2:"
1486+"4:\n"
1487+
1488+#ifdef CONFIG_PAX_REFCOUNT
1489+ _ASM_EXTABLE(2b, 4b)
1490+#endif
1491+
1492 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1493 : "r" (&v->counter), "r" (u), "r" (a)
1494 : "cc");
1495@@ -477,10 +882,13 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1496
1497 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1498 #define atomic64_inc(v) atomic64_add(1LL, (v))
1499+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1500 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1501+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1502 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1503 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1504 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1505+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1506 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1507 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1508 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
1509diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1510index 75fe66b..ba3dee4 100644
1511--- a/arch/arm/include/asm/cache.h
1512+++ b/arch/arm/include/asm/cache.h
1513@@ -4,8 +4,10 @@
1514 #ifndef __ASMARM_CACHE_H
1515 #define __ASMARM_CACHE_H
1516
1517+#include <linux/const.h>
1518+
1519 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1520-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1521+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1522
1523 /*
1524 * Memory returned by kmalloc() may be used for DMA, so we must make
1525@@ -24,5 +26,6 @@
1526 #endif
1527
1528 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
1529+#define __read_only __attribute__ ((__section__(".data..read_only")))
1530
1531 #endif
1532diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1533index 17d0ae8..014e350 100644
1534--- a/arch/arm/include/asm/cacheflush.h
1535+++ b/arch/arm/include/asm/cacheflush.h
1536@@ -116,7 +116,7 @@ struct cpu_cache_fns {
1537 void (*dma_unmap_area)(const void *, size_t, int);
1538
1539 void (*dma_flush_range)(const void *, const void *);
1540-};
1541+} __no_const;
1542
1543 /*
1544 * Select the calling method
1545diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
1546index 6dcc164..b14d917 100644
1547--- a/arch/arm/include/asm/checksum.h
1548+++ b/arch/arm/include/asm/checksum.h
1549@@ -37,7 +37,19 @@ __wsum
1550 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
1551
1552 __wsum
1553-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1554+__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1555+
1556+static inline __wsum
1557+csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
1558+{
1559+ __wsum ret;
1560+ pax_open_userland();
1561+ ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
1562+ pax_close_userland();
1563+ return ret;
1564+}
1565+
1566+
1567
1568 /*
1569 * Fold a partial checksum without adding pseudo headers
1570diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
1571index 4f009c1..466c59b 100644
1572--- a/arch/arm/include/asm/cmpxchg.h
1573+++ b/arch/arm/include/asm/cmpxchg.h
1574@@ -102,6 +102,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
1575
1576 #define xchg(ptr,x) \
1577 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1578+#define xchg_unchecked(ptr,x) \
1579+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1580
1581 #include <asm-generic/cmpxchg-local.h>
1582
1583diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
1584index 6ddbe44..b5e38b1 100644
1585--- a/arch/arm/include/asm/domain.h
1586+++ b/arch/arm/include/asm/domain.h
1587@@ -48,18 +48,37 @@
1588 * Domain types
1589 */
1590 #define DOMAIN_NOACCESS 0
1591-#define DOMAIN_CLIENT 1
1592 #ifdef CONFIG_CPU_USE_DOMAINS
1593+#define DOMAIN_USERCLIENT 1
1594+#define DOMAIN_KERNELCLIENT 1
1595 #define DOMAIN_MANAGER 3
1596+#define DOMAIN_VECTORS DOMAIN_USER
1597 #else
1598+
1599+#ifdef CONFIG_PAX_KERNEXEC
1600 #define DOMAIN_MANAGER 1
1601+#define DOMAIN_KERNEXEC 3
1602+#else
1603+#define DOMAIN_MANAGER 1
1604+#endif
1605+
1606+#ifdef CONFIG_PAX_MEMORY_UDEREF
1607+#define DOMAIN_USERCLIENT 0
1608+#define DOMAIN_UDEREF 1
1609+#define DOMAIN_VECTORS DOMAIN_KERNEL
1610+#else
1611+#define DOMAIN_USERCLIENT 1
1612+#define DOMAIN_VECTORS DOMAIN_USER
1613+#endif
1614+#define DOMAIN_KERNELCLIENT 1
1615+
1616 #endif
1617
1618 #define domain_val(dom,type) ((type) << (2*(dom)))
1619
1620 #ifndef __ASSEMBLY__
1621
1622-#ifdef CONFIG_CPU_USE_DOMAINS
1623+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
1624 static inline void set_domain(unsigned val)
1625 {
1626 asm volatile(
1627@@ -68,15 +87,7 @@ static inline void set_domain(unsigned val)
1628 isb();
1629 }
1630
1631-#define modify_domain(dom,type) \
1632- do { \
1633- struct thread_info *thread = current_thread_info(); \
1634- unsigned int domain = thread->cpu_domain; \
1635- domain &= ~domain_val(dom, DOMAIN_MANAGER); \
1636- thread->cpu_domain = domain | domain_val(dom, type); \
1637- set_domain(thread->cpu_domain); \
1638- } while (0)
1639-
1640+extern void modify_domain(unsigned int dom, unsigned int type);
1641 #else
1642 static inline void set_domain(unsigned val) { }
1643 static inline void modify_domain(unsigned dom, unsigned type) { }
1644diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1645index 56211f2..17e8a25 100644
1646--- a/arch/arm/include/asm/elf.h
1647+++ b/arch/arm/include/asm/elf.h
1648@@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1649 the loader. We need to make sure that it is out of the way of the program
1650 that it will "exec", and that there is sufficient room for the brk. */
1651
1652-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1653+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1654+
1655+#ifdef CONFIG_PAX_ASLR
1656+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1657+
1658+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1659+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1660+#endif
1661
1662 /* When the program starts, a1 contains a pointer to a function to be
1663 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1664@@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1665 extern void elf_set_personality(const struct elf32_hdr *);
1666 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1667
1668-struct mm_struct;
1669-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1670-#define arch_randomize_brk arch_randomize_brk
1671-
1672 #ifdef CONFIG_MMU
1673 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1674 struct linux_binprm;
1675diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
1676index de53547..52b9a28 100644
1677--- a/arch/arm/include/asm/fncpy.h
1678+++ b/arch/arm/include/asm/fncpy.h
1679@@ -81,7 +81,9 @@
1680 BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
1681 (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
1682 \
1683+ pax_open_kernel(); \
1684 memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
1685+ pax_close_kernel(); \
1686 flush_icache_range((unsigned long)(dest_buf), \
1687 (unsigned long)(dest_buf) + (size)); \
1688 \
1689diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
1690index e42cf59..7b94b8f 100644
1691--- a/arch/arm/include/asm/futex.h
1692+++ b/arch/arm/include/asm/futex.h
1693@@ -50,6 +50,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1694 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1695 return -EFAULT;
1696
1697+ pax_open_userland();
1698+
1699 smp_mb();
1700 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1701 "1: ldrex %1, [%4]\n"
1702@@ -65,6 +67,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1703 : "cc", "memory");
1704 smp_mb();
1705
1706+ pax_close_userland();
1707+
1708 *uval = val;
1709 return ret;
1710 }
1711@@ -95,6 +99,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1712 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1713 return -EFAULT;
1714
1715+ pax_open_userland();
1716+
1717 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1718 "1: " TUSER(ldr) " %1, [%4]\n"
1719 " teq %1, %2\n"
1720@@ -105,6 +111,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1721 : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
1722 : "cc", "memory");
1723
1724+ pax_close_userland();
1725+
1726 *uval = val;
1727 return ret;
1728 }
1729@@ -127,6 +135,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1730 return -EFAULT;
1731
1732 pagefault_disable(); /* implies preempt_disable() */
1733+ pax_open_userland();
1734
1735 switch (op) {
1736 case FUTEX_OP_SET:
1737@@ -148,6 +157,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1738 ret = -ENOSYS;
1739 }
1740
1741+ pax_close_userland();
1742 pagefault_enable(); /* subsumes preempt_enable() */
1743
1744 if (!ret) {
1745diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1746index 83eb2f7..ed77159 100644
1747--- a/arch/arm/include/asm/kmap_types.h
1748+++ b/arch/arm/include/asm/kmap_types.h
1749@@ -4,6 +4,6 @@
1750 /*
1751 * This is the "bare minimum". AIO seems to require this.
1752 */
1753-#define KM_TYPE_NR 16
1754+#define KM_TYPE_NR 17
1755
1756 #endif
1757diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
1758index 9e614a1..3302cca 100644
1759--- a/arch/arm/include/asm/mach/dma.h
1760+++ b/arch/arm/include/asm/mach/dma.h
1761@@ -22,7 +22,7 @@ struct dma_ops {
1762 int (*residue)(unsigned int, dma_t *); /* optional */
1763 int (*setspeed)(unsigned int, dma_t *, int); /* optional */
1764 const char *type;
1765-};
1766+} __do_const;
1767
1768 struct dma_struct {
1769 void *addr; /* single DMA address */
1770diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
1771index 2fe141f..192dc01 100644
1772--- a/arch/arm/include/asm/mach/map.h
1773+++ b/arch/arm/include/asm/mach/map.h
1774@@ -27,13 +27,16 @@ struct map_desc {
1775 #define MT_MINICLEAN 6
1776 #define MT_LOW_VECTORS 7
1777 #define MT_HIGH_VECTORS 8
1778-#define MT_MEMORY 9
1779+#define MT_MEMORY_RWX 9
1780 #define MT_ROM 10
1781-#define MT_MEMORY_NONCACHED 11
1782+#define MT_MEMORY_NONCACHED_RX 11
1783 #define MT_MEMORY_DTCM 12
1784 #define MT_MEMORY_ITCM 13
1785 #define MT_MEMORY_SO 14
1786 #define MT_MEMORY_DMA_READY 15
1787+#define MT_MEMORY_RW 16
1788+#define MT_MEMORY_RX 17
1789+#define MT_MEMORY_NONCACHED_RW 18
1790
1791 #ifdef CONFIG_MMU
1792 extern void iotable_init(struct map_desc *, int);
1793diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1794index 12f71a1..04e063c 100644
1795--- a/arch/arm/include/asm/outercache.h
1796+++ b/arch/arm/include/asm/outercache.h
1797@@ -35,7 +35,7 @@ struct outer_cache_fns {
1798 #endif
1799 void (*set_debug)(unsigned long);
1800 void (*resume)(void);
1801-};
1802+} __no_const;
1803
1804 #ifdef CONFIG_OUTER_CACHE
1805
1806diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1807index cbdc7a2..32f44fe 100644
1808--- a/arch/arm/include/asm/page.h
1809+++ b/arch/arm/include/asm/page.h
1810@@ -114,7 +114,7 @@ struct cpu_user_fns {
1811 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1812 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1813 unsigned long vaddr, struct vm_area_struct *vma);
1814-};
1815+} __no_const;
1816
1817 #ifdef MULTI_USER
1818 extern struct cpu_user_fns cpu_user;
1819diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1820index 943504f..c37a730 100644
1821--- a/arch/arm/include/asm/pgalloc.h
1822+++ b/arch/arm/include/asm/pgalloc.h
1823@@ -17,6 +17,7 @@
1824 #include <asm/processor.h>
1825 #include <asm/cacheflush.h>
1826 #include <asm/tlbflush.h>
1827+#include <asm/system_info.h>
1828
1829 #define check_pgt_cache() do { } while (0)
1830
1831@@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1832 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1833 }
1834
1835+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1836+{
1837+ pud_populate(mm, pud, pmd);
1838+}
1839+
1840 #else /* !CONFIG_ARM_LPAE */
1841
1842 /*
1843@@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1844 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1845 #define pmd_free(mm, pmd) do { } while (0)
1846 #define pud_populate(mm,pmd,pte) BUG()
1847+#define pud_populate_kernel(mm,pmd,pte) BUG()
1848
1849 #endif /* CONFIG_ARM_LPAE */
1850
1851@@ -126,6 +133,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
1852 __free_page(pte);
1853 }
1854
1855+static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
1856+{
1857+#ifdef CONFIG_ARM_LPAE
1858+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1859+#else
1860+ if (addr & SECTION_SIZE)
1861+ pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
1862+ else
1863+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1864+#endif
1865+ flush_pmd_entry(pmdp);
1866+}
1867+
1868 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
1869 pmdval_t prot)
1870 {
1871@@ -155,7 +175,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
1872 static inline void
1873 pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
1874 {
1875- __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE);
1876+ __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE | __supported_pmd_mask);
1877 }
1878 #define pmd_pgtable(pmd) pmd_page(pmd)
1879
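__section_update() exists because, without LPAE, the ARM first-level table is managed in pairs: one Linux pmd covers 2MB backed by two 1MB hardware section descriptors, and bit 20 of the address (SECTION_SIZE) selects which half to modify before the entry is flushed. A standalone sketch of that selection, assuming the classic 1MB section size:

    /* Sketch: pick which of the two 1MB section entries in a 2MB
     * pmd pair an address falls into (non-LPAE 32-bit ARM). */
    #include <stdio.h>

    #define SECTION_SIZE (1UL << 20)    /* 1MB hardware sections */

    static int section_index(unsigned long addr)
    {
        return (addr & SECTION_SIZE) ? 1 : 0;
    }

    int main(void)
    {
        printf("%d %d\n", section_index(0xc0000000UL),   /* 0: even MB */
                          section_index(0xc0100000UL));  /* 1: odd MB  */
        return 0;
    }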
1880diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
1881index 5cfba15..f415e1a 100644
1882--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
1883+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
1884@@ -20,12 +20,15 @@
1885 #define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
1886 #define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0)
1887 #define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0)
1888+#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */
1889 #define PMD_BIT4 (_AT(pmdval_t, 1) << 4)
1890 #define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5)
1891 #define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */
1892+
1893 /*
1894 * - section
1895 */
1896+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1897 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1898 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1899 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
1900@@ -37,6 +40,7 @@
1901 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
1902 #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
1903 #define PMD_SECT_AF (_AT(pmdval_t, 0))
1904+#define PMD_SECT_RDONLY (_AT(pmdval_t, 0))
1905
1906 #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
1907 #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
1908@@ -66,6 +70,7 @@
1909 * - extended small page/tiny page
1910 */
1911 #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
1912+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */
1913 #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
1914 #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
1915 #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
1916diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
1917index f97ee02..cc9fe9e 100644
1918--- a/arch/arm/include/asm/pgtable-2level.h
1919+++ b/arch/arm/include/asm/pgtable-2level.h
1920@@ -126,6 +126,9 @@
1921 #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
1922 #define L_PTE_NONE (_AT(pteval_t, 1) << 11)
1923
1924+/* Two-level page tables only have PXN in the PGD, not in the PTE. */
1925+#define L_PTE_PXN (_AT(pteval_t, 0))
1926+
1927 /*
1928 * These are the memory types, defined to be compatible with
1929 * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
1930diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
1931index 18f5cef..25b8f43 100644
1932--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
1933+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
1934@@ -41,6 +41,7 @@
1935 */
1936 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1937 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1938+#define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7)
1939 #define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
1940 #define PMD_SECT_AF (_AT(pmdval_t, 1) << 10)
1941 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 11)
1942@@ -71,6 +72,7 @@
1943 #define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1944 #define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
1945 #define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */
1946+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1947 #define PTE_EXT_XN (_AT(pteval_t, 1) << 54) /* XN */
1948
1949 /*
1950diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
1951index 86b8fe3..e25f975 100644
1952--- a/arch/arm/include/asm/pgtable-3level.h
1953+++ b/arch/arm/include/asm/pgtable-3level.h
1954@@ -74,6 +74,7 @@
1955 #define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */
1956 #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1957 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
1958+#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1959 #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
1960 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55) /* unused */
1961 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */
1962@@ -82,6 +83,7 @@
1963 /*
1964 * To be used in assembly code with the upper page attributes.
1965 */
1966+#define L_PTE_PXN_HIGH (1 << (53 - 32))
1967 #define L_PTE_XN_HIGH (1 << (54 - 32))
1968 #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
1969
1970diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
1971index 9bcd262..1ff999b 100644
1972--- a/arch/arm/include/asm/pgtable.h
1973+++ b/arch/arm/include/asm/pgtable.h
1974@@ -30,6 +30,9 @@
1975 #include <asm/pgtable-2level.h>
1976 #endif
1977
1978+#define ktla_ktva(addr) (addr)
1979+#define ktva_ktla(addr) (addr)
1980+
1981 /*
1982 * Just any arbitrary offset to the start of the vmalloc VM area: the
1983 * current 8MB value just means that there will be a 8MB "hole" after the
1984@@ -45,6 +48,9 @@
1985 #define LIBRARY_TEXT_START 0x0c000000
1986
1987 #ifndef __ASSEMBLY__
1988+extern pteval_t __supported_pte_mask;
1989+extern pmdval_t __supported_pmd_mask;
1990+
1991 extern void __pte_error(const char *file, int line, pte_t);
1992 extern void __pmd_error(const char *file, int line, pmd_t);
1993 extern void __pgd_error(const char *file, int line, pgd_t);
1994@@ -53,6 +59,48 @@ extern void __pgd_error(const char *file, int line, pgd_t);
1995 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
1996 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
1997
1998+#define __HAVE_ARCH_PAX_OPEN_KERNEL
1999+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
2000+
2001+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2002+#include <asm/domain.h>
2003+#include <linux/thread_info.h>
2004+#include <linux/preempt.h>
2005+
2006+static inline int test_domain(int domain, int domaintype)
2007+{
2008+ return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
2009+}
2010+#endif
2011+
2012+#ifdef CONFIG_PAX_KERNEXEC
2013+static inline unsigned long pax_open_kernel(void) {
2014+#ifdef CONFIG_ARM_LPAE
2015+ /* TODO */
2016+#else
2017+ preempt_disable();
2018+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
2019+ modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
2020+#endif
2021+ return 0;
2022+}
2023+
2024+static inline unsigned long pax_close_kernel(void) {
2025+#ifdef CONFIG_ARM_LPAE
2026+ /* TODO */
2027+#else
2028+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
2029+ /* DOMAIN_MANAGER = "client" under KERNEXEC */
2030+ modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
2031+ preempt_enable_no_resched();
2032+#endif
2033+ return 0;
2034+}
2035+#else
2036+static inline unsigned long pax_open_kernel(void) { return 0; }
2037+static inline unsigned long pax_close_kernel(void) { return 0; }
2038+#endif
2039+
2040 /*
2041 * This is the lowest virtual address we can permit any user space
2042 * mapping to be mapped at. This is particularly important for
2043@@ -72,8 +120,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2044 /*
2045 * The pgprot_* and protection_map entries will be fixed up in runtime
2046 * to include the cachable and bufferable bits based on memory policy,
2047- * as well as any architecture dependent bits like global/ASID and SMP
2048- * shared mapping bits.
2049+ * as well as any architecture dependent bits like global/ASID, PXN,
2050+ * and SMP shared mapping bits.
2051 */
2052 #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
2053
2054@@ -257,7 +305,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
2055 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
2056 {
2057 const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
2058- L_PTE_NONE | L_PTE_VALID;
2059+ L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
2060 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
2061 return pte;
2062 }
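pax_open_kernel()/pax_close_kernel() form the KERNEXEC write bracket: they temporarily retype DOMAIN_KERNEL in the domain access control register so deliberate writes to otherwise read-only kernel text and rodata (instruction patching, vector-page updates in later hunks) can proceed, with preemption disabled so the relaxed window never leaks to another context. A userspace model of the same bracket built on mprotect(), purely to illustrate the pattern (the kernel version flips CP15 c3 domain bits instead):

    /* Model: keep a "kernel" page read-only; open a short, explicit
     * write window only around intentional modifications. */
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    static void *page;
    static size_t pgsz;

    static void open_kernel(void)  { mprotect(page, pgsz, PROT_READ | PROT_WRITE); }
    static void close_kernel(void) { mprotect(page, pgsz, PROT_READ); }

    int main(void)
    {
        pgsz = (size_t)sysconf(_SC_PAGESIZE);
        page = mmap(NULL, pgsz, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (page == MAP_FAILED)
            return 1;
        strcpy(page, "initial");
        close_kernel();            /* page is now read-only */

        open_kernel();             /* bracketed, intentional write */
        strcpy(page, "patched");
        close_kernel();

        puts(page);                /* prints "patched" */
        return 0;
    }

The same open/write/close shape appears verbatim in the arch/arm/kernel/patch.c and traps.c hunks below.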
2063diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
2064index f3628fb..a0672dd 100644
2065--- a/arch/arm/include/asm/proc-fns.h
2066+++ b/arch/arm/include/asm/proc-fns.h
2067@@ -75,7 +75,7 @@ extern struct processor {
2068 unsigned int suspend_size;
2069 void (*do_suspend)(void *);
2070 void (*do_resume)(void *);
2071-} processor;
2072+} __do_const processor;
2073
2074 #ifndef MULTI_CPU
2075 extern void cpu_proc_init(void);
2076diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
2077index ce0dbe7..c085b6f 100644
2078--- a/arch/arm/include/asm/psci.h
2079+++ b/arch/arm/include/asm/psci.h
2080@@ -29,7 +29,7 @@ struct psci_operations {
2081 int (*cpu_off)(struct psci_power_state state);
2082 int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
2083 int (*migrate)(unsigned long cpuid);
2084-};
2085+} __no_const;
2086
2087 extern struct psci_operations psci_ops;
2088
2089diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
2090index d3a22be..3a69ad5 100644
2091--- a/arch/arm/include/asm/smp.h
2092+++ b/arch/arm/include/asm/smp.h
2093@@ -107,7 +107,7 @@ struct smp_operations {
2094 int (*cpu_disable)(unsigned int cpu);
2095 #endif
2096 #endif
2097-};
2098+} __no_const;
2099
2100 /*
2101 * set platform specific SMP operations
2102diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
2103index f00b569..aa5bb41 100644
2104--- a/arch/arm/include/asm/thread_info.h
2105+++ b/arch/arm/include/asm/thread_info.h
2106@@ -77,9 +77,9 @@ struct thread_info {
2107 .flags = 0, \
2108 .preempt_count = INIT_PREEMPT_COUNT, \
2109 .addr_limit = KERNEL_DS, \
2110- .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2111- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2112- domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
2113+ .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
2114+ domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
2115+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
2116 .restart_block = { \
2117 .fn = do_no_restart_syscall, \
2118 }, \
2119@@ -152,7 +152,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2120 #define TIF_SYSCALL_AUDIT 9
2121 #define TIF_SYSCALL_TRACEPOINT 10
2122 #define TIF_SECCOMP 11 /* seccomp syscall filtering active */
2123-#define TIF_NOHZ 12 /* in adaptive nohz mode */
2124+/* within 8 bits of TIF_SYSCALL_TRACE
2125+ * to meet flexible second operand requirements
2126+ */
2127+#define TIF_GRSEC_SETXID 12
2128+#define TIF_NOHZ 13 /* in adaptive nohz mode */
2129 #define TIF_USING_IWMMXT 17
2130 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
2131 #define TIF_RESTORE_SIGMASK 20
2132@@ -165,10 +169,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2133 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
2134 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
2135 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
2136+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
2137
2138 /* Checks for any syscall work in entry-common.S */
2139 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
2140- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
2141+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
2142
2143 /*
2144 * Change these and you break ASM code in entry-common.S
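The shuffled TIF bits are constrained by ARM's "flexible second operand": a data-processing instruction can only embed an immediate expressible as an 8-bit value rotated right by an even amount, so a flag mask tested directly in entry-common.S must keep all of its bits inside one such rotated byte. That is why TIF_GRSEC_SETXID lands at bit 12, within eight bits of TIF_SYSCALL_TRACE, and TIF_NOHZ moves up to 13. A small host-side checker for the encoding rule (a sketch, not kernel code):

    /* Returns 1 if v fits ARM's modified-immediate encoding: an
     * 8-bit value rotated right by an even amount (0, 2, ..., 30). */
    #include <stdint.h>
    #include <stdio.h>

    static int arm_imm_encodable(uint32_t v)
    {
        for (unsigned rot = 0; rot < 32; rot += 2) {
            /* v == imm8 ROR rot  <=>  imm8 == v rotated left by rot */
            uint32_t imm8 = (v << rot) | (rot ? v >> (32 - rot) : 0);
            if (imm8 <= 0xff)
                return 1;
        }
        return 0;
    }

    int main(void)
    {
        printf("0x1f00: %d\n", arm_imm_encodable(0x1f00));         /* 1 */
        printf("0x00ff00ff: %d\n", arm_imm_encodable(0x00ff00ff)); /* 0 */
        return 0;
    }

Bits 8-12 all fit in one rotated byte, so the extended _TIF_SYSCALL_WORK mask (0x1f00) stays encodable; a flag at bit 17, say, would have broken it.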
2145diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
2146index 7e1f760..de33b13 100644
2147--- a/arch/arm/include/asm/uaccess.h
2148+++ b/arch/arm/include/asm/uaccess.h
2149@@ -18,6 +18,7 @@
2150 #include <asm/domain.h>
2151 #include <asm/unified.h>
2152 #include <asm/compiler.h>
2153+#include <asm/pgtable.h>
2154
2155 #define VERIFY_READ 0
2156 #define VERIFY_WRITE 1
2157@@ -63,11 +64,38 @@ extern int __put_user_bad(void);
2158 static inline void set_fs(mm_segment_t fs)
2159 {
2160 current_thread_info()->addr_limit = fs;
2161- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
2162+ modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
2163 }
2164
2165 #define segment_eq(a,b) ((a) == (b))
2166
2167+#define __HAVE_ARCH_PAX_OPEN_USERLAND
2168+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
2169+
2170+static inline void pax_open_userland(void)
2171+{
2172+
2173+#ifdef CONFIG_PAX_MEMORY_UDEREF
2174+ if (segment_eq(get_fs(), USER_DS)) {
2175+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
2176+ modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
2177+ }
2178+#endif
2179+
2180+}
2181+
2182+static inline void pax_close_userland(void)
2183+{
2184+
2185+#ifdef CONFIG_PAX_MEMORY_UDEREF
2186+ if (segment_eq(get_fs(), USER_DS)) {
2187+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
2188+ modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
2189+ }
2190+#endif
2191+
2192+}
2193+
2194 #define __addr_ok(addr) ({ \
2195 unsigned long flag; \
2196 __asm__("cmp %2, %0; movlo %0, #0" \
2197@@ -143,8 +171,12 @@ extern int __get_user_4(void *);
2198
2199 #define get_user(x,p) \
2200 ({ \
2201+ int __e; \
2202 might_fault(); \
2203- __get_user_check(x,p); \
2204+ pax_open_userland(); \
2205+ __e = __get_user_check(x,p); \
2206+ pax_close_userland(); \
2207+ __e; \
2208 })
2209
2210 extern int __put_user_1(void *, unsigned int);
2211@@ -188,8 +220,12 @@ extern int __put_user_8(void *, unsigned long long);
2212
2213 #define put_user(x,p) \
2214 ({ \
2215+ int __e; \
2216 might_fault(); \
2217- __put_user_check(x,p); \
2218+ pax_open_userland(); \
2219+ __e = __put_user_check(x,p); \
2220+ pax_close_userland(); \
2221+ __e; \
2222 })
2223
2224 #else /* CONFIG_MMU */
2225@@ -230,13 +266,17 @@ static inline void set_fs(mm_segment_t fs)
2226 #define __get_user(x,ptr) \
2227 ({ \
2228 long __gu_err = 0; \
2229+ pax_open_userland(); \
2230 __get_user_err((x),(ptr),__gu_err); \
2231+ pax_close_userland(); \
2232 __gu_err; \
2233 })
2234
2235 #define __get_user_error(x,ptr,err) \
2236 ({ \
2237+ pax_open_userland(); \
2238 __get_user_err((x),(ptr),err); \
2239+ pax_close_userland(); \
2240 (void) 0; \
2241 })
2242
2243@@ -312,13 +352,17 @@ do { \
2244 #define __put_user(x,ptr) \
2245 ({ \
2246 long __pu_err = 0; \
2247+ pax_open_userland(); \
2248 __put_user_err((x),(ptr),__pu_err); \
2249+ pax_close_userland(); \
2250 __pu_err; \
2251 })
2252
2253 #define __put_user_error(x,ptr,err) \
2254 ({ \
2255+ pax_open_userland(); \
2256 __put_user_err((x),(ptr),err); \
2257+ pax_close_userland(); \
2258 (void) 0; \
2259 })
2260
2261@@ -418,11 +462,44 @@ do { \
2262
2263
2264 #ifdef CONFIG_MMU
2265-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
2266-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
2267+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
2268+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
2269+
2270+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
2271+{
2272+ unsigned long ret;
2273+
2274+ check_object_size(to, n, false);
2275+ pax_open_userland();
2276+ ret = ___copy_from_user(to, from, n);
2277+ pax_close_userland();
2278+ return ret;
2279+}
2280+
2281+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
2282+{
2283+ unsigned long ret;
2284+
2285+ check_object_size(from, n, true);
2286+ pax_open_userland();
2287+ ret = ___copy_to_user(to, from, n);
2288+ pax_close_userland();
2289+ return ret;
2290+}
2291+
2292 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
2293-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
2294+extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n);
2295 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
2296+
2297+static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
2298+{
2299+ unsigned long ret;
2300+ pax_open_userland();
2301+ ret = ___clear_user(addr, n);
2302+ pax_close_userland();
2303+ return ret;
2304+}
2305+
2306 #else
2307 #define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0)
2308 #define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0)
2309@@ -431,6 +508,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
2310
2311 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2312 {
2313+ if ((long)n < 0)
2314+ return n;
2315+
2316 if (access_ok(VERIFY_READ, from, n))
2317 n = __copy_from_user(to, from, n);
2318 else /* security hole - plug it */
2319@@ -440,6 +520,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
2320
2321 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2322 {
2323+ if ((long)n < 0)
2324+ return n;
2325+
2326 if (access_ok(VERIFY_WRITE, to, n))
2327 n = __copy_to_user(to, from, n);
2328 return n;
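The new copy_from_user()/copy_to_user() paths add two defenses: the inline wrappers bracket the actual copy with pax_open_userland()/pax_close_userland() so DOMAIN_USER is accessible only for the duration of an intended access (UDEREF), and the (long)n < 0 test rejects size values with the sign bit set - the classic signature of an unchecked length-subtraction underflow - before they reach access_ok() and become a multi-gigabyte copy. A userspace sketch of the size check alone (hypothetical names, standard C):

    /* Sketch: catch "negative" sizes produced by underflow before
     * they turn into a huge unsigned copy length. */
    #include <stdio.h>
    #include <string.h>

    static unsigned long checked_copy(void *to, const void *from, unsigned long n)
    {
        if ((long)n < 0)     /* e.g. n = len - hdr with hdr > len */
            return n;        /* report everything as "not copied" */
        memcpy(to, from, n);
        return 0;
    }

    int main(void)
    {
        char src[16] = "payload", dst[16];
        unsigned long len = 4, hdr = 8;
        unsigned long left = checked_copy(dst, src, len - hdr); /* underflows */
        printf("bytes not copied: %lu\n", left);  /* huge value, copy refused */
        return 0;
    }

The real wrappers additionally call check_object_size() (PAX_USERCOPY) to verify the kernel-side object's bounds before opening userland access.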
2329diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
2330index 96ee092..37f1844 100644
2331--- a/arch/arm/include/uapi/asm/ptrace.h
2332+++ b/arch/arm/include/uapi/asm/ptrace.h
2333@@ -73,7 +73,7 @@
2334 * ARMv7 groups of PSR bits
2335 */
2336 #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
2337-#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
2338+#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
2339 #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
2340 #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
2341
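The PSR_ISET_MASK change is a straight bug fix carried along in this patch: in the ARMv7 PSR the J bit is bit 24 and the Thumb T bit is bit 5 (0x20), so the old value 0x01000010 tested bit 4 and never actually covered the instruction-set state.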
2342diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
2343index 60d3b73..e5a0f22 100644
2344--- a/arch/arm/kernel/armksyms.c
2345+++ b/arch/arm/kernel/armksyms.c
2346@@ -53,7 +53,7 @@ EXPORT_SYMBOL(arm_delay_ops);
2347
2348 /* networking */
2349 EXPORT_SYMBOL(csum_partial);
2350-EXPORT_SYMBOL(csum_partial_copy_from_user);
2351+EXPORT_SYMBOL(__csum_partial_copy_from_user);
2352 EXPORT_SYMBOL(csum_partial_copy_nocheck);
2353 EXPORT_SYMBOL(__csum_ipv6_magic);
2354
2355@@ -89,9 +89,9 @@ EXPORT_SYMBOL(__memzero);
2356 #ifdef CONFIG_MMU
2357 EXPORT_SYMBOL(copy_page);
2358
2359-EXPORT_SYMBOL(__copy_from_user);
2360-EXPORT_SYMBOL(__copy_to_user);
2361-EXPORT_SYMBOL(__clear_user);
2362+EXPORT_SYMBOL(___copy_from_user);
2363+EXPORT_SYMBOL(___copy_to_user);
2364+EXPORT_SYMBOL(___clear_user);
2365
2366 EXPORT_SYMBOL(__get_user_1);
2367 EXPORT_SYMBOL(__get_user_2);
2368diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
2369index d43c7e5..257c050 100644
2370--- a/arch/arm/kernel/entry-armv.S
2371+++ b/arch/arm/kernel/entry-armv.S
2372@@ -47,6 +47,87 @@
2373 9997:
2374 .endm
2375
2376+ .macro pax_enter_kernel
2377+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2378+ @ make aligned space for saved DACR
2379+ sub sp, sp, #8
2380+ @ save regs
2381+ stmdb sp!, {r1, r2}
2382+ @ read DACR from cpu_domain into r1
2383+ mov r2, sp
2384+ @ assume 8K pages, since we have to split the immediate in two
2385+ bic r2, r2, #(0x1fc0)
2386+ bic r2, r2, #(0x3f)
2387+ ldr r1, [r2, #TI_CPU_DOMAIN]
2388+ @ store old DACR on stack
2389+ str r1, [sp, #8]
2390+#ifdef CONFIG_PAX_KERNEXEC
2391+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2392+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2393+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2394+#endif
2395+#ifdef CONFIG_PAX_MEMORY_UDEREF
2396+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2397+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2398+#endif
2399+ @ write r1 to current_thread_info()->cpu_domain
2400+ str r1, [r2, #TI_CPU_DOMAIN]
2401+ @ write r1 to DACR
2402+ mcr p15, 0, r1, c3, c0, 0
2403+ @ instruction sync
2404+ instr_sync
2405+ @ restore regs
2406+ ldmia sp!, {r1, r2}
2407+#endif
2408+ .endm
2409+
2410+ .macro pax_open_userland
2411+#ifdef CONFIG_PAX_MEMORY_UDEREF
2412+ @ save regs
2413+ stmdb sp!, {r0, r1}
2414+ @ read DACR from cpu_domain into r1
2415+ mov r0, sp
2416+ @ assume 8K pages, since we have to split the immediate in two
2417+ bic r0, r0, #(0x1fc0)
2418+ bic r0, r0, #(0x3f)
2419+ ldr r1, [r0, #TI_CPU_DOMAIN]
2420+ @ set current DOMAIN_USER to DOMAIN_CLIENT
2421+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2422+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2423+ @ write r1 to current_thread_info()->cpu_domain
2424+ str r1, [r0, #TI_CPU_DOMAIN]
2425+ @ write r1 to DACR
2426+ mcr p15, 0, r1, c3, c0, 0
2427+ @ instruction sync
2428+ instr_sync
2429+ @ restore regs
2430+ ldmia sp!, {r0, r1}
2431+#endif
2432+ .endm
2433+
2434+ .macro pax_close_userland
2435+#ifdef CONFIG_PAX_MEMORY_UDEREF
2436+ @ save regs
2437+ stmdb sp!, {r0, r1}
2438+ @ read DACR from cpu_domain into r1
2439+ mov r0, sp
2440+ @ assume 8K pages, since we have to split the immediate in two
2441+ bic r0, r0, #(0x1fc0)
2442+ bic r0, r0, #(0x3f)
2443+ ldr r1, [r0, #TI_CPU_DOMAIN]
2444+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2445+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2446+ @ write r1 to current_thread_info()->cpu_domain
2447+ str r1, [r0, #TI_CPU_DOMAIN]
2448+ @ write r1 to DACR
2449+ mcr p15, 0, r1, c3, c0, 0
2450+ @ instruction sync
2451+ instr_sync
2452+ @ restore regs
2453+ ldmia sp!, {r0, r1}
2454+#endif
2455+ .endm
2456+
2457 .macro pabt_helper
2458 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
2459 #ifdef MULTI_PABORT
2460@@ -89,11 +170,15 @@
2461 * Invalid mode handlers
2462 */
2463 .macro inv_entry, reason
2464+
2465+ pax_enter_kernel
2466+
2467 sub sp, sp, #S_FRAME_SIZE
2468 ARM( stmib sp, {r1 - lr} )
2469 THUMB( stmia sp, {r0 - r12} )
2470 THUMB( str sp, [sp, #S_SP] )
2471 THUMB( str lr, [sp, #S_LR] )
2472+
2473 mov r1, #\reason
2474 .endm
2475
2476@@ -149,7 +234,11 @@ ENDPROC(__und_invalid)
2477 .macro svc_entry, stack_hole=0
2478 UNWIND(.fnstart )
2479 UNWIND(.save {r0 - pc} )
2480+
2481+ pax_enter_kernel
2482+
2483 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2484+
2485 #ifdef CONFIG_THUMB2_KERNEL
2486 SPFIX( str r0, [sp] ) @ temporarily saved
2487 SPFIX( mov r0, sp )
2488@@ -164,7 +253,12 @@ ENDPROC(__und_invalid)
2489 ldmia r0, {r3 - r5}
2490 add r7, sp, #S_SP - 4 @ here for interlock avoidance
2491 mov r6, #-1 @ "" "" "" ""
2492+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2493+ @ offset sp by 8 as done in pax_enter_kernel
2494+ add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
2495+#else
2496 add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2497+#endif
2498 SPFIX( addeq r2, r2, #4 )
2499 str r3, [sp, #-4]! @ save the "real" r0 copied
2500 @ from the exception stack
2501@@ -316,6 +410,9 @@ ENDPROC(__pabt_svc)
2502 .macro usr_entry
2503 UNWIND(.fnstart )
2504 UNWIND(.cantunwind ) @ don't unwind the user space
2505+
2506+ pax_enter_kernel_user
2507+
2508 sub sp, sp, #S_FRAME_SIZE
2509 ARM( stmib sp, {r1 - r12} )
2510 THUMB( stmia sp, {r0 - r12} )
2511@@ -357,7 +454,8 @@ ENDPROC(__pabt_svc)
2512 .endm
2513
2514 .macro kuser_cmpxchg_check
2515-#if !defined(CONFIG_CPU_32v6K) && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
2516+#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS) && \
2517+ !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
2518 #ifndef CONFIG_MMU
2519 #warning "NPTL on non MMU needs fixing"
2520 #else
2521@@ -414,7 +512,9 @@ __und_usr:
2522 tst r3, #PSR_T_BIT @ Thumb mode?
2523 bne __und_usr_thumb
2524 sub r4, r2, #4 @ ARM instr at LR - 4
2525+ pax_open_userland
2526 1: ldrt r0, [r4]
2527+ pax_close_userland
2528 #ifdef CONFIG_CPU_ENDIAN_BE8
2529 rev r0, r0 @ little endian instruction
2530 #endif
2531@@ -449,10 +549,14 @@ __und_usr_thumb:
2532 */
2533 .arch armv6t2
2534 #endif
2535+ pax_open_userland
2536 2: ldrht r5, [r4]
2537+ pax_close_userland
2538 cmp r5, #0xe800 @ 32bit instruction if xx != 0
2539 blo __und_usr_fault_16 @ 16bit undefined instruction
2540+ pax_open_userland
2541 3: ldrht r0, [r2]
2542+ pax_close_userland
2543 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
2544 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
2545 orr r0, r0, r5, lsl #16
2546@@ -481,7 +585,8 @@ ENDPROC(__und_usr)
2547 */
2548 .pushsection .fixup, "ax"
2549 .align 2
2550-4: mov pc, r9
2551+4: pax_close_userland
2552+ mov pc, r9
2553 .popsection
2554 .pushsection __ex_table,"a"
2555 .long 1b, 4b
2556@@ -690,7 +795,7 @@ ENTRY(__switch_to)
2557 THUMB( stmia ip!, {r4 - sl, fp} ) @ Store most regs on stack
2558 THUMB( str sp, [ip], #4 )
2559 THUMB( str lr, [ip], #4 )
2560-#ifdef CONFIG_CPU_USE_DOMAINS
2561+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2562 ldr r6, [r2, #TI_CPU_DOMAIN]
2563 #endif
2564 set_tls r3, r4, r5
2565@@ -699,7 +804,7 @@ ENTRY(__switch_to)
2566 ldr r8, =__stack_chk_guard
2567 ldr r7, [r7, #TSK_STACK_CANARY]
2568 #endif
2569-#ifdef CONFIG_CPU_USE_DOMAINS
2570+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2571 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
2572 #endif
2573 mov r5, r0
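The recurring "assume 8K pages" comment in these macros describes how current_thread_info() is recovered in assembly: thread_info lives at the base of the 8KB kernel stack, so clearing the low 13 bits of sp yields its address, and since 0x1fff is not a valid ARM immediate the mask is split across two bic instructions (0x1fc0, then 0x3f). The same computation in C, under the 8KB THREAD_SIZE those comments assume:

    /* Sketch: locate thread_info from any in-stack pointer by
     * rounding down to the 8KB stack base. */
    #include <stdint.h>
    #include <stdio.h>

    #define THREAD_SIZE 8192UL

    static uintptr_t thread_info_from_sp(uintptr_t sp)
    {
        return sp & ~(uintptr_t)(THREAD_SIZE - 1);  /* = bic 0x1fc0; bic 0x3f */
    }

    int main(void)
    {
        uintptr_t sp = 0xc0a03f28u;
        printf("thread_info at %#lx\n",
               (unsigned long)thread_info_from_sp(sp));  /* 0xc0a02000 */
        return 0;
    }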
2574diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
2575index bc5bc0a..d0998ca 100644
2576--- a/arch/arm/kernel/entry-common.S
2577+++ b/arch/arm/kernel/entry-common.S
2578@@ -10,18 +10,46 @@
2579
2580 #include <asm/unistd.h>
2581 #include <asm/ftrace.h>
2582+#include <asm/domain.h>
2583 #include <asm/unwind.h>
2584
2585+#include "entry-header.S"
2586+
2587 #ifdef CONFIG_NEED_RET_TO_USER
2588 #include <mach/entry-macro.S>
2589 #else
2590 .macro arch_ret_to_user, tmp1, tmp2
2591+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2592+ @ save regs
2593+ stmdb sp!, {r1, r2}
2594+ @ read DACR from cpu_domain into r1
2595+ mov r2, sp
2596+ @ assume 8K pages, since we have to split the immediate in two
2597+ bic r2, r2, #(0x1fc0)
2598+ bic r2, r2, #(0x3f)
2599+ ldr r1, [r2, #TI_CPU_DOMAIN]
2600+#ifdef CONFIG_PAX_KERNEXEC
2601+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2602+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2603+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2604+#endif
2605+#ifdef CONFIG_PAX_MEMORY_UDEREF
2606+ @ set current DOMAIN_USER to DOMAIN_UDEREF
2607+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2608+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2609+#endif
2610+ @ write r1 to current_thread_info()->cpu_domain
2611+ str r1, [r2, #TI_CPU_DOMAIN]
2612+ @ write r1 to DACR
2613+ mcr p15, 0, r1, c3, c0, 0
2614+ @ instruction sync
2615+ instr_sync
2616+ @ restore regs
2617+ ldmia sp!, {r1, r2}
2618+#endif
2619 .endm
2620 #endif
2621
2622-#include "entry-header.S"
2623-
2624-
2625 .align 5
2626 /*
2627 * This is the fast syscall return path. We do as little as
2628@@ -350,6 +378,7 @@ ENDPROC(ftrace_stub)
2629
2630 .align 5
2631 ENTRY(vector_swi)
2632+
2633 sub sp, sp, #S_FRAME_SIZE
2634 stmia sp, {r0 - r12} @ Calling r0 - r12
2635 ARM( add r8, sp, #S_PC )
2636@@ -399,6 +428,12 @@ ENTRY(vector_swi)
2637 ldr scno, [lr, #-4] @ get SWI instruction
2638 #endif
2639
2640+ /*
2641+ * do this here to avoid a performance hit of wrapping the code above
2642+ * that directly dereferences userland to parse the SWI instruction
2643+ */
2644+ pax_enter_kernel_user
2645+
2646 #ifdef CONFIG_ALIGNMENT_TRAP
2647 ldr ip, __cr_alignment
2648 ldr ip, [ip]
2649diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
2650index 160f337..db67ee4 100644
2651--- a/arch/arm/kernel/entry-header.S
2652+++ b/arch/arm/kernel/entry-header.S
2653@@ -73,6 +73,60 @@
2654 msr cpsr_c, \rtemp @ switch back to the SVC mode
2655 .endm
2656
2657+ .macro pax_enter_kernel_user
2658+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2659+ @ save regs
2660+ stmdb sp!, {r0, r1}
2661+ @ read DACR from cpu_domain into r1
2662+ mov r0, sp
2663+ @ assume 8K pages, since we have to split the immediate in two
2664+ bic r0, r0, #(0x1fc0)
2665+ bic r0, r0, #(0x3f)
2666+ ldr r1, [r0, #TI_CPU_DOMAIN]
2667+#ifdef CONFIG_PAX_MEMORY_UDEREF
2668+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2669+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2670+#endif
2671+#ifdef CONFIG_PAX_KERNEXEC
2672+ @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2673+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2674+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2675+#endif
2676+ @ write r1 to current_thread_info()->cpu_domain
2677+ str r1, [r0, #TI_CPU_DOMAIN]
2678+ @ write r1 to DACR
2679+ mcr p15, 0, r1, c3, c0, 0
2680+ @ instruction sync
2681+ instr_sync
2682+ @ restore regs
2683+ ldmia sp!, {r0, r1}
2684+#endif
2685+ .endm
2686+
2687+ .macro pax_exit_kernel
2688+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2689+ @ save regs
2690+ stmdb sp!, {r0, r1}
2691+ @ read old DACR from stack into r1
2692+ ldr r1, [sp, #(8 + S_SP)]
2693+ sub r1, r1, #8
2694+ ldr r1, [r1]
2695+
2696+ @ write r1 to current_thread_info()->cpu_domain
2697+ mov r0, sp
2698+ @ assume 8K pages, since we have to split the immediate in two
2699+ bic r0, r0, #(0x1fc0)
2700+ bic r0, r0, #(0x3f)
2701+ str r1, [r0, #TI_CPU_DOMAIN]
2702+ @ write r1 to DACR
2703+ mcr p15, 0, r1, c3, c0, 0
2704+ @ instruction sync
2705+ instr_sync
2706+ @ restore regs
2707+ ldmia sp!, {r0, r1}
2708+#endif
2709+ .endm
2710+
2711 #ifndef CONFIG_THUMB2_KERNEL
2712 .macro svc_exit, rpsr, irq = 0
2713 .if \irq != 0
2714@@ -92,6 +146,9 @@
2715 blne trace_hardirqs_off
2716 #endif
2717 .endif
2718+
2719+ pax_exit_kernel
2720+
2721 msr spsr_cxsf, \rpsr
2722 #if defined(CONFIG_CPU_V6)
2723 ldr r0, [sp]
2724@@ -155,6 +212,9 @@
2725 blne trace_hardirqs_off
2726 #endif
2727 .endif
2728+
2729+ pax_exit_kernel
2730+
2731 ldr lr, [sp, #S_SP] @ top of the stack
2732 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
2733 clrex @ clear the exclusive monitor
2734diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
2735index 25442f4..d4948fc 100644
2736--- a/arch/arm/kernel/fiq.c
2737+++ b/arch/arm/kernel/fiq.c
2738@@ -84,17 +84,16 @@ int show_fiq_list(struct seq_file *p, int prec)
2739
2740 void set_fiq_handler(void *start, unsigned int length)
2741 {
2742-#if defined(CONFIG_CPU_USE_DOMAINS)
2743- void *base = (void *)0xffff0000;
2744-#else
2745 void *base = vectors_page;
2746-#endif
2747 unsigned offset = FIQ_OFFSET;
2748
2749+ pax_open_kernel();
2750 memcpy(base + offset, start, length);
2751+ pax_close_kernel();
2752+
2753+ if (!cache_is_vipt_nonaliasing())
2754+ flush_icache_range(base + offset, offset + length);
2755 flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length);
2756- if (!vectors_high())
2757- flush_icache_range(offset, offset + length);
2758 }
2759
2760 int claim_fiq(struct fiq_handler *f)
2761diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
2762index 8bac553..caee108 100644
2763--- a/arch/arm/kernel/head.S
2764+++ b/arch/arm/kernel/head.S
2765@@ -52,7 +52,9 @@
2766 .equ swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE
2767
2768 .macro pgtbl, rd, phys
2769- add \rd, \phys, #TEXT_OFFSET - PG_DIR_SIZE
2770+ mov \rd, #TEXT_OFFSET
2771+ sub \rd, #PG_DIR_SIZE
2772+ add \rd, \rd, \phys
2773 .endm
2774
2775 /*
2776@@ -434,7 +436,7 @@ __enable_mmu:
2777 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2778 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2779 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
2780- domain_val(DOMAIN_IO, DOMAIN_CLIENT))
2781+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
2782 mcr p15, 0, r5, c3, c0, 0 @ load domain access register
2783 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
2784 #endif
2785diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
2786index 1fd749e..47adb08 100644
2787--- a/arch/arm/kernel/hw_breakpoint.c
2788+++ b/arch/arm/kernel/hw_breakpoint.c
2789@@ -1029,7 +1029,7 @@ static int __cpuinit dbg_reset_notify(struct notifier_block *self,
2790 return NOTIFY_OK;
2791 }
2792
2793-static struct notifier_block __cpuinitdata dbg_reset_nb = {
2794+static struct notifier_block dbg_reset_nb = {
2795 .notifier_call = dbg_reset_notify,
2796 };
2797
2798diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
2799index 1e9be5d..03edbc2 100644
2800--- a/arch/arm/kernel/module.c
2801+++ b/arch/arm/kernel/module.c
2802@@ -37,12 +37,37 @@
2803 #endif
2804
2805 #ifdef CONFIG_MMU
2806-void *module_alloc(unsigned long size)
2807+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
2808 {
2809+ if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
2810+ return NULL;
2811 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
2812- GFP_KERNEL, PAGE_KERNEL_EXEC, -1,
2813+ GFP_KERNEL, prot, -1,
2814 __builtin_return_address(0));
2815 }
2816+
2817+void *module_alloc(unsigned long size)
2818+{
2819+
2820+#ifdef CONFIG_PAX_KERNEXEC
2821+ return __module_alloc(size, PAGE_KERNEL);
2822+#else
2823+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2824+#endif
2825+
2826+}
2827+
2828+#ifdef CONFIG_PAX_KERNEXEC
2829+void module_free_exec(struct module *mod, void *module_region)
2830+{
2831+ module_free(mod, module_region);
2832+}
2833+
2834+void *module_alloc_exec(unsigned long size)
2835+{
2836+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2837+}
2838+#endif
2839 #endif
2840
2841 int
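Under KERNEXEC the module loader enforces W^X on module memory: module_alloc() now hands out plain PAGE_KERNEL (writable, non-executable) mappings for module data, while the new module_alloc_exec()/module_free_exec() pair supplies the executable mappings for module text, so no module allocation is ever writable and executable at once. A userspace model of the split, with hypothetical helper names:

    /* Model: data pages never become executable; code pages drop
     * the write permission before gaining execute. */
    #include <stddef.h>
    #include <stdio.h>
    #include <sys/mman.h>

    static void *alloc_rw(size_t n)
    {
        return mmap(NULL, n, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    }

    static void *alloc_exec(size_t n)
    {
        void *p = alloc_rw(n);
        /* ... code would be copied in here, while still RW ... */
        if (p != MAP_FAILED)
            mprotect(p, n, PROT_READ | PROT_EXEC);
        return p;
    }

    int main(void)
    {
        void *data = alloc_rw(4096);
        void *text = alloc_exec(4096);
        printf("data=%p text=%p\n", data, text);
        return 0;
    }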
2842diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
2843index 07314af..c46655c 100644
2844--- a/arch/arm/kernel/patch.c
2845+++ b/arch/arm/kernel/patch.c
2846@@ -18,6 +18,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
2847 bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL);
2848 int size;
2849
2850+ pax_open_kernel();
2851 if (thumb2 && __opcode_is_thumb16(insn)) {
2852 *(u16 *)addr = __opcode_to_mem_thumb16(insn);
2853 size = sizeof(u16);
2854@@ -39,6 +40,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
2855 *(u32 *)addr = insn;
2856 size = sizeof(u32);
2857 }
2858+ pax_close_kernel();
2859
2860 flush_icache_range((uintptr_t)(addr),
2861 (uintptr_t)(addr) + size);
2862diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
2863index e19edc6..e186ee1 100644
2864--- a/arch/arm/kernel/perf_event.c
2865+++ b/arch/arm/kernel/perf_event.c
2866@@ -56,7 +56,7 @@ armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
2867 int mapping;
2868
2869 if (config >= PERF_COUNT_HW_MAX)
2870- return -ENOENT;
2871+ return -EINVAL;
2872
2873 mapping = (*event_map)[config];
2874 return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
2875diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
2876index 1f2740e..b36e225 100644
2877--- a/arch/arm/kernel/perf_event_cpu.c
2878+++ b/arch/arm/kernel/perf_event_cpu.c
2879@@ -171,7 +171,7 @@ static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
2880 return NOTIFY_OK;
2881 }
2882
2883-static struct notifier_block __cpuinitdata cpu_pmu_hotplug_notifier = {
2884+static struct notifier_block cpu_pmu_hotplug_notifier = {
2885 .notifier_call = cpu_pmu_notify,
2886 };
2887
2888diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
2889index 5bc2615..dcd439f 100644
2890--- a/arch/arm/kernel/process.c
2891+++ b/arch/arm/kernel/process.c
2892@@ -223,6 +223,7 @@ void machine_power_off(void)
2893
2894 if (pm_power_off)
2895 pm_power_off();
2896+ BUG();
2897 }
2898
2899 /*
2900@@ -236,7 +237,7 @@ void machine_power_off(void)
2901 * executing pre-reset code, and using RAM that the primary CPU's code wishes
2902 * to use. Implementing such co-ordination would be essentially impossible.
2903 */
2904-void machine_restart(char *cmd)
2905+__noreturn void machine_restart(char *cmd)
2906 {
2907 smp_send_stop();
2908
2909@@ -258,8 +259,8 @@ void __show_regs(struct pt_regs *regs)
2910
2911 show_regs_print_info(KERN_DEFAULT);
2912
2913- print_symbol("PC is at %s\n", instruction_pointer(regs));
2914- print_symbol("LR is at %s\n", regs->ARM_lr);
2915+ printk("PC is at %pA\n", (void *)instruction_pointer(regs));
2916+ printk("LR is at %pA\n", (void *)regs->ARM_lr);
2917 printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
2918 "sp : %08lx ip : %08lx fp : %08lx\n",
2919 regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
2920@@ -426,12 +427,6 @@ unsigned long get_wchan(struct task_struct *p)
2921 return 0;
2922 }
2923
2924-unsigned long arch_randomize_brk(struct mm_struct *mm)
2925-{
2926- unsigned long range_end = mm->brk + 0x02000000;
2927- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
2928-}
2929-
2930 #ifdef CONFIG_MMU
2931 #ifdef CONFIG_KUSER_HELPERS
2932 /*
2933@@ -447,7 +442,7 @@ static struct vm_area_struct gate_vma = {
2934
2935 static int __init gate_vma_init(void)
2936 {
2937- gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
2938+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
2939 return 0;
2940 }
2941 arch_initcall(gate_vma_init);
2942@@ -466,48 +461,23 @@ int in_gate_area_no_mm(unsigned long addr)
2943 {
2944 return in_gate_area(NULL, addr);
2945 }
2946-#define is_gate_vma(vma) ((vma) = &gate_vma)
2947+#define is_gate_vma(vma) ((vma) == &gate_vma)
2948 #else
2949 #define is_gate_vma(vma) 0
2950 #endif
2951
2952 const char *arch_vma_name(struct vm_area_struct *vma)
2953 {
2954- return is_gate_vma(vma) ? "[vectors]" :
2955- (vma->vm_mm && vma->vm_start == vma->vm_mm->context.sigpage) ?
2956- "[sigpage]" : NULL;
2957+ return is_gate_vma(vma) ? "[vectors]" : NULL;
2958 }
2959
2960-static struct page *signal_page;
2961-extern struct page *get_signal_page(void);
2962-
2963 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2964 {
2965 struct mm_struct *mm = current->mm;
2966- unsigned long addr;
2967- int ret;
2968-
2969- if (!signal_page)
2970- signal_page = get_signal_page();
2971- if (!signal_page)
2972- return -ENOMEM;
2973
2974 down_write(&mm->mmap_sem);
2975- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
2976- if (IS_ERR_VALUE(addr)) {
2977- ret = addr;
2978- goto up_fail;
2979- }
2980-
2981- ret = install_special_mapping(mm, addr, PAGE_SIZE,
2982- VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
2983- &signal_page);
2984-
2985- if (ret == 0)
2986- mm->context.sigpage = addr;
2987-
2988- up_fail:
2989+ mm->context.sigpage = (PAGE_OFFSET + (get_random_int() % 0x3FFEFFE0)) & 0xFFFFFFFC;
2990 up_write(&mm->mmap_sem);
2991- return ret;
2992+ return 0;
2993 }
2994 #endif
2995diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
2996index 3653164..d83e55d 100644
2997--- a/arch/arm/kernel/psci.c
2998+++ b/arch/arm/kernel/psci.c
2999@@ -24,7 +24,7 @@
3000 #include <asm/opcodes-virt.h>
3001 #include <asm/psci.h>
3002
3003-struct psci_operations psci_ops;
3004+struct psci_operations psci_ops __read_only;
3005
3006 static int (*invoke_psci_fn)(u32, u32, u32, u32);
3007
3008diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
3009index 03deeff..741ce88 100644
3010--- a/arch/arm/kernel/ptrace.c
3011+++ b/arch/arm/kernel/ptrace.c
3012@@ -937,10 +937,19 @@ static int tracehook_report_syscall(struct pt_regs *regs,
3013 return current_thread_info()->syscall;
3014 }
3015
3016+#ifdef CONFIG_GRKERNSEC_SETXID
3017+extern void gr_delayed_cred_worker(void);
3018+#endif
3019+
3020 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
3021 {
3022 current_thread_info()->syscall = scno;
3023
3024+#ifdef CONFIG_GRKERNSEC_SETXID
3025+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
3026+ gr_delayed_cred_worker();
3027+#endif
3028+
3029 /* Do the secure computing check first; failures should be fast. */
3030 if (secure_computing(scno) == -1)
3031 return -1;
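TIF_GRSEC_SETXID is the hook for grsecurity's consistent multithreaded setxid handling: when one thread of a process changes credentials, its siblings get flagged, and each applies the pending change through gr_delayed_cred_worker() the next time it crosses the syscall boundary. A generic sketch of that deferred test-and-clear pattern (hypothetical names; one global flag standing in for the per-thread one):

    /* Sketch: deferred per-thread work run at a natural sync point,
     * here the top of syscall entry. */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_bool setxid_pending = false;

    static void delayed_cred_worker(void) { puts("applying deferred setuid"); }

    static void syscall_enter(int scno)
    {
        /* analogue of test_and_clear_thread_flag(TIF_GRSEC_SETXID) */
        if (atomic_exchange(&setxid_pending, false))
            delayed_cred_worker();
        printf("syscall %d proceeds\n", scno);
    }

    int main(void)
    {
        atomic_store(&setxid_pending, true); /* "another thread called setuid()" */
        syscall_enter(42);                   /* worker runs exactly once */
        syscall_enter(43);                   /* nothing pending anymore */
        return 0;
    }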
3032diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
3033index b4b1d39..efdc9be 100644
3034--- a/arch/arm/kernel/setup.c
3035+++ b/arch/arm/kernel/setup.c
3036@@ -97,21 +97,23 @@ EXPORT_SYMBOL(system_serial_high);
3037 unsigned int elf_hwcap __read_mostly;
3038 EXPORT_SYMBOL(elf_hwcap);
3039
3040+pteval_t __supported_pte_mask __read_only;
3041+pmdval_t __supported_pmd_mask __read_only;
3042
3043 #ifdef MULTI_CPU
3044-struct processor processor __read_mostly;
3045+struct processor processor;
3046 #endif
3047 #ifdef MULTI_TLB
3048-struct cpu_tlb_fns cpu_tlb __read_mostly;
3049+struct cpu_tlb_fns cpu_tlb __read_only;
3050 #endif
3051 #ifdef MULTI_USER
3052-struct cpu_user_fns cpu_user __read_mostly;
3053+struct cpu_user_fns cpu_user __read_only;
3054 #endif
3055 #ifdef MULTI_CACHE
3056-struct cpu_cache_fns cpu_cache __read_mostly;
3057+struct cpu_cache_fns cpu_cache __read_only;
3058 #endif
3059 #ifdef CONFIG_OUTER_CACHE
3060-struct outer_cache_fns outer_cache __read_mostly;
3061+struct outer_cache_fns outer_cache __read_only;
3062 EXPORT_SYMBOL(outer_cache);
3063 #endif
3064
3065@@ -236,9 +238,13 @@ static int __get_cpu_architecture(void)
3066 asm("mrc p15, 0, %0, c0, c1, 4"
3067 : "=r" (mmfr0));
3068 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
3069- (mmfr0 & 0x000000f0) >= 0x00000030)
3070+ (mmfr0 & 0x000000f0) >= 0x00000030) {
3071 cpu_arch = CPU_ARCH_ARMv7;
3072- else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3073+ if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
3074+ __supported_pte_mask |= L_PTE_PXN;
3075+ __supported_pmd_mask |= PMD_PXNTABLE;
3076+ }
3077+ } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3078 (mmfr0 & 0x000000f0) == 0x00000020)
3079 cpu_arch = CPU_ARCH_ARMv6;
3080 else
3081@@ -479,7 +485,7 @@ static void __init setup_processor(void)
3082 __cpu_architecture = __get_cpu_architecture();
3083
3084 #ifdef MULTI_CPU
3085- processor = *list->proc;
3086+ memcpy((void *)&processor, list->proc, sizeof processor);
3087 #endif
3088 #ifdef MULTI_TLB
3089 cpu_tlb = *list->tlb;
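The reworked __get_cpu_architecture() performs runtime detection of PXN (privileged execute-never): the low nibble of ID_MMFR0 encodes the VMSA support level, and values 4 and 5 - an assumption here, based on the ARMv7 ID_MMFR0.VMSA encoding - denote VMSAv7 implementations with PXN, so only on those CPUs are L_PTE_PXN and PMD_PXNTABLE folded into the __supported_pte_mask/__supported_pmd_mask that pte_modify() and __pmd_populate() later apply. A sketch of the predicate under that assumption:

    /* Sketch: decide PXN availability from ID_MMFR0's VMSA field. */
    #include <stdint.h>
    #include <stdio.h>

    static int vmsa_has_pxn(uint32_t mmfr0)
    {
        uint32_t vmsa = mmfr0 & 0xf;
        return vmsa == 4 || vmsa == 5;  /* VMSAv7 with PXN (5: + LPAE) */
    }

    int main(void)
    {
        printf("%d %d\n", vmsa_has_pxn(0x3), vmsa_has_pxn(0x5)); /* 0 1 */
        return 0;
    }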
3090diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
3091index 5a42c12..a2bb7c6 100644
3092--- a/arch/arm/kernel/signal.c
3093+++ b/arch/arm/kernel/signal.c
3094@@ -45,8 +45,6 @@ static const unsigned long sigreturn_codes[7] = {
3095 MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
3096 };
3097
3098-static unsigned long signal_return_offset;
3099-
3100 #ifdef CONFIG_CRUNCH
3101 static int preserve_crunch_context(struct crunch_sigframe __user *frame)
3102 {
3103@@ -406,8 +404,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
3104 * except when the MPU has protected the vectors
3105 * page from PL0
3106 */
3107- retcode = mm->context.sigpage + signal_return_offset +
3108- (idx << 2) + thumb;
3109+ retcode = mm->context.sigpage + (idx << 2) + thumb;
3110 } else
3111 #endif
3112 {
3113@@ -611,33 +608,3 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
3114 } while (thread_flags & _TIF_WORK_MASK);
3115 return 0;
3116 }
3117-
3118-struct page *get_signal_page(void)
3119-{
3120- unsigned long ptr;
3121- unsigned offset;
3122- struct page *page;
3123- void *addr;
3124-
3125- page = alloc_pages(GFP_KERNEL, 0);
3126-
3127- if (!page)
3128- return NULL;
3129-
3130- addr = page_address(page);
3131-
3132- /* Give the signal return code some randomness */
3133- offset = 0x200 + (get_random_int() & 0x7fc);
3134- signal_return_offset = offset;
3135-
3136- /*
3137- * Copy signal return handlers into the vector page, and
3138- * set sigreturn to be a pointer to these.
3139- */
3140- memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
3141-
3142- ptr = (unsigned long)addr + offset;
3143- flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
3144-
3145- return page;
3146-}
3147diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
3148index 5919eb4..b5d6dfe 100644
3149--- a/arch/arm/kernel/smp.c
3150+++ b/arch/arm/kernel/smp.c
3151@@ -70,7 +70,7 @@ enum ipi_msg_type {
3152
3153 static DECLARE_COMPLETION(cpu_running);
3154
3155-static struct smp_operations smp_ops;
3156+static struct smp_operations smp_ops __read_only;
3157
3158 void __init smp_set_ops(struct smp_operations *ops)
3159 {
3160diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
3161index 6b9567e..b8af2d6 100644
3162--- a/arch/arm/kernel/traps.c
3163+++ b/arch/arm/kernel/traps.c
3164@@ -55,7 +55,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
3165 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
3166 {
3167 #ifdef CONFIG_KALLSYMS
3168- printk("[<%08lx>] (%pS) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
3169+ printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
3170 #else
3171 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
3172 #endif
3173@@ -257,6 +257,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
3174 static int die_owner = -1;
3175 static unsigned int die_nest_count;
3176
3177+extern void gr_handle_kernel_exploit(void);
3178+
3179 static unsigned long oops_begin(void)
3180 {
3181 int cpu;
3182@@ -299,6 +301,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
3183 panic("Fatal exception in interrupt");
3184 if (panic_on_oops)
3185 panic("Fatal exception");
3186+
3187+ gr_handle_kernel_exploit();
3188+
3189 if (signr)
3190 do_exit(signr);
3191 }
3192@@ -592,7 +597,9 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
3193 * The user helper at 0xffff0fe0 must be used instead.
3194 * (see entry-armv.S for details)
3195 */
3196+ pax_open_kernel();
3197 *((unsigned int *)0xffff0ff0) = regs->ARM_r0;
3198+ pax_close_kernel();
3199 }
3200 return 0;
3201
3202@@ -848,5 +855,9 @@ void __init early_trap_init(void *vectors_base)
3203 kuser_init(vectors_base);
3204
3205 flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
3206- modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
3207+
3208+#ifndef CONFIG_PAX_MEMORY_UDEREF
3209+ modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
3210+#endif
3211+
3212 }
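The gr_handle_kernel_exploit() call added to oops_end() is grsecurity's active response to kernel faults: treating an oops triggered from a user process as a likely failed exploit attempt, it kills the offending process and can lock the responsible user out of creating new processes for a period, rather than letting an attacker brute-force the bug.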
3213diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
3214index 33f2ea3..0b91824 100644
3215--- a/arch/arm/kernel/vmlinux.lds.S
3216+++ b/arch/arm/kernel/vmlinux.lds.S
3217@@ -8,7 +8,11 @@
3218 #include <asm/thread_info.h>
3219 #include <asm/memory.h>
3220 #include <asm/page.h>
3221-
3222+
3223+#ifdef CONFIG_PAX_KERNEXEC
3224+#include <asm/pgtable.h>
3225+#endif
3226+
3227 #define PROC_INFO \
3228 . = ALIGN(4); \
3229 VMLINUX_SYMBOL(__proc_info_begin) = .; \
3230@@ -94,6 +98,11 @@ SECTIONS
3231 _text = .;
3232 HEAD_TEXT
3233 }
3234+
3235+#ifdef CONFIG_PAX_KERNEXEC
3236+ . = ALIGN(1<<SECTION_SHIFT);
3237+#endif
3238+
3239 .text : { /* Real text segment */
3240 _stext = .; /* Text and read-only data */
3241 __exception_text_start = .;
3242@@ -116,6 +125,8 @@ SECTIONS
3243 ARM_CPU_KEEP(PROC_INFO)
3244 }
3245
3246+ _etext = .; /* End of text section */
3247+
3248 RO_DATA(PAGE_SIZE)
3249
3250 . = ALIGN(4);
3251@@ -146,7 +157,9 @@ SECTIONS
3252
3253 NOTES
3254
3255- _etext = .; /* End of text and rodata section */
3256+#ifdef CONFIG_PAX_KERNEXEC
3257+ . = ALIGN(1<<SECTION_SHIFT);
3258+#endif
3259
3260 #ifndef CONFIG_XIP_KERNEL
3261 . = ALIGN(PAGE_SIZE);
3262@@ -224,6 +237,11 @@ SECTIONS
3263 . = PAGE_OFFSET + TEXT_OFFSET;
3264 #else
3265 __init_end = .;
3266+
3267+#ifdef CONFIG_PAX_KERNEXEC
3268+ . = ALIGN(1<<SECTION_SHIFT);
3269+#endif
3270+
3271 . = ALIGN(THREAD_SIZE);
3272 __data_loc = .;
3273 #endif
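The ALIGN(1<<SECTION_SHIFT) padding and the relocated _etext implement KERNEXEC's layout requirements: permissions are enforced with 1MB section mappings, so the boundaries between text, rodata and data must each fall on a section boundary, and _etext moves up to mark the end of code alone so that rodata can be mapped non-executable separately from the text it used to share a region with.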
3274diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
3275index 14a0d98..7771a7d 100644
3276--- a/arch/arm/lib/clear_user.S
3277+++ b/arch/arm/lib/clear_user.S
3278@@ -12,14 +12,14 @@
3279
3280 .text
3281
3282-/* Prototype: int __clear_user(void *addr, size_t sz)
3283+/* Prototype: int ___clear_user(void *addr, size_t sz)
3284 * Purpose : clear some user memory
3285 * Params : addr - user memory address to clear
3286 * : sz - number of bytes to clear
3287 * Returns : number of bytes NOT cleared
3288 */
3289 ENTRY(__clear_user_std)
3290-WEAK(__clear_user)
3291+WEAK(___clear_user)
3292 stmfd sp!, {r1, lr}
3293 mov r2, #0
3294 cmp r1, #4
3295@@ -44,7 +44,7 @@ WEAK(__clear_user)
3296 USER( strnebt r2, [r0])
3297 mov r0, #0
3298 ldmfd sp!, {r1, pc}
3299-ENDPROC(__clear_user)
3300+ENDPROC(___clear_user)
3301 ENDPROC(__clear_user_std)
3302
3303 .pushsection .fixup,"ax"
3304diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
3305index 66a477a..bee61d3 100644
3306--- a/arch/arm/lib/copy_from_user.S
3307+++ b/arch/arm/lib/copy_from_user.S
3308@@ -16,7 +16,7 @@
3309 /*
3310 * Prototype:
3311 *
3312- * size_t __copy_from_user(void *to, const void *from, size_t n)
3313+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
3314 *
3315 * Purpose:
3316 *
3317@@ -84,11 +84,11 @@
3318
3319 .text
3320
3321-ENTRY(__copy_from_user)
3322+ENTRY(___copy_from_user)
3323
3324 #include "copy_template.S"
3325
3326-ENDPROC(__copy_from_user)
3327+ENDPROC(___copy_from_user)
3328
3329 .pushsection .fixup,"ax"
3330 .align 0
3331diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
3332index 6ee2f67..d1cce76 100644
3333--- a/arch/arm/lib/copy_page.S
3334+++ b/arch/arm/lib/copy_page.S
3335@@ -10,6 +10,7 @@
3336 * ASM optimised string functions
3337 */
3338 #include <linux/linkage.h>
3339+#include <linux/const.h>
3340 #include <asm/assembler.h>
3341 #include <asm/asm-offsets.h>
3342 #include <asm/cache.h>
3343diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
3344index d066df6..df28194 100644
3345--- a/arch/arm/lib/copy_to_user.S
3346+++ b/arch/arm/lib/copy_to_user.S
3347@@ -16,7 +16,7 @@
3348 /*
3349 * Prototype:
3350 *
3351- * size_t __copy_to_user(void *to, const void *from, size_t n)
3352+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
3353 *
3354 * Purpose:
3355 *
3356@@ -88,11 +88,11 @@
3357 .text
3358
3359 ENTRY(__copy_to_user_std)
3360-WEAK(__copy_to_user)
3361+WEAK(___copy_to_user)
3362
3363 #include "copy_template.S"
3364
3365-ENDPROC(__copy_to_user)
3366+ENDPROC(___copy_to_user)
3367 ENDPROC(__copy_to_user_std)
3368
3369 .pushsection .fixup,"ax"
3370diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
3371index 7d08b43..f7ca7ea 100644
3372--- a/arch/arm/lib/csumpartialcopyuser.S
3373+++ b/arch/arm/lib/csumpartialcopyuser.S
3374@@ -57,8 +57,8 @@
3375 * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
3376 */
3377
3378-#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
3379-#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
3380+#define FN_ENTRY ENTRY(__csum_partial_copy_from_user)
3381+#define FN_EXIT ENDPROC(__csum_partial_copy_from_user)
3382
3383 #include "csumpartialcopygeneric.S"
3384
3385diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
3386index 64dbfa5..84a3fd9 100644
3387--- a/arch/arm/lib/delay.c
3388+++ b/arch/arm/lib/delay.c
3389@@ -28,7 +28,7 @@
3390 /*
3391 * Default to the loop-based delay implementation.
3392 */
3393-struct arm_delay_ops arm_delay_ops = {
3394+struct arm_delay_ops arm_delay_ops __read_only = {
3395 .delay = __loop_delay,
3396 .const_udelay = __loop_const_udelay,
3397 .udelay = __loop_udelay,
3398diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
3399index 025f742..8432b08 100644
3400--- a/arch/arm/lib/uaccess_with_memcpy.c
3401+++ b/arch/arm/lib/uaccess_with_memcpy.c
3402@@ -104,7 +104,7 @@ out:
3403 }
3404
3405 unsigned long
3406-__copy_to_user(void __user *to, const void *from, unsigned long n)
3407+___copy_to_user(void __user *to, const void *from, unsigned long n)
3408 {
3409 /*
3410 * This test is stubbed out of the main function above to keep
3411diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
3412index f389228..592ef66 100644
3413--- a/arch/arm/mach-kirkwood/common.c
3414+++ b/arch/arm/mach-kirkwood/common.c
3415@@ -149,7 +149,16 @@ static void clk_gate_fn_disable(struct clk_hw *hw)
3416 clk_gate_ops.disable(hw);
3417 }
3418
3419-static struct clk_ops clk_gate_fn_ops;
3420+static int clk_gate_fn_is_enabled(struct clk_hw *hw)
3421+{
3422+ return clk_gate_ops.is_enabled(hw);
3423+}
3424+
3425+static struct clk_ops clk_gate_fn_ops = {
3426+ .enable = clk_gate_fn_enable,
3427+ .disable = clk_gate_fn_disable,
3428+ .is_enabled = clk_gate_fn_is_enabled,
3429+};
3430
3431 static struct clk __init *clk_register_gate_fn(struct device *dev,
3432 const char *name,
3433@@ -183,14 +192,6 @@ static struct clk __init *clk_register_gate_fn(struct device *dev,
3434 gate_fn->fn_en = fn_en;
3435 gate_fn->fn_dis = fn_dis;
3436
3437- /* ops is the gate ops, but with our enable/disable functions */
3438- if (clk_gate_fn_ops.enable != clk_gate_fn_enable ||
3439- clk_gate_fn_ops.disable != clk_gate_fn_disable) {
3440- clk_gate_fn_ops = clk_gate_ops;
3441- clk_gate_fn_ops.enable = clk_gate_fn_enable;
3442- clk_gate_fn_ops.disable = clk_gate_fn_disable;
3443- }
3444-
3445 clk = clk_register(dev, &gate_fn->gate.hw);
3446
3447 if (IS_ERR(clk))
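clk_gate_fn_ops - like gpmc_irq_chip and the watchdog platform data in the OMAP hunks below - is converted from "declare empty, populate at first use" to a complete static initializer. Runtime population would fault once these ops structures are constified into read-only memory, and the static form also eliminates the racy already-initialized check; the shape is the same const-ops pattern sketched after the dma.h hunk above.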
3448diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
3449index f6eeb87..cc90868 100644
3450--- a/arch/arm/mach-omap2/board-n8x0.c
3451+++ b/arch/arm/mach-omap2/board-n8x0.c
3452@@ -631,7 +631,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
3453 }
3454 #endif
3455
3456-static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
3457+static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
3458 .late_init = n8x0_menelaus_late_init,
3459 };
3460
3461diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
3462index 6c4da12..d9ca72d 100644
3463--- a/arch/arm/mach-omap2/gpmc.c
3464+++ b/arch/arm/mach-omap2/gpmc.c
3465@@ -147,7 +147,6 @@ struct omap3_gpmc_regs {
3466 };
3467
3468 static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
3469-static struct irq_chip gpmc_irq_chip;
3470 static unsigned gpmc_irq_start;
3471
3472 static struct resource gpmc_mem_root;
3473@@ -711,6 +710,18 @@ static void gpmc_irq_noop(struct irq_data *data) { }
3474
3475 static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
3476
3477+static struct irq_chip gpmc_irq_chip = {
3478+ .name = "gpmc",
3479+ .irq_startup = gpmc_irq_noop_ret,
3480+ .irq_enable = gpmc_irq_enable,
3481+ .irq_disable = gpmc_irq_disable,
3482+ .irq_shutdown = gpmc_irq_noop,
3483+ .irq_ack = gpmc_irq_noop,
3484+ .irq_mask = gpmc_irq_noop,
3485+ .irq_unmask = gpmc_irq_noop,
3486+
3487+};
3488+
3489 static int gpmc_setup_irq(void)
3490 {
3491 int i;
3492@@ -725,15 +736,6 @@ static int gpmc_setup_irq(void)
3493 return gpmc_irq_start;
3494 }
3495
3496- gpmc_irq_chip.name = "gpmc";
3497- gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
3498- gpmc_irq_chip.irq_enable = gpmc_irq_enable;
3499- gpmc_irq_chip.irq_disable = gpmc_irq_disable;
3500- gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
3501- gpmc_irq_chip.irq_ack = gpmc_irq_noop;
3502- gpmc_irq_chip.irq_mask = gpmc_irq_noop;
3503- gpmc_irq_chip.irq_unmask = gpmc_irq_noop;
3504-
3505 gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
3506 gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;
3507
3508diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
3509index f8bb3b9..831e7b8 100644
3510--- a/arch/arm/mach-omap2/omap-wakeupgen.c
3511+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
3512@@ -339,7 +339,7 @@ static int __cpuinit irq_cpu_hotplug_notify(struct notifier_block *self,
3513 return NOTIFY_OK;
3514 }
3515
3516-static struct notifier_block __refdata irq_hotplug_notifier = {
3517+static struct notifier_block irq_hotplug_notifier = {
3518 .notifier_call = irq_cpu_hotplug_notify,
3519 };
3520
3521diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
3522index e6d2307..d057195 100644
3523--- a/arch/arm/mach-omap2/omap_device.c
3524+++ b/arch/arm/mach-omap2/omap_device.c
3525@@ -499,7 +499,7 @@ void omap_device_delete(struct omap_device *od)
3526 struct platform_device __init *omap_device_build(const char *pdev_name,
3527 int pdev_id,
3528 struct omap_hwmod *oh,
3529- void *pdata, int pdata_len)
3530+ const void *pdata, int pdata_len)
3531 {
3532 struct omap_hwmod *ohs[] = { oh };
3533
3534@@ -527,7 +527,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name,
3535 struct platform_device __init *omap_device_build_ss(const char *pdev_name,
3536 int pdev_id,
3537 struct omap_hwmod **ohs,
3538- int oh_cnt, void *pdata,
3539+ int oh_cnt, const void *pdata,
3540 int pdata_len)
3541 {
3542 int ret = -ENOMEM;
3543diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h
3544index 044c31d..2ee0861 100644
3545--- a/arch/arm/mach-omap2/omap_device.h
3546+++ b/arch/arm/mach-omap2/omap_device.h
3547@@ -72,12 +72,12 @@ int omap_device_idle(struct platform_device *pdev);
3548 /* Core code interface */
3549
3550 struct platform_device *omap_device_build(const char *pdev_name, int pdev_id,
3551- struct omap_hwmod *oh, void *pdata,
3552+ struct omap_hwmod *oh, const void *pdata,
3553 int pdata_len);
3554
3555 struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id,
3556 struct omap_hwmod **oh, int oh_cnt,
3557- void *pdata, int pdata_len);
3558+ const void *pdata, int pdata_len);
3559
3560 struct omap_device *omap_device_alloc(struct platform_device *pdev,
3561 struct omap_hwmod **ohs, int oh_cnt);
3562diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
3563index 7341eff..fd75e34 100644
3564--- a/arch/arm/mach-omap2/omap_hwmod.c
3565+++ b/arch/arm/mach-omap2/omap_hwmod.c
3566@@ -194,10 +194,10 @@ struct omap_hwmod_soc_ops {
3567 int (*init_clkdm)(struct omap_hwmod *oh);
3568 void (*update_context_lost)(struct omap_hwmod *oh);
3569 int (*get_context_lost)(struct omap_hwmod *oh);
3570-};
3571+} __no_const;
3572
3573 /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
3574-static struct omap_hwmod_soc_ops soc_ops;
3575+static struct omap_hwmod_soc_ops soc_ops __read_only;
3576
3577 /* omap_hwmod_list contains all registered struct omap_hwmods */
3578 static LIST_HEAD(omap_hwmod_list);
3579diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
3580index d15c7bb..b2d1f0c 100644
3581--- a/arch/arm/mach-omap2/wd_timer.c
3582+++ b/arch/arm/mach-omap2/wd_timer.c
3583@@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
3584 struct omap_hwmod *oh;
3585 char *oh_name = "wd_timer2";
3586 char *dev_name = "omap_wdt";
3587- struct omap_wd_timer_platform_data pdata;
3588+ static struct omap_wd_timer_platform_data pdata = {
3589+ .read_reset_sources = prm_read_reset_sources
3590+ };
3591
3592 if (!cpu_class_is_omap2() || of_have_populated_dt())
3593 return 0;
3594@@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
3595 return -EINVAL;
3596 }
3597
3598- pdata.read_reset_sources = prm_read_reset_sources;
3599-
3600 pdev = omap_device_build(dev_name, id, oh, &pdata,
3601 sizeof(struct omap_wd_timer_platform_data));
3602 WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",
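
The wd_timer hunk converts a stack-local platform-data struct into a function-local static with an initializer; since omap_device_build() copies pdata_len bytes out of it, the data only has to be fully initialized before the call, and a static initialized object also admits const qualification. A small sketch of the difference, with a hypothetical consumer standing in for omap_device_build():

/* Sketch: stack pdata vs. static initialized pdata (hypothetical names). */
#include <string.h>
#include <stdio.h>

struct pdata { int (*read_reset)(void); };

static int fake_read_reset(void) { return 42; }

static void consumer(const void *p, size_t len)
{
    struct pdata copy;
    memcpy(&copy, p, len);          /* consumer copies, as omap_device_build does */
    printf("%d\n", copy.read_reset());
}

int main(void)
{
    /* static + initializer: no runtime store into the object is needed,
     * so the object itself can be const-qualified. */
    static const struct pdata pd = { .read_reset = fake_read_reset };
    consumer(&pd, sizeof(pd));
    return 0;
}
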
3603diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
3604index 0cdba8d..297993e 100644
3605--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
3606+++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
3607@@ -181,7 +181,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
3608 bool entered_lp2 = false;
3609
3610 if (tegra_pending_sgi())
3611- ACCESS_ONCE(abort_flag) = true;
3612+ ACCESS_ONCE_RW(abort_flag) = true;
3613
3614 cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
3615
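
ACCESS_ONCE_RW is this patch set's writable variant of the kernel's ACCESS_ONCE; both reduce to a volatile-qualified access that forces the compiler to emit exactly one load or store. A user-space rendering of the underlying idiom (the _RW spelling itself is PaX-specific):

/* Sketch of the ACCESS_ONCE idiom: a volatile cast that makes the
 * compiler perform exactly one load/store, with no caching of the
 * value across the optimizer. */
#include <stdbool.h>
#include <stdio.h>

#define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))

static bool abort_flag;

int main(void)
{
    ACCESS_ONCE(abort_flag) = true;          /* single volatile store */
    printf("%d\n", ACCESS_ONCE(abort_flag)); /* single volatile load  */
    return 0;
}
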
3616diff --git a/arch/arm/mach-ux500/setup.h b/arch/arm/mach-ux500/setup.h
3617index cad3ca86..1d79e0f 100644
3618--- a/arch/arm/mach-ux500/setup.h
3619+++ b/arch/arm/mach-ux500/setup.h
3620@@ -37,13 +37,6 @@ extern void ux500_timer_init(void);
3621 .type = MT_DEVICE, \
3622 }
3623
3624-#define __MEM_DEV_DESC(x, sz) { \
3625- .virtual = IO_ADDRESS(x), \
3626- .pfn = __phys_to_pfn(x), \
3627- .length = sz, \
3628- .type = MT_MEMORY, \
3629-}
3630-
3631 extern struct smp_operations ux500_smp_ops;
3632 extern void ux500_cpu_die(unsigned int cpu);
3633
3634diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
3635index 2950082..d0f0782 100644
3636--- a/arch/arm/mm/Kconfig
3637+++ b/arch/arm/mm/Kconfig
3638@@ -436,7 +436,7 @@ config CPU_32v5
3639
3640 config CPU_32v6
3641 bool
3642- select CPU_USE_DOMAINS if CPU_V6 && MMU
3643+ select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3644 select TLS_REG_EMUL if !CPU_32v6K && !MMU
3645
3646 config CPU_32v6K
3647@@ -585,6 +585,7 @@ config CPU_CP15_MPU
3648
3649 config CPU_USE_DOMAINS
3650 bool
3651+ depends on !ARM_LPAE && !PAX_KERNEXEC
3652 help
3653 This option enables or disables the use of domain switching
3654 via the set_fs() function.
3655@@ -780,6 +781,7 @@ config NEED_KUSER_HELPERS
3656 config KUSER_HELPERS
3657 bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
3658 default y
3659+ depends on !(CPU_V6 || CPU_V6K || CPU_V7)
3660 help
3661 Warning: disabling this option may break user programs.
3662
3663@@ -790,7 +792,7 @@ config KUSER_HELPERS
3664 run on ARMv4 through to ARMv7 without modification.
3665
3666 However, the fixed address nature of these helpers can be used
3667- by ROP (return orientated programming) authors when creating
3668+ by ROP (Return Oriented Programming) authors when creating
3669 exploits.
3670
3671 If all of the binaries and libraries which run on your platform
3672diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
3673index 6f4585b..7b6f52b 100644
3674--- a/arch/arm/mm/alignment.c
3675+++ b/arch/arm/mm/alignment.c
3676@@ -211,10 +211,12 @@ union offset_union {
3677 #define __get16_unaligned_check(ins,val,addr) \
3678 do { \
3679 unsigned int err = 0, v, a = addr; \
3680+ pax_open_userland(); \
3681 __get8_unaligned_check(ins,v,a,err); \
3682 val = v << ((BE) ? 8 : 0); \
3683 __get8_unaligned_check(ins,v,a,err); \
3684 val |= v << ((BE) ? 0 : 8); \
3685+ pax_close_userland(); \
3686 if (err) \
3687 goto fault; \
3688 } while (0)
3689@@ -228,6 +230,7 @@ union offset_union {
3690 #define __get32_unaligned_check(ins,val,addr) \
3691 do { \
3692 unsigned int err = 0, v, a = addr; \
3693+ pax_open_userland(); \
3694 __get8_unaligned_check(ins,v,a,err); \
3695 val = v << ((BE) ? 24 : 0); \
3696 __get8_unaligned_check(ins,v,a,err); \
3697@@ -236,6 +239,7 @@ union offset_union {
3698 val |= v << ((BE) ? 8 : 16); \
3699 __get8_unaligned_check(ins,v,a,err); \
3700 val |= v << ((BE) ? 0 : 24); \
3701+ pax_close_userland(); \
3702 if (err) \
3703 goto fault; \
3704 } while (0)
3705@@ -249,6 +253,7 @@ union offset_union {
3706 #define __put16_unaligned_check(ins,val,addr) \
3707 do { \
3708 unsigned int err = 0, v = val, a = addr; \
3709+ pax_open_userland(); \
3710 __asm__( FIRST_BYTE_16 \
3711 ARM( "1: "ins" %1, [%2], #1\n" ) \
3712 THUMB( "1: "ins" %1, [%2]\n" ) \
3713@@ -268,6 +273,7 @@ union offset_union {
3714 " .popsection\n" \
3715 : "=r" (err), "=&r" (v), "=&r" (a) \
3716 : "0" (err), "1" (v), "2" (a)); \
3717+ pax_close_userland(); \
3718 if (err) \
3719 goto fault; \
3720 } while (0)
3721@@ -281,6 +287,7 @@ union offset_union {
3722 #define __put32_unaligned_check(ins,val,addr) \
3723 do { \
3724 unsigned int err = 0, v = val, a = addr; \
3725+ pax_open_userland(); \
3726 __asm__( FIRST_BYTE_32 \
3727 ARM( "1: "ins" %1, [%2], #1\n" ) \
3728 THUMB( "1: "ins" %1, [%2]\n" ) \
3729@@ -310,6 +317,7 @@ union offset_union {
3730 " .popsection\n" \
3731 : "=r" (err), "=&r" (v), "=&r" (a) \
3732 : "0" (err), "1" (v), "2" (a)); \
3733+ pax_close_userland(); \
3734 if (err) \
3735 goto fault; \
3736 } while (0)
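
The alignment.c hunks bracket each emulated user access with pax_open_userland()/pax_close_userland(), which on ARM temporarily re-grant access to the userland domain and then revoke it. A toy sketch of the bracketing pattern only, with logging stand-ins for the real domain-register writes (assumed behavior; the actual macros are PaX-specific):

/* Sketch: open/close bracketing around an explicit user access. */
#include <stdio.h>

static int userland_open;

static void pax_open_userland_(void)  { userland_open++; }
static void pax_close_userland_(void) { userland_open--; }

#define GET_USER_BYTE(dst, src)              \
    do {                                     \
        pax_open_userland_();                \
        (dst) = *(src);  /* the access */    \
        pax_close_userland_();               \
    } while (0)

int main(void)
{
    unsigned char buf[2] = { 0x12, 0x34 }, v;
    GET_USER_BYTE(v, &buf[0]);
    printf("0x%02x, balanced=%d\n", v, userland_open == 0);
    return 0;
}
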
3737diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
3738index 5dbf13f..a2d1876 100644
3739--- a/arch/arm/mm/fault.c
3740+++ b/arch/arm/mm/fault.c
3741@@ -25,6 +25,7 @@
3742 #include <asm/system_misc.h>
3743 #include <asm/system_info.h>
3744 #include <asm/tlbflush.h>
3745+#include <asm/sections.h>
3746
3747 #include "fault.h"
3748
3749@@ -138,6 +139,20 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
3750 if (fixup_exception(regs))
3751 return;
3752
3753+#ifdef CONFIG_PAX_KERNEXEC
3754+ if ((fsr & FSR_WRITE) &&
3755+ (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
3756+ (MODULES_VADDR <= addr && addr < MODULES_END)))
3757+ {
3758+ if (current->signal->curr_ip)
3759+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3760+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3761+ else
3762+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
3763+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3764+ }
3765+#endif
3766+
3767 /*
3768 * No handler, we'll have to terminate things with extreme prejudice.
3769 */
3770@@ -174,6 +189,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
3771 }
3772 #endif
3773
3774+#ifdef CONFIG_PAX_PAGEEXEC
3775+ if (fsr & FSR_LNX_PF) {
3776+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
3777+ do_group_exit(SIGKILL);
3778+ }
3779+#endif
3780+
3781 tsk->thread.address = addr;
3782 tsk->thread.error_code = fsr;
3783 tsk->thread.trap_no = 14;
3784@@ -398,6 +420,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3785 }
3786 #endif /* CONFIG_MMU */
3787
3788+#ifdef CONFIG_PAX_PAGEEXEC
3789+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3790+{
3791+ long i;
3792+
3793+ printk(KERN_ERR "PAX: bytes at PC: ");
3794+ for (i = 0; i < 20; i++) {
3795+ unsigned char c;
3796+ if (get_user(c, (__force unsigned char __user *)pc+i))
3797+ printk(KERN_CONT "?? ");
3798+ else
3799+ printk(KERN_CONT "%02x ", c);
3800+ }
3801+ printk("\n");
3802+
3803+ printk(KERN_ERR "PAX: bytes at SP-4: ");
3804+ for (i = -1; i < 20; i++) {
3805+ unsigned long c;
3806+ if (get_user(c, (__force unsigned long __user *)sp+i))
3807+ printk(KERN_CONT "???????? ");
3808+ else
3809+ printk(KERN_CONT "%08lx ", c);
3810+ }
3811+ printk("\n");
3812+}
3813+#endif
3814+
3815 /*
3816 * First Level Translation Fault Handler
3817 *
3818@@ -543,9 +592,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3819 const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
3820 struct siginfo info;
3821
3822+#ifdef CONFIG_PAX_MEMORY_UDEREF
3823+ if (addr < TASK_SIZE && is_domain_fault(fsr)) {
3824+ if (current->signal->curr_ip)
3825+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3826+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3827+ else
3828+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
3829+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3830+ goto die;
3831+ }
3832+#endif
3833+
3834 if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
3835 return;
3836
3837+die:
3838 printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
3839 inf->name, fsr, addr);
3840
3841@@ -569,15 +631,68 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *
3842 ifsr_info[nr].name = name;
3843 }
3844
3845+asmlinkage int sys_sigreturn(struct pt_regs *regs);
3846+asmlinkage int sys_rt_sigreturn(struct pt_regs *regs);
3847+
3848 asmlinkage void __exception
3849 do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
3850 {
3851 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
3852 struct siginfo info;
3853+ unsigned long pc = instruction_pointer(regs);
3854+
3855+ if (user_mode(regs)) {
3856+ unsigned long sigpage = current->mm->context.sigpage;
3857+
3858+ if (sigpage <= pc && pc < sigpage + 7*4) {
3859+ if (pc < sigpage + 3*4)
3860+ sys_sigreturn(regs);
3861+ else
3862+ sys_rt_sigreturn(regs);
3863+ return;
3864+ }
3865+ if (pc == 0xffff0fe0UL) {
3866+ /*
3867+ * PaX: __kuser_get_tls emulation
3868+ */
3869+ regs->ARM_r0 = current_thread_info()->tp_value;
3870+ regs->ARM_pc = regs->ARM_lr;
3871+ return;
3872+ }
3873+ }
3874+
3875+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
3876+ else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) {
3877+ if (current->signal->curr_ip)
3878+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3879+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
3880+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
3881+ else
3882+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
3883+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
3884+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
3885+ goto die;
3886+ }
3887+#endif
3888+
3889+#ifdef CONFIG_PAX_REFCOUNT
3890+ if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
3891+ unsigned int bkpt;
3892+
3893+ if (!probe_kernel_address((unsigned int *)pc, bkpt) && cpu_to_le32(bkpt) == 0xe12f1073) {
3894+ current->thread.error_code = ifsr;
3895+ current->thread.trap_no = 0;
3896+ pax_report_refcount_overflow(regs);
3897+ fixup_exception(regs);
3898+ return;
3899+ }
3900+ }
3901+#endif
3902
3903 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
3904 return;
3905
3906+die:
3907 printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
3908 inf->name, ifsr, addr);
3909
3910diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
3911index cf08bdf..772656c 100644
3912--- a/arch/arm/mm/fault.h
3913+++ b/arch/arm/mm/fault.h
3914@@ -3,6 +3,7 @@
3915
3916 /*
3917 * Fault status register encodings. We steal bit 31 for our own purposes.
3918+ * Set when the FSR value is from an instruction fault.
3919 */
3920 #define FSR_LNX_PF (1 << 31)
3921 #define FSR_WRITE (1 << 11)
3922@@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr)
3923 }
3924 #endif
3925
3926+/* valid for LPAE and !LPAE */
3927+static inline int is_xn_fault(unsigned int fsr)
3928+{
3929+ return ((fsr_fs(fsr) & 0x3c) == 0xc);
3930+}
3931+
3932+static inline int is_domain_fault(unsigned int fsr)
3933+{
3934+ return ((fsr_fs(fsr) & 0xD) == 0x9);
3935+}
3936+
3937 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
3938 unsigned long search_exception_table(unsigned long addr);
3939
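
The two predicates added to fault.h are pure bit tests on the fault-status field: is_xn_fault() masks with 0x3c and compares against 0xc, is_domain_fault() masks with 0xD and compares against 0x9. A standalone enumeration of exactly which 6-bit status codes each accepts, derived only from those masks:

/* Which fault-status codes the two predicates accept; the masks and
 * compare values come directly from the hunk above. */
#include <stdio.h>

static int is_xn_fault(unsigned fs)     { return (fs & 0x3c) == 0x0c; }
static int is_domain_fault(unsigned fs) { return (fs & 0x0d) == 0x09; }

int main(void)
{
    for (unsigned fs = 0; fs < 0x40; fs++)
        if (is_xn_fault(fs) || is_domain_fault(fs))
            printf("fs=0x%02x xn=%d domain=%d\n",
                   fs, is_xn_fault(fs), is_domain_fault(fs));
    return 0;
}
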
3940diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
3941index 0ecc43f..190b956 100644
3942--- a/arch/arm/mm/init.c
3943+++ b/arch/arm/mm/init.c
3944@@ -30,6 +30,8 @@
3945 #include <asm/setup.h>
3946 #include <asm/tlb.h>
3947 #include <asm/fixmap.h>
3948+#include <asm/system_info.h>
3949+#include <asm/cp15.h>
3950
3951 #include <asm/mach/arch.h>
3952 #include <asm/mach/map.h>
3953@@ -726,7 +728,46 @@ void free_initmem(void)
3954 {
3955 #ifdef CONFIG_HAVE_TCM
3956 extern char __tcm_start, __tcm_end;
3957+#endif
3958
3959+#ifdef CONFIG_PAX_KERNEXEC
3960+ unsigned long addr;
3961+ pgd_t *pgd;
3962+ pud_t *pud;
3963+ pmd_t *pmd;
3964+ int cpu_arch = cpu_architecture();
3965+ unsigned int cr = get_cr();
3966+
3967+ if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
3968+	/* make page tables, etc. before .text NX */
3969+ for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) {
3970+ pgd = pgd_offset_k(addr);
3971+ pud = pud_offset(pgd, addr);
3972+ pmd = pmd_offset(pud, addr);
3973+ __section_update(pmd, addr, PMD_SECT_XN);
3974+ }
3975+ /* make init NX */
3976+ for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
3977+ pgd = pgd_offset_k(addr);
3978+ pud = pud_offset(pgd, addr);
3979+ pmd = pmd_offset(pud, addr);
3980+ __section_update(pmd, addr, PMD_SECT_XN);
3981+ }
3982+ /* make kernel code/rodata RX */
3983+ for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
3984+ pgd = pgd_offset_k(addr);
3985+ pud = pud_offset(pgd, addr);
3986+ pmd = pmd_offset(pud, addr);
3987+#ifdef CONFIG_ARM_LPAE
3988+ __section_update(pmd, addr, PMD_SECT_RDONLY);
3989+#else
3990+ __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
3991+#endif
3992+ }
3993+ }
3994+#endif
3995+
3996+#ifdef CONFIG_HAVE_TCM
3997 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
3998 free_reserved_area(&__tcm_start, &__tcm_end, 0, "TCM link");
3999 #endif
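
The free_initmem() addition walks three virtual ranges in SECTION_SIZE steps, updating the section descriptor for each block (XN for data and init sections, read-only for text/rodata). A compact sketch of the walk structure, with a logging stand-in for __section_update() and illustrative addresses:

/* Sketch of the section walk in the KERNEXEC hunk: visit each
 * SECTION_SIZE block in [start, end) and apply a permission change.
 * Names and addresses are illustrative, not the kernel's API. */
#include <stdio.h>

#define SECTION_SIZE (1UL << 20)   /* 1 MiB sections, as on ARM */

static void section_update(unsigned long addr, const char *flag)
{
    printf("section @ 0x%08lx -> %s\n", addr, flag);
}

int main(void)
{
    unsigned long text_start = 0xc0100000UL, init_begin = 0xc0400000UL;

    /* make kernel text/rodata read-only + executable, one section
     * at a time, mirroring the loop structure in free_initmem() */
    for (unsigned long a = text_start; a < init_begin; a += SECTION_SIZE)
        section_update(a, "RX");
    return 0;
}
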
4000diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
4001index 04d9006..c547d85 100644
4002--- a/arch/arm/mm/ioremap.c
4003+++ b/arch/arm/mm/ioremap.c
4004@@ -392,9 +392,9 @@ __arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
4005 unsigned int mtype;
4006
4007 if (cached)
4008- mtype = MT_MEMORY;
4009+ mtype = MT_MEMORY_RX;
4010 else
4011- mtype = MT_MEMORY_NONCACHED;
4012+ mtype = MT_MEMORY_NONCACHED_RX;
4013
4014 return __arm_ioremap_caller(phys_addr, size, mtype,
4015 __builtin_return_address(0));
4016diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
4017index 10062ce..8695745 100644
4018--- a/arch/arm/mm/mmap.c
4019+++ b/arch/arm/mm/mmap.c
4020@@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4021 struct vm_area_struct *vma;
4022 int do_align = 0;
4023 int aliasing = cache_is_vipt_aliasing();
4024+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4025 struct vm_unmapped_area_info info;
4026
4027 /*
4028@@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4029 if (len > TASK_SIZE)
4030 return -ENOMEM;
4031
4032+#ifdef CONFIG_PAX_RANDMMAP
4033+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4034+#endif
4035+
4036 if (addr) {
4037 if (do_align)
4038 addr = COLOUR_ALIGN(addr, pgoff);
4039@@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4040 addr = PAGE_ALIGN(addr);
4041
4042 vma = find_vma(mm, addr);
4043- if (TASK_SIZE - len >= addr &&
4044- (!vma || addr + len <= vma->vm_start))
4045+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4046 return addr;
4047 }
4048
4049@@ -99,6 +103,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4050 info.high_limit = TASK_SIZE;
4051 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4052 info.align_offset = pgoff << PAGE_SHIFT;
4053+ info.threadstack_offset = offset;
4054 return vm_unmapped_area(&info);
4055 }
4056
4057@@ -112,6 +117,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4058 unsigned long addr = addr0;
4059 int do_align = 0;
4060 int aliasing = cache_is_vipt_aliasing();
4061+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4062 struct vm_unmapped_area_info info;
4063
4064 /*
4065@@ -132,6 +138,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4066 return addr;
4067 }
4068
4069+#ifdef CONFIG_PAX_RANDMMAP
4070+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4071+#endif
4072+
4073 /* requesting a specific address */
4074 if (addr) {
4075 if (do_align)
4076@@ -139,8 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4077 else
4078 addr = PAGE_ALIGN(addr);
4079 vma = find_vma(mm, addr);
4080- if (TASK_SIZE - len >= addr &&
4081- (!vma || addr + len <= vma->vm_start))
4082+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4083 return addr;
4084 }
4085
4086@@ -150,6 +159,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4087 info.high_limit = mm->mmap_base;
4088 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4089 info.align_offset = pgoff << PAGE_SHIFT;
4090+ info.threadstack_offset = offset;
4091 addr = vm_unmapped_area(&info);
4092
4093 /*
4094@@ -173,6 +183,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4095 {
4096 unsigned long random_factor = 0UL;
4097
4098+#ifdef CONFIG_PAX_RANDMMAP
4099+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4100+#endif
4101+
4102 /* 8 bits of randomness in 20 address space bits */
4103 if ((current->flags & PF_RANDOMIZE) &&
4104 !(current->personality & ADDR_NO_RANDOMIZE))
4105@@ -180,10 +194,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4106
4107 if (mmap_is_legacy()) {
4108 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4109+
4110+#ifdef CONFIG_PAX_RANDMMAP
4111+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4112+ mm->mmap_base += mm->delta_mmap;
4113+#endif
4114+
4115 mm->get_unmapped_area = arch_get_unmapped_area;
4116 mm->unmap_area = arch_unmap_area;
4117 } else {
4118 mm->mmap_base = mmap_base(random_factor);
4119+
4120+#ifdef CONFIG_PAX_RANDMMAP
4121+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4122+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4123+#endif
4124+
4125 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4126 mm->unmap_area = arch_unmap_area_topdown;
4127 }
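
In both mmap.c hunks the open-coded `!vma || addr + len <= vma->vm_start` test becomes check_heap_stack_gap(vma, addr, len, offset), which additionally demands slack between the new mapping and the next VMA; check_heap_stack_gap() and gr_rand_threadstack_offset() are grsecurity-specific. A rough user-space sketch, assuming the check simply requires `gap` extra bytes below the next mapping:

/* Rough sketch of a "heap/stack gap" check: a candidate range
 * [addr, addr+len) is acceptable only if it also leaves `gap`
 * bytes of slack before the next existing mapping.  The real
 * check_heap_stack_gap() is grsecurity-specific and more subtle. */
#include <stdbool.h>
#include <stdio.h>

struct vma { unsigned long start; };

static bool gap_ok(const struct vma *next, unsigned long addr,
                   unsigned long len, unsigned long gap)
{
    if (!next)                      /* nothing above us: always fine */
        return true;
    return addr + len + gap <= next->start;
}

int main(void)
{
    struct vma next = { .start = 0x40000000UL };
    printf("%d\n", gap_ok(&next, 0x3fff0000UL, 0x8000, 0x1000)); /* 1 */
    printf("%d\n", gap_ok(&next, 0x3fff0000UL, 0x8000, 0x9000)); /* 0 */
    return 0;
}
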
4128diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
4129index daf336f..4e6392c 100644
4130--- a/arch/arm/mm/mmu.c
4131+++ b/arch/arm/mm/mmu.c
4132@@ -36,6 +36,22 @@
4133 #include "mm.h"
4134 #include "tcm.h"
4135
4136+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4137+void modify_domain(unsigned int dom, unsigned int type)
4138+{
4139+ struct thread_info *thread = current_thread_info();
4140+ unsigned int domain = thread->cpu_domain;
4141+ /*
4142+ * DOMAIN_MANAGER might be defined to some other value,
4143+ * use the arch-defined constant
4144+ */
4145+ domain &= ~domain_val(dom, 3);
4146+ thread->cpu_domain = domain | domain_val(dom, type);
4147+ set_domain(thread->cpu_domain);
4148+}
4149+EXPORT_SYMBOL(modify_domain);
4150+#endif
4151+
4152 /*
4153 * empty_zero_page is a special page that is used for
4154 * zero-initialized data and COW.
4155@@ -228,10 +244,18 @@ __setup("noalign", noalign_setup);
4156
4157 #endif /* ifdef CONFIG_CPU_CP15 / else */
4158
4159-#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
4160+#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY
4161 #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
4162
4163-static struct mem_type mem_types[] = {
4164+#ifdef CONFIG_PAX_KERNEXEC
4165+#define L_PTE_KERNEXEC L_PTE_RDONLY
4166+#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY
4167+#else
4168+#define L_PTE_KERNEXEC L_PTE_DIRTY
4169+#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE
4170+#endif
4171+
4172+static struct mem_type mem_types[] __read_only = {
4173 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
4174 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
4175 L_PTE_SHARED,
4176@@ -260,16 +284,16 @@ static struct mem_type mem_types[] = {
4177 [MT_UNCACHED] = {
4178 .prot_pte = PROT_PTE_DEVICE,
4179 .prot_l1 = PMD_TYPE_TABLE,
4180- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4181+ .prot_sect = PROT_SECT_DEVICE,
4182 .domain = DOMAIN_IO,
4183 },
4184 [MT_CACHECLEAN] = {
4185- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4186+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4187 .domain = DOMAIN_KERNEL,
4188 },
4189 #ifndef CONFIG_ARM_LPAE
4190 [MT_MINICLEAN] = {
4191- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
4192+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_RDONLY,
4193 .domain = DOMAIN_KERNEL,
4194 },
4195 #endif
4196@@ -277,36 +301,54 @@ static struct mem_type mem_types[] = {
4197 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4198 L_PTE_RDONLY,
4199 .prot_l1 = PMD_TYPE_TABLE,
4200- .domain = DOMAIN_USER,
4201+ .domain = DOMAIN_VECTORS,
4202 },
4203 [MT_HIGH_VECTORS] = {
4204 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4205 L_PTE_USER | L_PTE_RDONLY,
4206 .prot_l1 = PMD_TYPE_TABLE,
4207- .domain = DOMAIN_USER,
4208+ .domain = DOMAIN_VECTORS,
4209 },
4210- [MT_MEMORY] = {
4211+ [MT_MEMORY_RWX] = {
4212 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4213 .prot_l1 = PMD_TYPE_TABLE,
4214 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4215 .domain = DOMAIN_KERNEL,
4216 },
4217+ [MT_MEMORY_RW] = {
4218+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4219+ .prot_l1 = PMD_TYPE_TABLE,
4220+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4221+ .domain = DOMAIN_KERNEL,
4222+ },
4223+ [MT_MEMORY_RX] = {
4224+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4225+ .prot_l1 = PMD_TYPE_TABLE,
4226+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4227+ .domain = DOMAIN_KERNEL,
4228+ },
4229 [MT_ROM] = {
4230- .prot_sect = PMD_TYPE_SECT,
4231+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4232 .domain = DOMAIN_KERNEL,
4233 },
4234- [MT_MEMORY_NONCACHED] = {
4235+ [MT_MEMORY_NONCACHED_RW] = {
4236 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4237 L_PTE_MT_BUFFERABLE,
4238 .prot_l1 = PMD_TYPE_TABLE,
4239 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4240 .domain = DOMAIN_KERNEL,
4241 },
4242+ [MT_MEMORY_NONCACHED_RX] = {
4243+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
4244+ L_PTE_MT_BUFFERABLE,
4245+ .prot_l1 = PMD_TYPE_TABLE,
4246+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4247+ .domain = DOMAIN_KERNEL,
4248+ },
4249 [MT_MEMORY_DTCM] = {
4250- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4251- L_PTE_XN,
4252+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4253 .prot_l1 = PMD_TYPE_TABLE,
4254- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4255+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4256 .domain = DOMAIN_KERNEL,
4257 },
4258 [MT_MEMORY_ITCM] = {
4259@@ -316,10 +358,10 @@ static struct mem_type mem_types[] = {
4260 },
4261 [MT_MEMORY_SO] = {
4262 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4263- L_PTE_MT_UNCACHED | L_PTE_XN,
4264+ L_PTE_MT_UNCACHED,
4265 .prot_l1 = PMD_TYPE_TABLE,
4266 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
4267- PMD_SECT_UNCACHED | PMD_SECT_XN,
4268+ PMD_SECT_UNCACHED,
4269 .domain = DOMAIN_KERNEL,
4270 },
4271 [MT_MEMORY_DMA_READY] = {
4272@@ -405,9 +447,35 @@ static void __init build_mem_type_table(void)
4273 * to prevent speculative instruction fetches.
4274 */
4275 mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
4276+ mem_types[MT_DEVICE].prot_pte |= L_PTE_XN;
4277 mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
4278+ mem_types[MT_DEVICE_NONSHARED].prot_pte |= L_PTE_XN;
4279 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
4280+ mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_XN;
4281 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
4282+ mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_XN;
4283+
4284+ /* Mark other regions on ARMv6+ as execute-never */
4285+
4286+#ifdef CONFIG_PAX_KERNEXEC
4287+ mem_types[MT_UNCACHED].prot_sect |= PMD_SECT_XN;
4288+ mem_types[MT_UNCACHED].prot_pte |= L_PTE_XN;
4289+ mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_XN;
4290+ mem_types[MT_CACHECLEAN].prot_pte |= L_PTE_XN;
4291+#ifndef CONFIG_ARM_LPAE
4292+ mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_XN;
4293+ mem_types[MT_MINICLEAN].prot_pte |= L_PTE_XN;
4294+#endif
4295+ mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_XN;
4296+ mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_XN;
4297+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_XN;
4298+	mem_types[MT_MEMORY_NONCACHED_RW].prot_pte |= L_PTE_XN;
4299+ mem_types[MT_MEMORY_DTCM].prot_sect |= PMD_SECT_XN;
4300+ mem_types[MT_MEMORY_DTCM].prot_pte |= L_PTE_XN;
4301+#endif
4302+
4303+ mem_types[MT_MEMORY_SO].prot_sect |= PMD_SECT_XN;
4304+ mem_types[MT_MEMORY_SO].prot_pte |= L_PTE_XN;
4305 }
4306 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4307 /*
4308@@ -468,6 +536,9 @@ static void __init build_mem_type_table(void)
4309 * from SVC mode and no access from userspace.
4310 */
4311 mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4312+#ifdef CONFIG_PAX_KERNEXEC
4313+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4314+#endif
4315 mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4316 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4317 #endif
4318@@ -485,11 +556,17 @@ static void __init build_mem_type_table(void)
4319 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
4320 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
4321 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
4322- mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
4323- mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
4324+ mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4325+ mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4326+ mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
4327+ mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
4328+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
4329+ mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
4330 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
4331- mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
4332- mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
4333+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_S;
4334+ mem_types[MT_MEMORY_NONCACHED_RW].prot_pte |= L_PTE_SHARED;
4335+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= PMD_SECT_S;
4336+ mem_types[MT_MEMORY_NONCACHED_RX].prot_pte |= L_PTE_SHARED;
4337 }
4338 }
4339
4340@@ -500,15 +577,20 @@ static void __init build_mem_type_table(void)
4341 if (cpu_arch >= CPU_ARCH_ARMv6) {
4342 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4343 /* Non-cacheable Normal is XCB = 001 */
4344- mem_types[MT_MEMORY_NONCACHED].prot_sect |=
4345+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |=
4346+ PMD_SECT_BUFFERED;
4347+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |=
4348 PMD_SECT_BUFFERED;
4349 } else {
4350 /* For both ARMv6 and non-TEX-remapping ARMv7 */
4351- mem_types[MT_MEMORY_NONCACHED].prot_sect |=
4352+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |=
4353+ PMD_SECT_TEX(1);
4354+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |=
4355 PMD_SECT_TEX(1);
4356 }
4357 } else {
4358- mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4359+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_BUFFERABLE;
4360+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= PMD_SECT_BUFFERABLE;
4361 }
4362
4363 #ifdef CONFIG_ARM_LPAE
4364@@ -524,6 +606,8 @@ static void __init build_mem_type_table(void)
4365 vecs_pgprot |= PTE_EXT_AF;
4366 #endif
4367
4368+ user_pgprot |= __supported_pte_mask;
4369+
4370 for (i = 0; i < 16; i++) {
4371 pteval_t v = pgprot_val(protection_map[i]);
4372 protection_map[i] = __pgprot(v | user_pgprot);
4373@@ -541,10 +625,15 @@ static void __init build_mem_type_table(void)
4374
4375 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
4376 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
4377- mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
4378- mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
4379+ mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4380+ mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4381+ mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
4382+ mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
4383+ mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
4384+ mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
4385 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
4386- mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
4387+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= ecc_mask;
4388+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= ecc_mask;
4389 mem_types[MT_ROM].prot_sect |= cp->pmd;
4390
4391 switch (cp->pmd) {
4392@@ -1166,18 +1255,15 @@ void __init arm_mm_memblock_reserve(void)
4393 * called function. This means you can't use any function or debugging
4394 * method which may touch any device, otherwise the kernel _will_ crash.
4395 */
4396+
4397+static char vectors[PAGE_SIZE * 2] __read_only __aligned(PAGE_SIZE);
4398+
4399 static void __init devicemaps_init(struct machine_desc *mdesc)
4400 {
4401 struct map_desc map;
4402 unsigned long addr;
4403- void *vectors;
4404
4405- /*
4406- * Allocate the vector page early.
4407- */
4408- vectors = early_alloc(PAGE_SIZE * 2);
4409-
4410- early_trap_init(vectors);
4411+ early_trap_init(&vectors);
4412
4413 for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
4414 pmd_clear(pmd_off_k(addr));
4415@@ -1217,7 +1303,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
4416 * location (0xffff0000). If we aren't using high-vectors, also
4417 * create a mapping at the low-vectors virtual address.
4418 */
4419- map.pfn = __phys_to_pfn(virt_to_phys(vectors));
4420+ map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
4421 map.virtual = 0xffff0000;
4422 map.length = PAGE_SIZE;
4423 #ifdef CONFIG_KUSER_HELPERS
4424@@ -1287,8 +1373,39 @@ static void __init map_lowmem(void)
4425 map.pfn = __phys_to_pfn(start);
4426 map.virtual = __phys_to_virt(start);
4427 map.length = end - start;
4428- map.type = MT_MEMORY;
4429
4430+#ifdef CONFIG_PAX_KERNEXEC
4431+ if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
4432+ struct map_desc kernel;
4433+ struct map_desc initmap;
4434+
4435+ /* when freeing initmem we will make this RW */
4436+ initmap.pfn = __phys_to_pfn(__pa(__init_begin));
4437+ initmap.virtual = (unsigned long)__init_begin;
4438+ initmap.length = _sdata - __init_begin;
4439+ initmap.type = MT_MEMORY_RWX;
4440+ create_mapping(&initmap);
4441+
4442+ /* when freeing initmem we will make this RX */
4443+ kernel.pfn = __phys_to_pfn(__pa(_stext));
4444+ kernel.virtual = (unsigned long)_stext;
4445+ kernel.length = __init_begin - _stext;
4446+ kernel.type = MT_MEMORY_RWX;
4447+ create_mapping(&kernel);
4448+
4449+ if (map.virtual < (unsigned long)_stext) {
4450+ map.length = (unsigned long)_stext - map.virtual;
4451+ map.type = MT_MEMORY_RWX;
4452+ create_mapping(&map);
4453+ }
4454+
4455+ map.pfn = __phys_to_pfn(__pa(_sdata));
4456+ map.virtual = (unsigned long)_sdata;
4457+ map.length = end - __pa(_sdata);
4458+ }
4459+#endif
4460+
4461+ map.type = MT_MEMORY_RW;
4462 create_mapping(&map);
4463 }
4464 }
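
The mem_types rework splits the old catch-all MT_MEMORY into RWX, RW and RX variants so ordinary lowmem is mapped non-executable and kernel text non-writable, i.e. W^X enforced at section granularity. The split expressed as plain flag sets (names hypothetical, not the kernel's):

/* Sketch: the W^X split expressed as flag sets (hypothetical names). */
#include <stdio.h>

enum { P_READ = 1, P_WRITE = 2, P_EXEC = 4 };

struct mem_type { const char *name; unsigned prot; };

static const struct mem_type types[] = {
    { "MEMORY_RWX", P_READ | P_WRITE | P_EXEC }, /* legacy catch-all  */
    { "MEMORY_RW",  P_READ | P_WRITE },          /* data: never exec  */
    { "MEMORY_RX",  P_READ | P_EXEC },           /* text: never write */
};

int main(void)
{
    for (unsigned i = 0; i < sizeof(types) / sizeof(types[0]); i++)
        printf("%-11s w=%d x=%d\n", types[i].name,
               !!(types[i].prot & P_WRITE), !!(types[i].prot & P_EXEC));
    return 0;
}
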
4465diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
4466index a5bc92d..0bb4730 100644
4467--- a/arch/arm/plat-omap/sram.c
4468+++ b/arch/arm/plat-omap/sram.c
4469@@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
4470 * Looks like we need to preserve some bootloader code at the
4471 * beginning of SRAM for jumping to flash for reboot to work...
4472 */
4473+ pax_open_kernel();
4474 memset_io(omap_sram_base + omap_sram_skip, 0,
4475 omap_sram_size - omap_sram_skip);
4476+ pax_close_kernel();
4477 }
4478diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
4479index ce6d763..cfea917 100644
4480--- a/arch/arm/plat-samsung/include/plat/dma-ops.h
4481+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
4482@@ -47,7 +47,7 @@ struct samsung_dma_ops {
4483 int (*started)(unsigned ch);
4484 int (*flush)(unsigned ch);
4485 int (*stop)(unsigned ch);
4486-};
4487+} __no_const;
4488
4489 extern void *samsung_dmadev_get_ops(void);
4490 extern void *s3c_dma_get_ops(void);
4491diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
4492index f4726dc..39ed646 100644
4493--- a/arch/arm64/kernel/debug-monitors.c
4494+++ b/arch/arm64/kernel/debug-monitors.c
4495@@ -149,7 +149,7 @@ static int __cpuinit os_lock_notify(struct notifier_block *self,
4496 return NOTIFY_OK;
4497 }
4498
4499-static struct notifier_block __cpuinitdata os_lock_nb = {
4500+static struct notifier_block os_lock_nb = {
4501 .notifier_call = os_lock_notify,
4502 };
4503
4504diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
4505index 5ab825c..96aaec8 100644
4506--- a/arch/arm64/kernel/hw_breakpoint.c
4507+++ b/arch/arm64/kernel/hw_breakpoint.c
4508@@ -831,7 +831,7 @@ static int __cpuinit hw_breakpoint_reset_notify(struct notifier_block *self,
4509 return NOTIFY_OK;
4510 }
4511
4512-static struct notifier_block __cpuinitdata hw_breakpoint_reset_nb = {
4513+static struct notifier_block hw_breakpoint_reset_nb = {
4514 .notifier_call = hw_breakpoint_reset_notify,
4515 };
4516
4517diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
4518index c3a58a1..78fbf54 100644
4519--- a/arch/avr32/include/asm/cache.h
4520+++ b/arch/avr32/include/asm/cache.h
4521@@ -1,8 +1,10 @@
4522 #ifndef __ASM_AVR32_CACHE_H
4523 #define __ASM_AVR32_CACHE_H
4524
4525+#include <linux/const.h>
4526+
4527 #define L1_CACHE_SHIFT 5
4528-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4529+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4530
4531 /*
4532 * Memory returned by kmalloc() may be used for DMA, so we must make
4533diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
4534index d232888..87c8df1 100644
4535--- a/arch/avr32/include/asm/elf.h
4536+++ b/arch/avr32/include/asm/elf.h
4537@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
4538 the loader. We need to make sure that it is out of the way of the program
4539 that it will "exec", and that there is sufficient room for the brk. */
4540
4541-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
4542+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
4543
4544+#ifdef CONFIG_PAX_ASLR
4545+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
4546+
4547+#define PAX_DELTA_MMAP_LEN 15
4548+#define PAX_DELTA_STACK_LEN 15
4549+#endif
4550
4551 /* This yields a mask that user programs can use to figure out what
4552 instruction set this CPU supports. This could be done in user space,
4553diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
4554index 479330b..53717a8 100644
4555--- a/arch/avr32/include/asm/kmap_types.h
4556+++ b/arch/avr32/include/asm/kmap_types.h
4557@@ -2,9 +2,9 @@
4558 #define __ASM_AVR32_KMAP_TYPES_H
4559
4560 #ifdef CONFIG_DEBUG_HIGHMEM
4561-# define KM_TYPE_NR 29
4562+# define KM_TYPE_NR 30
4563 #else
4564-# define KM_TYPE_NR 14
4565+# define KM_TYPE_NR 15
4566 #endif
4567
4568 #endif /* __ASM_AVR32_KMAP_TYPES_H */
4569diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
4570index b2f2d2d..d1c85cb 100644
4571--- a/arch/avr32/mm/fault.c
4572+++ b/arch/avr32/mm/fault.c
4573@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
4574
4575 int exception_trace = 1;
4576
4577+#ifdef CONFIG_PAX_PAGEEXEC
4578+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4579+{
4580+ unsigned long i;
4581+
4582+ printk(KERN_ERR "PAX: bytes at PC: ");
4583+ for (i = 0; i < 20; i++) {
4584+ unsigned char c;
4585+ if (get_user(c, (unsigned char *)pc+i))
4586+			printk(KERN_CONT "?? ");
4587+ else
4588+ printk(KERN_CONT "%02x ", c);
4589+ }
4590+ printk("\n");
4591+}
4592+#endif
4593+
4594 /*
4595 * This routine handles page faults. It determines the address and the
4596 * problem, and then passes it off to one of the appropriate routines.
4597@@ -174,6 +191,16 @@ bad_area:
4598 up_read(&mm->mmap_sem);
4599
4600 if (user_mode(regs)) {
4601+
4602+#ifdef CONFIG_PAX_PAGEEXEC
4603+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4604+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
4605+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
4606+ do_group_exit(SIGKILL);
4607+ }
4608+ }
4609+#endif
4610+
4611 if (exception_trace && printk_ratelimit())
4612 printk("%s%s[%d]: segfault at %08lx pc %08lx "
4613 "sp %08lx ecr %lu\n",
4614diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
4615index 568885a..f8008df 100644
4616--- a/arch/blackfin/include/asm/cache.h
4617+++ b/arch/blackfin/include/asm/cache.h
4618@@ -7,6 +7,7 @@
4619 #ifndef __ARCH_BLACKFIN_CACHE_H
4620 #define __ARCH_BLACKFIN_CACHE_H
4621
4622+#include <linux/const.h>
4623 #include <linux/linkage.h> /* for asmlinkage */
4624
4625 /*
4626@@ -14,7 +15,7 @@
4627 * Blackfin loads 32 bytes for cache
4628 */
4629 #define L1_CACHE_SHIFT 5
4630-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4631+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4632 #define SMP_CACHE_BYTES L1_CACHE_BYTES
4633
4634 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
4635diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
4636index aea2718..3639a60 100644
4637--- a/arch/cris/include/arch-v10/arch/cache.h
4638+++ b/arch/cris/include/arch-v10/arch/cache.h
4639@@ -1,8 +1,9 @@
4640 #ifndef _ASM_ARCH_CACHE_H
4641 #define _ASM_ARCH_CACHE_H
4642
4643+#include <linux/const.h>
4644 /* Etrax 100LX have 32-byte cache-lines. */
4645-#define L1_CACHE_BYTES 32
4646 #define L1_CACHE_SHIFT 5
4647+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4648
4649 #endif /* _ASM_ARCH_CACHE_H */
4650diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
4651index 7caf25d..ee65ac5 100644
4652--- a/arch/cris/include/arch-v32/arch/cache.h
4653+++ b/arch/cris/include/arch-v32/arch/cache.h
4654@@ -1,11 +1,12 @@
4655 #ifndef _ASM_CRIS_ARCH_CACHE_H
4656 #define _ASM_CRIS_ARCH_CACHE_H
4657
4658+#include <linux/const.h>
4659 #include <arch/hwregs/dma.h>
4660
4661 /* A cache-line is 32 bytes. */
4662-#define L1_CACHE_BYTES 32
4663 #define L1_CACHE_SHIFT 5
4664+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4665
4666 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4667
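
The recurring cache.h change swaps a plain `1` for `_AC(1,UL)` in L1_CACHE_BYTES, so the shift is performed in unsigned long from C while the same macro still expands to a bare `1` when included from assembly. The `_AC` definition below is the standard one from include/uapi/linux/const.h; the demo shows the C-side expansion:

/* Why _AC(1,UL): in C it expands to 1UL, so shifts happen in
 * unsigned long; in assembly it expands to a bare 1. */
#ifdef __ASSEMBLY__
#define _AC(X, Y)  X
#else
#define __AC(X, Y) (X##Y)
#define _AC(X, Y)  __AC(X, Y)
#endif

#include <stdio.h>

int main(void)
{
    /* with a plain int 1, (1 << 31) is undefined on 32-bit int;
     * with 1UL the arithmetic is well defined and unsigned. */
    printf("%lu\n", _AC(1,UL) << 5);  /* 32, as an unsigned long */
    return 0;
}
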
4668diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
4669index b86329d..6709906 100644
4670--- a/arch/frv/include/asm/atomic.h
4671+++ b/arch/frv/include/asm/atomic.h
4672@@ -186,6 +186,16 @@ static inline void atomic64_dec(atomic64_t *v)
4673 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
4674 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
4675
4676+#define atomic64_read_unchecked(v) atomic64_read(v)
4677+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4678+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4679+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4680+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4681+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4682+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4683+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4684+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4685+
4686 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
4687 {
4688 int c, old;
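
The block of *_unchecked aliases exists because PaX's REFCOUNT feature makes the regular atomic ops trap on signed overflow; on architectures without that instrumentation, such as frv here, the unchecked variants simply alias the checked ones. A sketch of the checked/unchecked split using a compiler builtin rather than the kernel's atomics (the silent wrap in the unchecked path assumes the kernel's -fno-strict-overflow semantics):

/* Sketch of checked vs. unchecked increment, via GCC/Clang
 * __builtin_add_overflow rather than the kernel's atomics. */
#include <stdio.h>
#include <stdlib.h>

static int inc_checked(int *v)          /* traps on overflow */
{
    int out;
    if (__builtin_add_overflow(*v, 1, &out))
        abort();                        /* stand-in for the PaX trap */
    return *v = out;
}

static int inc_unchecked(int *v)        /* wraps silently */
{
    return ++*v;                        /* well defined under -fno-strict-overflow */
}

int main(void)
{
    int a = 5;
    printf("%d\n", inc_checked(&a));    /* 6 */
    printf("%d\n", inc_unchecked(&a));  /* 7 */
    return 0;
}
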
4689diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
4690index 2797163..c2a401d 100644
4691--- a/arch/frv/include/asm/cache.h
4692+++ b/arch/frv/include/asm/cache.h
4693@@ -12,10 +12,11 @@
4694 #ifndef __ASM_CACHE_H
4695 #define __ASM_CACHE_H
4696
4697+#include <linux/const.h>
4698
4699 /* bytes per L1 cache line */
4700 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
4701-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4702+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4703
4704 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
4705 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
4706diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
4707index 43901f2..0d8b865 100644
4708--- a/arch/frv/include/asm/kmap_types.h
4709+++ b/arch/frv/include/asm/kmap_types.h
4710@@ -2,6 +2,6 @@
4711 #ifndef _ASM_KMAP_TYPES_H
4712 #define _ASM_KMAP_TYPES_H
4713
4714-#define KM_TYPE_NR 17
4715+#define KM_TYPE_NR 18
4716
4717 #endif
4718diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
4719index 836f147..4cf23f5 100644
4720--- a/arch/frv/mm/elf-fdpic.c
4721+++ b/arch/frv/mm/elf-fdpic.c
4722@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4723 {
4724 struct vm_area_struct *vma;
4725 struct vm_unmapped_area_info info;
4726+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
4727
4728 if (len > TASK_SIZE)
4729 return -ENOMEM;
4730@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4731 if (addr) {
4732 addr = PAGE_ALIGN(addr);
4733 vma = find_vma(current->mm, addr);
4734- if (TASK_SIZE - len >= addr &&
4735- (!vma || addr + len <= vma->vm_start))
4736+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4737 goto success;
4738 }
4739
4740@@ -85,6 +85,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4741 info.high_limit = (current->mm->start_stack - 0x00200000);
4742 info.align_mask = 0;
4743 info.align_offset = 0;
4744+ info.threadstack_offset = offset;
4745 addr = vm_unmapped_area(&info);
4746 if (!(addr & ~PAGE_MASK))
4747 goto success;
4748diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
4749index f4ca594..adc72fd6 100644
4750--- a/arch/hexagon/include/asm/cache.h
4751+++ b/arch/hexagon/include/asm/cache.h
4752@@ -21,9 +21,11 @@
4753 #ifndef __ASM_CACHE_H
4754 #define __ASM_CACHE_H
4755
4756+#include <linux/const.h>
4757+
4758 /* Bytes per L1 cache line */
4759-#define L1_CACHE_SHIFT (5)
4760-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4761+#define L1_CACHE_SHIFT 5
4762+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4763
4764 #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
4765 #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
4766diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
4767index 6e6fe18..a6ae668 100644
4768--- a/arch/ia64/include/asm/atomic.h
4769+++ b/arch/ia64/include/asm/atomic.h
4770@@ -208,6 +208,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
4771 #define atomic64_inc(v) atomic64_add(1, (v))
4772 #define atomic64_dec(v) atomic64_sub(1, (v))
4773
4774+#define atomic64_read_unchecked(v) atomic64_read(v)
4775+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4776+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4777+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4778+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4779+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4780+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4781+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4782+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4783+
4784 /* Atomic operations are already serializing */
4785 #define smp_mb__before_atomic_dec() barrier()
4786 #define smp_mb__after_atomic_dec() barrier()
4787diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
4788index 988254a..e1ee885 100644
4789--- a/arch/ia64/include/asm/cache.h
4790+++ b/arch/ia64/include/asm/cache.h
4791@@ -1,6 +1,7 @@
4792 #ifndef _ASM_IA64_CACHE_H
4793 #define _ASM_IA64_CACHE_H
4794
4795+#include <linux/const.h>
4796
4797 /*
4798 * Copyright (C) 1998-2000 Hewlett-Packard Co
4799@@ -9,7 +10,7 @@
4800
4801 /* Bytes per L1 (data) cache line. */
4802 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
4803-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4804+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4805
4806 #ifdef CONFIG_SMP
4807 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
4808diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
4809index 5a83c5c..4d7f553 100644
4810--- a/arch/ia64/include/asm/elf.h
4811+++ b/arch/ia64/include/asm/elf.h
4812@@ -42,6 +42,13 @@
4813 */
4814 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
4815
4816+#ifdef CONFIG_PAX_ASLR
4817+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
4818+
4819+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
4820+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
4821+#endif
4822+
4823 #define PT_IA_64_UNWIND 0x70000001
4824
4825 /* IA-64 relocations: */
4826diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
4827index 96a8d92..617a1cf 100644
4828--- a/arch/ia64/include/asm/pgalloc.h
4829+++ b/arch/ia64/include/asm/pgalloc.h
4830@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
4831 pgd_val(*pgd_entry) = __pa(pud);
4832 }
4833
4834+static inline void
4835+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
4836+{
4837+ pgd_populate(mm, pgd_entry, pud);
4838+}
4839+
4840 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
4841 {
4842 return quicklist_alloc(0, GFP_KERNEL, NULL);
4843@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
4844 pud_val(*pud_entry) = __pa(pmd);
4845 }
4846
4847+static inline void
4848+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
4849+{
4850+ pud_populate(mm, pud_entry, pmd);
4851+}
4852+
4853 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
4854 {
4855 return quicklist_alloc(0, GFP_KERNEL, NULL);
4856diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
4857index 815810c..d60bd4c 100644
4858--- a/arch/ia64/include/asm/pgtable.h
4859+++ b/arch/ia64/include/asm/pgtable.h
4860@@ -12,7 +12,7 @@
4861 * David Mosberger-Tang <davidm@hpl.hp.com>
4862 */
4863
4864-
4865+#include <linux/const.h>
4866 #include <asm/mman.h>
4867 #include <asm/page.h>
4868 #include <asm/processor.h>
4869@@ -142,6 +142,17 @@
4870 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4871 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4872 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
4873+
4874+#ifdef CONFIG_PAX_PAGEEXEC
4875+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
4876+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4877+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4878+#else
4879+# define PAGE_SHARED_NOEXEC PAGE_SHARED
4880+# define PAGE_READONLY_NOEXEC PAGE_READONLY
4881+# define PAGE_COPY_NOEXEC PAGE_COPY
4882+#endif
4883+
4884 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
4885 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
4886 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
4887diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
4888index 54ff557..70c88b7 100644
4889--- a/arch/ia64/include/asm/spinlock.h
4890+++ b/arch/ia64/include/asm/spinlock.h
4891@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
4892 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
4893
4894 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
4895- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
4896+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
4897 }
4898
4899 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
4900diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
4901index 449c8c0..18965fb 100644
4902--- a/arch/ia64/include/asm/uaccess.h
4903+++ b/arch/ia64/include/asm/uaccess.h
4904@@ -240,12 +240,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
4905 static inline unsigned long
4906 __copy_to_user (void __user *to, const void *from, unsigned long count)
4907 {
4908+ if (count > INT_MAX)
4909+ return count;
4910+
4911+ if (!__builtin_constant_p(count))
4912+ check_object_size(from, count, true);
4913+
4914 return __copy_user(to, (__force void __user *) from, count);
4915 }
4916
4917 static inline unsigned long
4918 __copy_from_user (void *to, const void __user *from, unsigned long count)
4919 {
4920+ if (count > INT_MAX)
4921+ return count;
4922+
4923+ if (!__builtin_constant_p(count))
4924+ check_object_size(to, count, false);
4925+
4926 return __copy_user((__force void __user *) to, from, count);
4927 }
4928
4929@@ -255,10 +267,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
4930 ({ \
4931 void __user *__cu_to = (to); \
4932 const void *__cu_from = (from); \
4933- long __cu_len = (n); \
4934+ unsigned long __cu_len = (n); \
4935 \
4936- if (__access_ok(__cu_to, __cu_len, get_fs())) \
4937+ if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
4938+ if (!__builtin_constant_p(n)) \
4939+ check_object_size(__cu_from, __cu_len, true); \
4940 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
4941+ } \
4942 __cu_len; \
4943 })
4944
4945@@ -266,11 +281,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
4946 ({ \
4947 void *__cu_to = (to); \
4948 const void __user *__cu_from = (from); \
4949- long __cu_len = (n); \
4950+ unsigned long __cu_len = (n); \
4951 \
4952 __chk_user_ptr(__cu_from); \
4953- if (__access_ok(__cu_from, __cu_len, get_fs())) \
4954+ if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \
4955+ if (!__builtin_constant_p(n)) \
4956+ check_object_size(__cu_to, __cu_len, false); \
4957 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
4958+ } \
4959 __cu_len; \
4960 })
4961
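
The uaccess.h hunk hardens the ia64 copy routines in two ways: lengths above INT_MAX are refused outright, catching signed sizes that went negative and wrapped, and non-constant lengths are passed to check_object_size(), the PAX_USERCOPY slab/stack bounds check. A user-space sketch of the size-sanity half only (check_object_size() is PaX-specific and omitted here):

/* Sketch of the size-sanity half of the hardened copy: a length
 * that came from a signed value gone negative shows up here as a
 * huge unsigned count and is refused before any copying happens. */
#include <limits.h>
#include <stdio.h>
#include <string.h>

static unsigned long copy_checked(void *to, const void *from,
                                  unsigned long count)
{
    if (count > INT_MAX)        /* wrapped negative length: refuse */
        return count;           /* "count bytes not copied"        */
    memcpy(to, from, count);
    return 0;                   /* 0 bytes left uncopied           */
}

int main(void)
{
    char dst[8], src[8] = "abcdefg";
    long bad = -1;              /* becomes ~0UL when used as a size */
    printf("%lu\n", copy_checked(dst, src, sizeof(src)));        /* 0 */
    printf("%lu\n", copy_checked(dst, src, (unsigned long)bad)); /* huge */
    return 0;
}
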
4962diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c
4963index 2d67317..07d8bfa 100644
4964--- a/arch/ia64/kernel/err_inject.c
4965+++ b/arch/ia64/kernel/err_inject.c
4966@@ -256,7 +256,7 @@ static int __cpuinit err_inject_cpu_callback(struct notifier_block *nfb,
4967 return NOTIFY_OK;
4968 }
4969
4970-static struct notifier_block __cpuinitdata err_inject_cpu_notifier =
4971+static struct notifier_block err_inject_cpu_notifier =
4972 {
4973 .notifier_call = err_inject_cpu_callback,
4974 };
4975diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
4976index d7396db..b33e873 100644
4977--- a/arch/ia64/kernel/mca.c
4978+++ b/arch/ia64/kernel/mca.c
4979@@ -1922,7 +1922,7 @@ static int __cpuinit mca_cpu_callback(struct notifier_block *nfb,
4980 return NOTIFY_OK;
4981 }
4982
4983-static struct notifier_block mca_cpu_notifier __cpuinitdata = {
4984+static struct notifier_block mca_cpu_notifier = {
4985 .notifier_call = mca_cpu_callback
4986 };
4987
4988diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
4989index 24603be..948052d 100644
4990--- a/arch/ia64/kernel/module.c
4991+++ b/arch/ia64/kernel/module.c
4992@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
4993 void
4994 module_free (struct module *mod, void *module_region)
4995 {
4996- if (mod && mod->arch.init_unw_table &&
4997- module_region == mod->module_init) {
4998+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
4999 unw_remove_unwind_table(mod->arch.init_unw_table);
5000 mod->arch.init_unw_table = NULL;
5001 }
5002@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
5003 }
5004
5005 static inline int
5006+in_init_rx (const struct module *mod, uint64_t addr)
5007+{
5008+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
5009+}
5010+
5011+static inline int
5012+in_init_rw (const struct module *mod, uint64_t addr)
5013+{
5014+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
5015+}
5016+
5017+static inline int
5018 in_init (const struct module *mod, uint64_t addr)
5019 {
5020- return addr - (uint64_t) mod->module_init < mod->init_size;
5021+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
5022+}
5023+
5024+static inline int
5025+in_core_rx (const struct module *mod, uint64_t addr)
5026+{
5027+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
5028+}
5029+
5030+static inline int
5031+in_core_rw (const struct module *mod, uint64_t addr)
5032+{
5033+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
5034 }
5035
5036 static inline int
5037 in_core (const struct module *mod, uint64_t addr)
5038 {
5039- return addr - (uint64_t) mod->module_core < mod->core_size;
5040+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
5041 }
5042
5043 static inline int
5044@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
5045 break;
5046
5047 case RV_BDREL:
5048- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
5049+ if (in_init_rx(mod, val))
5050+ val -= (uint64_t) mod->module_init_rx;
5051+ else if (in_init_rw(mod, val))
5052+ val -= (uint64_t) mod->module_init_rw;
5053+ else if (in_core_rx(mod, val))
5054+ val -= (uint64_t) mod->module_core_rx;
5055+ else if (in_core_rw(mod, val))
5056+ val -= (uint64_t) mod->module_core_rw;
5057 break;
5058
5059 case RV_LTV:
5060@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
5061 * addresses have been selected...
5062 */
5063 uint64_t gp;
5064- if (mod->core_size > MAX_LTOFF)
5065+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
5066 /*
5067 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
5068 * at the end of the module.
5069 */
5070- gp = mod->core_size - MAX_LTOFF / 2;
5071+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
5072 else
5073- gp = mod->core_size / 2;
5074- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
5075+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
5076+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
5077 mod->arch.gp = gp;
5078 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
5079 }
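/*
 * A minimal standalone sketch (not from the patch) of the unsigned
 * range-check idiom the new in_init_rx()/in_core_rw() helpers above rely
 * on: with unsigned arithmetic, "addr - base < size" rejects addresses
 * below base (the subtraction wraps to a huge value) and addresses past
 * the end with a single compare.
 */
#include <stdint.h>
#include <stdio.h>

static int in_range(uint64_t addr, uint64_t base, uint64_t size)
{
	return addr - base < size;	/* wraps for addr < base, so still 0 */
}

int main(void)
{
	printf("%d\n", in_range(0x1008, 0x1000, 0x100));	/* 1: inside */
	printf("%d\n", in_range(0x0ff8, 0x1000, 0x100));	/* 0: below base */
	printf("%d\n", in_range(0x1100, 0x1000, 0x100));	/* 0: past the end */
	return 0;
}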
5080diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
5081index 2b3c2d7..a318d84 100644
5082--- a/arch/ia64/kernel/palinfo.c
5083+++ b/arch/ia64/kernel/palinfo.c
5084@@ -980,7 +980,7 @@ static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb,
5085 return NOTIFY_OK;
5086 }
5087
5088-static struct notifier_block __refdata palinfo_cpu_notifier =
5089+static struct notifier_block palinfo_cpu_notifier =
5090 {
5091 .notifier_call = palinfo_cpu_callback,
5092 .priority = 0,
5093diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
5094index 4bc580a..7767f24 100644
5095--- a/arch/ia64/kernel/salinfo.c
5096+++ b/arch/ia64/kernel/salinfo.c
5097@@ -609,7 +609,7 @@ salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu
5098 return NOTIFY_OK;
5099 }
5100
5101-static struct notifier_block salinfo_cpu_notifier __cpuinitdata =
5102+static struct notifier_block salinfo_cpu_notifier =
5103 {
5104 .notifier_call = salinfo_cpu_callback,
5105 .priority = 0,
5106diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
5107index 41e33f8..65180b2 100644
5108--- a/arch/ia64/kernel/sys_ia64.c
5109+++ b/arch/ia64/kernel/sys_ia64.c
5110@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5111 unsigned long align_mask = 0;
5112 struct mm_struct *mm = current->mm;
5113 struct vm_unmapped_area_info info;
5114+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5115
5116 if (len > RGN_MAP_LIMIT)
5117 return -ENOMEM;
5118@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5119 if (REGION_NUMBER(addr) == RGN_HPAGE)
5120 addr = 0;
5121 #endif
5122+
5123+#ifdef CONFIG_PAX_RANDMMAP
5124+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5125+ addr = mm->free_area_cache;
5126+ else
5127+#endif
5128+
5129 if (!addr)
5130 addr = TASK_UNMAPPED_BASE;
5131
5132@@ -61,6 +69,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5133 info.high_limit = TASK_SIZE;
5134 info.align_mask = align_mask;
5135 info.align_offset = 0;
5136+ info.threadstack_offset = offset;
5137 return vm_unmapped_area(&info);
5138 }
5139
5140diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
5141index dc00b2c..cce53c2 100644
5142--- a/arch/ia64/kernel/topology.c
5143+++ b/arch/ia64/kernel/topology.c
5144@@ -445,7 +445,7 @@ static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
5145 return NOTIFY_OK;
5146 }
5147
5148-static struct notifier_block __cpuinitdata cache_cpu_notifier =
5149+static struct notifier_block cache_cpu_notifier =
5150 {
5151 .notifier_call = cache_cpu_callback
5152 };
5153diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
5154index 0ccb28f..8992469 100644
5155--- a/arch/ia64/kernel/vmlinux.lds.S
5156+++ b/arch/ia64/kernel/vmlinux.lds.S
5157@@ -198,7 +198,7 @@ SECTIONS {
5158 /* Per-cpu data: */
5159 . = ALIGN(PERCPU_PAGE_SIZE);
5160 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
5161- __phys_per_cpu_start = __per_cpu_load;
5162+ __phys_per_cpu_start = per_cpu_load;
5163 /*
5164 * ensure percpu data fits
5165 * into percpu page size
5166diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
5167index 6cf0341..d352594 100644
5168--- a/arch/ia64/mm/fault.c
5169+++ b/arch/ia64/mm/fault.c
5170@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
5171 return pte_present(pte);
5172 }
5173
5174+#ifdef CONFIG_PAX_PAGEEXEC
5175+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5176+{
5177+ unsigned long i;
5178+
5179+ printk(KERN_ERR "PAX: bytes at PC: ");
5180+ for (i = 0; i < 8; i++) {
5181+ unsigned int c;
5182+ if (get_user(c, (unsigned int *)pc+i))
5183+ printk(KERN_CONT "???????? ");
5184+ else
5185+ printk(KERN_CONT "%08x ", c);
5186+ }
5187+ printk("\n");
5188+}
5189+#endif
5190+
5191 # define VM_READ_BIT 0
5192 # define VM_WRITE_BIT 1
5193 # define VM_EXEC_BIT 2
5194@@ -149,8 +166,21 @@ retry:
5195 if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
5196 goto bad_area;
5197
5198- if ((vma->vm_flags & mask) != mask)
5199+ if ((vma->vm_flags & mask) != mask) {
5200+
5201+#ifdef CONFIG_PAX_PAGEEXEC
5202+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
5203+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
5204+ goto bad_area;
5205+
5206+ up_read(&mm->mmap_sem);
5207+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
5208+ do_group_exit(SIGKILL);
5209+ }
5210+#endif
5211+
5212 goto bad_area;
5213+ }
5214
5215 /*
5216 * If for any reason at all we couldn't handle the fault, make
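/*
 * The pax_report_insns() added above dumps the eight instruction words at
 * the faulting PC via get_user(), printing "????????" wherever the fetch
 * itself faults. A user-space sketch of the same output format; the word
 * values below are arbitrary placeholders:
 */
#include <stdio.h>

static void dump_words_at_pc(const unsigned int *pc, int count)
{
	int i;

	printf("bytes at PC: ");
	for (i = 0; i < count; i++)
		printf("%08x ", pc[i]);	/* the kernel reads each word with get_user() */
	printf("\n");
}

int main(void)
{
	unsigned int code[8] = { 0xdeadbeef, 0, 1, 2, 3, 4, 5, 6 };

	dump_words_at_pc(code, 8);
	return 0;
}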
5217diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
5218index 76069c1..c2aa816 100644
5219--- a/arch/ia64/mm/hugetlbpage.c
5220+++ b/arch/ia64/mm/hugetlbpage.c
5221@@ -149,6 +149,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5222 unsigned long pgoff, unsigned long flags)
5223 {
5224 struct vm_unmapped_area_info info;
5225+ unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
5226
5227 if (len > RGN_MAP_LIMIT)
5228 return -ENOMEM;
5229@@ -172,6 +173,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5230 info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
5231 info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
5232 info.align_offset = 0;
5233+ info.threadstack_offset = offset;
5234 return vm_unmapped_area(&info);
5235 }
5236
5237diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
5238index d1fe4b4..2628f37 100644
5239--- a/arch/ia64/mm/init.c
5240+++ b/arch/ia64/mm/init.c
5241@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
5242 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
5243 vma->vm_end = vma->vm_start + PAGE_SIZE;
5244 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
5245+
5246+#ifdef CONFIG_PAX_PAGEEXEC
5247+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
5248+ vma->vm_flags &= ~VM_EXEC;
5249+
5250+#ifdef CONFIG_PAX_MPROTECT
5251+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
5252+ vma->vm_flags &= ~VM_MAYEXEC;
5253+#endif
5254+
5255+ }
5256+#endif
5257+
5258 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5259 down_write(&current->mm->mmap_sem);
5260 if (insert_vm_struct(current->mm, vma)) {
5261diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
5262index 40b3ee9..8c2c112 100644
5263--- a/arch/m32r/include/asm/cache.h
5264+++ b/arch/m32r/include/asm/cache.h
5265@@ -1,8 +1,10 @@
5266 #ifndef _ASM_M32R_CACHE_H
5267 #define _ASM_M32R_CACHE_H
5268
5269+#include <linux/const.h>
5270+
5271 /* L1 cache line size */
5272 #define L1_CACHE_SHIFT 4
5273-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5274+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5275
5276 #endif /* _ASM_M32R_CACHE_H */
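/*
 * The _AC() macro pulled in from <linux/const.h> above works, in
 * simplified form, like this: in C it pastes the UL suffix onto the
 * constant so the shift is done in unsigned long, while under __ASSEMBLY__
 * it expands to the bare number the assembler can digest. Standalone
 * sketch:
 */
#include <stdio.h>

#ifdef __ASSEMBLY__
#define _AC(X, Y)	X
#else
#define __AC(X, Y)	(X##Y)
#define _AC(X, Y)	__AC(X, Y)
#endif

#define L1_CACHE_SHIFT	4
#define L1_CACHE_BYTES	(_AC(1, UL) << L1_CACHE_SHIFT)

int main(void)
{
	/* 16UL rather than a plain int 16: sizeof(unsigned long) here */
	printf("%zu\n", sizeof(L1_CACHE_BYTES));
	return 0;
}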
5277diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
5278index 82abd15..d95ae5d 100644
5279--- a/arch/m32r/lib/usercopy.c
5280+++ b/arch/m32r/lib/usercopy.c
5281@@ -14,6 +14,9 @@
5282 unsigned long
5283 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5284 {
5285+ if ((long)n < 0)
5286+ return n;
5287+
5288 prefetch(from);
5289 if (access_ok(VERIFY_WRITE, to, n))
5290 __copy_user(to,from,n);
5291@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5292 unsigned long
5293 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
5294 {
5295+ if ((long)n < 0)
5296+ return n;
5297+
5298 prefetchw(to);
5299 if (access_ok(VERIFY_READ, from, n))
5300 __copy_user_zeroing(to,from,n);
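/*
 * Why the added "(long)n < 0" guards above help: a negative length that
 * reaches the copy routines as a huge unsigned value could copy gigabytes
 * if access_ok() is ever fooled. Rejecting any size with the top bit set
 * caps what such a signedness bug can do. Minimal user-space illustration;
 * the function name is mine:
 */
#include <stdio.h>

static unsigned long guarded_copy(void *to, const void *from, unsigned long n)
{
	(void)to;
	(void)from;
	if ((long)n < 0)	/* top bit set: almost certainly an overflowed size */
		return n;	/* report the whole length as "not copied" */
	/* ... the real __copy_user() would run here ... */
	return 0;
}

int main(void)
{
	char buf[8];

	/* e.g. a caller computed len = 4 - 8 somewhere upstream */
	printf("%lu bytes uncopied\n", guarded_copy(buf, "hi", (unsigned long)-4));
	return 0;
}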
5301diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
5302index 0395c51..5f26031 100644
5303--- a/arch/m68k/include/asm/cache.h
5304+++ b/arch/m68k/include/asm/cache.h
5305@@ -4,9 +4,11 @@
5306 #ifndef __ARCH_M68K_CACHE_H
5307 #define __ARCH_M68K_CACHE_H
5308
5309+#include <linux/const.h>
5310+
5311 /* bytes per L1 cache line */
5312 #define L1_CACHE_SHIFT 4
5313-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
5314+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5315
5316 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5317
5318diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
5319index 3c52fa6..11b2ad8 100644
5320--- a/arch/metag/mm/hugetlbpage.c
5321+++ b/arch/metag/mm/hugetlbpage.c
5322@@ -200,6 +200,7 @@ hugetlb_get_unmapped_area_new_pmd(unsigned long len)
5323 info.high_limit = TASK_SIZE;
5324 info.align_mask = PAGE_MASK & HUGEPT_MASK;
5325 info.align_offset = 0;
5326+ info.threadstack_offset = 0;
5327 return vm_unmapped_area(&info);
5328 }
5329
5330diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
5331index 4efe96a..60e8699 100644
5332--- a/arch/microblaze/include/asm/cache.h
5333+++ b/arch/microblaze/include/asm/cache.h
5334@@ -13,11 +13,12 @@
5335 #ifndef _ASM_MICROBLAZE_CACHE_H
5336 #define _ASM_MICROBLAZE_CACHE_H
5337
5338+#include <linux/const.h>
5339 #include <asm/registers.h>
5340
5341 #define L1_CACHE_SHIFT 5
5342 /* word-granular cache in microblaze */
5343-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5344+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5345
5346 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5347
5348diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
5349index 08b6079..e94e6da 100644
5350--- a/arch/mips/include/asm/atomic.h
5351+++ b/arch/mips/include/asm/atomic.h
5352@@ -21,15 +21,39 @@
5353 #include <asm/cmpxchg.h>
5354 #include <asm/war.h>
5355
5356+#ifdef CONFIG_GENERIC_ATOMIC64
5357+#include <asm-generic/atomic64.h>
5358+#endif
5359+
5360 #define ATOMIC_INIT(i) { (i) }
5361
5362+#ifdef CONFIG_64BIT
5363+#define _ASM_EXTABLE(from, to) \
5364+" .section __ex_table,\"a\"\n" \
5365+" .dword " #from ", " #to"\n" \
5366+" .previous\n"
5367+#else
5368+#define _ASM_EXTABLE(from, to) \
5369+" .section __ex_table,\"a\"\n" \
5370+" .word " #from ", " #to"\n" \
5371+" .previous\n"
5372+#endif
5373+
5374 /*
5375 * atomic_read - read atomic variable
5376 * @v: pointer of type atomic_t
5377 *
5378 * Atomically reads the value of @v.
5379 */
5380-#define atomic_read(v) (*(volatile int *)&(v)->counter)
5381+static inline int atomic_read(const atomic_t *v)
5382+{
5383+ return (*(volatile const int *) &v->counter);
5384+}
5385+
5386+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
5387+{
5388+ return (*(volatile const int *) &v->counter);
5389+}
5390
5391 /*
5392 * atomic_set - set atomic variable
5393@@ -38,7 +62,15 @@
5394 *
5395 * Atomically sets the value of @v to @i.
5396 */
5397-#define atomic_set(v, i) ((v)->counter = (i))
5398+static inline void atomic_set(atomic_t *v, int i)
5399+{
5400+ v->counter = i;
5401+}
5402+
5403+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
5404+{
5405+ v->counter = i;
5406+}
5407
5408 /*
5409 * atomic_add - add integer to atomic variable
5410@@ -47,7 +79,67 @@
5411 *
5412 * Atomically adds @i to @v.
5413 */
5414-static __inline__ void atomic_add(int i, atomic_t * v)
5415+static __inline__ void atomic_add(int i, atomic_t *v)
5416+{
5417+ int temp;
5418+
5419+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
5420+ __asm__ __volatile__(
5421+ " .set mips3 \n"
5422+ "1: ll %0, %1 # atomic_add \n"
5423+#ifdef CONFIG_PAX_REFCOUNT
5424+ /* Exception on overflow. */
5425+ "2: add %0, %2 \n"
5426+#else
5427+ " addu %0, %2 \n"
5428+#endif
5429+ " sc %0, %1 \n"
5430+ " beqzl %0, 1b \n"
5431+#ifdef CONFIG_PAX_REFCOUNT
5432+ "3: \n"
5433+ _ASM_EXTABLE(2b, 3b)
5434+#endif
5435+ " .set mips0 \n"
5436+ : "=&r" (temp), "+m" (v->counter)
5437+ : "Ir" (i));
5438+ } else if (kernel_uses_llsc) {
5439+ __asm__ __volatile__(
5440+ " .set mips3 \n"
5441+ "1: ll %0, %1 # atomic_add \n"
5442+#ifdef CONFIG_PAX_REFCOUNT
5443+ /* Exception on overflow. */
5444+ "2: add %0, %2 \n"
5445+#else
5446+ " addu %0, %2 \n"
5447+#endif
5448+ " sc %0, %1 \n"
5449+ " beqz %0, 1b \n"
5450+#ifdef CONFIG_PAX_REFCOUNT
5451+ "3: \n"
5452+ _ASM_EXTABLE(2b, 3b)
5453+#endif
5454+ " .set mips0 \n"
5455+ : "=&r" (temp), "+m" (v->counter)
5456+ : "Ir" (i));
5457+ } else {
5458+ unsigned long flags;
5459+
5460+ raw_local_irq_save(flags);
5461+ __asm__ __volatile__(
5462+#ifdef CONFIG_PAX_REFCOUNT
5463+ /* Exception on overflow. */
5464+ "1: add %0, %1 \n"
5465+ "2: \n"
5466+ _ASM_EXTABLE(1b, 2b)
5467+#else
5468+ " addu %0, %1 \n"
5469+#endif
5470+ : "+r" (v->counter) : "Ir" (i));
5471+ raw_local_irq_restore(flags);
5472+ }
5473+}
5474+
5475+static __inline__ void atomic_add_unchecked(int i, atomic_unchecked_t *v)
5476 {
5477 if (kernel_uses_llsc && R10000_LLSC_WAR) {
5478 int temp;
5479@@ -90,7 +182,67 @@ static __inline__ void atomic_add(int i, atomic_t * v)
5480 *
5481 * Atomically subtracts @i from @v.
5482 */
5483-static __inline__ void atomic_sub(int i, atomic_t * v)
5484+static __inline__ void atomic_sub(int i, atomic_t *v)
5485+{
5486+ int temp;
5487+
5488+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
5489+ __asm__ __volatile__(
5490+ " .set mips3 \n"
5491+ "1: ll %0, %1 # atomic64_sub \n"
5492+#ifdef CONFIG_PAX_REFCOUNT
5493+ /* Exception on overflow. */
5494+ "2: sub %0, %2 \n"
5495+#else
5496+ " subu %0, %2 \n"
5497+#endif
5498+ " sc %0, %1 \n"
5499+ " beqzl %0, 1b \n"
5500+#ifdef CONFIG_PAX_REFCOUNT
5501+ "3: \n"
5502+ _ASM_EXTABLE(2b, 3b)
5503+#endif
5504+ " .set mips0 \n"
5505+ : "=&r" (temp), "+m" (v->counter)
5506+ : "Ir" (i));
5507+ } else if (kernel_uses_llsc) {
5508+ __asm__ __volatile__(
5509+ " .set mips3 \n"
5510+ "1: ll %0, %1 # atomic64_sub \n"
5511+#ifdef CONFIG_PAX_REFCOUNT
5512+ /* Exception on overflow. */
5513+ "2: sub %0, %2 \n"
5514+#else
5515+ " subu %0, %2 \n"
5516+#endif
5517+ " sc %0, %1 \n"
5518+ " beqz %0, 1b \n"
5519+#ifdef CONFIG_PAX_REFCOUNT
5520+ "3: \n"
5521+ _ASM_EXTABLE(2b, 3b)
5522+#endif
5523+ " .set mips0 \n"
5524+ : "=&r" (temp), "+m" (v->counter)
5525+ : "Ir" (i));
5526+ } else {
5527+ unsigned long flags;
5528+
5529+ raw_local_irq_save(flags);
5530+ __asm__ __volatile__(
5531+#ifdef CONFIG_PAX_REFCOUNT
5532+ /* Exception on overflow. */
5533+ "1: sub %0, %1 \n"
5534+ "2: \n"
5535+ _ASM_EXTABLE(1b, 2b)
5536+#else
5537+ " subu %0, %1 \n"
5538+#endif
5539+ : "+r" (v->counter) : "Ir" (i));
5540+ raw_local_irq_restore(flags);
5541+ }
5542+}
5543+
5544+static __inline__ void atomic_sub_unchecked(long i, atomic_unchecked_t *v)
5545 {
5546 if (kernel_uses_llsc && R10000_LLSC_WAR) {
5547 int temp;
5548@@ -129,7 +281,93 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
5549 /*
5550 * Same as above, but return the result value
5551 */
5552-static __inline__ int atomic_add_return(int i, atomic_t * v)
5553+static __inline__ int atomic_add_return(int i, atomic_t *v)
5554+{
5555+ int result;
5556+ int temp;
5557+
5558+ smp_mb__before_llsc();
5559+
5560+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
5561+ __asm__ __volatile__(
5562+ " .set mips3 \n"
5563+ "1: ll %1, %2 # atomic_add_return \n"
5564+#ifdef CONFIG_PAX_REFCOUNT
5565+ "2: add %0, %1, %3 \n"
5566+#else
5567+ " addu %0, %1, %3 \n"
5568+#endif
5569+ " sc %0, %2 \n"
5570+ " beqzl %0, 1b \n"
5571+#ifdef CONFIG_PAX_REFCOUNT
5572+ " b 4f \n"
5573+ " .set noreorder \n"
5574+ "3: b 5f \n"
5575+ " move %0, %1 \n"
5576+ " .set reorder \n"
5577+ _ASM_EXTABLE(2b, 3b)
5578+#endif
5579+ "4: addu %0, %1, %3 \n"
5580+#ifdef CONFIG_PAX_REFCOUNT
5581+ "5: \n"
5582+#endif
5583+ " .set mips0 \n"
5584+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
5585+ : "Ir" (i));
5586+ } else if (kernel_uses_llsc) {
5587+ __asm__ __volatile__(
5588+ " .set mips3 \n"
5589+ "1: ll %1, %2 # atomic_add_return \n"
5590+#ifdef CONFIG_PAX_REFCOUNT
5591+ "2: add %0, %1, %3 \n"
5592+#else
5593+ " addu %0, %1, %3 \n"
5594+#endif
5595+ " sc %0, %2 \n"
5596+ " bnez %0, 4f \n"
5597+ " b 1b \n"
5598+#ifdef CONFIG_PAX_REFCOUNT
5599+ " .set noreorder \n"
5600+ "3: b 5f \n"
5601+ " move %0, %1 \n"
5602+ " .set reorder \n"
5603+ _ASM_EXTABLE(2b, 3b)
5604+#endif
5605+ "4: addu %0, %1, %3 \n"
5606+#ifdef CONFIG_PAX_REFCOUNT
5607+ "5: \n"
5608+#endif
5609+ " .set mips0 \n"
5610+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
5611+ : "Ir" (i));
5612+ } else {
5613+ unsigned long flags;
5614+
5615+ raw_local_irq_save(flags);
5616+ __asm__ __volatile__(
5617+ " lw %0, %1 \n"
5618+#ifdef CONFIG_PAX_REFCOUNT
5619+ /* Exception on overflow. */
5620+ "1: add %0, %2 \n"
5621+#else
5622+ " addu %0, %2 \n"
5623+#endif
5624+ " sw %0, %1 \n"
5625+#ifdef CONFIG_PAX_REFCOUNT
5626+ /* Note: Dest reg is not modified on overflow */
5627+ "2: \n"
5628+ _ASM_EXTABLE(1b, 2b)
5629+#endif
5630+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
5631+ raw_local_irq_restore(flags);
5632+ }
5633+
5634+ smp_llsc_mb();
5635+
5636+ return result;
5637+}
5638+
5639+static __inline__ int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
5640 {
5641 int result;
5642
5643@@ -178,7 +416,93 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
5644 return result;
5645 }
5646
5647-static __inline__ int atomic_sub_return(int i, atomic_t * v)
5648+static __inline__ int atomic_sub_return(int i, atomic_t *v)
5649+{
5650+ int result;
5651+ int temp;
5652+
5653+ smp_mb__before_llsc();
5654+
5655+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
5656+ __asm__ __volatile__(
5657+ " .set mips3 \n"
5658+ "1: ll %1, %2 # atomic_sub_return \n"
5659+#ifdef CONFIG_PAX_REFCOUNT
5660+ "2: sub %0, %1, %3 \n"
5661+#else
5662+ " subu %0, %1, %3 \n"
5663+#endif
5664+ " sc %0, %2 \n"
5665+ " beqzl %0, 1b \n"
5666+#ifdef CONFIG_PAX_REFCOUNT
5667+ " b 4f \n"
5668+ " .set noreorder \n"
5669+ "3: b 5f \n"
5670+ " move %0, %1 \n"
5671+ " .set reorder \n"
5672+ _ASM_EXTABLE(2b, 3b)
5673+#endif
5674+ "4: subu %0, %1, %3 \n"
5675+#ifdef CONFIG_PAX_REFCOUNT
5676+ "5: \n"
5677+#endif
5678+ " .set mips0 \n"
5679+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
5680+ : "Ir" (i), "m" (v->counter)
5681+ : "memory");
5682+ } else if (kernel_uses_llsc) {
5683+ __asm__ __volatile__(
5684+ " .set mips3 \n"
5685+ "1: ll %1, %2 # atomic_sub_return \n"
5686+#ifdef CONFIG_PAX_REFCOUNT
5687+ "2: sub %0, %1, %3 \n"
5688+#else
5689+ " subu %0, %1, %3 \n"
5690+#endif
5691+ " sc %0, %2 \n"
5692+ " bnez %0, 4f \n"
5693+ " b 1b \n"
5694+#ifdef CONFIG_PAX_REFCOUNT
5695+ " .set noreorder \n"
5696+ "3: b 5f \n"
5697+ " move %0, %1 \n"
5698+ " .set reorder \n"
5699+ _ASM_EXTABLE(2b, 3b)
5700+#endif
5701+ "4: subu %0, %1, %3 \n"
5702+#ifdef CONFIG_PAX_REFCOUNT
5703+ "5: \n"
5704+#endif
5705+ " .set mips0 \n"
5706+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
5707+ : "Ir" (i));
5708+ } else {
5709+ unsigned long flags;
5710+
5711+ raw_local_irq_save(flags);
5712+ __asm__ __volatile__(
5713+ " lw %0, %1 \n"
5714+#ifdef CONFIG_PAX_REFCOUNT
5715+ /* Exception on overflow. */
5716+ "1: sub %0, %2 \n"
5717+#else
5718+ " subu %0, %2 \n"
5719+#endif
5720+ " sw %0, %1 \n"
5721+#ifdef CONFIG_PAX_REFCOUNT
5722+ /* Note: Dest reg is not modified on overflow */
5723+ "2: \n"
5724+ _ASM_EXTABLE(1b, 2b)
5725+#endif
5726+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
5727+ raw_local_irq_restore(flags);
5728+ }
5729+
5730+ smp_llsc_mb();
5731+
5732+ return result;
5733+}
5734+static __inline__ int atomic_sub_return_unchecked(int i, atomic_unchecked_t *v)
5735 {
5736 int result;
5737
5738@@ -238,7 +562,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
5739 * Atomically test @v and subtract @i if @v is greater or equal than @i.
5740 * The function returns the old value of @v minus @i.
5741 */
5742-static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
5743+static __inline__ int atomic_sub_if_positive(int i, atomic_t *v)
5744 {
5745 int result;
5746
5747@@ -295,8 +619,26 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
5748 return result;
5749 }
5750
5751-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
5752-#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
5753+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
5754+{
5755+ return cmpxchg(&v->counter, old, new);
5756+}
5757+
5758+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old,
5759+ int new)
5760+{
5761+ return cmpxchg(&(v->counter), old, new);
5762+}
5763+
5764+static inline int atomic_xchg(atomic_t *v, int new)
5765+{
5766+ return xchg(&v->counter, new);
5767+}
5768+
5769+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
5770+{
5771+ return xchg(&(v->counter), new);
5772+}
5773
5774 /**
5775 * __atomic_add_unless - add unless the number is a given value
5776@@ -324,6 +666,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5777
5778 #define atomic_dec_return(v) atomic_sub_return(1, (v))
5779 #define atomic_inc_return(v) atomic_add_return(1, (v))
5780+#define atomic_inc_return_unchecked(v) atomic_add_return_unchecked(1, (v))
5781
5782 /*
5783 * atomic_sub_and_test - subtract value from variable and test result
5784@@ -345,6 +688,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5785 * other cases.
5786 */
5787 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
5788+#define atomic_inc_and_test_unchecked(v) (atomic_add_return_unchecked(1, (v)) == 0)
5789
5790 /*
5791 * atomic_dec_and_test - decrement by 1 and test
5792@@ -369,6 +713,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5793 * Atomically increments @v by 1.
5794 */
5795 #define atomic_inc(v) atomic_add(1, (v))
5796+#define atomic_inc_unchecked(v) atomic_add_unchecked(1, (v))
5797
5798 /*
5799 * atomic_dec - decrement and test
5800@@ -377,6 +722,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5801 * Atomically decrements @v by 1.
5802 */
5803 #define atomic_dec(v) atomic_sub(1, (v))
5804+#define atomic_dec_unchecked(v) atomic_sub_return_unchecked(1, (v))
5805
5806 /*
5807 * atomic_add_negative - add and test if negative
5808@@ -398,14 +744,30 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5809 * @v: pointer of type atomic64_t
5810 *
5811 */
5812-#define atomic64_read(v) (*(volatile long *)&(v)->counter)
5813+static inline long atomic64_read(const atomic64_t *v)
5814+{
5815+ return (*(volatile const long *) &v->counter);
5816+}
5817+
5818+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
5819+{
5820+ return (*(volatile const long *) &v->counter);
5821+}
5822
5823 /*
5824 * atomic64_set - set atomic variable
5825 * @v: pointer of type atomic64_t
5826 * @i: required value
5827 */
5828-#define atomic64_set(v, i) ((v)->counter = (i))
5829+static inline void atomic64_set(atomic64_t *v, long i)
5830+{
5831+ v->counter = i;
5832+}
5833+
5834+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
5835+{
5836+ v->counter = i;
5837+}
5838
5839 /*
5840 * atomic64_add - add integer to atomic variable
5841@@ -414,7 +776,66 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5842 *
5843 * Atomically adds @i to @v.
5844 */
5845-static __inline__ void atomic64_add(long i, atomic64_t * v)
5846+static __inline__ void atomic64_add(long i, atomic64_t *v)
5847+{
5848+ long temp;
5849+
5850+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
5851+ __asm__ __volatile__(
5852+ " .set mips3 \n"
5853+ "1: lld %0, %1 # atomic64_add \n"
5854+#ifdef CONFIG_PAX_REFCOUNT
5855+ /* Exception on overflow. */
5856+ "2: dadd %0, %2 \n"
5857+#else
5858+ " daddu %0, %2 \n"
5859+#endif
5860+ " scd %0, %1 \n"
5861+ " beqzl %0, 1b \n"
5862+#ifdef CONFIG_PAX_REFCOUNT
5863+ "3: \n"
5864+ _ASM_EXTABLE(2b, 3b)
5865+#endif
5866+ " .set mips0 \n"
5867+ : "=&r" (temp), "+m" (v->counter)
5868+ : "Ir" (i));
5869+ } else if (kernel_uses_llsc) {
5870+ __asm__ __volatile__(
5871+ " .set mips3 \n"
5872+ "1: lld %0, %1 # atomic64_add \n"
5873+#ifdef CONFIG_PAX_REFCOUNT
5874+ /* Exception on overflow. */
5875+ "2: dadd %0, %2 \n"
5876+#else
5877+ " daddu %0, %2 \n"
5878+#endif
5879+ " scd %0, %1 \n"
5880+ " beqz %0, 1b \n"
5881+#ifdef CONFIG_PAX_REFCOUNT
5882+ "3: \n"
5883+ _ASM_EXTABLE(2b, 3b)
5884+#endif
5885+ " .set mips0 \n"
5886+ : "=&r" (temp), "+m" (v->counter)
5887+ : "Ir" (i));
5888+ } else {
5889+ unsigned long flags;
5890+
5891+ raw_local_irq_save(flags);
5892+ __asm__ __volatile__(
5893+#ifdef CONFIG_PAX_REFCOUNT
5894+ /* Exception on overflow. */
5895+ "1: dadd %0, %1 \n"
5896+ "2: \n"
5897+ _ASM_EXTABLE(1b, 2b)
5898+#else
5899+ " daddu %0, %1 \n"
5900+#endif
5901+ : "+r" (v->counter) : "Ir" (i));
5902+ raw_local_irq_restore(flags);
5903+ }
5904+}
5905+static __inline__ void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
5906 {
5907 if (kernel_uses_llsc && R10000_LLSC_WAR) {
5908 long temp;
5909@@ -457,7 +878,67 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
5910 *
5911 * Atomically subtracts @i from @v.
5912 */
5913-static __inline__ void atomic64_sub(long i, atomic64_t * v)
5914+static __inline__ void atomic64_sub(long i, atomic64_t *v)
5915+{
5916+ long temp;
5917+
5918+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
5919+ __asm__ __volatile__(
5920+ " .set mips3 \n"
5921+ "1: lld %0, %1 # atomic64_sub \n"
5922+#ifdef CONFIG_PAX_REFCOUNT
5923+ /* Exception on overflow. */
5924+ "2: dsub %0, %2 \n"
5925+#else
5926+ " dsubu %0, %2 \n"
5927+#endif
5928+ " scd %0, %1 \n"
5929+ " beqzl %0, 1b \n"
5930+#ifdef CONFIG_PAX_REFCOUNT
5931+ "3: \n"
5932+ _ASM_EXTABLE(2b, 3b)
5933+#endif
5934+ " .set mips0 \n"
5935+ : "=&r" (temp), "+m" (v->counter)
5936+ : "Ir" (i));
5937+ } else if (kernel_uses_llsc) {
5938+ __asm__ __volatile__(
5939+ " .set mips3 \n"
5940+ "1: lld %0, %1 # atomic64_sub \n"
5941+#ifdef CONFIG_PAX_REFCOUNT
5942+ /* Exception on overflow. */
5943+ "2: dsub %0, %2 \n"
5944+#else
5945+ " dsubu %0, %2 \n"
5946+#endif
5947+ " scd %0, %1 \n"
5948+ " beqz %0, 1b \n"
5949+#ifdef CONFIG_PAX_REFCOUNT
5950+ "3: \n"
5951+ _ASM_EXTABLE(2b, 3b)
5952+#endif
5953+ " .set mips0 \n"
5954+ : "=&r" (temp), "+m" (v->counter)
5955+ : "Ir" (i));
5956+ } else {
5957+ unsigned long flags;
5958+
5959+ raw_local_irq_save(flags);
5960+ __asm__ __volatile__(
5961+#ifdef CONFIG_PAX_REFCOUNT
5962+ /* Exception on overflow. */
5963+ "1: dsub %0, %1 \n"
5964+ "2: \n"
5965+ _ASM_EXTABLE(1b, 2b)
5966+#else
5967+ " dsubu %0, %1 \n"
5968+#endif
5969+ : "+r" (v->counter) : "Ir" (i));
5970+ raw_local_irq_restore(flags);
5971+ }
5972+}
5973+
5974+static __inline__ void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
5975 {
5976 if (kernel_uses_llsc && R10000_LLSC_WAR) {
5977 long temp;
5978@@ -496,7 +977,93 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
5979 /*
5980 * Same as above, but return the result value
5981 */
5982-static __inline__ long atomic64_add_return(long i, atomic64_t * v)
5983+static __inline__ long atomic64_add_return(long i, atomic64_t *v)
5984+{
5985+ long result;
5986+ long temp;
5987+
5988+ smp_mb__before_llsc();
5989+
5990+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
5991+ __asm__ __volatile__(
5992+ " .set mips3 \n"
5993+ "1: lld %1, %2 # atomic64_add_return \n"
5994+#ifdef CONFIG_PAX_REFCOUNT
5995+ "2: dadd %0, %1, %3 \n"
5996+#else
5997+ " daddu %0, %1, %3 \n"
5998+#endif
5999+ " scd %0, %2 \n"
6000+ " beqzl %0, 1b \n"
6001+#ifdef CONFIG_PAX_REFCOUNT
6002+ " b 4f \n"
6003+ " .set noreorder \n"
6004+ "3: b 5f \n"
6005+ " move %0, %1 \n"
6006+ " .set reorder \n"
6007+ _ASM_EXTABLE(2b, 3b)
6008+#endif
6009+ "4: daddu %0, %1, %3 \n"
6010+#ifdef CONFIG_PAX_REFCOUNT
6011+ "5: \n"
6012+#endif
6013+ " .set mips0 \n"
6014+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
6015+ : "Ir" (i));
6016+ } else if (kernel_uses_llsc) {
6017+ __asm__ __volatile__(
6018+ " .set mips3 \n"
6019+ "1: lld %1, %2 # atomic64_add_return \n"
6020+#ifdef CONFIG_PAX_REFCOUNT
6021+ "2: dadd %0, %1, %3 \n"
6022+#else
6023+ " daddu %0, %1, %3 \n"
6024+#endif
6025+ " scd %0, %2 \n"
6026+ " bnez %0, 4f \n"
6027+ " b 1b \n"
6028+#ifdef CONFIG_PAX_REFCOUNT
6029+ " .set noreorder \n"
6030+ "3: b 5f \n"
6031+ " move %0, %1 \n"
6032+ " .set reorder \n"
6033+ _ASM_EXTABLE(2b, 3b)
6034+#endif
6035+ "4: daddu %0, %1, %3 \n"
6036+#ifdef CONFIG_PAX_REFCOUNT
6037+ "5: \n"
6038+#endif
6039+ " .set mips0 \n"
6040+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6041+ : "Ir" (i), "m" (v->counter)
6042+ : "memory");
6043+ } else {
6044+ unsigned long flags;
6045+
6046+ raw_local_irq_save(flags);
6047+ __asm__ __volatile__(
6048+ " ld %0, %1 \n"
6049+#ifdef CONFIG_PAX_REFCOUNT
6050+ /* Exception on overflow. */
6051+ "1: dadd %0, %2 \n"
6052+#else
6053+ " daddu %0, %2 \n"
6054+#endif
6055+ " sd %0, %1 \n"
6056+#ifdef CONFIG_PAX_REFCOUNT
6057+ /* Note: Dest reg is not modified on overflow */
6058+ "2: \n"
6059+ _ASM_EXTABLE(1b, 2b)
6060+#endif
6061+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
6062+ raw_local_irq_restore(flags);
6063+ }
6064+
6065+ smp_llsc_mb();
6066+
6067+ return result;
6068+}
6069+static __inline__ long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
6070 {
6071 long result;
6072
6073@@ -546,7 +1113,97 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
6074 return result;
6075 }
6076
6077-static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
6078+static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
6079+{
6080+ long result;
6081+ long temp;
6082+
6083+ smp_mb__before_llsc();
6084+
6085+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6088+ __asm__ __volatile__(
6089+ " .set mips3 \n"
6090+ "1: lld %1, %2 # atomic64_sub_return \n"
6091+#ifdef CONFIG_PAX_REFCOUNT
6092+ "2: dsub %0, %1, %3 \n"
6093+#else
6094+ " dsubu %0, %1, %3 \n"
6095+#endif
6096+ " scd %0, %2 \n"
6097+ " beqzl %0, 1b \n"
6098+#ifdef CONFIG_PAX_REFCOUNT
6099+ " b 4f \n"
6100+ " .set noreorder \n"
6101+ "3: b 5f \n"
6102+ " move %0, %1 \n"
6103+ " .set reorder \n"
6104+ _ASM_EXTABLE(2b, 3b)
6105+#endif
6106+ "4: dsubu %0, %1, %3 \n"
6107+#ifdef CONFIG_PAX_REFCOUNT
6108+ "5: \n"
6109+#endif
6110+ " .set mips0 \n"
6111+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6112+ : "Ir" (i), "m" (v->counter)
6113+ : "memory");
6114+ } else if (kernel_uses_llsc) {
6115+ __asm__ __volatile__(
6116+ " .set mips3 \n"
6117+ "1: lld %1, %2 # atomic64_sub_return \n"
6118+#ifdef CONFIG_PAX_REFCOUNT
6119+ "2: dsub %0, %1, %3 \n"
6120+#else
6121+ " dsubu %0, %1, %3 \n"
6122+#endif
6123+ " scd %0, %2 \n"
6124+ " bnez %0, 4f \n"
6125+ " b 1b \n"
6126+#ifdef CONFIG_PAX_REFCOUNT
6127+ " .set noreorder \n"
6128+ "3: b 5f \n"
6129+ " move %0, %1 \n"
6130+ " .set reorder \n"
6131+ _ASM_EXTABLE(2b, 3b)
6132+#endif
6133+ "4: dsubu %0, %1, %3 \n"
6134+#ifdef CONFIG_PAX_REFCOUNT
6135+ "5: \n"
6136+#endif
6137+ " .set mips0 \n"
6138+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6139+ : "Ir" (i), "m" (v->counter)
6140+ : "memory");
6141+ } else {
6142+ unsigned long flags;
6143+
6144+ raw_local_irq_save(flags);
6145+ __asm__ __volatile__(
6146+ " ld %0, %1 \n"
6147+#ifdef CONFIG_PAX_REFCOUNT
6148+ /* Exception on overflow. */
6149+ "1: dsub %0, %2 \n"
6150+#else
6151+ " dsubu %0, %2 \n"
6152+#endif
6153+ " sd %0, %1 \n"
6154+#ifdef CONFIG_PAX_REFCOUNT
6155+ /* Note: Dest reg is not modified on overflow */
6156+ "2: \n"
6157+ _ASM_EXTABLE(1b, 2b)
6158+#endif
6159+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
6160+ raw_local_irq_restore(flags);
6161+ }
6162+
6163+ smp_llsc_mb();
6164+
6165+ return result;
6166+}
6167+
6168+static __inline__ long atomic64_sub_return_unchecked(long i, atomic64_unchecked_t *v)
6169 {
6170 long result;
6171
6172@@ -605,7 +1262,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
6173 * Atomically test @v and subtract @i if @v is greater or equal than @i.
6174 * The function returns the old value of @v minus @i.
6175 */
6176-static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6177+static __inline__ long atomic64_sub_if_positive(long i, atomic64_t *v)
6178 {
6179 long result;
6180
6181@@ -662,9 +1319,26 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6182 return result;
6183 }
6184
6185-#define atomic64_cmpxchg(v, o, n) \
6186- ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
6187-#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
6188+static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6189+{
6190+ return cmpxchg(&v->counter, old, new);
6191+}
6192+
6193+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old,
6194+ long new)
6195+{
6196+ return cmpxchg(&(v->counter), old, new);
6197+}
6198+
6199+static inline long atomic64_xchg(atomic64_t *v, long new)
6200+{
6201+ return xchg(&v->counter, new);
6202+}
6203+
6204+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
6205+{
6206+ return xchg(&(v->counter), new);
6207+}
6208
6209 /**
6210 * atomic64_add_unless - add unless the number is a given value
6211@@ -694,6 +1368,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6212
6213 #define atomic64_dec_return(v) atomic64_sub_return(1, (v))
6214 #define atomic64_inc_return(v) atomic64_add_return(1, (v))
6215+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1, (v))
6216
6217 /*
6218 * atomic64_sub_and_test - subtract value from variable and test result
6219@@ -715,6 +1390,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6220 * other cases.
6221 */
6222 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
6223+#define atomic64_inc_and_test_unchecked(v) (atomic64_add_return_unchecked(1, (v)) == 0)
6224
6225 /*
6226 * atomic64_dec_and_test - decrement by 1 and test
6227@@ -739,6 +1415,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6228 * Atomically increments @v by 1.
6229 */
6230 #define atomic64_inc(v) atomic64_add(1, (v))
6231+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1, (v))
6232
6233 /*
6234 * atomic64_dec - decrement and test
6235@@ -747,6 +1424,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6236 * Atomically decrements @v by 1.
6237 */
6238 #define atomic64_dec(v) atomic64_sub(1, (v))
6239+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1, (v))
6240
6241 /*
6242 * atomic64_add_negative - add and test if negative
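/*
 * The CONFIG_PAX_REFCOUNT variants above swap MIPS "addu"/"subu" (which
 * wrap silently) for "add"/"sub", which trap on signed overflow, with an
 * exception-table entry routing the trap to a report. A rough portable
 * analogue of that semantic, using the GCC/Clang overflow builtins rather
 * than anything the patch itself does:
 */
#include <stdio.h>

static int checked_inc(int *counter)
{
	int next;

	if (__builtin_add_overflow(*counter, 1, &next)) {
		fprintf(stderr, "refcount overflow detected\n");
		return *counter;	/* leave the counter saturated */
	}
	*counter = next;
	return next;
}

int main(void)
{
	int c = 0x7ffffffe;

	checked_inc(&c);	/* 0x7fffffff */
	checked_inc(&c);	/* trips the overflow path, c stays put */
	printf("c = 0x%08x\n", c);
	return 0;
}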
6243diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
6244index b4db69f..8f3b093 100644
6245--- a/arch/mips/include/asm/cache.h
6246+++ b/arch/mips/include/asm/cache.h
6247@@ -9,10 +9,11 @@
6248 #ifndef _ASM_CACHE_H
6249 #define _ASM_CACHE_H
6250
6251+#include <linux/const.h>
6252 #include <kmalloc.h>
6253
6254 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
6255-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6256+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6257
6258 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
6259 #define SMP_CACHE_BYTES L1_CACHE_BYTES
6260diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
6261index cf3ae24..238d22f 100644
6262--- a/arch/mips/include/asm/elf.h
6263+++ b/arch/mips/include/asm/elf.h
6264@@ -372,13 +372,16 @@ extern const char *__elf_platform;
6265 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
6266 #endif
6267
6268+#ifdef CONFIG_PAX_ASLR
6269+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6270+
6271+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6272+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6273+#endif
6274+
6275 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
6276 struct linux_binprm;
6277 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
6278 int uses_interp);
6279
6280-struct mm_struct;
6281-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
6282-#define arch_randomize_brk arch_randomize_brk
6283-
6284 #endif /* _ASM_ELF_H */
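/*
 * The PAX_DELTA_*_LEN values above are entropy widths in pages: a length
 * of N lets the randomized region move within 2^N pages. A quick check of
 * what 27 - PAGE_SHIFT buys on a 4K-page 32-bit MIPS task (arithmetic
 * only; the constants mirror the ones above):
 */
#include <stdio.h>

#define PAGE_SHIFT		12
#define PAX_DELTA_MMAP_LEN	(27 - PAGE_SHIFT)	/* 15 bits of entropy */

int main(void)
{
	unsigned long span = 1UL << (PAX_DELTA_MMAP_LEN + PAGE_SHIFT);

	printf("mmap base can land anywhere in a %lu MiB window\n", span >> 20);
	return 0;	/* prints 128 */
}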
6285diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
6286index c1f6afa..38cc6e9 100644
6287--- a/arch/mips/include/asm/exec.h
6288+++ b/arch/mips/include/asm/exec.h
6289@@ -12,6 +12,6 @@
6290 #ifndef _ASM_EXEC_H
6291 #define _ASM_EXEC_H
6292
6293-extern unsigned long arch_align_stack(unsigned long sp);
6294+#define arch_align_stack(x) ((x) & ~0xfUL)
6295
6296 #endif /* _ASM_EXEC_H */
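/*
 * The replacement macro above pins the stack pointer to a 16-byte boundary
 * and drops the random jitter entirely (grsecurity applies its stack
 * randomization elsewhere). Quick demonstration of the mask:
 */
#include <stdio.h>

#define arch_align_stack(x)	((x) & ~0xfUL)

int main(void)
{
	printf("0x%lx\n", arch_align_stack(0x7fff1234fUL));	/* 0x7fff12340 */
	return 0;
}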
6297diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
6298index d44622c..64990d2 100644
6299--- a/arch/mips/include/asm/local.h
6300+++ b/arch/mips/include/asm/local.h
6301@@ -12,15 +12,25 @@ typedef struct
6302 atomic_long_t a;
6303 } local_t;
6304
6305+typedef struct {
6306+ atomic_long_unchecked_t a;
6307+} local_unchecked_t;
6308+
6309 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
6310
6311 #define local_read(l) atomic_long_read(&(l)->a)
6312+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
6313 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
6314+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
6315
6316 #define local_add(i, l) atomic_long_add((i), (&(l)->a))
6317+#define local_add_unchecked(i, l) atomic_long_add_unchecked((i), (&(l)->a))
6318 #define local_sub(i, l) atomic_long_sub((i), (&(l)->a))
6319+#define local_sub_unchecked(i, l) atomic_long_sub_unchecked((i), (&(l)->a))
6320 #define local_inc(l) atomic_long_inc(&(l)->a)
6321+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
6322 #define local_dec(l) atomic_long_dec(&(l)->a)
6323+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
6324
6325 /*
6326 * Same as above, but return the result value
6327@@ -70,6 +80,51 @@ static __inline__ long local_add_return(long i, local_t * l)
6328 return result;
6329 }
6330
6331+static __inline__ long local_add_return_unchecked(long i, local_unchecked_t * l)
6332+{
6333+ unsigned long result;
6334+
6335+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6336+ unsigned long temp;
6337+
6338+ __asm__ __volatile__(
6339+ " .set mips3 \n"
6340+ "1:" __LL "%1, %2 # local_add_return \n"
6341+ " addu %0, %1, %3 \n"
6342+ __SC "%0, %2 \n"
6343+ " beqzl %0, 1b \n"
6344+ " addu %0, %1, %3 \n"
6345+ " .set mips0 \n"
6346+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6347+ : "Ir" (i), "m" (l->a.counter)
6348+ : "memory");
6349+ } else if (kernel_uses_llsc) {
6350+ unsigned long temp;
6351+
6352+ __asm__ __volatile__(
6353+ " .set mips3 \n"
6354+ "1:" __LL "%1, %2 # local_add_return \n"
6355+ " addu %0, %1, %3 \n"
6356+ __SC "%0, %2 \n"
6357+ " beqz %0, 1b \n"
6358+ " addu %0, %1, %3 \n"
6359+ " .set mips0 \n"
6360+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6361+ : "Ir" (i), "m" (l->a.counter)
6362+ : "memory");
6363+ } else {
6364+ unsigned long flags;
6365+
6366+ local_irq_save(flags);
6367+ result = l->a.counter;
6368+ result += i;
6369+ l->a.counter = result;
6370+ local_irq_restore(flags);
6371+ }
6372+
6373+ return result;
6374+}
6375+
6376 static __inline__ long local_sub_return(long i, local_t * l)
6377 {
6378 unsigned long result;
6379@@ -117,6 +172,8 @@ static __inline__ long local_sub_return(long i, local_t * l)
6380
6381 #define local_cmpxchg(l, o, n) \
6382 ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6383+#define local_cmpxchg_unchecked(l, o, n) \
6384+ ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6385 #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
6386
6387 /**
6388diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
6389index f59552f..3abe9b9 100644
6390--- a/arch/mips/include/asm/page.h
6391+++ b/arch/mips/include/asm/page.h
6392@@ -95,7 +95,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
6393 #ifdef CONFIG_CPU_MIPS32
6394 typedef struct { unsigned long pte_low, pte_high; } pte_t;
6395 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
6396- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
6397+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
6398 #else
6399 typedef struct { unsigned long long pte; } pte_t;
6400 #define pte_val(x) ((x).pte)
6401diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
6402index 881d18b..cea38bc 100644
6403--- a/arch/mips/include/asm/pgalloc.h
6404+++ b/arch/mips/include/asm/pgalloc.h
6405@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6406 {
6407 set_pud(pud, __pud((unsigned long)pmd));
6408 }
6409+
6410+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6411+{
6412+ pud_populate(mm, pud, pmd);
6413+}
6414 #endif
6415
6416 /*
6417diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
6418index 895320e..bf63e10 100644
6419--- a/arch/mips/include/asm/thread_info.h
6420+++ b/arch/mips/include/asm/thread_info.h
6421@@ -115,6 +115,8 @@ static inline struct thread_info *current_thread_info(void)
6422 #define TIF_32BIT_ADDR 23 /* 32-bit address space (o32/n32) */
6423 #define TIF_FPUBOUND 24 /* thread bound to FPU-full CPU set */
6424 #define TIF_LOAD_WATCH 25 /* If set, load watch registers */
6425+/* li takes a 32bit immediate */
6426+#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
6427 #define TIF_SYSCALL_TRACE 31 /* syscall trace active */
6428
6429 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
6430@@ -130,15 +132,18 @@ static inline struct thread_info *current_thread_info(void)
6431 #define _TIF_32BIT_ADDR (1<<TIF_32BIT_ADDR)
6432 #define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
6433 #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
6434+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
6435+
6436+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
6437
6438 /* work to do in syscall_trace_leave() */
6439-#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
6440+#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
6441
6442 /* work to do on interrupt/exception return */
6443 #define _TIF_WORK_MASK \
6444 (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_NOTIFY_RESUME)
6445 /* work to do on any return to u-space */
6446-#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK | _TIF_WORK_SYSCALL_EXIT)
6447+#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK | _TIF_WORK_SYSCALL_EXIT | _TIF_GRSEC_SETXID)
6448
6449 #endif /* __KERNEL__ */
6450
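/*
 * How the _TIF_* masks above derive from the TIF_* bit numbers, and why
 * the "li takes a 32bit immediate" note matters: TIF_GRSEC_SETXID had to
 * stay below bit 32 so the combined work mask still fits the immediate the
 * scall*.S stubs load with li. Sketch with just two of the flags:
 */
#include <stdio.h>

#define TIF_GRSEC_SETXID	29
#define TIF_SYSCALL_TRACE	31

#define _TIF_GRSEC_SETXID	(1U << TIF_GRSEC_SETXID)
#define _TIF_SYSCALL_TRACE	(1U << TIF_SYSCALL_TRACE)

int main(void)
{
	unsigned int work = _TIF_SYSCALL_TRACE | _TIF_GRSEC_SETXID;

	printf("work mask = 0x%08x\n", work);	/* 0xa0000000, fits 32 bits */
	return 0;
}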
6451diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
6452index 1188e00..41cf144 100644
6453--- a/arch/mips/kernel/binfmt_elfn32.c
6454+++ b/arch/mips/kernel/binfmt_elfn32.c
6455@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
6456 #undef ELF_ET_DYN_BASE
6457 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
6458
6459+#ifdef CONFIG_PAX_ASLR
6460+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6461+
6462+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6463+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6464+#endif
6465+
6466 #include <asm/processor.h>
6467 #include <linux/module.h>
6468 #include <linux/elfcore.h>
6469diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
6470index 202e581..689ca79 100644
6471--- a/arch/mips/kernel/binfmt_elfo32.c
6472+++ b/arch/mips/kernel/binfmt_elfo32.c
6473@@ -56,6 +56,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
6474 #undef ELF_ET_DYN_BASE
6475 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
6476
6477+#ifdef CONFIG_PAX_ASLR
6478+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6479+
6480+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6481+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6482+#endif
6483+
6484 #include <asm/processor.h>
6485
6486 /*
6487diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
6488index c6a041d..b3e7318 100644
6489--- a/arch/mips/kernel/process.c
6490+++ b/arch/mips/kernel/process.c
6491@@ -563,15 +563,3 @@ unsigned long get_wchan(struct task_struct *task)
6492 out:
6493 return pc;
6494 }
6495-
6496-/*
6497- * Don't forget that the stack pointer must be aligned on a 8 bytes
6498- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
6499- */
6500-unsigned long arch_align_stack(unsigned long sp)
6501-{
6502- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6503- sp -= get_random_int() & ~PAGE_MASK;
6504-
6505- return sp & ALMASK;
6506-}
6507diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
6508index 9c6299c..2fb4c22 100644
6509--- a/arch/mips/kernel/ptrace.c
6510+++ b/arch/mips/kernel/ptrace.c
6511@@ -528,6 +528,10 @@ static inline int audit_arch(void)
6512 return arch;
6513 }
6514
6515+#ifdef CONFIG_GRKERNSEC_SETXID
6516+extern void gr_delayed_cred_worker(void);
6517+#endif
6518+
6519 /*
6520 * Notification of system call entry/exit
6521 * - triggered by current->work.syscall_trace
6522@@ -537,6 +541,11 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
6523 /* do the secure computing check first */
6524 secure_computing_strict(regs->regs[2]);
6525
6526+#ifdef CONFIG_GRKERNSEC_SETXID
6527+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
6528+ gr_delayed_cred_worker();
6529+#endif
6530+
6531 if (!(current->ptrace & PT_PTRACED))
6532 goto out;
6533
6534diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
6535index 9b36424..e7f4154 100644
6536--- a/arch/mips/kernel/scall32-o32.S
6537+++ b/arch/mips/kernel/scall32-o32.S
6538@@ -52,7 +52,7 @@ NESTED(handle_sys, PT_SIZE, sp)
6539
6540 stack_done:
6541 lw t0, TI_FLAGS($28) # syscall tracing enabled?
6542- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
6543+ li t1, _TIF_SYSCALL_WORK
6544 and t0, t1
6545 bnez t0, syscall_trace_entry # -> yes
6546
6547diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
6548index 97a5909..59622f8 100644
6549--- a/arch/mips/kernel/scall64-64.S
6550+++ b/arch/mips/kernel/scall64-64.S
6551@@ -54,7 +54,7 @@ NESTED(handle_sys64, PT_SIZE, sp)
6552
6553 sd a3, PT_R26(sp) # save a3 for syscall restarting
6554
6555- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
6556+ li t1, _TIF_SYSCALL_WORK
6557 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
6558 and t0, t1, t0
6559 bnez t0, syscall_trace_entry
6560diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
6561index edcb659..fb2ab09 100644
6562--- a/arch/mips/kernel/scall64-n32.S
6563+++ b/arch/mips/kernel/scall64-n32.S
6564@@ -47,7 +47,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
6565
6566 sd a3, PT_R26(sp) # save a3 for syscall restarting
6567
6568- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
6569+ li t1, _TIF_SYSCALL_WORK
6570 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
6571 and t0, t1, t0
6572 bnez t0, n32_syscall_trace_entry
6573diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
6574index 74f485d..47d2c38 100644
6575--- a/arch/mips/kernel/scall64-o32.S
6576+++ b/arch/mips/kernel/scall64-o32.S
6577@@ -81,7 +81,7 @@ NESTED(handle_sys, PT_SIZE, sp)
6578 PTR 4b, bad_stack
6579 .previous
6580
6581- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
6582+ li t1, _TIF_SYSCALL_WORK
6583 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
6584 and t0, t1, t0
6585 bnez t0, trace_a_syscall
6586diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
6587index a75ae40..0d0f56a 100644
6588--- a/arch/mips/kernel/traps.c
6589+++ b/arch/mips/kernel/traps.c
6590@@ -675,7 +675,17 @@ asmlinkage void do_ov(struct pt_regs *regs)
6591 {
6592 siginfo_t info;
6593
6594- die_if_kernel("Integer overflow", regs);
6595+ if (unlikely(!user_mode(regs))) {
6596+
6597+#ifdef CONFIG_PAX_REFCOUNT
6598+ if (fixup_exception(regs)) {
6599+ pax_report_refcount_overflow(regs);
6600+ return;
6601+ }
6602+#endif
6603+
6604+ die("Integer overflow", regs);
6605+ }
6606
6607 info.si_code = FPE_INTOVF;
6608 info.si_signo = SIGFPE;
6609diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
6610index 0fead53..eeb00a6 100644
6611--- a/arch/mips/mm/fault.c
6612+++ b/arch/mips/mm/fault.c
6613@@ -27,6 +27,23 @@
6614 #include <asm/highmem.h> /* For VMALLOC_END */
6615 #include <linux/kdebug.h>
6616
6617+#ifdef CONFIG_PAX_PAGEEXEC
6618+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6619+{
6620+ unsigned long i;
6621+
6622+ printk(KERN_ERR "PAX: bytes at PC: ");
6623+ for (i = 0; i < 5; i++) {
6624+ unsigned int c;
6625+ if (get_user(c, (unsigned int *)pc+i))
6626+ printk(KERN_CONT "???????? ");
6627+ else
6628+ printk(KERN_CONT "%08x ", c);
6629+ }
6630+ printk("\n");
6631+}
6632+#endif
6633+
6634 /*
6635 * This routine handles page faults. It determines the address,
6636 * and the problem, and then passes it off to one of the appropriate
6637@@ -196,6 +213,14 @@ bad_area:
6638 bad_area_nosemaphore:
6639 /* User mode accesses just cause a SIGSEGV */
6640 if (user_mode(regs)) {
6641+
6642+#ifdef CONFIG_PAX_PAGEEXEC
6643+ if (cpu_has_rixi && (mm->pax_flags & MF_PAX_PAGEEXEC) && !write && address == instruction_pointer(regs)) {
6644+ pax_report_fault(regs, (void *)address, (void *)user_stack_pointer(regs));
6645+ do_group_exit(SIGKILL);
6646+ }
6647+#endif
6648+
6649 tsk->thread.cp0_badvaddr = address;
6650 tsk->thread.error_code = write;
6651 #if 0
6652diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
6653index 7e5fe27..9656513 100644
6654--- a/arch/mips/mm/mmap.c
6655+++ b/arch/mips/mm/mmap.c
6656@@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6657 struct vm_area_struct *vma;
6658 unsigned long addr = addr0;
6659 int do_color_align;
6660+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
6661 struct vm_unmapped_area_info info;
6662
6663 if (unlikely(len > TASK_SIZE))
6664@@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6665 do_color_align = 1;
6666
6667 /* requesting a specific address */
6668+
6669+#ifdef CONFIG_PAX_RANDMMAP
6670+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
6671+#endif
6672+
6673 if (addr) {
6674 if (do_color_align)
6675 addr = COLOUR_ALIGN(addr, pgoff);
6676@@ -91,14 +97,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6677 addr = PAGE_ALIGN(addr);
6678
6679 vma = find_vma(mm, addr);
6680- if (TASK_SIZE - len >= addr &&
6681- (!vma || addr + len <= vma->vm_start))
6682+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
6683 return addr;
6684 }
6685
6686 info.length = len;
6687 info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
6688 info.align_offset = pgoff << PAGE_SHIFT;
6689+ info.threadstack_offset = offset;
6690
6691 if (dir == DOWN) {
6692 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
6693@@ -146,6 +152,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
6694 {
6695 unsigned long random_factor = 0UL;
6696
6697+#ifdef CONFIG_PAX_RANDMMAP
6698+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
6699+#endif
6700+
6701 if (current->flags & PF_RANDOMIZE) {
6702 random_factor = get_random_int();
6703 random_factor = random_factor << PAGE_SHIFT;
6704@@ -157,42 +167,27 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
6705
6706 if (mmap_is_legacy()) {
6707 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
6708+
6709+#ifdef CONFIG_PAX_RANDMMAP
6710+ if (mm->pax_flags & MF_PAX_RANDMMAP)
6711+ mm->mmap_base += mm->delta_mmap;
6712+#endif
6713+
6714 mm->get_unmapped_area = arch_get_unmapped_area;
6715 mm->unmap_area = arch_unmap_area;
6716 } else {
6717 mm->mmap_base = mmap_base(random_factor);
6718+
6719+#ifdef CONFIG_PAX_RANDMMAP
6720+ if (mm->pax_flags & MF_PAX_RANDMMAP)
6721+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
6722+#endif
6723+
6724 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
6725 mm->unmap_area = arch_unmap_area_topdown;
6726 }
6727 }
6728
6729-static inline unsigned long brk_rnd(void)
6730-{
6731- unsigned long rnd = get_random_int();
6732-
6733- rnd = rnd << PAGE_SHIFT;
6734- /* 8MB for 32bit, 256MB for 64bit */
6735- if (TASK_IS_32BIT_ADDR)
6736- rnd = rnd & 0x7ffffful;
6737- else
6738- rnd = rnd & 0xffffffful;
6739-
6740- return rnd;
6741-}
6742-
6743-unsigned long arch_randomize_brk(struct mm_struct *mm)
6744-{
6745- unsigned long base = mm->brk;
6746- unsigned long ret;
6747-
6748- ret = PAGE_ALIGN(base + brk_rnd());
6749-
6750- if (ret < mm->brk)
6751- return mm->brk;
6752-
6753- return ret;
6754-}
6755-
6756 int __virt_addr_valid(const volatile void *kaddr)
6757 {
6758 return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
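/*
 * A rough model of the base selection the hunks above adjust: the stock
 * code adds a page-aligned random factor to TASK_UNMAPPED_BASE, and the
 * added PAX_RANDMMAP branches then shift that base again by per-mm deltas
 * chosen at exec time. All names and constants below are illustrative,
 * not the real MIPS values:
 */
#include <stdio.h>

#define PAGE_SHIFT		12
#define TASK_UNMAPPED_BASE	0x08000000UL

static unsigned long pick_mmap_base(unsigned int rnd, unsigned long delta_mmap)
{
	unsigned long random_factor = (unsigned long)rnd << PAGE_SHIFT;

	return TASK_UNMAPPED_BASE + random_factor + delta_mmap;
}

int main(void)
{
	printf("base = 0x%lx\n", pick_mmap_base(0x123, 0x400000));
	return 0;
}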
6759diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
6760index 967d144..db12197 100644
6761--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
6762+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
6763@@ -11,12 +11,14 @@
6764 #ifndef _ASM_PROC_CACHE_H
6765 #define _ASM_PROC_CACHE_H
6766
6767+#include <linux/const.h>
6768+
6769 /* L1 cache */
6770
6771 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
6772 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
6773-#define L1_CACHE_BYTES 16 /* bytes per entry */
6774 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
6775+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
6776 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
6777
6778 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
6779diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
6780index bcb5df2..84fabd2 100644
6781--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
6782+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
6783@@ -16,13 +16,15 @@
6784 #ifndef _ASM_PROC_CACHE_H
6785 #define _ASM_PROC_CACHE_H
6786
6787+#include <linux/const.h>
6788+
6789 /*
6790 * L1 cache
6791 */
6792 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
6793 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
6794-#define L1_CACHE_BYTES 32 /* bytes per entry */
6795 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
6796+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
6797 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
6798
6799 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
6800diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
6801index 4ce7a01..449202a 100644
6802--- a/arch/openrisc/include/asm/cache.h
6803+++ b/arch/openrisc/include/asm/cache.h
6804@@ -19,11 +19,13 @@
6805 #ifndef __ASM_OPENRISC_CACHE_H
6806 #define __ASM_OPENRISC_CACHE_H
6807
6808+#include <linux/const.h>
6809+
6810 /* FIXME: How can we replace these with values from the CPU...
6811 * they shouldn't be hard-coded!
6812 */
6813
6814-#define L1_CACHE_BYTES 16
6815 #define L1_CACHE_SHIFT 4
6816+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6817
6818 #endif /* __ASM_OPENRISC_CACHE_H */
6819diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
6820index 472886c..00e7df9 100644
6821--- a/arch/parisc/include/asm/atomic.h
6822+++ b/arch/parisc/include/asm/atomic.h
6823@@ -252,6 +252,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
6824 return dec;
6825 }
6826
6827+#define atomic64_read_unchecked(v) atomic64_read(v)
6828+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
6829+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
6830+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
6831+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
6832+#define atomic64_inc_unchecked(v) atomic64_inc(v)
6833+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
6834+#define atomic64_dec_unchecked(v) atomic64_dec(v)
6835+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
6836+
6837 #endif /* !CONFIG_64BIT */
6838
6839
6840diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
6841index 47f11c7..3420df2 100644
6842--- a/arch/parisc/include/asm/cache.h
6843+++ b/arch/parisc/include/asm/cache.h
6844@@ -5,6 +5,7 @@
6845 #ifndef __ARCH_PARISC_CACHE_H
6846 #define __ARCH_PARISC_CACHE_H
6847
6848+#include <linux/const.h>
6849
6850 /*
6851 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
6852@@ -15,13 +16,13 @@
6853 * just ruin performance.
6854 */
6855 #ifdef CONFIG_PA20
6856-#define L1_CACHE_BYTES 64
6857 #define L1_CACHE_SHIFT 6
6858 #else
6859-#define L1_CACHE_BYTES 32
6860 #define L1_CACHE_SHIFT 5
6861 #endif
6862
6863+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6864+
6865 #ifndef __ASSEMBLY__
6866
6867 #define SMP_CACHE_BYTES L1_CACHE_BYTES
6868diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
6869index ad2b503..bdf1651 100644
6870--- a/arch/parisc/include/asm/elf.h
6871+++ b/arch/parisc/include/asm/elf.h
6872@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
6873
6874 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
6875
6876+#ifdef CONFIG_PAX_ASLR
6877+#define PAX_ELF_ET_DYN_BASE 0x10000UL
6878+
6879+#define PAX_DELTA_MMAP_LEN 16
6880+#define PAX_DELTA_STACK_LEN 16
6881+#endif
6882+
6883 /* This yields a mask that user programs can use to figure out what
6884 instruction set this CPU supports. This could be done in user space,
6885 but it's not easy, and we've already done it here. */
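
PAX_ELF_ET_DYN_BASE sets the unrandomized load base for ET_DYN (PIE) binaries, and PAX_DELTA_MMAP_LEN/PAX_DELTA_STACK_LEN give the number of random page bits ASLR folds into the mmap and stack bases: 16 bits of 4 KiB pages here, a 256 MiB window. A sketch of how such deltas are typically derived at exec time, following the PaX convention (pax_get_random_long() is assumed from elsewhere in this patch):

#ifdef CONFIG_PAX_RANDMMAP
        /* Sketch: mask off PAX_DELTA_*_LEN random bits, scale to pages. */
        if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
                current->mm->delta_mmap = (pax_get_random_long() &
                        ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
                current->mm->delta_stack = (pax_get_random_long() &
                        ((1UL << PAX_DELTA_STACK_LEN) - 1)) << PAGE_SHIFT;
        }
#endif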
6886diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
6887index fc987a1..6e068ef 100644
6888--- a/arch/parisc/include/asm/pgalloc.h
6889+++ b/arch/parisc/include/asm/pgalloc.h
6890@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
6891 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
6892 }
6893
6894+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
6895+{
6896+ pgd_populate(mm, pgd, pmd);
6897+}
6898+
6899 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
6900 {
6901 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
6902@@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
6903 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
6904 #define pmd_free(mm, x) do { } while (0)
6905 #define pgd_populate(mm, pmd, pte) BUG()
6906+#define pgd_populate_kernel(mm, pmd, pte) BUG()
6907
6908 #endif
6909
6910diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
6911index 1e40d7f..a3eb445 100644
6912--- a/arch/parisc/include/asm/pgtable.h
6913+++ b/arch/parisc/include/asm/pgtable.h
6914@@ -223,6 +223,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
6915 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
6916 #define PAGE_COPY PAGE_EXECREAD
6917 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
6918+
6919+#ifdef CONFIG_PAX_PAGEEXEC
6920+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
6921+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
6922+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
6923+#else
6924+# define PAGE_SHARED_NOEXEC PAGE_SHARED
6925+# define PAGE_COPY_NOEXEC PAGE_COPY
6926+# define PAGE_READONLY_NOEXEC PAGE_READONLY
6927+#endif
6928+
6929 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
6930 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
6931 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
6932diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
6933index e0a8235..ce2f1e1 100644
6934--- a/arch/parisc/include/asm/uaccess.h
6935+++ b/arch/parisc/include/asm/uaccess.h
6936@@ -245,10 +245,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
6937 const void __user *from,
6938 unsigned long n)
6939 {
6940- int sz = __compiletime_object_size(to);
6941+ size_t sz = __compiletime_object_size(to);
6942 int ret = -EFAULT;
6943
6944- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
6945+ if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
6946 ret = __copy_from_user(to, from, n);
6947 else
6948 copy_from_user_overflow();
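
The int-to-size_t change above matters because __compiletime_object_size() reports "size unknown" as (size_t)-1; squeezing that through a plain int relies on implementation-defined narrowing and a mixed-signedness comparison, so the patch makes the sentinel explicit. A runnable userspace sketch of the underlying builtin (build with gcc -O2):

#include <stdio.h>
#include <sys/types.h>

/* __compiletime_object_size() wraps __builtin_object_size(), which
 * evaluates to (size_t)-1 when the size cannot be determined at
 * compile time, hence the (size_t)-1 sentinel in the patched check. */
static void check(size_t sz, size_t n)
{
        if (sz == (size_t)-1 || sz >= n)
                printf("copy of %zu bytes allowed (compile-time size %zd)\n",
                       n, (ssize_t)sz);
        else
                printf("copy of %zu bytes rejected (object is only %zu)\n",
                       n, sz);
}

__attribute__((noinline))
static size_t opaque_size(char *p)
{
        return __builtin_object_size(p, 0);  /* provenance hidden: (size_t)-1 */
}

int main(void)
{
        char buf[16];

        check(__builtin_object_size(buf, 0), 8);   /* 16 >= 8: allowed  */
        check(__builtin_object_size(buf, 0), 32);  /* 16 < 32: rejected */
        check(opaque_size(buf), 1 << 20);          /* unknown: allowed  */
        return 0;
}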
6949diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
6950index 2a625fb..9908930 100644
6951--- a/arch/parisc/kernel/module.c
6952+++ b/arch/parisc/kernel/module.c
6953@@ -98,16 +98,38 @@
6954
6955 /* three functions to determine where in the module core
6956 * or init pieces the location is */
6957+static inline int in_init_rx(struct module *me, void *loc)
6958+{
6959+ return (loc >= me->module_init_rx &&
6960+ loc < (me->module_init_rx + me->init_size_rx));
6961+}
6962+
6963+static inline int in_init_rw(struct module *me, void *loc)
6964+{
6965+ return (loc >= me->module_init_rw &&
6966+ loc < (me->module_init_rw + me->init_size_rw));
6967+}
6968+
6969 static inline int in_init(struct module *me, void *loc)
6970 {
6971- return (loc >= me->module_init &&
6972- loc <= (me->module_init + me->init_size));
6973+ return in_init_rx(me, loc) || in_init_rw(me, loc);
6974+}
6975+
6976+static inline int in_core_rx(struct module *me, void *loc)
6977+{
6978+ return (loc >= me->module_core_rx &&
6979+ loc < (me->module_core_rx + me->core_size_rx));
6980+}
6981+
6982+static inline int in_core_rw(struct module *me, void *loc)
6983+{
6984+ return (loc >= me->module_core_rw &&
6985+ loc < (me->module_core_rw + me->core_size_rw));
6986 }
6987
6988 static inline int in_core(struct module *me, void *loc)
6989 {
6990- return (loc >= me->module_core &&
6991- loc <= (me->module_core + me->core_size));
6992+ return in_core_rx(me, loc) || in_core_rw(me, loc);
6993 }
6994
6995 static inline int in_local(struct module *me, void *loc)
6996@@ -371,13 +393,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
6997 }
6998
6999 /* align things a bit */
7000- me->core_size = ALIGN(me->core_size, 16);
7001- me->arch.got_offset = me->core_size;
7002- me->core_size += gots * sizeof(struct got_entry);
7003+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
7004+ me->arch.got_offset = me->core_size_rw;
7005+ me->core_size_rw += gots * sizeof(struct got_entry);
7006
7007- me->core_size = ALIGN(me->core_size, 16);
7008- me->arch.fdesc_offset = me->core_size;
7009- me->core_size += fdescs * sizeof(Elf_Fdesc);
7010+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
7011+ me->arch.fdesc_offset = me->core_size_rw;
7012+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
7013
7014 me->arch.got_max = gots;
7015 me->arch.fdesc_max = fdescs;
7016@@ -395,7 +417,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7017
7018 BUG_ON(value == 0);
7019
7020- got = me->module_core + me->arch.got_offset;
7021+ got = me->module_core_rw + me->arch.got_offset;
7022 for (i = 0; got[i].addr; i++)
7023 if (got[i].addr == value)
7024 goto out;
7025@@ -413,7 +435,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7026 #ifdef CONFIG_64BIT
7027 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7028 {
7029- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
7030+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
7031
7032 if (!value) {
7033 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
7034@@ -431,7 +453,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7035
7036 /* Create new one */
7037 fdesc->addr = value;
7038- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7039+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7040 return (Elf_Addr)fdesc;
7041 }
7042 #endif /* CONFIG_64BIT */
7043@@ -843,7 +865,7 @@ register_unwind_table(struct module *me,
7044
7045 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
7046 end = table + sechdrs[me->arch.unwind_section].sh_size;
7047- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7048+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7049
7050 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
7051 me->arch.unwind_section, table, end, gp);
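
Under PaX KERNEXEC a module is backed by two disjoint allocations, one read-execute and one read-write, so the old single-range membership tests become unions of two range checks; the patch also tightens the upper bound from <= to <, which had wrongly admitted the first byte past the region. A self-contained sketch of the resulting predicate (struct name hypothetical, field names mirror the hunk):

#include <stddef.h>

/* Hypothetical container for the split layout. */
struct mod_layout {
        void *core_rx; size_t core_size_rx;  /* read-execute mapping */
        void *core_rw; size_t core_size_rw;  /* read-write mapping   */
};

static int in_range(const void *base, size_t size, const void *p)
{
        const char *b = base, *c = p;
        /* half-open [base, base + size) */
        return c >= b && c < b + size;
}

static int in_core(const struct mod_layout *m, const void *p)
{
        return in_range(m->core_rx, m->core_size_rx, p) ||
               in_range(m->core_rw, m->core_size_rw, p);
}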
7052diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
7053index 5dfd248..64914ac 100644
7054--- a/arch/parisc/kernel/sys_parisc.c
7055+++ b/arch/parisc/kernel/sys_parisc.c
7056@@ -33,9 +33,11 @@
7057 #include <linux/utsname.h>
7058 #include <linux/personality.h>
7059
7060-static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
7061+static unsigned long get_unshared_area(struct file *filp, unsigned long addr, unsigned long len,
7062+ unsigned long flags)
7063 {
7064 struct vm_unmapped_area_info info;
7065+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
7066
7067 info.flags = 0;
7068 info.length = len;
7069@@ -43,6 +45,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
7070 info.high_limit = TASK_SIZE;
7071 info.align_mask = 0;
7072 info.align_offset = 0;
7073+ info.threadstack_offset = offset;
7074 return vm_unmapped_area(&info);
7075 }
7076
7077@@ -61,10 +64,11 @@ static int get_offset(struct address_space *mapping)
7078 return (unsigned long) mapping >> 8;
7079 }
7080
7081-static unsigned long get_shared_area(struct address_space *mapping,
7082- unsigned long addr, unsigned long len, unsigned long pgoff)
7083+static unsigned long get_shared_area(struct file *filp, struct address_space *mapping,
7084+ unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
7085 {
7086 struct vm_unmapped_area_info info;
7087+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
7088
7089 info.flags = 0;
7090 info.length = len;
7091@@ -72,6 +76,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
7092 info.high_limit = TASK_SIZE;
7093 info.align_mask = PAGE_MASK & (SHMLBA - 1);
7094 info.align_offset = (get_offset(mapping) + pgoff) << PAGE_SHIFT;
7095+ info.threadstack_offset = offset;
7096 return vm_unmapped_area(&info);
7097 }
7098
7099@@ -86,15 +91,22 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7100 return -EINVAL;
7101 return addr;
7102 }
7103- if (!addr)
7104+ if (!addr) {
7105 addr = TASK_UNMAPPED_BASE;
7106
7107+#ifdef CONFIG_PAX_RANDMMAP
7108+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
7109+ addr += current->mm->delta_mmap;
7110+#endif
7111+
7112+ }
7113+
7114 if (filp) {
7115- addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
7116+ addr = get_shared_area(filp, filp->f_mapping, addr, len, pgoff, flags);
7117 } else if(flags & MAP_SHARED) {
7118- addr = get_shared_area(NULL, addr, len, pgoff);
7119+ addr = get_shared_area(filp, NULL, addr, len, pgoff, flags);
7120 } else {
7121- addr = get_unshared_area(addr, len);
7122+ addr = get_unshared_area(filp, addr, len, flags);
7123 }
7124 return addr;
7125 }
7126diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
7127index 04e47c6..7a8faf6 100644
7128--- a/arch/parisc/kernel/traps.c
7129+++ b/arch/parisc/kernel/traps.c
7130@@ -727,9 +727,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
7131
7132 down_read(&current->mm->mmap_sem);
7133 vma = find_vma(current->mm,regs->iaoq[0]);
7134- if (vma && (regs->iaoq[0] >= vma->vm_start)
7135- && (vma->vm_flags & VM_EXEC)) {
7136-
7137+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
7138 fault_address = regs->iaoq[0];
7139 fault_space = regs->iasq[0];
7140
7141diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
7142index f247a34..dc0f219 100644
7143--- a/arch/parisc/mm/fault.c
7144+++ b/arch/parisc/mm/fault.c
7145@@ -15,6 +15,7 @@
7146 #include <linux/sched.h>
7147 #include <linux/interrupt.h>
7148 #include <linux/module.h>
7149+#include <linux/unistd.h>
7150
7151 #include <asm/uaccess.h>
7152 #include <asm/traps.h>
7153@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
7154 static unsigned long
7155 parisc_acctyp(unsigned long code, unsigned int inst)
7156 {
7157- if (code == 6 || code == 16)
7158+ if (code == 6 || code == 7 || code == 16)
7159 return VM_EXEC;
7160
7161 switch (inst & 0xf0000000) {
7162@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
7163 }
7164 #endif
7165
7166+#ifdef CONFIG_PAX_PAGEEXEC
7167+/*
7168+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
7169+ *
7170+ * returns 1 when task should be killed
7171+ * 2 when rt_sigreturn trampoline was detected
7172+ * 3 when unpatched PLT trampoline was detected
7173+ */
7174+static int pax_handle_fetch_fault(struct pt_regs *regs)
7175+{
7176+
7177+#ifdef CONFIG_PAX_EMUPLT
7178+ int err;
7179+
7180+ do { /* PaX: unpatched PLT emulation */
7181+ unsigned int bl, depwi;
7182+
7183+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
7184+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
7185+
7186+ if (err)
7187+ break;
7188+
7189+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
7190+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
7191+
7192+ err = get_user(ldw, (unsigned int *)addr);
7193+ err |= get_user(bv, (unsigned int *)(addr+4));
7194+ err |= get_user(ldw2, (unsigned int *)(addr+8));
7195+
7196+ if (err)
7197+ break;
7198+
7199+ if (ldw == 0x0E801096U &&
7200+ bv == 0xEAC0C000U &&
7201+ ldw2 == 0x0E881095U)
7202+ {
7203+ unsigned int resolver, map;
7204+
7205+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
7206+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
7207+ if (err)
7208+ break;
7209+
7210+ regs->gr[20] = instruction_pointer(regs)+8;
7211+ regs->gr[21] = map;
7212+ regs->gr[22] = resolver;
7213+ regs->iaoq[0] = resolver | 3UL;
7214+ regs->iaoq[1] = regs->iaoq[0] + 4;
7215+ return 3;
7216+ }
7217+ }
7218+ } while (0);
7219+#endif
7220+
7221+#ifdef CONFIG_PAX_EMUTRAMP
7222+
7223+#ifndef CONFIG_PAX_EMUSIGRT
7224+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
7225+ return 1;
7226+#endif
7227+
7228+ do { /* PaX: rt_sigreturn emulation */
7229+ unsigned int ldi1, ldi2, bel, nop;
7230+
7231+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
7232+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
7233+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
7234+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
7235+
7236+ if (err)
7237+ break;
7238+
7239+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
7240+ ldi2 == 0x3414015AU &&
7241+ bel == 0xE4008200U &&
7242+ nop == 0x08000240U)
7243+ {
7244+ regs->gr[25] = (ldi1 & 2) >> 1;
7245+ regs->gr[20] = __NR_rt_sigreturn;
7246+ regs->gr[31] = regs->iaoq[1] + 16;
7247+ regs->sr[0] = regs->iasq[1];
7248+ regs->iaoq[0] = 0x100UL;
7249+ regs->iaoq[1] = regs->iaoq[0] + 4;
7250+ regs->iasq[0] = regs->sr[2];
7251+ regs->iasq[1] = regs->sr[2];
7252+ return 2;
7253+ }
7254+ } while (0);
7255+#endif
7256+
7257+ return 1;
7258+}
7259+
7260+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7261+{
7262+ unsigned long i;
7263+
7264+ printk(KERN_ERR "PAX: bytes at PC: ");
7265+ for (i = 0; i < 5; i++) {
7266+ unsigned int c;
7267+ if (get_user(c, (unsigned int *)pc+i))
7268+ printk(KERN_CONT "???????? ");
7269+ else
7270+ printk(KERN_CONT "%08x ", c);
7271+ }
7272+ printk("\n");
7273+}
7274+#endif
7275+
7276 int fixup_exception(struct pt_regs *regs)
7277 {
7278 const struct exception_table_entry *fix;
7279@@ -194,8 +305,33 @@ good_area:
7280
7281 acc_type = parisc_acctyp(code,regs->iir);
7282
7283- if ((vma->vm_flags & acc_type) != acc_type)
7284+ if ((vma->vm_flags & acc_type) != acc_type) {
7285+
7286+#ifdef CONFIG_PAX_PAGEEXEC
7287+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
7288+ (address & ~3UL) == instruction_pointer(regs))
7289+ {
7290+ up_read(&mm->mmap_sem);
7291+ switch (pax_handle_fetch_fault(regs)) {
7292+
7293+#ifdef CONFIG_PAX_EMUPLT
7294+ case 3:
7295+ return;
7296+#endif
7297+
7298+#ifdef CONFIG_PAX_EMUTRAMP
7299+ case 2:
7300+ return;
7301+#endif
7302+
7303+ }
7304+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
7305+ do_group_exit(SIGKILL);
7306+ }
7307+#endif
7308+
7309 goto bad_area;
7310+ }
7311
7312 /*
7313 * If for any reason at all we couldn't handle the fault, make
7314diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
7315index e3b1d41..8e81edf 100644
7316--- a/arch/powerpc/include/asm/atomic.h
7317+++ b/arch/powerpc/include/asm/atomic.h
7318@@ -523,6 +523,16 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
7319 return t1;
7320 }
7321
7322+#define atomic64_read_unchecked(v) atomic64_read(v)
7323+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7324+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7325+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7326+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7327+#define atomic64_inc_unchecked(v) atomic64_inc(v)
7328+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7329+#define atomic64_dec_unchecked(v) atomic64_dec(v)
7330+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7331+
7332 #endif /* __powerpc64__ */
7333
7334 #endif /* __KERNEL__ */
7335diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
7336index 9e495c9..b6878e5 100644
7337--- a/arch/powerpc/include/asm/cache.h
7338+++ b/arch/powerpc/include/asm/cache.h
7339@@ -3,6 +3,7 @@
7340
7341 #ifdef __KERNEL__
7342
7343+#include <linux/const.h>
7344
7345 /* bytes per L1 cache line */
7346 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
7347@@ -22,7 +23,7 @@
7348 #define L1_CACHE_SHIFT 7
7349 #endif
7350
7351-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7352+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7353
7354 #define SMP_CACHE_BYTES L1_CACHE_BYTES
7355
7356diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
7357index cc0655a..13eac2e 100644
7358--- a/arch/powerpc/include/asm/elf.h
7359+++ b/arch/powerpc/include/asm/elf.h
7360@@ -28,8 +28,19 @@
7361 the loader. We need to make sure that it is out of the way of the program
7362 that it will "exec", and that there is sufficient room for the brk. */
7363
7364-extern unsigned long randomize_et_dyn(unsigned long base);
7365-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
7366+#define ELF_ET_DYN_BASE (0x20000000)
7367+
7368+#ifdef CONFIG_PAX_ASLR
7369+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
7370+
7371+#ifdef __powerpc64__
7372+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
7373+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
7374+#else
7375+#define PAX_DELTA_MMAP_LEN 15
7376+#define PAX_DELTA_STACK_LEN 15
7377+#endif
7378+#endif
7379
7380 /*
7381 * Our registers are always unsigned longs, whether we're a 32 bit
7382@@ -123,10 +134,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
7383 (0x7ff >> (PAGE_SHIFT - 12)) : \
7384 (0x3ffff >> (PAGE_SHIFT - 12)))
7385
7386-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7387-#define arch_randomize_brk arch_randomize_brk
7388-
7389-
7390 #ifdef CONFIG_SPU_BASE
7391 /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
7392 #define NT_SPU 1
7393diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
7394index 8196e9c..d83a9f3 100644
7395--- a/arch/powerpc/include/asm/exec.h
7396+++ b/arch/powerpc/include/asm/exec.h
7397@@ -4,6 +4,6 @@
7398 #ifndef _ASM_POWERPC_EXEC_H
7399 #define _ASM_POWERPC_EXEC_H
7400
7401-extern unsigned long arch_align_stack(unsigned long sp);
7402+#define arch_align_stack(x) ((x) & ~0xfUL)
7403
7404 #endif /* _ASM_POWERPC_EXEC_H */
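
Turning arch_align_stack() into a bare mask drops the small per-exec jitter the C helper used to add (PaX supplies its own, larger stack randomization through delta_stack) and keeps only the ABI-required 16-byte alignment; the mask rounds down. A trivial runnable check:

#include <stdio.h>

int main(void)
{
        unsigned long sp = 0x7ffffffde12bUL;
        /* clearing the low four bits rounds down to a 16-byte boundary */
        printf("%#lx -> %#lx\n", sp, sp & ~0xfUL);  /* ...e12b -> ...e120 */
        return 0;
}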
7405diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
7406index 5acabbd..7ea14fa 100644
7407--- a/arch/powerpc/include/asm/kmap_types.h
7408+++ b/arch/powerpc/include/asm/kmap_types.h
7409@@ -10,7 +10,7 @@
7410 * 2 of the License, or (at your option) any later version.
7411 */
7412
7413-#define KM_TYPE_NR 16
7414+#define KM_TYPE_NR 17
7415
7416 #endif /* __KERNEL__ */
7417 #endif /* _ASM_POWERPC_KMAP_TYPES_H */
7418diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
7419index 8565c25..2865190 100644
7420--- a/arch/powerpc/include/asm/mman.h
7421+++ b/arch/powerpc/include/asm/mman.h
7422@@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
7423 }
7424 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
7425
7426-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
7427+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
7428 {
7429 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
7430 }
7431diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
7432index 988c812..63c7d70 100644
7433--- a/arch/powerpc/include/asm/page.h
7434+++ b/arch/powerpc/include/asm/page.h
7435@@ -220,8 +220,9 @@ extern long long virt_phys_offset;
7436 * and needs to be executable. This means the whole heap ends
7437 * up being executable.
7438 */
7439-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
7440- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
7441+#define VM_DATA_DEFAULT_FLAGS32 \
7442+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
7443+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
7444
7445 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
7446 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
7447@@ -249,6 +250,9 @@ extern long long virt_phys_offset;
7448 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
7449 #endif
7450
7451+#define ktla_ktva(addr) (addr)
7452+#define ktva_ktla(addr) (addr)
7453+
7454 #ifndef CONFIG_PPC_BOOK3S_64
7455 /*
7456 * Use the top bit of the higher-level page table entries to indicate whether
7457diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
7458index 88693ce..ac6f9ab 100644
7459--- a/arch/powerpc/include/asm/page_64.h
7460+++ b/arch/powerpc/include/asm/page_64.h
7461@@ -153,15 +153,18 @@ do { \
7462 * stack by default, so in the absence of a PT_GNU_STACK program header
7463 * we turn execute permission off.
7464 */
7465-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
7466- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
7467+#define VM_STACK_DEFAULT_FLAGS32 \
7468+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
7469+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
7470
7471 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
7472 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
7473
7474+#ifndef CONFIG_PAX_PAGEEXEC
7475 #define VM_STACK_DEFAULT_FLAGS \
7476 (is_32bit_task() ? \
7477 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
7478+#endif
7479
7480 #include <asm-generic/getorder.h>
7481
7482diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
7483index b66ae72..4a378cd 100644
7484--- a/arch/powerpc/include/asm/pgalloc-64.h
7485+++ b/arch/powerpc/include/asm/pgalloc-64.h
7486@@ -53,6 +53,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
7487 #ifndef CONFIG_PPC_64K_PAGES
7488
7489 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
7490+#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
7491
7492 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
7493 {
7494@@ -70,6 +71,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
7495 pud_set(pud, (unsigned long)pmd);
7496 }
7497
7498+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
7499+{
7500+ pud_populate(mm, pud, pmd);
7501+}
7502+
7503 #define pmd_populate(mm, pmd, pte_page) \
7504 pmd_populate_kernel(mm, pmd, page_address(pte_page))
7505 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
7506@@ -171,6 +177,7 @@ extern void __tlb_remove_table(void *_table);
7507 #endif
7508
7509 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
7510+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
7511
7512 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
7513 pte_t *pte)
7514diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
7515index 7aeb955..19f748e 100644
7516--- a/arch/powerpc/include/asm/pgtable.h
7517+++ b/arch/powerpc/include/asm/pgtable.h
7518@@ -2,6 +2,7 @@
7519 #define _ASM_POWERPC_PGTABLE_H
7520 #ifdef __KERNEL__
7521
7522+#include <linux/const.h>
7523 #ifndef __ASSEMBLY__
7524 #include <asm/processor.h> /* For TASK_SIZE */
7525 #include <asm/mmu.h>
7526diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
7527index 4aad413..85d86bf 100644
7528--- a/arch/powerpc/include/asm/pte-hash32.h
7529+++ b/arch/powerpc/include/asm/pte-hash32.h
7530@@ -21,6 +21,7 @@
7531 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
7532 #define _PAGE_USER 0x004 /* usermode access allowed */
7533 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
7534+#define _PAGE_EXEC _PAGE_GUARDED
7535 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
7536 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
7537 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
7538diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
7539index e1fb161..2290d1d 100644
7540--- a/arch/powerpc/include/asm/reg.h
7541+++ b/arch/powerpc/include/asm/reg.h
7542@@ -234,6 +234,7 @@
7543 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
7544 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
7545 #define DSISR_NOHPTE 0x40000000 /* no translation found */
7546+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
7547 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
7548 #define DSISR_ISSTORE 0x02000000 /* access was a store */
7549 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
7550diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
7551index 48cfc85..891382f 100644
7552--- a/arch/powerpc/include/asm/smp.h
7553+++ b/arch/powerpc/include/asm/smp.h
7554@@ -50,7 +50,7 @@ struct smp_ops_t {
7555 int (*cpu_disable)(void);
7556 void (*cpu_die)(unsigned int nr);
7557 int (*cpu_bootable)(unsigned int nr);
7558-};
7559+} __no_const;
7560
7561 extern void smp_send_debugger_break(void);
7562 extern void start_secondary_resume(void);
7563diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
7564index ba7b197..d292e26 100644
7565--- a/arch/powerpc/include/asm/thread_info.h
7566+++ b/arch/powerpc/include/asm/thread_info.h
7567@@ -93,7 +93,6 @@ static inline struct thread_info *current_thread_info(void)
7568 #define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling
7569 TIF_NEED_RESCHED */
7570 #define TIF_32BIT 4 /* 32 bit binary */
7571-#define TIF_PERFMON_WORK 5 /* work for pfm_handle_work() */
7572 #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */
7573 #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
7574 #define TIF_SINGLESTEP 8 /* singlestepping active */
7575@@ -107,6 +106,9 @@ static inline struct thread_info *current_thread_info(void)
7576 #define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation
7577 for stack store? */
7578 #define TIF_MEMDIE 17 /* is terminating due to OOM killer */
7579+#define TIF_PERFMON_WORK 18 /* work for pfm_handle_work() */
7580 +/* mask must be expressible within 16 bits to satisfy 'andi' instruction reqs */
7581+#define TIF_GRSEC_SETXID 5 /* update credentials on syscall entry/exit */
7582
7583 /* as above, but as bit values */
7584 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
7585@@ -126,9 +128,10 @@ static inline struct thread_info *current_thread_info(void)
7586 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
7587 #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
7588 #define _TIF_NOHZ (1<<TIF_NOHZ)
7589+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
7590 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
7591 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
7592- _TIF_NOHZ)
7593+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
7594
7595 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
7596 _TIF_NOTIFY_RESUME | _TIF_UPROBE)
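
The bit shuffle above (TIF_PERFMON_WORK evicted to 18 so TIF_GRSEC_SETXID can take bit 5) is forced by the constraint in the new comment: powerpc's andi. instruction takes a 16-bit unsigned immediate, so any flag the assembly entry path tests directly must live in bits 0..15. A self-contained sketch of a compile-time guard for that rule (the kernel itself would use BUILD_BUG_ON; the mask is abbreviated):

/* andi. rD,rS,UI can only mask bits 0..15, so the syscall-entry mask
 * must stay below 1 << 16. */
#define _TIF_SYSCALL_TRACE      (1 << 0)
#define _TIF_SECCOMP            (1 << 2)
#define _TIF_GRSEC_SETXID       (1 << 5)   /* why a low bit was required */
#define ENTRY_TESTED_MASK \
        (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
_Static_assert(ENTRY_TESTED_MASK < (1 << 16),
               "entry-path mask must fit andi.'s 16-bit immediate");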
7597diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
7598index 4db4959..aba5c41 100644
7599--- a/arch/powerpc/include/asm/uaccess.h
7600+++ b/arch/powerpc/include/asm/uaccess.h
7601@@ -318,52 +318,6 @@ do { \
7602 extern unsigned long __copy_tofrom_user(void __user *to,
7603 const void __user *from, unsigned long size);
7604
7605-#ifndef __powerpc64__
7606-
7607-static inline unsigned long copy_from_user(void *to,
7608- const void __user *from, unsigned long n)
7609-{
7610- unsigned long over;
7611-
7612- if (access_ok(VERIFY_READ, from, n))
7613- return __copy_tofrom_user((__force void __user *)to, from, n);
7614- if ((unsigned long)from < TASK_SIZE) {
7615- over = (unsigned long)from + n - TASK_SIZE;
7616- return __copy_tofrom_user((__force void __user *)to, from,
7617- n - over) + over;
7618- }
7619- return n;
7620-}
7621-
7622-static inline unsigned long copy_to_user(void __user *to,
7623- const void *from, unsigned long n)
7624-{
7625- unsigned long over;
7626-
7627- if (access_ok(VERIFY_WRITE, to, n))
7628- return __copy_tofrom_user(to, (__force void __user *)from, n);
7629- if ((unsigned long)to < TASK_SIZE) {
7630- over = (unsigned long)to + n - TASK_SIZE;
7631- return __copy_tofrom_user(to, (__force void __user *)from,
7632- n - over) + over;
7633- }
7634- return n;
7635-}
7636-
7637-#else /* __powerpc64__ */
7638-
7639-#define __copy_in_user(to, from, size) \
7640- __copy_tofrom_user((to), (from), (size))
7641-
7642-extern unsigned long copy_from_user(void *to, const void __user *from,
7643- unsigned long n);
7644-extern unsigned long copy_to_user(void __user *to, const void *from,
7645- unsigned long n);
7646-extern unsigned long copy_in_user(void __user *to, const void __user *from,
7647- unsigned long n);
7648-
7649-#endif /* __powerpc64__ */
7650-
7651 static inline unsigned long __copy_from_user_inatomic(void *to,
7652 const void __user *from, unsigned long n)
7653 {
7654@@ -387,6 +341,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
7655 if (ret == 0)
7656 return 0;
7657 }
7658+
7659+ if (!__builtin_constant_p(n))
7660+ check_object_size(to, n, false);
7661+
7662 return __copy_tofrom_user((__force void __user *)to, from, n);
7663 }
7664
7665@@ -413,6 +371,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
7666 if (ret == 0)
7667 return 0;
7668 }
7669+
7670+ if (!__builtin_constant_p(n))
7671+ check_object_size(from, n, true);
7672+
7673 return __copy_tofrom_user(to, (__force const void __user *)from, n);
7674 }
7675
7676@@ -430,6 +392,92 @@ static inline unsigned long __copy_to_user(void __user *to,
7677 return __copy_to_user_inatomic(to, from, size);
7678 }
7679
7680+#ifndef __powerpc64__
7681+
7682+static inline unsigned long __must_check copy_from_user(void *to,
7683+ const void __user *from, unsigned long n)
7684+{
7685+ unsigned long over;
7686+
7687+ if ((long)n < 0)
7688+ return n;
7689+
7690+ if (access_ok(VERIFY_READ, from, n)) {
7691+ if (!__builtin_constant_p(n))
7692+ check_object_size(to, n, false);
7693+ return __copy_tofrom_user((__force void __user *)to, from, n);
7694+ }
7695+ if ((unsigned long)from < TASK_SIZE) {
7696+ over = (unsigned long)from + n - TASK_SIZE;
7697+ if (!__builtin_constant_p(n - over))
7698+ check_object_size(to, n - over, false);
7699+ return __copy_tofrom_user((__force void __user *)to, from,
7700+ n - over) + over;
7701+ }
7702+ return n;
7703+}
7704+
7705+static inline unsigned long __must_check copy_to_user(void __user *to,
7706+ const void *from, unsigned long n)
7707+{
7708+ unsigned long over;
7709+
7710+ if ((long)n < 0)
7711+ return n;
7712+
7713+ if (access_ok(VERIFY_WRITE, to, n)) {
7714+ if (!__builtin_constant_p(n))
7715+ check_object_size(from, n, true);
7716+ return __copy_tofrom_user(to, (__force void __user *)from, n);
7717+ }
7718+ if ((unsigned long)to < TASK_SIZE) {
7719+ over = (unsigned long)to + n - TASK_SIZE;
7720+ if (!__builtin_constant_p(n))
7721+ check_object_size(from, n - over, true);
7722+ return __copy_tofrom_user(to, (__force void __user *)from,
7723+ n - over) + over;
7724+ }
7725+ return n;
7726+}
7727+
7728+#else /* __powerpc64__ */
7729+
7730+#define __copy_in_user(to, from, size) \
7731+ __copy_tofrom_user((to), (from), (size))
7732+
7733+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
7734+{
7735+ if ((long)n < 0 || n > INT_MAX)
7736+ return n;
7737+
7738+ if (!__builtin_constant_p(n))
7739+ check_object_size(to, n, false);
7740+
7741+ if (likely(access_ok(VERIFY_READ, from, n)))
7742+ n = __copy_from_user(to, from, n);
7743+ else
7744+ memset(to, 0, n);
7745+ return n;
7746+}
7747+
7748+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
7749+{
7750+ if ((long)n < 0 || n > INT_MAX)
7751+ return n;
7752+
7753+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
7754+ if (!__builtin_constant_p(n))
7755+ check_object_size(from, n, true);
7756+ n = __copy_to_user(to, from, n);
7757+ }
7758+ return n;
7759+}
7760+
7761+extern unsigned long copy_in_user(void __user *to, const void __user *from,
7762+ unsigned long n);
7763+
7764+#endif /* __powerpc64__ */
7765+
7766 extern unsigned long __clear_user(void __user *addr, unsigned long size);
7767
7768 static inline unsigned long clear_user(void __user *addr, unsigned long size)
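
The added (long)n < 0 guards defend against a classic pattern: a signed length goes negative through arithmetic underflow, converts to a huge unsigned long at the call boundary, and then feeds the partial-copy arithmetic above with nonsense. Rejecting such sizes up front costs one comparison. A runnable userspace illustration:

#include <stdio.h>

/* Mirrors copy_from_user()'s contract: the return value is the number
 * of bytes NOT copied, so returning n means "refused entirely". */
static unsigned long guarded_copy(unsigned long n)
{
        if ((long)n < 0)  /* on 64-bit, catches any size >= 2^63 */
                return n;
        /* ... the real copy would happen here ... */
        return 0;
}

int main(void)
{
        int len = 16;
        len -= 20;  /* buggy length math: len == -4 */
        printf("%lu\n", guarded_copy((unsigned long)len));  /* huge, refused */
        printf("%lu\n", guarded_copy(64));                  /* 0: copied    */
        return 0;
}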
7769diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
7770index 645170a..6cf0271 100644
7771--- a/arch/powerpc/kernel/exceptions-64e.S
7772+++ b/arch/powerpc/kernel/exceptions-64e.S
7773@@ -757,6 +757,7 @@ storage_fault_common:
7774 std r14,_DAR(r1)
7775 std r15,_DSISR(r1)
7776 addi r3,r1,STACK_FRAME_OVERHEAD
7777+ bl .save_nvgprs
7778 mr r4,r14
7779 mr r5,r15
7780 ld r14,PACA_EXGEN+EX_R14(r13)
7781@@ -765,8 +766,7 @@ storage_fault_common:
7782 cmpdi r3,0
7783 bne- 1f
7784 b .ret_from_except_lite
7785-1: bl .save_nvgprs
7786- mr r5,r3
7787+1: mr r5,r3
7788 addi r3,r1,STACK_FRAME_OVERHEAD
7789 ld r4,_DAR(r1)
7790 bl .bad_page_fault
7791diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
7792index 902ca3c..e942155 100644
7793--- a/arch/powerpc/kernel/exceptions-64s.S
7794+++ b/arch/powerpc/kernel/exceptions-64s.S
7795@@ -1357,10 +1357,10 @@ handle_page_fault:
7796 11: ld r4,_DAR(r1)
7797 ld r5,_DSISR(r1)
7798 addi r3,r1,STACK_FRAME_OVERHEAD
7799+ bl .save_nvgprs
7800 bl .do_page_fault
7801 cmpdi r3,0
7802 beq+ 12f
7803- bl .save_nvgprs
7804 mr r5,r3
7805 addi r3,r1,STACK_FRAME_OVERHEAD
7806 lwz r4,_DAR(r1)
7807diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
7808index 2e3200c..72095ce 100644
7809--- a/arch/powerpc/kernel/module_32.c
7810+++ b/arch/powerpc/kernel/module_32.c
7811@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
7812 me->arch.core_plt_section = i;
7813 }
7814 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
7815- printk("Module doesn't contain .plt or .init.plt sections.\n");
7816+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
7817 return -ENOEXEC;
7818 }
7819
7820@@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
7821
7822 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
7823 /* Init, or core PLT? */
7824- if (location >= mod->module_core
7825- && location < mod->module_core + mod->core_size)
7826+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
7827+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
7828 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
7829- else
7830+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
7831+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
7832 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
7833+ else {
7834+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
7835+ return ~0UL;
7836+ }
7837
7838 /* Find this entry, or if that fails, the next avail. entry */
7839 while (entry->jump[0]) {
7840diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
7841index 7baa27b..f6b394a 100644
7842--- a/arch/powerpc/kernel/process.c
7843+++ b/arch/powerpc/kernel/process.c
7844@@ -884,8 +884,8 @@ void show_regs(struct pt_regs * regs)
7845 * Lookup NIP late so we have the best chance of getting the
7846 * above info out without failing
7847 */
7848- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
7849- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
7850+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
7851+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
7852 #endif
7853 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
7854 printk("PACATMSCRATCH [%llx]\n", get_paca()->tm_scratch);
7855@@ -1345,10 +1345,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
7856 newsp = stack[0];
7857 ip = stack[STACK_FRAME_LR_SAVE];
7858 if (!firstframe || ip != lr) {
7859- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
7860+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
7861 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
7862 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
7863- printk(" (%pS)",
7864+ printk(" (%pA)",
7865 (void *)current->ret_stack[curr_frame].ret);
7866 curr_frame--;
7867 }
7868@@ -1368,7 +1368,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
7869 struct pt_regs *regs = (struct pt_regs *)
7870 (sp + STACK_FRAME_OVERHEAD);
7871 lr = regs->link;
7872- printk("--- Exception: %lx at %pS\n LR = %pS\n",
7873+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
7874 regs->trap, (void *)regs->nip, (void *)lr);
7875 firstframe = 1;
7876 }
7877@@ -1404,58 +1404,3 @@ void notrace __ppc64_runlatch_off(void)
7878 mtspr(SPRN_CTRLT, ctrl);
7879 }
7880 #endif /* CONFIG_PPC64 */
7881-
7882-unsigned long arch_align_stack(unsigned long sp)
7883-{
7884- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7885- sp -= get_random_int() & ~PAGE_MASK;
7886- return sp & ~0xf;
7887-}
7888-
7889-static inline unsigned long brk_rnd(void)
7890-{
7891- unsigned long rnd = 0;
7892-
7893- /* 8MB for 32bit, 1GB for 64bit */
7894- if (is_32bit_task())
7895- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
7896- else
7897- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
7898-
7899- return rnd << PAGE_SHIFT;
7900-}
7901-
7902-unsigned long arch_randomize_brk(struct mm_struct *mm)
7903-{
7904- unsigned long base = mm->brk;
7905- unsigned long ret;
7906-
7907-#ifdef CONFIG_PPC_STD_MMU_64
7908- /*
7909- * If we are using 1TB segments and we are allowed to randomise
7910- * the heap, we can put it above 1TB so it is backed by a 1TB
7911- * segment. Otherwise the heap will be in the bottom 1TB
7912- * which always uses 256MB segments and this may result in a
7913- * performance penalty.
7914- */
7915- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
7916- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
7917-#endif
7918-
7919- ret = PAGE_ALIGN(base + brk_rnd());
7920-
7921- if (ret < mm->brk)
7922- return mm->brk;
7923-
7924- return ret;
7925-}
7926-
7927-unsigned long randomize_et_dyn(unsigned long base)
7928-{
7929- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
7930-
7931- if (ret < base)
7932- return base;
7933-
7934- return ret;
7935-}
7936diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
7937index 64f7bd5..8dd550f 100644
7938--- a/arch/powerpc/kernel/ptrace.c
7939+++ b/arch/powerpc/kernel/ptrace.c
7940@@ -1783,6 +1783,10 @@ long arch_ptrace(struct task_struct *child, long request,
7941 return ret;
7942 }
7943
7944+#ifdef CONFIG_GRKERNSEC_SETXID
7945+extern void gr_delayed_cred_worker(void);
7946+#endif
7947+
7948 /*
7949 * We must return the syscall number to actually look up in the table.
7950 * This can be -1L to skip running any syscall at all.
7951@@ -1795,6 +1799,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
7952
7953 secure_computing_strict(regs->gpr[0]);
7954
7955+#ifdef CONFIG_GRKERNSEC_SETXID
7956+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
7957+ gr_delayed_cred_worker();
7958+#endif
7959+
7960 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
7961 tracehook_report_syscall_entry(regs))
7962 /*
7963@@ -1829,6 +1838,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
7964 {
7965 int step;
7966
7967+#ifdef CONFIG_GRKERNSEC_SETXID
7968+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
7969+ gr_delayed_cred_worker();
7970+#endif
7971+
7972 audit_syscall_exit(regs);
7973
7974 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
7975diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
7976index 0f83122..c0aca6a 100644
7977--- a/arch/powerpc/kernel/signal_32.c
7978+++ b/arch/powerpc/kernel/signal_32.c
7979@@ -987,7 +987,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
7980 /* Save user registers on the stack */
7981 frame = &rt_sf->uc.uc_mcontext;
7982 addr = frame;
7983- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
7984+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
7985 sigret = 0;
7986 tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
7987 } else {
7988diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
7989index 887e99d..310bc11 100644
7990--- a/arch/powerpc/kernel/signal_64.c
7991+++ b/arch/powerpc/kernel/signal_64.c
7992@@ -751,7 +751,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
7993 #endif
7994
7995 /* Set up to return from userspace. */
7996- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
7997+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
7998 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
7999 } else {
8000 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
8001diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
8002index e68a845..8b140e6 100644
8003--- a/arch/powerpc/kernel/sysfs.c
8004+++ b/arch/powerpc/kernel/sysfs.c
8005@@ -522,7 +522,7 @@ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
8006 return NOTIFY_OK;
8007 }
8008
8009-static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
8010+static struct notifier_block sysfs_cpu_nb = {
8011 .notifier_call = sysfs_cpu_notify,
8012 };
8013
8014diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
8015index 88929b1..bece8f8 100644
8016--- a/arch/powerpc/kernel/traps.c
8017+++ b/arch/powerpc/kernel/traps.c
8018@@ -141,6 +141,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
8019 return flags;
8020 }
8021
8022+extern void gr_handle_kernel_exploit(void);
8023+
8024 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
8025 int signr)
8026 {
8027@@ -190,6 +192,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
8028 panic("Fatal exception in interrupt");
8029 if (panic_on_oops)
8030 panic("Fatal exception");
8031+
8032+ gr_handle_kernel_exploit();
8033+
8034 do_exit(signr);
8035 }
8036
8037diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
8038index d4f463a..8fb7431 100644
8039--- a/arch/powerpc/kernel/vdso.c
8040+++ b/arch/powerpc/kernel/vdso.c
8041@@ -34,6 +34,7 @@
8042 #include <asm/firmware.h>
8043 #include <asm/vdso.h>
8044 #include <asm/vdso_datapage.h>
8045+#include <asm/mman.h>
8046
8047 #include "setup.h"
8048
8049@@ -222,7 +223,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
8050 vdso_base = VDSO32_MBASE;
8051 #endif
8052
8053- current->mm->context.vdso_base = 0;
8054+ current->mm->context.vdso_base = ~0UL;
8055
8056 /* vDSO has a problem and was disabled, just don't "enable" it for the
8057 * process
8058@@ -242,7 +243,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
8059 vdso_base = get_unmapped_area(NULL, vdso_base,
8060 (vdso_pages << PAGE_SHIFT) +
8061 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
8062- 0, 0);
8063+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
8064 if (IS_ERR_VALUE(vdso_base)) {
8065 rc = vdso_base;
8066 goto fail_mmapsem;
8067diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
8068index 5eea6f3..5d10396 100644
8069--- a/arch/powerpc/lib/usercopy_64.c
8070+++ b/arch/powerpc/lib/usercopy_64.c
8071@@ -9,22 +9,6 @@
8072 #include <linux/module.h>
8073 #include <asm/uaccess.h>
8074
8075-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
8076-{
8077- if (likely(access_ok(VERIFY_READ, from, n)))
8078- n = __copy_from_user(to, from, n);
8079- else
8080- memset(to, 0, n);
8081- return n;
8082-}
8083-
8084-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
8085-{
8086- if (likely(access_ok(VERIFY_WRITE, to, n)))
8087- n = __copy_to_user(to, from, n);
8088- return n;
8089-}
8090-
8091 unsigned long copy_in_user(void __user *to, const void __user *from,
8092 unsigned long n)
8093 {
8094@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
8095 return n;
8096 }
8097
8098-EXPORT_SYMBOL(copy_from_user);
8099-EXPORT_SYMBOL(copy_to_user);
8100 EXPORT_SYMBOL(copy_in_user);
8101
8102diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
8103index 8726779..a33c512 100644
8104--- a/arch/powerpc/mm/fault.c
8105+++ b/arch/powerpc/mm/fault.c
8106@@ -33,6 +33,10 @@
8107 #include <linux/magic.h>
8108 #include <linux/ratelimit.h>
8109 #include <linux/context_tracking.h>
8110+#include <linux/slab.h>
8111+#include <linux/pagemap.h>
8112+#include <linux/compiler.h>
8113+#include <linux/unistd.h>
8114
8115 #include <asm/firmware.h>
8116 #include <asm/page.h>
8117@@ -69,6 +73,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
8118 }
8119 #endif
8120
8121+#ifdef CONFIG_PAX_PAGEEXEC
8122+/*
8123+ * PaX: decide what to do with offenders (regs->nip = fault address)
8124+ *
8125+ * returns 1 when task should be killed
8126+ */
8127+static int pax_handle_fetch_fault(struct pt_regs *regs)
8128+{
8129+ return 1;
8130+}
8131+
8132+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
8133+{
8134+ unsigned long i;
8135+
8136+ printk(KERN_ERR "PAX: bytes at PC: ");
8137+ for (i = 0; i < 5; i++) {
8138+ unsigned int c;
8139+ if (get_user(c, (unsigned int __user *)pc+i))
8140+ printk(KERN_CONT "???????? ");
8141+ else
8142+ printk(KERN_CONT "%08x ", c);
8143+ }
8144+ printk("\n");
8145+}
8146+#endif
8147+
8148 /*
8149 * Check whether the instruction at regs->nip is a store using
8150 * an update addressing form which will update r1.
8151@@ -216,7 +247,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
8152 * indicate errors in DSISR but can validly be set in SRR1.
8153 */
8154 if (trap == 0x400)
8155- error_code &= 0x48200000;
8156+ error_code &= 0x58200000;
8157 else
8158 is_write = error_code & DSISR_ISSTORE;
8159 #else
8160@@ -371,7 +402,7 @@ good_area:
8161 * "undefined". Of those that can be set, this is the only
8162 * one which seems bad.
8163 */
8164- if (error_code & 0x10000000)
8165+ if (error_code & DSISR_GUARDED)
8166 /* Guarded storage error. */
8167 goto bad_area;
8168 #endif /* CONFIG_8xx */
8169@@ -386,7 +417,7 @@ good_area:
8170 * processors use the same I/D cache coherency mechanism
8171 * as embedded.
8172 */
8173- if (error_code & DSISR_PROTFAULT)
8174+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
8175 goto bad_area;
8176 #endif /* CONFIG_PPC_STD_MMU */
8177
8178@@ -471,6 +502,23 @@ bad_area:
8179 bad_area_nosemaphore:
8180 /* User mode accesses cause a SIGSEGV */
8181 if (user_mode(regs)) {
8182+
8183+#ifdef CONFIG_PAX_PAGEEXEC
8184+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
8185+#ifdef CONFIG_PPC_STD_MMU
8186+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
8187+#else
8188+ if (is_exec && regs->nip == address) {
8189+#endif
8190+ switch (pax_handle_fetch_fault(regs)) {
8191+ }
8192+
8193+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
8194+ do_group_exit(SIGKILL);
8195+ }
8196+ }
8197+#endif
8198+
8199 _exception(SIGSEGV, regs, code, address);
8200 goto bail;
8201 }
8202diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
8203index 67a42ed..cd463e0 100644
8204--- a/arch/powerpc/mm/mmap_64.c
8205+++ b/arch/powerpc/mm/mmap_64.c
8206@@ -57,6 +57,10 @@ static unsigned long mmap_rnd(void)
8207 {
8208 unsigned long rnd = 0;
8209
8210+#ifdef CONFIG_PAX_RANDMMAP
8211+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8212+#endif
8213+
8214 if (current->flags & PF_RANDOMIZE) {
8215 /* 8MB for 32bit, 1GB for 64bit */
8216 if (is_32bit_task())
8217@@ -91,10 +95,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
8218 */
8219 if (mmap_is_legacy()) {
8220 mm->mmap_base = TASK_UNMAPPED_BASE;
8221+
8222+#ifdef CONFIG_PAX_RANDMMAP
8223+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8224+ mm->mmap_base += mm->delta_mmap;
8225+#endif
8226+
8227 mm->get_unmapped_area = arch_get_unmapped_area;
8228 mm->unmap_area = arch_unmap_area;
8229 } else {
8230 mm->mmap_base = mmap_base();
8231+
8232+#ifdef CONFIG_PAX_RANDMMAP
8233+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8234+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
8235+#endif
8236+
8237 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
8238 mm->unmap_area = arch_unmap_area_topdown;
8239 }
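
The two layouts shift the randomized base in opposite directions: bottom-up raises TASK_UNMAPPED_BASE by delta_mmap, while the top-down allocator works downward from just under the stack, so its base drops by delta_mmap plus delta_stack to preserve both random gaps. A kernel-context sketch of the effect (mmap_base() and TASK_UNMAPPED_BASE assumed from the surrounding file):

/* Sketch: where PAX_RANDMMAP's deltas land in each mmap layout. */
static unsigned long pick_mmap_base(struct mm_struct *mm, int legacy)
{
        if (legacy)  /* bottom-up: push the search start upward */
                return TASK_UNMAPPED_BASE + mm->delta_mmap;
        /* top-down: lower the under-stack base by both random gaps */
        return mmap_base() - mm->delta_mmap - mm->delta_stack;
}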
8240diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
8241index e779642..e5bb889 100644
8242--- a/arch/powerpc/mm/mmu_context_nohash.c
8243+++ b/arch/powerpc/mm/mmu_context_nohash.c
8244@@ -363,7 +363,7 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
8245 return NOTIFY_OK;
8246 }
8247
8248-static struct notifier_block __cpuinitdata mmu_context_cpu_nb = {
8249+static struct notifier_block mmu_context_cpu_nb = {
8250 .notifier_call = mmu_context_cpu_notify,
8251 };
8252
8253diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
8254index cafad40..9cbc0fc 100644
8255--- a/arch/powerpc/mm/numa.c
8256+++ b/arch/powerpc/mm/numa.c
8257@@ -920,7 +920,7 @@ static void __init *careful_zallocation(int nid, unsigned long size,
8258 return ret;
8259 }
8260
8261-static struct notifier_block __cpuinitdata ppc64_numa_nb = {
8262+static struct notifier_block ppc64_numa_nb = {
8263 .notifier_call = cpu_numa_callback,
8264 .priority = 1 /* Must run before sched domains notifier. */
8265 };
8266diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
8267index 3e99c14..f00953c 100644
8268--- a/arch/powerpc/mm/slice.c
8269+++ b/arch/powerpc/mm/slice.c
8270@@ -103,7 +103,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
8271 if ((mm->task_size - len) < addr)
8272 return 0;
8273 vma = find_vma(mm, addr);
8274- return (!vma || (addr + len) <= vma->vm_start);
8275+ return check_heap_stack_gap(vma, addr, len, 0);
8276 }
8277
8278 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
8279@@ -277,6 +277,12 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
8280 info.align_offset = 0;
8281
8282 addr = TASK_UNMAPPED_BASE;
8283+
8284+#ifdef CONFIG_PAX_RANDMMAP
8285+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8286+ addr += mm->delta_mmap;
8287+#endif
8288+
8289 while (addr < TASK_SIZE) {
8290 info.low_limit = addr;
8291 if (!slice_scan_available(addr, available, 1, &addr))
8292@@ -410,6 +416,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
8293 if (fixed && addr > (mm->task_size - len))
8294 return -EINVAL;
8295
8296+#ifdef CONFIG_PAX_RANDMMAP
8297+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
8298+ addr = 0;
8299+#endif
8300+
8301 /* If hint, make sure it matches our alignment restrictions */
8302 if (!fixed && addr) {
8303 addr = _ALIGN_UP(addr, 1ul << pshift);
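
check_heap_stack_gap() generalizes the open-coded (addr + len) <= vma->vm_start test: besides the plain fit check, it keeps a guard zone in front of growing-down stack vmas. The fourth argument carries a per-thread offset; this simplified sketch treats it as the guard size, and the real helper added elsewhere in this patch also consults a sysctl:

/* Simplified kernel-context sketch of the idea, details assumed. */
static int check_heap_stack_gap(const struct vm_area_struct *vma,
                                unsigned long addr, unsigned long len,
                                unsigned long gap)
{
        if (!vma)
                return 1;  /* nothing above: the range fits */
        if (addr + len > vma->vm_start)
                return 0;  /* overlaps the next mapping */
        if (vma->vm_flags & VM_GROWSDOWN)  /* keep a gap below stacks */
                return vma->vm_start - (addr + len) >= gap;
        return 1;
}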
8304diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
8305index 9098692..3d54cd1 100644
8306--- a/arch/powerpc/platforms/cell/spufs/file.c
8307+++ b/arch/powerpc/platforms/cell/spufs/file.c
8308@@ -280,9 +280,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
8309 return VM_FAULT_NOPAGE;
8310 }
8311
8312-static int spufs_mem_mmap_access(struct vm_area_struct *vma,
8313+static ssize_t spufs_mem_mmap_access(struct vm_area_struct *vma,
8314 unsigned long address,
8315- void *buf, int len, int write)
8316+ void *buf, size_t len, int write)
8317 {
8318 struct spu_context *ctx = vma->vm_file->private_data;
8319 unsigned long offset = address - vma->vm_start;
8320diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
8321index bdb738a..49c9f95 100644
8322--- a/arch/powerpc/platforms/powermac/smp.c
8323+++ b/arch/powerpc/platforms/powermac/smp.c
8324@@ -885,7 +885,7 @@ static int smp_core99_cpu_notify(struct notifier_block *self,
8325 return NOTIFY_OK;
8326 }
8327
8328-static struct notifier_block __cpuinitdata smp_core99_cpu_nb = {
8329+static struct notifier_block smp_core99_cpu_nb = {
8330 .notifier_call = smp_core99_cpu_notify,
8331 };
8332 #endif /* CONFIG_HOTPLUG_CPU */
8333diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
8334index c797832..ce575c8 100644
8335--- a/arch/s390/include/asm/atomic.h
8336+++ b/arch/s390/include/asm/atomic.h
8337@@ -326,6 +326,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
8338 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
8339 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
8340
8341+#define atomic64_read_unchecked(v) atomic64_read(v)
8342+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
8343+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
8344+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
8345+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
8346+#define atomic64_inc_unchecked(v) atomic64_inc(v)
8347+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
8348+#define atomic64_dec_unchecked(v) atomic64_dec(v)
8349+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
8350+
8351 #define smp_mb__before_atomic_dec() smp_mb()
8352 #define smp_mb__after_atomic_dec() smp_mb()
8353 #define smp_mb__before_atomic_inc() smp_mb()
8354diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
8355index 4d7ccac..d03d0ad 100644
8356--- a/arch/s390/include/asm/cache.h
8357+++ b/arch/s390/include/asm/cache.h
8358@@ -9,8 +9,10 @@
8359 #ifndef __ARCH_S390_CACHE_H
8360 #define __ARCH_S390_CACHE_H
8361
8362-#define L1_CACHE_BYTES 256
8363+#include <linux/const.h>
8364+
8365 #define L1_CACHE_SHIFT 8
8366+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8367 #define NET_SKB_PAD 32
8368
8369 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
8370diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
8371index 78f4f87..598ce39 100644
8372--- a/arch/s390/include/asm/elf.h
8373+++ b/arch/s390/include/asm/elf.h
8374@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
8375 the loader. We need to make sure that it is out of the way of the program
8376 that it will "exec", and that there is sufficient room for the brk. */
8377
8378-extern unsigned long randomize_et_dyn(unsigned long base);
8379-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
8380+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
8381+
8382+#ifdef CONFIG_PAX_ASLR
8383+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
8384+
8385+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
8386+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
8387+#endif
8388
8389 /* This yields a mask that user programs can use to figure out what
8390 instruction set this CPU supports. */
8391@@ -222,9 +228,6 @@ struct linux_binprm;
8392 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
8393 int arch_setup_additional_pages(struct linux_binprm *, int);
8394
8395-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8396-#define arch_randomize_brk arch_randomize_brk
8397-
8398 void *fill_cpu_elf_notes(void *ptr, struct save_area *sa);
8399
8400 #endif
8401diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
8402index c4a93d6..4d2a9b4 100644
8403--- a/arch/s390/include/asm/exec.h
8404+++ b/arch/s390/include/asm/exec.h
8405@@ -7,6 +7,6 @@
8406 #ifndef __ASM_EXEC_H
8407 #define __ASM_EXEC_H
8408
8409-extern unsigned long arch_align_stack(unsigned long sp);
8410+#define arch_align_stack(x) ((x) & ~0xfUL)
8411
8412 #endif /* __ASM_EXEC_H */
8413diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
8414index 9c33ed4..e40cbef 100644
8415--- a/arch/s390/include/asm/uaccess.h
8416+++ b/arch/s390/include/asm/uaccess.h
8417@@ -252,6 +252,10 @@ static inline unsigned long __must_check
8418 copy_to_user(void __user *to, const void *from, unsigned long n)
8419 {
8420 might_fault();
8421+
8422+ if ((long)n < 0)
8423+ return n;
8424+
8425 return __copy_to_user(to, from, n);
8426 }
8427
8428@@ -275,6 +279,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
8429 static inline unsigned long __must_check
8430 __copy_from_user(void *to, const void __user *from, unsigned long n)
8431 {
8432+ if ((long)n < 0)
8433+ return n;
8434+
8435 if (__builtin_constant_p(n) && (n <= 256))
8436 return uaccess.copy_from_user_small(n, from, to);
8437 else
8438@@ -306,10 +313,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
8439 static inline unsigned long __must_check
8440 copy_from_user(void *to, const void __user *from, unsigned long n)
8441 {
8442- unsigned int sz = __compiletime_object_size(to);
8443+ size_t sz = __compiletime_object_size(to);
8444
8445 might_fault();
8446- if (unlikely(sz != -1 && sz < n)) {
8447+
8448+ if ((long)n < 0)
8449+ return n;
8450+
8451+ if (unlikely(sz != (size_t)-1 && sz < n)) {
8452 copy_from_user_overflow();
8453 return n;
8454 }
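
The added (long)n < 0 tests catch length underflow: when a caller computes a size that goes negative and is converted to unsigned long, it arrives here as an enormous value, and returning n (the convention for "n bytes were not copied") refuses the copy before it runs off the destination. A hypothetical caller with exactly this bug (all names invented for illustration):

        struct req_hdr { u32 len; u32 op; };

        static int read_payload(char *buf, const void __user *uptr, size_t userlen)
        {
                size_t n = userlen - sizeof(struct req_hdr); /* userlen < 8: n underflows */

                if (copy_from_user(buf, uptr, n))       /* patched: returns n, copy refused */
                        return -EFAULT;
                return 0;
        }
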
8455diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
8456index 7845e15..59c4353 100644
8457--- a/arch/s390/kernel/module.c
8458+++ b/arch/s390/kernel/module.c
8459@@ -169,11 +169,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
8460
8461 /* Increase core size by size of got & plt and set start
8462 offsets for got and plt. */
8463- me->core_size = ALIGN(me->core_size, 4);
8464- me->arch.got_offset = me->core_size;
8465- me->core_size += me->arch.got_size;
8466- me->arch.plt_offset = me->core_size;
8467- me->core_size += me->arch.plt_size;
8468+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
8469+ me->arch.got_offset = me->core_size_rw;
8470+ me->core_size_rw += me->arch.got_size;
8471+ me->arch.plt_offset = me->core_size_rx;
8472+ me->core_size_rx += me->arch.plt_size;
8473 return 0;
8474 }
8475
8476@@ -289,7 +289,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
8477 if (info->got_initialized == 0) {
8478 Elf_Addr *gotent;
8479
8480- gotent = me->module_core + me->arch.got_offset +
8481+ gotent = me->module_core_rw + me->arch.got_offset +
8482 info->got_offset;
8483 *gotent = val;
8484 info->got_initialized = 1;
8485@@ -312,7 +312,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
8486 rc = apply_rela_bits(loc, val, 0, 64, 0);
8487 else if (r_type == R_390_GOTENT ||
8488 r_type == R_390_GOTPLTENT) {
8489- val += (Elf_Addr) me->module_core - loc;
8490+ val += (Elf_Addr) me->module_core_rw - loc;
8491 rc = apply_rela_bits(loc, val, 1, 32, 1);
8492 }
8493 break;
8494@@ -325,7 +325,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
8495 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
8496 if (info->plt_initialized == 0) {
8497 unsigned int *ip;
8498- ip = me->module_core + me->arch.plt_offset +
8499+ ip = me->module_core_rx + me->arch.plt_offset +
8500 info->plt_offset;
8501 #ifndef CONFIG_64BIT
8502 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
8503@@ -350,7 +350,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
8504 val - loc + 0xffffUL < 0x1ffffeUL) ||
8505 (r_type == R_390_PLT32DBL &&
8506 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
8507- val = (Elf_Addr) me->module_core +
8508+ val = (Elf_Addr) me->module_core_rx +
8509 me->arch.plt_offset +
8510 info->plt_offset;
8511 val += rela->r_addend - loc;
8512@@ -372,7 +372,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
8513 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
8514 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
8515 val = val + rela->r_addend -
8516- ((Elf_Addr) me->module_core + me->arch.got_offset);
8517+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
8518 if (r_type == R_390_GOTOFF16)
8519 rc = apply_rela_bits(loc, val, 0, 16, 0);
8520 else if (r_type == R_390_GOTOFF32)
8521@@ -382,7 +382,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
8522 break;
8523 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
8524 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
8525- val = (Elf_Addr) me->module_core + me->arch.got_offset +
8526+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
8527 rela->r_addend - loc;
8528 if (r_type == R_390_GOTPC)
8529 rc = apply_rela_bits(loc, val, 1, 32, 0);
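
These module.c hunks follow from the PAX_KERNEXEC module layout: the single core region is split into a read-execute half and a read-write non-executable half, so the GOT (data the relocator writes) lands in RW while the PLT stubs (code) land in RX. The fields they rely on are added by the patch in include/linux/module.h, in roughly this shape (sketch, not verbatim):

        struct module {
                /* ... */
                void *module_core_rx;           /* text + PLT, mapped r-x */
                void *module_core_rw;           /* data + GOT, mapped rw- */
                unsigned long core_size_rx;
                unsigned long core_size_rw;
                /* ... */
        };
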
8530diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
8531index 2bc3edd..ab9d598 100644
8532--- a/arch/s390/kernel/process.c
8533+++ b/arch/s390/kernel/process.c
8534@@ -236,39 +236,3 @@ unsigned long get_wchan(struct task_struct *p)
8535 }
8536 return 0;
8537 }
8538-
8539-unsigned long arch_align_stack(unsigned long sp)
8540-{
8541- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
8542- sp -= get_random_int() & ~PAGE_MASK;
8543- return sp & ~0xf;
8544-}
8545-
8546-static inline unsigned long brk_rnd(void)
8547-{
8548- /* 8MB for 32bit, 1GB for 64bit */
8549- if (is_32bit_task())
8550- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
8551- else
8552- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
8553-}
8554-
8555-unsigned long arch_randomize_brk(struct mm_struct *mm)
8556-{
8557- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
8558-
8559- if (ret < mm->brk)
8560- return mm->brk;
8561- return ret;
8562-}
8563-
8564-unsigned long randomize_et_dyn(unsigned long base)
8565-{
8566- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
8567-
8568- if (!(current->flags & PF_RANDOMIZE))
8569- return base;
8570- if (ret < base)
8571- return base;
8572- return ret;
8573-}
8574diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
8575index 06bafec..2bca531 100644
8576--- a/arch/s390/mm/mmap.c
8577+++ b/arch/s390/mm/mmap.c
8578@@ -90,10 +90,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
8579 */
8580 if (mmap_is_legacy()) {
8581 mm->mmap_base = TASK_UNMAPPED_BASE;
8582+
8583+#ifdef CONFIG_PAX_RANDMMAP
8584+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8585+ mm->mmap_base += mm->delta_mmap;
8586+#endif
8587+
8588 mm->get_unmapped_area = arch_get_unmapped_area;
8589 mm->unmap_area = arch_unmap_area;
8590 } else {
8591 mm->mmap_base = mmap_base();
8592+
8593+#ifdef CONFIG_PAX_RANDMMAP
8594+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8595+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
8596+#endif
8597+
8598 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
8599 mm->unmap_area = arch_unmap_area_topdown;
8600 }
8601@@ -175,10 +187,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
8602 */
8603 if (mmap_is_legacy()) {
8604 mm->mmap_base = TASK_UNMAPPED_BASE;
8605+
8606+#ifdef CONFIG_PAX_RANDMMAP
8607+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8608+ mm->mmap_base += mm->delta_mmap;
8609+#endif
8610+
8611 mm->get_unmapped_area = s390_get_unmapped_area;
8612 mm->unmap_area = arch_unmap_area;
8613 } else {
8614 mm->mmap_base = mmap_base();
8615+
8616+#ifdef CONFIG_PAX_RANDMMAP
8617+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8618+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
8619+#endif
8620+
8621 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
8622 mm->unmap_area = arch_unmap_area_topdown;
8623 }
8624diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
8625index ae3d59f..f65f075 100644
8626--- a/arch/score/include/asm/cache.h
8627+++ b/arch/score/include/asm/cache.h
8628@@ -1,7 +1,9 @@
8629 #ifndef _ASM_SCORE_CACHE_H
8630 #define _ASM_SCORE_CACHE_H
8631
8632+#include <linux/const.h>
8633+
8634 #define L1_CACHE_SHIFT 4
8635-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8636+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8637
8638 #endif /* _ASM_SCORE_CACHE_H */
8639diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
8640index f9f3cd5..58ff438 100644
8641--- a/arch/score/include/asm/exec.h
8642+++ b/arch/score/include/asm/exec.h
8643@@ -1,6 +1,6 @@
8644 #ifndef _ASM_SCORE_EXEC_H
8645 #define _ASM_SCORE_EXEC_H
8646
8647-extern unsigned long arch_align_stack(unsigned long sp);
8648+#define arch_align_stack(x) (x)
8649
8650 #endif /* _ASM_SCORE_EXEC_H */
8651diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
8652index f4c6d02..e9355c3 100644
8653--- a/arch/score/kernel/process.c
8654+++ b/arch/score/kernel/process.c
8655@@ -116,8 +116,3 @@ unsigned long get_wchan(struct task_struct *task)
8656
8657 return task_pt_regs(task)->cp0_epc;
8658 }
8659-
8660-unsigned long arch_align_stack(unsigned long sp)
8661-{
8662- return sp;
8663-}
8664diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
8665index ef9e555..331bd29 100644
8666--- a/arch/sh/include/asm/cache.h
8667+++ b/arch/sh/include/asm/cache.h
8668@@ -9,10 +9,11 @@
8669 #define __ASM_SH_CACHE_H
8670 #ifdef __KERNEL__
8671
8672+#include <linux/const.h>
8673 #include <linux/init.h>
8674 #include <cpu/cache.h>
8675
8676-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8677+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8678
8679 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
8680
8681diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
8682index 03f2b55..b0270327 100644
8683--- a/arch/sh/kernel/cpu/sh4a/smp-shx3.c
8684+++ b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
8685@@ -143,7 +143,7 @@ shx3_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
8686 return NOTIFY_OK;
8687 }
8688
8689-static struct notifier_block __cpuinitdata shx3_cpu_notifier = {
8690+static struct notifier_block shx3_cpu_notifier = {
8691 .notifier_call = shx3_cpu_callback,
8692 };
8693
8694diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
8695index 6777177..cb5e44f 100644
8696--- a/arch/sh/mm/mmap.c
8697+++ b/arch/sh/mm/mmap.c
8698@@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
8699 struct mm_struct *mm = current->mm;
8700 struct vm_area_struct *vma;
8701 int do_colour_align;
8702+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
8703 struct vm_unmapped_area_info info;
8704
8705 if (flags & MAP_FIXED) {
8706@@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
8707 if (filp || (flags & MAP_SHARED))
8708 do_colour_align = 1;
8709
8710+#ifdef CONFIG_PAX_RANDMMAP
8711+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8712+#endif
8713+
8714 if (addr) {
8715 if (do_colour_align)
8716 addr = COLOUR_ALIGN(addr, pgoff);
8717@@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
8718 addr = PAGE_ALIGN(addr);
8719
8720 vma = find_vma(mm, addr);
8721- if (TASK_SIZE - len >= addr &&
8722- (!vma || addr + len <= vma->vm_start))
8723+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
8724 return addr;
8725 }
8726
8727 info.flags = 0;
8728 info.length = len;
8729- info.low_limit = TASK_UNMAPPED_BASE;
8730+ info.low_limit = mm->mmap_base;
8731 info.high_limit = TASK_SIZE;
8732 info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
8733 info.align_offset = pgoff << PAGE_SHIFT;
8734@@ -85,6 +89,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8735 struct mm_struct *mm = current->mm;
8736 unsigned long addr = addr0;
8737 int do_colour_align;
8738+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
8739 struct vm_unmapped_area_info info;
8740
8741 if (flags & MAP_FIXED) {
8742@@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8743 if (filp || (flags & MAP_SHARED))
8744 do_colour_align = 1;
8745
8746+#ifdef CONFIG_PAX_RANDMMAP
8747+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8748+#endif
8749+
8750 /* requesting a specific address */
8751 if (addr) {
8752 if (do_colour_align)
8753@@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8754 addr = PAGE_ALIGN(addr);
8755
8756 vma = find_vma(mm, addr);
8757- if (TASK_SIZE - len >= addr &&
8758- (!vma || addr + len <= vma->vm_start))
8759+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
8760 return addr;
8761 }
8762
8763@@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8764 VM_BUG_ON(addr != -ENOMEM);
8765 info.flags = 0;
8766 info.low_limit = TASK_UNMAPPED_BASE;
8767+
8768+#ifdef CONFIG_PAX_RANDMMAP
8769+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8770+ info.low_limit += mm->delta_mmap;
8771+#endif
8772+
8773 info.high_limit = TASK_SIZE;
8774 addr = vm_unmapped_area(&info);
8775 }
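
Throughout the arch mmap code, gr_rand_threadstack_offset() plus check_heap_stack_gap() replace the bare "does it end before the next VMA" test: the helper additionally refuses to place a mapping within the configured guard gap below a stack VMA, fuzzed by a per-thread random offset so thread stacks cannot be reliably bracketed by attacker-controlled mappings. A simplified rendering of the logic (the real helper is added elsewhere in the patch; sysctl_heap_stack_gap is the vm.heap_stack_gap knob it introduces):

        static inline bool check_heap_stack_gap(const struct vm_area_struct *vma,
                                                unsigned long addr, unsigned long len,
                                                unsigned long offset)
        {
                if (!vma)
                        return true;                    /* nothing above us     */
                if (vma->vm_flags & VM_GROWSDOWN)       /* stack: enforce a gap */
                        return addr + len + sysctl_heap_stack_gap + offset <= vma->vm_start;
                return addr + len <= vma->vm_start;     /* ordinary neighbour   */
        }
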
8776diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
8777index be56a24..443328f 100644
8778--- a/arch/sparc/include/asm/atomic_64.h
8779+++ b/arch/sparc/include/asm/atomic_64.h
8780@@ -14,18 +14,40 @@
8781 #define ATOMIC64_INIT(i) { (i) }
8782
8783 #define atomic_read(v) (*(volatile int *)&(v)->counter)
8784+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
8785+{
8786+ return v->counter;
8787+}
8788 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
8789+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
8790+{
8791+ return v->counter;
8792+}
8793
8794 #define atomic_set(v, i) (((v)->counter) = i)
8795+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
8796+{
8797+ v->counter = i;
8798+}
8799 #define atomic64_set(v, i) (((v)->counter) = i)
8800+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
8801+{
8802+ v->counter = i;
8803+}
8804
8805 extern void atomic_add(int, atomic_t *);
8806+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
8807 extern void atomic64_add(long, atomic64_t *);
8808+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
8809 extern void atomic_sub(int, atomic_t *);
8810+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
8811 extern void atomic64_sub(long, atomic64_t *);
8812+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
8813
8814 extern int atomic_add_ret(int, atomic_t *);
8815+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
8816 extern long atomic64_add_ret(long, atomic64_t *);
8817+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
8818 extern int atomic_sub_ret(int, atomic_t *);
8819 extern long atomic64_sub_ret(long, atomic64_t *);
8820
8821@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
8822 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
8823
8824 #define atomic_inc_return(v) atomic_add_ret(1, v)
8825+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
8826+{
8827+ return atomic_add_ret_unchecked(1, v);
8828+}
8829 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
8830+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
8831+{
8832+ return atomic64_add_ret_unchecked(1, v);
8833+}
8834
8835 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
8836 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
8837
8838 #define atomic_add_return(i, v) atomic_add_ret(i, v)
8839+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
8840+{
8841+ return atomic_add_ret_unchecked(i, v);
8842+}
8843 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
8844+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
8845+{
8846+ return atomic64_add_ret_unchecked(i, v);
8847+}
8848
8849 /*
8850 * atomic_inc_and_test - increment and test
8851@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
8852 * other cases.
8853 */
8854 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
8855+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
8856+{
8857+ return atomic_inc_return_unchecked(v) == 0;
8858+}
8859 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
8860
8861 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
8862@@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
8863 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
8864
8865 #define atomic_inc(v) atomic_add(1, v)
8866+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
8867+{
8868+ atomic_add_unchecked(1, v);
8869+}
8870 #define atomic64_inc(v) atomic64_add(1, v)
8871+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
8872+{
8873+ atomic64_add_unchecked(1, v);
8874+}
8875
8876 #define atomic_dec(v) atomic_sub(1, v)
8877+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
8878+{
8879+ atomic_sub_unchecked(1, v);
8880+}
8881 #define atomic64_dec(v) atomic64_sub(1, v)
8882+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
8883+{
8884+ atomic64_sub_unchecked(1, v);
8885+}
8886
8887 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
8888 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
8889
8890 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
8891+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
8892+{
8893+ return cmpxchg(&v->counter, old, new);
8894+}
8895 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
8896+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
8897+{
8898+ return xchg(&v->counter, new);
8899+}
8900
8901 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
8902 {
8903- int c, old;
8904+ int c, old, new;
8905 c = atomic_read(v);
8906 for (;;) {
8907- if (unlikely(c == (u)))
8908+ if (unlikely(c == u))
8909 break;
8910- old = atomic_cmpxchg((v), c, c + (a));
8911+
8912+ asm volatile("addcc %2, %0, %0\n"
8913+
8914+#ifdef CONFIG_PAX_REFCOUNT
8915+ "tvs %%icc, 6\n"
8916+#endif
8917+
8918+ : "=r" (new)
8919+ : "0" (c), "ir" (a)
8920+ : "cc");
8921+
8922+ old = atomic_cmpxchg(v, c, new);
8923 if (likely(old == c))
8924 break;
8925 c = old;
8926@@ -88,20 +165,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
8927 #define atomic64_cmpxchg(v, o, n) \
8928 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
8929 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
8930+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
8931+{
8932+ return xchg(&v->counter, new);
8933+}
8934
8935 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
8936 {
8937- long c, old;
8938+ long c, old, new;
8939 c = atomic64_read(v);
8940 for (;;) {
8941- if (unlikely(c == (u)))
8942+ if (unlikely(c == u))
8943 break;
8944- old = atomic64_cmpxchg((v), c, c + (a));
8945+
8946+ asm volatile("addcc %2, %0, %0\n"
8947+
8948+#ifdef CONFIG_PAX_REFCOUNT
8949+ "tvs %%xcc, 6\n"
8950+#endif
8951+
8952+ : "=r" (new)
8953+ : "0" (c), "ir" (a)
8954+ : "cc");
8955+
8956+ old = atomic64_cmpxchg(v, c, new);
8957 if (likely(old == c))
8958 break;
8959 c = old;
8960 }
8961- return c != (u);
8962+ return c != u;
8963 }
8964
8965 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
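
The addcc/tvs pairs are the PAX_REFCOUNT instrumentation itself: addcc performs the addition and updates the condition codes, and "tvs %icc, 6" (trap if overflow set, software trap 6) fires exactly when the signed result overflowed, before the cas ever publishes it. The bug class this defeats, in miniature:

        /* Illustrative only: the overflow-to-use-after-free pattern.
         * Drive a 32-bit refcount past INT_MAX and it wraps negative;
         * a later put reaches zero while references remain, freeing a
         * still-used object. */
        static void refcount_overflow_demo(atomic_t *refs) /* already at INT_MAX */
        {
                atomic_inc(refs);       /* stock kernel: silently wraps to INT_MIN */
                                        /* PAX_REFCOUNT: tvs raises trap 6 instead */
        }

The trap itself is handled in arch/sparc/kernel/traps_64.c further down.
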
8966diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
8967index 5bb6991..5c2132e 100644
8968--- a/arch/sparc/include/asm/cache.h
8969+++ b/arch/sparc/include/asm/cache.h
8970@@ -7,10 +7,12 @@
8971 #ifndef _SPARC_CACHE_H
8972 #define _SPARC_CACHE_H
8973
8974+#include <linux/const.h>
8975+
8976 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
8977
8978 #define L1_CACHE_SHIFT 5
8979-#define L1_CACHE_BYTES 32
8980+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8981
8982 #ifdef CONFIG_SPARC32
8983 #define SMP_CACHE_BYTES_SHIFT 5
8984diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
8985index a24e41f..47677ff 100644
8986--- a/arch/sparc/include/asm/elf_32.h
8987+++ b/arch/sparc/include/asm/elf_32.h
8988@@ -114,6 +114,13 @@ typedef struct {
8989
8990 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
8991
8992+#ifdef CONFIG_PAX_ASLR
8993+#define PAX_ELF_ET_DYN_BASE 0x10000UL
8994+
8995+#define PAX_DELTA_MMAP_LEN 16
8996+#define PAX_DELTA_STACK_LEN 16
8997+#endif
8998+
8999 /* This yields a mask that user programs can use to figure out what
9000 instruction set this cpu supports. This can NOT be done in userspace
9001 on Sparc. */
9002diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
9003index 370ca1e..d4f4a98 100644
9004--- a/arch/sparc/include/asm/elf_64.h
9005+++ b/arch/sparc/include/asm/elf_64.h
9006@@ -189,6 +189,13 @@ typedef struct {
9007 #define ELF_ET_DYN_BASE 0x0000010000000000UL
9008 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
9009
9010+#ifdef CONFIG_PAX_ASLR
9011+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
9012+
9013+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
9014+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
9015+#endif
9016+
9017 extern unsigned long sparc64_elf_hwcap;
9018 #define ELF_HWCAP sparc64_elf_hwcap
9019
9020diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
9021index 9b1c36d..209298b 100644
9022--- a/arch/sparc/include/asm/pgalloc_32.h
9023+++ b/arch/sparc/include/asm/pgalloc_32.h
9024@@ -33,6 +33,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
9025 }
9026
9027 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
9028+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
9029
9030 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
9031 unsigned long address)
9032diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
9033index bcfe063..b333142 100644
9034--- a/arch/sparc/include/asm/pgalloc_64.h
9035+++ b/arch/sparc/include/asm/pgalloc_64.h
9036@@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
9037 }
9038
9039 #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
9040+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
9041
9042 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
9043 {
9044diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
9045index 6fc1348..390c50a 100644
9046--- a/arch/sparc/include/asm/pgtable_32.h
9047+++ b/arch/sparc/include/asm/pgtable_32.h
9048@@ -50,6 +50,9 @@ extern unsigned long calc_highpages(void);
9049 #define PAGE_SHARED SRMMU_PAGE_SHARED
9050 #define PAGE_COPY SRMMU_PAGE_COPY
9051 #define PAGE_READONLY SRMMU_PAGE_RDONLY
9052+#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
9053+#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
9054+#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
9055 #define PAGE_KERNEL SRMMU_PAGE_KERNEL
9056
9057 /* Top-level page directory - dummy used by init-mm.
9058@@ -62,18 +65,18 @@ extern unsigned long ptr_in_current_pgd;
9059
9060 /* xwr */
9061 #define __P000 PAGE_NONE
9062-#define __P001 PAGE_READONLY
9063-#define __P010 PAGE_COPY
9064-#define __P011 PAGE_COPY
9065+#define __P001 PAGE_READONLY_NOEXEC
9066+#define __P010 PAGE_COPY_NOEXEC
9067+#define __P011 PAGE_COPY_NOEXEC
9068 #define __P100 PAGE_READONLY
9069 #define __P101 PAGE_READONLY
9070 #define __P110 PAGE_COPY
9071 #define __P111 PAGE_COPY
9072
9073 #define __S000 PAGE_NONE
9074-#define __S001 PAGE_READONLY
9075-#define __S010 PAGE_SHARED
9076-#define __S011 PAGE_SHARED
9077+#define __S001 PAGE_READONLY_NOEXEC
9078+#define __S010 PAGE_SHARED_NOEXEC
9079+#define __S011 PAGE_SHARED_NOEXEC
9080 #define __S100 PAGE_READONLY
9081 #define __S101 PAGE_READONLY
9082 #define __S110 PAGE_SHARED
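
The __Pxxx/__Sxxx tables are indexed by the mmap permission bits (private vs. shared, then x/w/r), and routing every readable or writable but non-PROT_EXEC combination to a *_NOEXEC pgprot means data mappings stop inheriting SRMMU_EXEC implicitly, which is what makes non-executable pages enforceable on srmmu. The consuming code is unchanged upstream logic, roughly:

        /* mm/mmap.c (upstream): the VM_READ/VM_WRITE/VM_EXEC/VM_SHARED bits
         * select one of the 16 entries populated from __P000..__S111 */
        vma->vm_page_prot = protection_map[vm_flags &
                        (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
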
9083diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
9084index 79da178..c2eede8 100644
9085--- a/arch/sparc/include/asm/pgtsrmmu.h
9086+++ b/arch/sparc/include/asm/pgtsrmmu.h
9087@@ -115,6 +115,11 @@
9088 SRMMU_EXEC | SRMMU_REF)
9089 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
9090 SRMMU_EXEC | SRMMU_REF)
9091+
9092+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
9093+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
9094+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
9095+
9096 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
9097 SRMMU_DIRTY | SRMMU_REF)
9098
9099diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
9100index 9689176..63c18ea 100644
9101--- a/arch/sparc/include/asm/spinlock_64.h
9102+++ b/arch/sparc/include/asm/spinlock_64.h
9103@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
9104
9105 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
9106
9107-static void inline arch_read_lock(arch_rwlock_t *lock)
9108+static inline void arch_read_lock(arch_rwlock_t *lock)
9109 {
9110 unsigned long tmp1, tmp2;
9111
9112 __asm__ __volatile__ (
9113 "1: ldsw [%2], %0\n"
9114 " brlz,pn %0, 2f\n"
9115-"4: add %0, 1, %1\n"
9116+"4: addcc %0, 1, %1\n"
9117+
9118+#ifdef CONFIG_PAX_REFCOUNT
9119+" tvs %%icc, 6\n"
9120+#endif
9121+
9122 " cas [%2], %0, %1\n"
9123 " cmp %0, %1\n"
9124 " bne,pn %%icc, 1b\n"
9125@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
9126 " .previous"
9127 : "=&r" (tmp1), "=&r" (tmp2)
9128 : "r" (lock)
9129- : "memory");
9130+ : "memory", "cc");
9131 }
9132
9133-static int inline arch_read_trylock(arch_rwlock_t *lock)
9134+static inline int arch_read_trylock(arch_rwlock_t *lock)
9135 {
9136 int tmp1, tmp2;
9137
9138@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
9139 "1: ldsw [%2], %0\n"
9140 " brlz,a,pn %0, 2f\n"
9141 " mov 0, %0\n"
9142-" add %0, 1, %1\n"
9143+" addcc %0, 1, %1\n"
9144+
9145+#ifdef CONFIG_PAX_REFCOUNT
9146+" tvs %%icc, 6\n"
9147+#endif
9148+
9149 " cas [%2], %0, %1\n"
9150 " cmp %0, %1\n"
9151 " bne,pn %%icc, 1b\n"
9152@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
9153 return tmp1;
9154 }
9155
9156-static void inline arch_read_unlock(arch_rwlock_t *lock)
9157+static inline void arch_read_unlock(arch_rwlock_t *lock)
9158 {
9159 unsigned long tmp1, tmp2;
9160
9161 __asm__ __volatile__(
9162 "1: lduw [%2], %0\n"
9163-" sub %0, 1, %1\n"
9164+" subcc %0, 1, %1\n"
9165+
9166+#ifdef CONFIG_PAX_REFCOUNT
9167+" tvs %%icc, 6\n"
9168+#endif
9169+
9170 " cas [%2], %0, %1\n"
9171 " cmp %0, %1\n"
9172 " bne,pn %%xcc, 1b\n"
9173@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
9174 : "memory");
9175 }
9176
9177-static void inline arch_write_lock(arch_rwlock_t *lock)
9178+static inline void arch_write_lock(arch_rwlock_t *lock)
9179 {
9180 unsigned long mask, tmp1, tmp2;
9181
9182@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
9183 : "memory");
9184 }
9185
9186-static void inline arch_write_unlock(arch_rwlock_t *lock)
9187+static inline void arch_write_unlock(arch_rwlock_t *lock)
9188 {
9189 __asm__ __volatile__(
9190 " stw %%g0, [%0]"
9191@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
9192 : "memory");
9193 }
9194
9195-static int inline arch_write_trylock(arch_rwlock_t *lock)
9196+static inline int arch_write_trylock(arch_rwlock_t *lock)
9197 {
9198 unsigned long mask, tmp1, tmp2, result;
9199
9200diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
9201index dd38075..e7cac83 100644
9202--- a/arch/sparc/include/asm/thread_info_32.h
9203+++ b/arch/sparc/include/asm/thread_info_32.h
9204@@ -49,6 +49,8 @@ struct thread_info {
9205 unsigned long w_saved;
9206
9207 struct restart_block restart_block;
9208+
9209+ unsigned long lowest_stack;
9210 };
9211
9212 /*
9213diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
9214index d5e5042..9bfee76 100644
9215--- a/arch/sparc/include/asm/thread_info_64.h
9216+++ b/arch/sparc/include/asm/thread_info_64.h
9217@@ -63,6 +63,8 @@ struct thread_info {
9218 struct pt_regs *kern_una_regs;
9219 unsigned int kern_una_insn;
9220
9221+ unsigned long lowest_stack;
9222+
9223 unsigned long fpregs[0] __attribute__ ((aligned(64)));
9224 };
9225
9226@@ -192,10 +194,11 @@ register struct thread_info *current_thread_info_reg asm("g6");
9227 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
9228 /* flag bit 6 is available */
9229 #define TIF_32BIT 7 /* 32-bit binary */
9230-/* flag bit 8 is available */
9231+#define TIF_GRSEC_SETXID 8 /* update credentials on syscall entry/exit */
9232 #define TIF_SECCOMP 9 /* secure computing */
9233 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
9234 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
9235+
9236 /* NOTE: Thread flags >= 12 should be ones we have no interest
9237 * in using in assembly, else we can't use the mask as
9238 * an immediate value in instructions such as andcc.
9239@@ -214,12 +217,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
9240 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
9241 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
9242 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
9243+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
9244
9245 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
9246 _TIF_DO_NOTIFY_RESUME_MASK | \
9247 _TIF_NEED_RESCHED)
9248 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
9249
9250+#define _TIF_WORK_SYSCALL \
9251+ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
9252+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
9253+
9254+
9255 /*
9256 * Thread-synchronous status.
9257 *
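
TIF_GRSEC_SETXID takes the free flag bit 8, and _TIF_WORK_SYSCALL folds it into the single mask the syscall entry/exit assembly tests (see the arch/sparc/kernel/syscalls.S hunks below). The header's warning about flags >= 12 exists because sparc andcc immediates are 13-bit sign-extended (simm13); a compile-time check of that constraint could look like this (illustrative, not part of the patch):

        static inline void tif_work_mask_fits_simm13(void)
        {
                /* every work flag sits below bit 12, so the combined mask
                 * is usable as an immediate operand in andcc */
                BUILD_BUG_ON(_TIF_WORK_SYSCALL >= (1UL << 12));
        }
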
9258diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
9259index 0167d26..767bb0c 100644
9260--- a/arch/sparc/include/asm/uaccess.h
9261+++ b/arch/sparc/include/asm/uaccess.h
9262@@ -1,5 +1,6 @@
9263 #ifndef ___ASM_SPARC_UACCESS_H
9264 #define ___ASM_SPARC_UACCESS_H
9265+
9266 #if defined(__sparc__) && defined(__arch64__)
9267 #include <asm/uaccess_64.h>
9268 #else
9269diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
9270index 53a28dd..50c38c3 100644
9271--- a/arch/sparc/include/asm/uaccess_32.h
9272+++ b/arch/sparc/include/asm/uaccess_32.h
9273@@ -250,27 +250,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
9274
9275 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
9276 {
9277- if (n && __access_ok((unsigned long) to, n))
9278+ if ((long)n < 0)
9279+ return n;
9280+
9281+ if (n && __access_ok((unsigned long) to, n)) {
9282+ if (!__builtin_constant_p(n))
9283+ check_object_size(from, n, true);
9284 return __copy_user(to, (__force void __user *) from, n);
9285- else
9286+ } else
9287 return n;
9288 }
9289
9290 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
9291 {
9292+ if ((long)n < 0)
9293+ return n;
9294+
9295+ if (!__builtin_constant_p(n))
9296+ check_object_size(from, n, true);
9297+
9298 return __copy_user(to, (__force void __user *) from, n);
9299 }
9300
9301 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
9302 {
9303- if (n && __access_ok((unsigned long) from, n))
9304+ if ((long)n < 0)
9305+ return n;
9306+
9307+ if (n && __access_ok((unsigned long) from, n)) {
9308+ if (!__builtin_constant_p(n))
9309+ check_object_size(to, n, false);
9310 return __copy_user((__force void __user *) to, from, n);
9311- else
9312+ } else
9313 return n;
9314 }
9315
9316 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
9317 {
9318+ if ((long)n < 0)
9319+ return n;
9320+
9321 return __copy_user((__force void __user *) to, from, n);
9322 }
9323
9324diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
9325index e562d3c..191f176 100644
9326--- a/arch/sparc/include/asm/uaccess_64.h
9327+++ b/arch/sparc/include/asm/uaccess_64.h
9328@@ -10,6 +10,7 @@
9329 #include <linux/compiler.h>
9330 #include <linux/string.h>
9331 #include <linux/thread_info.h>
9332+#include <linux/kernel.h>
9333 #include <asm/asi.h>
9334 #include <asm/spitfire.h>
9335 #include <asm-generic/uaccess-unaligned.h>
9336@@ -214,8 +215,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
9337 static inline unsigned long __must_check
9338 copy_from_user(void *to, const void __user *from, unsigned long size)
9339 {
9340- unsigned long ret = ___copy_from_user(to, from, size);
9341+ unsigned long ret;
9342
9343+ if ((long)size < 0 || size > INT_MAX)
9344+ return size;
9345+
9346+ if (!__builtin_constant_p(size))
9347+ check_object_size(to, size, false);
9348+
9349+ ret = ___copy_from_user(to, from, size);
9350 if (unlikely(ret))
9351 ret = copy_from_user_fixup(to, from, size);
9352
9353@@ -231,8 +239,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
9354 static inline unsigned long __must_check
9355 copy_to_user(void __user *to, const void *from, unsigned long size)
9356 {
9357- unsigned long ret = ___copy_to_user(to, from, size);
9358+ unsigned long ret;
9359
9360+ if ((long)size < 0 || size > INT_MAX)
9361+ return size;
9362+
9363+ if (!__builtin_constant_p(size))
9364+ check_object_size(from, size, true);
9365+
9366+ ret = ___copy_to_user(to, from, size);
9367 if (unlikely(ret))
9368 ret = copy_to_user_fixup(to, from, size);
9369 return ret;
9370diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
9371index d432fb2..6056af1 100644
9372--- a/arch/sparc/kernel/Makefile
9373+++ b/arch/sparc/kernel/Makefile
9374@@ -3,7 +3,7 @@
9375 #
9376
9377 asflags-y := -ansi
9378-ccflags-y := -Werror
9379+#ccflags-y := -Werror
9380
9381 extra-y := head_$(BITS).o
9382
9383diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
9384index 5ef48da..11d460f 100644
9385--- a/arch/sparc/kernel/ds.c
9386+++ b/arch/sparc/kernel/ds.c
9387@@ -783,6 +783,16 @@ void ldom_set_var(const char *var, const char *value)
9388 char *base, *p;
9389 int msg_len, loops;
9390
9391+ if (strlen(var) + strlen(value) + 2 >
9392+ sizeof(pkt) - sizeof(pkt.header)) {
9393+ printk(KERN_ERR PFX
9394+ "contents length: %zu, which more than max: %lu,"
9395+ "so could not set (%s) variable to (%s).\n",
9396+ strlen(var) + strlen(value) + 2,
9397+ sizeof(pkt) - sizeof(pkt.header), var, value);
9398+ return;
9399+ }
9400+
9401 memset(&pkt, 0, sizeof(pkt));
9402 pkt.header.data.tag.type = DS_DATA;
9403 pkt.header.data.handle = cp->handle;
9404diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
9405index fdd819d..5af08c8 100644
9406--- a/arch/sparc/kernel/process_32.c
9407+++ b/arch/sparc/kernel/process_32.c
9408@@ -116,14 +116,14 @@ void show_regs(struct pt_regs *r)
9409
9410 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
9411 r->psr, r->pc, r->npc, r->y, print_tainted());
9412- printk("PC: <%pS>\n", (void *) r->pc);
9413+ printk("PC: <%pA>\n", (void *) r->pc);
9414 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
9415 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
9416 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
9417 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
9418 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
9419 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
9420- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
9421+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
9422
9423 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
9424 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
9425@@ -160,7 +160,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
9426 rw = (struct reg_window32 *) fp;
9427 pc = rw->ins[7];
9428 printk("[%08lx : ", pc);
9429- printk("%pS ] ", (void *) pc);
9430+ printk("%pA ] ", (void *) pc);
9431 fp = rw->ins[6];
9432 } while (++count < 16);
9433 printk("\n");
9434diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
9435index baebab2..9cd13b1 100644
9436--- a/arch/sparc/kernel/process_64.c
9437+++ b/arch/sparc/kernel/process_64.c
9438@@ -158,7 +158,7 @@ static void show_regwindow(struct pt_regs *regs)
9439 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
9440 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
9441 if (regs->tstate & TSTATE_PRIV)
9442- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
9443+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
9444 }
9445
9446 void show_regs(struct pt_regs *regs)
9447@@ -167,7 +167,7 @@ void show_regs(struct pt_regs *regs)
9448
9449 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
9450 regs->tpc, regs->tnpc, regs->y, print_tainted());
9451- printk("TPC: <%pS>\n", (void *) regs->tpc);
9452+ printk("TPC: <%pA>\n", (void *) regs->tpc);
9453 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
9454 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
9455 regs->u_regs[3]);
9456@@ -180,7 +180,7 @@ void show_regs(struct pt_regs *regs)
9457 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
9458 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
9459 regs->u_regs[15]);
9460- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
9461+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
9462 show_regwindow(regs);
9463 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
9464 }
9465@@ -269,7 +269,7 @@ void arch_trigger_all_cpu_backtrace(void)
9466 ((tp && tp->task) ? tp->task->pid : -1));
9467
9468 if (gp->tstate & TSTATE_PRIV) {
9469- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
9470+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
9471 (void *) gp->tpc,
9472 (void *) gp->o7,
9473 (void *) gp->i7,
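
%pS becomes %pA throughout these register and stack dumps. %pA is a printk extension this patch adds in lib/vsprintf.c: like %pS it resolves a kernel text address to a symbol, but it cooperates with CONFIG_GRKERNSEC_HIDESYM so oops output does not hand raw kernel pointers to unprivileged readers of dmesg (semantics summarized from the patch's intent; the authoritative hunk is in the lib/vsprintf.c section of this file). Usage is format-compatible:

        printk("TPC: <%pA>\n", (void *) regs->tpc);     /* symbol shown, raw
                                                         * address censored
                                                         * under HIDESYM */
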
9474diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
9475index 79cc0d1..ec62734 100644
9476--- a/arch/sparc/kernel/prom_common.c
9477+++ b/arch/sparc/kernel/prom_common.c
9478@@ -144,7 +144,7 @@ static int __init prom_common_nextprop(phandle node, char *prev, char *buf)
9479
9480 unsigned int prom_early_allocated __initdata;
9481
9482-static struct of_pdt_ops prom_sparc_ops __initdata = {
9483+static struct of_pdt_ops prom_sparc_ops __initconst = {
9484 .nextprop = prom_common_nextprop,
9485 .getproplen = prom_getproplen,
9486 .getproperty = prom_getproperty,
9487diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
9488index 7ff45e4..a58f271 100644
9489--- a/arch/sparc/kernel/ptrace_64.c
9490+++ b/arch/sparc/kernel/ptrace_64.c
9491@@ -1057,6 +1057,10 @@ long arch_ptrace(struct task_struct *child, long request,
9492 return ret;
9493 }
9494
9495+#ifdef CONFIG_GRKERNSEC_SETXID
9496+extern void gr_delayed_cred_worker(void);
9497+#endif
9498+
9499 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
9500 {
9501 int ret = 0;
9502@@ -1064,6 +1068,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
9503 /* do the secure computing check first */
9504 secure_computing_strict(regs->u_regs[UREG_G1]);
9505
9506+#ifdef CONFIG_GRKERNSEC_SETXID
9507+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9508+ gr_delayed_cred_worker();
9509+#endif
9510+
9511 if (test_thread_flag(TIF_SYSCALL_TRACE))
9512 ret = tracehook_report_syscall_entry(regs);
9513
9514@@ -1084,6 +1093,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
9515
9516 asmlinkage void syscall_trace_leave(struct pt_regs *regs)
9517 {
9518+#ifdef CONFIG_GRKERNSEC_SETXID
9519+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9520+ gr_delayed_cred_worker();
9521+#endif
9522+
9523 audit_syscall_exit(regs);
9524
9525 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
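
The TIF_GRSEC_SETXID hooks implement deferred credential switching (CONFIG_GRKERNSEC_SETXID): when one thread of a process changes uid/gid, the patch marks every sibling thread with this flag, and each sibling commits the staged credentials at its next syscall boundary, before tracing or the syscall body can run with stale privileges. The hook itself is deliberately tiny:

        #ifdef CONFIG_GRKERNSEC_SETXID
                if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
                        gr_delayed_cred_worker();       /* adopt creds staged by a
                                                         * sibling's set*id() call */
        #endif

gr_delayed_cred_worker() itself is defined in the grsecurity/ directory added later in the patch.
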
9526diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
9527index 3a8d184..49498a8 100644
9528--- a/arch/sparc/kernel/sys_sparc_32.c
9529+++ b/arch/sparc/kernel/sys_sparc_32.c
9530@@ -52,7 +52,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
9531 if (len > TASK_SIZE - PAGE_SIZE)
9532 return -ENOMEM;
9533 if (!addr)
9534- addr = TASK_UNMAPPED_BASE;
9535+ addr = current->mm->mmap_base;
9536
9537 info.flags = 0;
9538 info.length = len;
9539diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
9540index 2daaaa6..4fb84dc 100644
9541--- a/arch/sparc/kernel/sys_sparc_64.c
9542+++ b/arch/sparc/kernel/sys_sparc_64.c
9543@@ -90,13 +90,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
9544 struct vm_area_struct * vma;
9545 unsigned long task_size = TASK_SIZE;
9546 int do_color_align;
9547+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9548 struct vm_unmapped_area_info info;
9549
9550 if (flags & MAP_FIXED) {
9551 /* We do not accept a shared mapping if it would violate
9552 * cache aliasing constraints.
9553 */
9554- if ((flags & MAP_SHARED) &&
9555+ if ((filp || (flags & MAP_SHARED)) &&
9556 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
9557 return -EINVAL;
9558 return addr;
9559@@ -111,6 +112,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
9560 if (filp || (flags & MAP_SHARED))
9561 do_color_align = 1;
9562
9563+#ifdef CONFIG_PAX_RANDMMAP
9564+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9565+#endif
9566+
9567 if (addr) {
9568 if (do_color_align)
9569 addr = COLOR_ALIGN(addr, pgoff);
9570@@ -118,22 +123,28 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
9571 addr = PAGE_ALIGN(addr);
9572
9573 vma = find_vma(mm, addr);
9574- if (task_size - len >= addr &&
9575- (!vma || addr + len <= vma->vm_start))
9576+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9577 return addr;
9578 }
9579
9580 info.flags = 0;
9581 info.length = len;
9582- info.low_limit = TASK_UNMAPPED_BASE;
9583+ info.low_limit = mm->mmap_base;
9584 info.high_limit = min(task_size, VA_EXCLUDE_START);
9585 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
9586 info.align_offset = pgoff << PAGE_SHIFT;
9587+ info.threadstack_offset = offset;
9588 addr = vm_unmapped_area(&info);
9589
9590 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
9591 VM_BUG_ON(addr != -ENOMEM);
9592 info.low_limit = VA_EXCLUDE_END;
9593+
9594+#ifdef CONFIG_PAX_RANDMMAP
9595+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9596+ info.low_limit += mm->delta_mmap;
9597+#endif
9598+
9599 info.high_limit = task_size;
9600 addr = vm_unmapped_area(&info);
9601 }
9602@@ -151,6 +162,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9603 unsigned long task_size = STACK_TOP32;
9604 unsigned long addr = addr0;
9605 int do_color_align;
9606+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9607 struct vm_unmapped_area_info info;
9608
9609 /* This should only ever run for 32-bit processes. */
9610@@ -160,7 +172,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9611 /* We do not accept a shared mapping if it would violate
9612 * cache aliasing constraints.
9613 */
9614- if ((flags & MAP_SHARED) &&
9615+ if ((filp || (flags & MAP_SHARED)) &&
9616 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
9617 return -EINVAL;
9618 return addr;
9619@@ -173,6 +185,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9620 if (filp || (flags & MAP_SHARED))
9621 do_color_align = 1;
9622
9623+#ifdef CONFIG_PAX_RANDMMAP
9624+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9625+#endif
9626+
9627 /* requesting a specific address */
9628 if (addr) {
9629 if (do_color_align)
9630@@ -181,8 +197,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9631 addr = PAGE_ALIGN(addr);
9632
9633 vma = find_vma(mm, addr);
9634- if (task_size - len >= addr &&
9635- (!vma || addr + len <= vma->vm_start))
9636+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9637 return addr;
9638 }
9639
9640@@ -192,6 +207,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9641 info.high_limit = mm->mmap_base;
9642 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
9643 info.align_offset = pgoff << PAGE_SHIFT;
9644+ info.threadstack_offset = offset;
9645 addr = vm_unmapped_area(&info);
9646
9647 /*
9648@@ -204,6 +220,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9649 VM_BUG_ON(addr != -ENOMEM);
9650 info.flags = 0;
9651 info.low_limit = TASK_UNMAPPED_BASE;
9652+
9653+#ifdef CONFIG_PAX_RANDMMAP
9654+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9655+ info.low_limit += mm->delta_mmap;
9656+#endif
9657+
9658 info.high_limit = STACK_TOP32;
9659 addr = vm_unmapped_area(&info);
9660 }
9661@@ -260,10 +282,14 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
9662 EXPORT_SYMBOL(get_fb_unmapped_area);
9663
9664 /* Essentially the same as PowerPC. */
9665-static unsigned long mmap_rnd(void)
9666+static unsigned long mmap_rnd(struct mm_struct *mm)
9667 {
9668 unsigned long rnd = 0UL;
9669
9670+#ifdef CONFIG_PAX_RANDMMAP
9671+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9672+#endif
9673+
9674 if (current->flags & PF_RANDOMIZE) {
9675 unsigned long val = get_random_int();
9676 if (test_thread_flag(TIF_32BIT))
9677@@ -276,7 +302,7 @@ static unsigned long mmap_rnd(void)
9678
9679 void arch_pick_mmap_layout(struct mm_struct *mm)
9680 {
9681- unsigned long random_factor = mmap_rnd();
9682+ unsigned long random_factor = mmap_rnd(mm);
9683 unsigned long gap;
9684
9685 /*
9686@@ -289,6 +315,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9687 gap == RLIM_INFINITY ||
9688 sysctl_legacy_va_layout) {
9689 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
9690+
9691+#ifdef CONFIG_PAX_RANDMMAP
9692+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9693+ mm->mmap_base += mm->delta_mmap;
9694+#endif
9695+
9696 mm->get_unmapped_area = arch_get_unmapped_area;
9697 mm->unmap_area = arch_unmap_area;
9698 } else {
9699@@ -301,6 +333,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9700 gap = (task_size / 6 * 5);
9701
9702 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
9703+
9704+#ifdef CONFIG_PAX_RANDMMAP
9705+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9706+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9707+#endif
9708+
9709 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
9710 mm->unmap_area = arch_unmap_area_topdown;
9711 }
9712diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
9713index 22a1098..6255eb9 100644
9714--- a/arch/sparc/kernel/syscalls.S
9715+++ b/arch/sparc/kernel/syscalls.S
9716@@ -52,7 +52,7 @@ sys32_rt_sigreturn:
9717 #endif
9718 .align 32
9719 1: ldx [%g6 + TI_FLAGS], %l5
9720- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
9721+ andcc %l5, _TIF_WORK_SYSCALL, %g0
9722 be,pt %icc, rtrap
9723 nop
9724 call syscall_trace_leave
9725@@ -184,7 +184,7 @@ linux_sparc_syscall32:
9726
9727 srl %i5, 0, %o5 ! IEU1
9728 srl %i2, 0, %o2 ! IEU0 Group
9729- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
9730+ andcc %l0, _TIF_WORK_SYSCALL, %g0
9731 bne,pn %icc, linux_syscall_trace32 ! CTI
9732 mov %i0, %l5 ! IEU1
9733 call %l7 ! CTI Group brk forced
9734@@ -207,7 +207,7 @@ linux_sparc_syscall:
9735
9736 mov %i3, %o3 ! IEU1
9737 mov %i4, %o4 ! IEU0 Group
9738- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
9739+ andcc %l0, _TIF_WORK_SYSCALL, %g0
9740 bne,pn %icc, linux_syscall_trace ! CTI Group
9741 mov %i0, %l5 ! IEU0
9742 2: call %l7 ! CTI Group brk forced
9743@@ -223,7 +223,7 @@ ret_sys_call:
9744
9745 cmp %o0, -ERESTART_RESTARTBLOCK
9746 bgeu,pn %xcc, 1f
9747- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
9748+ andcc %l0, _TIF_WORK_SYSCALL, %g0
9749 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
9750
9751 2:
9752diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c
9753index 654e8aa..45f431b 100644
9754--- a/arch/sparc/kernel/sysfs.c
9755+++ b/arch/sparc/kernel/sysfs.c
9756@@ -266,7 +266,7 @@ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
9757 return NOTIFY_OK;
9758 }
9759
9760-static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
9761+static struct notifier_block sysfs_cpu_nb = {
9762 .notifier_call = sysfs_cpu_notify,
9763 };
9764
9765diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
9766index 6629829..036032d 100644
9767--- a/arch/sparc/kernel/traps_32.c
9768+++ b/arch/sparc/kernel/traps_32.c
9769@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
9770 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
9771 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
9772
9773+extern void gr_handle_kernel_exploit(void);
9774+
9775 void die_if_kernel(char *str, struct pt_regs *regs)
9776 {
9777 static int die_counter;
9778@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
9779 count++ < 30 &&
9780 (((unsigned long) rw) >= PAGE_OFFSET) &&
9781 !(((unsigned long) rw) & 0x7)) {
9782- printk("Caller[%08lx]: %pS\n", rw->ins[7],
9783+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
9784 (void *) rw->ins[7]);
9785 rw = (struct reg_window32 *)rw->ins[6];
9786 }
9787 }
9788 printk("Instruction DUMP:");
9789 instruction_dump ((unsigned long *) regs->pc);
9790- if(regs->psr & PSR_PS)
9791+ if(regs->psr & PSR_PS) {
9792+ gr_handle_kernel_exploit();
9793 do_exit(SIGKILL);
9794+ }
9795 do_exit(SIGSEGV);
9796 }
9797
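
gr_handle_kernel_exploit() is grsecurity's active response to an oops taken in kernel context (PSR_PS set means the trap came from supervisor mode): under CONFIG_GRKERNSEC_KERN_LOCKOUT it treats the crash as a likely exploitation attempt, killing the offending non-root user's processes and banning further ones, or panicking when the trigger was root (behaviour summarized from the option's help text). Because the call sits on the kernel-mode path only, ordinary user-space SIGSEGVs are unaffected:

        if (regs->psr & PSR_PS) {               /* died in kernel mode    */
                gr_handle_kernel_exploit();     /* lockout/ban per config */
                do_exit(SIGKILL);
        }
        do_exit(SIGSEGV);                       /* plain user-mode crash  */

The sparc64 die_if_kernel() below gets the same treatment.
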
9798diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
9799index b3f833a..ac74b2d 100644
9800--- a/arch/sparc/kernel/traps_64.c
9801+++ b/arch/sparc/kernel/traps_64.c
9802@@ -76,7 +76,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
9803 i + 1,
9804 p->trapstack[i].tstate, p->trapstack[i].tpc,
9805 p->trapstack[i].tnpc, p->trapstack[i].tt);
9806- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
9807+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
9808 }
9809 }
9810
9811@@ -96,6 +96,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
9812
9813 lvl -= 0x100;
9814 if (regs->tstate & TSTATE_PRIV) {
9815+
9816+#ifdef CONFIG_PAX_REFCOUNT
9817+ if (lvl == 6)
9818+ pax_report_refcount_overflow(regs);
9819+#endif
9820+
9821 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
9822 die_if_kernel(buffer, regs);
9823 }
9824@@ -114,11 +120,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
9825 void bad_trap_tl1(struct pt_regs *regs, long lvl)
9826 {
9827 char buffer[32];
9828-
9829+
9830 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
9831 0, lvl, SIGTRAP) == NOTIFY_STOP)
9832 return;
9833
9834+#ifdef CONFIG_PAX_REFCOUNT
9835+ if (lvl == 6)
9836+ pax_report_refcount_overflow(regs);
9837+#endif
9838+
9839 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
9840
9841 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
9842@@ -1142,7 +1153,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
9843 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
9844 printk("%s" "ERROR(%d): ",
9845 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
9846- printk("TPC<%pS>\n", (void *) regs->tpc);
9847+ printk("TPC<%pA>\n", (void *) regs->tpc);
9848 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
9849 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
9850 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
9851@@ -1749,7 +1760,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
9852 smp_processor_id(),
9853 (type & 0x1) ? 'I' : 'D',
9854 regs->tpc);
9855- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
9856+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
9857 panic("Irrecoverable Cheetah+ parity error.");
9858 }
9859
9860@@ -1757,7 +1768,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
9861 smp_processor_id(),
9862 (type & 0x1) ? 'I' : 'D',
9863 regs->tpc);
9864- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
9865+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
9866 }
9867
9868 struct sun4v_error_entry {
9869@@ -2104,9 +2115,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
9870
9871 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
9872 regs->tpc, tl);
9873- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
9874+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
9875 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
9876- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
9877+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
9878 (void *) regs->u_regs[UREG_I7]);
9879 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
9880 "pte[%lx] error[%lx]\n",
9881@@ -2128,9 +2139,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
9882
9883 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
9884 regs->tpc, tl);
9885- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
9886+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
9887 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
9888- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
9889+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
9890 (void *) regs->u_regs[UREG_I7]);
9891 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
9892 "pte[%lx] error[%lx]\n",
9893@@ -2336,13 +2347,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
9894 fp = (unsigned long)sf->fp + STACK_BIAS;
9895 }
9896
9897- printk(" [%016lx] %pS\n", pc, (void *) pc);
9898+ printk(" [%016lx] %pA\n", pc, (void *) pc);
9899 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
9900 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
9901 int index = tsk->curr_ret_stack;
9902 if (tsk->ret_stack && index >= graph) {
9903 pc = tsk->ret_stack[index - graph].ret;
9904- printk(" [%016lx] %pS\n", pc, (void *) pc);
9905+ printk(" [%016lx] %pA\n", pc, (void *) pc);
9906 graph++;
9907 }
9908 }
9909@@ -2360,6 +2371,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
9910 return (struct reg_window *) (fp + STACK_BIAS);
9911 }
9912
9913+extern void gr_handle_kernel_exploit(void);
9914+
9915 void die_if_kernel(char *str, struct pt_regs *regs)
9916 {
9917 static int die_counter;
9918@@ -2388,7 +2401,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
9919 while (rw &&
9920 count++ < 30 &&
9921 kstack_valid(tp, (unsigned long) rw)) {
9922- printk("Caller[%016lx]: %pS\n", rw->ins[7],
9923+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
9924 (void *) rw->ins[7]);
9925
9926 rw = kernel_stack_up(rw);
9927@@ -2401,8 +2414,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
9928 }
9929 user_instruction_dump ((unsigned int __user *) regs->tpc);
9930 }
9931- if (regs->tstate & TSTATE_PRIV)
9932+ if (regs->tstate & TSTATE_PRIV) {
9933+ gr_handle_kernel_exploit();
9934 do_exit(SIGKILL);
9935+ }
9936 do_exit(SIGSEGV);
9937 }
9938 EXPORT_SYMBOL(die_if_kernel);
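
This closes the loop on PAX_REFCOUNT for sparc64: the "tvs %icc, 6" / "tvs %xcc, 6" instructions emitted in the atomic primitives (and in arch/sparc/lib/atomic_64.S below) raise software trap 6 on signed overflow, and that trap is delivered to bad_trap()/bad_trap_tl1() with lvl == 6 once the 0x100 software-trap offset is subtracted. Flow sketch (reconstructed from this patch's hunks; the kill behaviour reflects how pax_report_refcount_overflow() is generally documented to react):

        static void refcount_trap_path(struct my_obj *obj)      /* hypothetical */
        {
                atomic_inc(&obj->refs); /* addcc + "tvs %icc, 6" on overflow */
        }
        /* sw trap 6 arrives as tt 0x106 -> bad_trap(regs, 0x106);          */
        /* lvl -= 0x100 leaves 6, so pax_report_refcount_overflow(regs)     */
        /* logs the task and registers and kills it, rather than oopsing    */
        /* as an anonymous "Kernel bad sw trap 6".                          */
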
9939diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
9940index 8201c25e..072a2a7 100644
9941--- a/arch/sparc/kernel/unaligned_64.c
9942+++ b/arch/sparc/kernel/unaligned_64.c
9943@@ -286,7 +286,7 @@ static void log_unaligned(struct pt_regs *regs)
9944 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
9945
9946 if (__ratelimit(&ratelimit)) {
9947- printk("Kernel unaligned access at TPC[%lx] %pS\n",
9948+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
9949 regs->tpc, (void *) regs->tpc);
9950 }
9951 }
9952diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
9953index dbe119b..089c7c1 100644
9954--- a/arch/sparc/lib/Makefile
9955+++ b/arch/sparc/lib/Makefile
9956@@ -2,7 +2,7 @@
9957 #
9958
9959 asflags-y := -ansi -DST_DIV0=0x02
9960-ccflags-y := -Werror
9961+#ccflags-y := -Werror
9962
9963 lib-$(CONFIG_SPARC32) += ashrdi3.o
9964 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
9965diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
9966index 85c233d..68500e0 100644
9967--- a/arch/sparc/lib/atomic_64.S
9968+++ b/arch/sparc/lib/atomic_64.S
9969@@ -17,7 +17,12 @@
9970 ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
9971 BACKOFF_SETUP(%o2)
9972 1: lduw [%o1], %g1
9973- add %g1, %o0, %g7
9974+ addcc %g1, %o0, %g7
9975+
9976+#ifdef CONFIG_PAX_REFCOUNT
9977+ tvs %icc, 6
9978+#endif
9979+
9980 cas [%o1], %g1, %g7
9981 cmp %g1, %g7
9982 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
9983@@ -27,10 +32,28 @@ ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
9984 2: BACKOFF_SPIN(%o2, %o3, 1b)
9985 ENDPROC(atomic_add)
9986
9987+ENTRY(atomic_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
9988+ BACKOFF_SETUP(%o2)
9989+1: lduw [%o1], %g1
9990+ add %g1, %o0, %g7
9991+ cas [%o1], %g1, %g7
9992+ cmp %g1, %g7
9993+ bne,pn %icc, 2f
9994+ nop
9995+ retl
9996+ nop
9997+2: BACKOFF_SPIN(%o2, %o3, 1b)
9998+ENDPROC(atomic_add_unchecked)
9999+
10000 ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
10001 BACKOFF_SETUP(%o2)
10002 1: lduw [%o1], %g1
10003- sub %g1, %o0, %g7
10004+ subcc %g1, %o0, %g7
10005+
10006+#ifdef CONFIG_PAX_REFCOUNT
10007+ tvs %icc, 6
10008+#endif
10009+
10010 cas [%o1], %g1, %g7
10011 cmp %g1, %g7
10012 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
10013@@ -40,10 +63,28 @@ ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
10014 2: BACKOFF_SPIN(%o2, %o3, 1b)
10015 ENDPROC(atomic_sub)
10016
10017+ENTRY(atomic_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
10018+ BACKOFF_SETUP(%o2)
10019+1: lduw [%o1], %g1
10020+ sub %g1, %o0, %g7
10021+ cas [%o1], %g1, %g7
10022+ cmp %g1, %g7
10023+ bne,pn %icc, 2f
10024+ nop
10025+ retl
10026+ nop
10027+2: BACKOFF_SPIN(%o2, %o3, 1b)
10028+ENDPROC(atomic_sub_unchecked)
10029+
10030 ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
10031 BACKOFF_SETUP(%o2)
10032 1: lduw [%o1], %g1
10033- add %g1, %o0, %g7
10034+ addcc %g1, %o0, %g7
10035+
10036+#ifdef CONFIG_PAX_REFCOUNT
10037+ tvs %icc, 6
10038+#endif
10039+
10040 cas [%o1], %g1, %g7
10041 cmp %g1, %g7
10042 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
10043@@ -53,10 +94,29 @@ ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
10044 2: BACKOFF_SPIN(%o2, %o3, 1b)
10045 ENDPROC(atomic_add_ret)
10046
10047+ENTRY(atomic_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
10048+ BACKOFF_SETUP(%o2)
10049+1: lduw [%o1], %g1
10050+ addcc %g1, %o0, %g7
10051+ cas [%o1], %g1, %g7
10052+ cmp %g1, %g7
10053+ bne,pn %icc, 2f
10054+ add %g7, %o0, %g7
10055+ sra %g7, 0, %o0
10056+ retl
10057+ nop
10058+2: BACKOFF_SPIN(%o2, %o3, 1b)
10059+ENDPROC(atomic_add_ret_unchecked)
10060+
10061 ENTRY(atomic_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
10062 BACKOFF_SETUP(%o2)
10063 1: lduw [%o1], %g1
10064- sub %g1, %o0, %g7
10065+ subcc %g1, %o0, %g7
10066+
10067+#ifdef CONFIG_PAX_REFCOUNT
10068+ tvs %icc, 6
10069+#endif
10070+
10071 cas [%o1], %g1, %g7
10072 cmp %g1, %g7
10073 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
10074@@ -69,7 +129,12 @@ ENDPROC(atomic_sub_ret)
10075 ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
10076 BACKOFF_SETUP(%o2)
10077 1: ldx [%o1], %g1
10078- add %g1, %o0, %g7
10079+ addcc %g1, %o0, %g7
10080+
10081+#ifdef CONFIG_PAX_REFCOUNT
10082+ tvs %xcc, 6
10083+#endif
10084+
10085 casx [%o1], %g1, %g7
10086 cmp %g1, %g7
10087 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
10088@@ -79,10 +144,28 @@ ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
10089 2: BACKOFF_SPIN(%o2, %o3, 1b)
10090 ENDPROC(atomic64_add)
10091
10092+ENTRY(atomic64_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
10093+ BACKOFF_SETUP(%o2)
10094+1: ldx [%o1], %g1
10095+ addcc %g1, %o0, %g7
10096+ casx [%o1], %g1, %g7
10097+ cmp %g1, %g7
10098+ bne,pn %xcc, 2f
10099+ nop
10100+ retl
10101+ nop
10102+2: BACKOFF_SPIN(%o2, %o3, 1b)
10103+ENDPROC(atomic64_add_unchecked)
10104+
10105 ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
10106 BACKOFF_SETUP(%o2)
10107 1: ldx [%o1], %g1
10108- sub %g1, %o0, %g7
10109+ subcc %g1, %o0, %g7
10110+
10111+#ifdef CONFIG_PAX_REFCOUNT
10112+ tvs %xcc, 6
10113+#endif
10114+
10115 casx [%o1], %g1, %g7
10116 cmp %g1, %g7
10117 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
10118@@ -92,10 +175,28 @@ ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
10119 2: BACKOFF_SPIN(%o2, %o3, 1b)
10120 ENDPROC(atomic64_sub)
10121
10122+ENTRY(atomic64_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
10123+ BACKOFF_SETUP(%o2)
10124+1: ldx [%o1], %g1
10125+ subcc %g1, %o0, %g7
10126+ casx [%o1], %g1, %g7
10127+ cmp %g1, %g7
10128+ bne,pn %xcc, 2f
10129+ nop
10130+ retl
10131+ nop
10132+2: BACKOFF_SPIN(%o2, %o3, 1b)
10133+ENDPROC(atomic64_sub_unchecked)
10134+
10135 ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
10136 BACKOFF_SETUP(%o2)
10137 1: ldx [%o1], %g1
10138- add %g1, %o0, %g7
10139+ addcc %g1, %o0, %g7
10140+
10141+#ifdef CONFIG_PAX_REFCOUNT
10142+ tvs %xcc, 6
10143+#endif
10144+
10145 casx [%o1], %g1, %g7
10146 cmp %g1, %g7
10147 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
10148@@ -105,10 +206,29 @@ ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
10149 2: BACKOFF_SPIN(%o2, %o3, 1b)
10150 ENDPROC(atomic64_add_ret)
10151
10152+ENTRY(atomic64_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
10153+ BACKOFF_SETUP(%o2)
10154+1: ldx [%o1], %g1
10155+ addcc %g1, %o0, %g7
10156+ casx [%o1], %g1, %g7
10157+ cmp %g1, %g7
10158+ bne,pn %xcc, 2f
10159+ add %g7, %o0, %g7
10160+ mov %g7, %o0
10161+ retl
10162+ nop
10163+2: BACKOFF_SPIN(%o2, %o3, 1b)
10164+ENDPROC(atomic64_add_ret_unchecked)
10165+
10166 ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
10167 BACKOFF_SETUP(%o2)
10168 1: ldx [%o1], %g1
10169- sub %g1, %o0, %g7
10170+ subcc %g1, %o0, %g7
10171+
10172+#ifdef CONFIG_PAX_REFCOUNT
10173+ tvs %xcc, 6
10174+#endif
10175+
10176 casx [%o1], %g1, %g7
10177 cmp %g1, %g7
10178 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
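
The CONFIG_PAX_REFCOUNT hunks above all follow one pattern: the plain add/sub in each atomic primitive becomes addcc/subcc so the condition codes are set, and tvs %icc, 6 (or %xcc for the 64-bit ops) raises trap 6 if the signed result overflowed, turning a reference-count overflow into an immediate fault rather than a later use-after-free. The new *_unchecked entry points keep the original unchecked arithmetic for counters that may legitimately wrap. A C analogue of what the checked path does, using the GCC/Clang __builtin_add_overflow builtin (an illustration, not the patch's code):

    #include <stdio.h>
    #include <stdlib.h>

    /* Illustration only: the sparc addcc/tvs pair amounts to a signed
     * overflow check on every refcount update. */
    static int refcount_add_checked(int *v, int inc)
    {
        int result;
        if (__builtin_add_overflow(*v, inc, &result)) {
            fprintf(stderr, "refcount overflow\n"); /* tvs would trap here */
            abort();
        }
        return *v = result;
    }

    int main(void)
    {
        int counter = 0x7ffffffe;
        printf("%d\n", refcount_add_checked(&counter, 1)); /* INT_MAX: fine */
        refcount_add_checked(&counter, 1);                 /* overflow: aborts */
        return 0;
    }
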
10179diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
10180index 0c4e35e..745d3e4 100644
10181--- a/arch/sparc/lib/ksyms.c
10182+++ b/arch/sparc/lib/ksyms.c
10183@@ -109,12 +109,18 @@ EXPORT_SYMBOL(__downgrade_write);
10184
10185 /* Atomic counter implementation. */
10186 EXPORT_SYMBOL(atomic_add);
10187+EXPORT_SYMBOL(atomic_add_unchecked);
10188 EXPORT_SYMBOL(atomic_add_ret);
10189+EXPORT_SYMBOL(atomic_add_ret_unchecked);
10190 EXPORT_SYMBOL(atomic_sub);
10191+EXPORT_SYMBOL(atomic_sub_unchecked);
10192 EXPORT_SYMBOL(atomic_sub_ret);
10193 EXPORT_SYMBOL(atomic64_add);
10194+EXPORT_SYMBOL(atomic64_add_unchecked);
10195 EXPORT_SYMBOL(atomic64_add_ret);
10196+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
10197 EXPORT_SYMBOL(atomic64_sub);
10198+EXPORT_SYMBOL(atomic64_sub_unchecked);
10199 EXPORT_SYMBOL(atomic64_sub_ret);
10200 EXPORT_SYMBOL(atomic64_dec_if_positive);
10201
10202diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
10203index 30c3ecc..736f015 100644
10204--- a/arch/sparc/mm/Makefile
10205+++ b/arch/sparc/mm/Makefile
10206@@ -2,7 +2,7 @@
10207 #
10208
10209 asflags-y := -ansi
10210-ccflags-y := -Werror
10211+#ccflags-y := -Werror
10212
10213 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
10214 obj-y += fault_$(BITS).o
10215diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
10216index e98bfda..ea8d221 100644
10217--- a/arch/sparc/mm/fault_32.c
10218+++ b/arch/sparc/mm/fault_32.c
10219@@ -21,6 +21,9 @@
10220 #include <linux/perf_event.h>
10221 #include <linux/interrupt.h>
10222 #include <linux/kdebug.h>
10223+#include <linux/slab.h>
10224+#include <linux/pagemap.h>
10225+#include <linux/compiler.h>
10226
10227 #include <asm/page.h>
10228 #include <asm/pgtable.h>
10229@@ -159,6 +162,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
10230 return safe_compute_effective_address(regs, insn);
10231 }
10232
10233+#ifdef CONFIG_PAX_PAGEEXEC
10234+#ifdef CONFIG_PAX_DLRESOLVE
10235+static void pax_emuplt_close(struct vm_area_struct *vma)
10236+{
10237+ vma->vm_mm->call_dl_resolve = 0UL;
10238+}
10239+
10240+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
10241+{
10242+ unsigned int *kaddr;
10243+
10244+ vmf->page = alloc_page(GFP_HIGHUSER);
10245+ if (!vmf->page)
10246+ return VM_FAULT_OOM;
10247+
10248+ kaddr = kmap(vmf->page);
10249+ memset(kaddr, 0, PAGE_SIZE);
10250+ kaddr[0] = 0x9DE3BFA8U; /* save */
10251+ flush_dcache_page(vmf->page);
10252+ kunmap(vmf->page);
10253+ return VM_FAULT_MAJOR;
10254+}
10255+
10256+static const struct vm_operations_struct pax_vm_ops = {
10257+ .close = pax_emuplt_close,
10258+ .fault = pax_emuplt_fault
10259+};
10260+
10261+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
10262+{
10263+ int ret;
10264+
10265+ INIT_LIST_HEAD(&vma->anon_vma_chain);
10266+ vma->vm_mm = current->mm;
10267+ vma->vm_start = addr;
10268+ vma->vm_end = addr + PAGE_SIZE;
10269+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
10270+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
10271+ vma->vm_ops = &pax_vm_ops;
10272+
10273+ ret = insert_vm_struct(current->mm, vma);
10274+ if (ret)
10275+ return ret;
10276+
10277+ ++current->mm->total_vm;
10278+ return 0;
10279+}
10280+#endif
10281+
10282+/*
10283+ * PaX: decide what to do with offenders (regs->pc = fault address)
10284+ *
10285+ * returns 1 when task should be killed
10286+ * 2 when patched PLT trampoline was detected
10287+ * 3 when unpatched PLT trampoline was detected
10288+ */
10289+static int pax_handle_fetch_fault(struct pt_regs *regs)
10290+{
10291+
10292+#ifdef CONFIG_PAX_EMUPLT
10293+ int err;
10294+
10295+ do { /* PaX: patched PLT emulation #1 */
10296+ unsigned int sethi1, sethi2, jmpl;
10297+
10298+ err = get_user(sethi1, (unsigned int *)regs->pc);
10299+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
10300+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
10301+
10302+ if (err)
10303+ break;
10304+
10305+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
10306+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
10307+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
10308+ {
10309+ unsigned int addr;
10310+
10311+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
10312+ addr = regs->u_regs[UREG_G1];
10313+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
10314+ regs->pc = addr;
10315+ regs->npc = addr+4;
10316+ return 2;
10317+ }
10318+ } while (0);
10319+
10320+ do { /* PaX: patched PLT emulation #2 */
10321+ unsigned int ba;
10322+
10323+ err = get_user(ba, (unsigned int *)regs->pc);
10324+
10325+ if (err)
10326+ break;
10327+
10328+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
10329+ unsigned int addr;
10330+
10331+ if ((ba & 0xFFC00000U) == 0x30800000U)
10332+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
10333+ else
10334+ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
10335+ regs->pc = addr;
10336+ regs->npc = addr+4;
10337+ return 2;
10338+ }
10339+ } while (0);
10340+
10341+ do { /* PaX: patched PLT emulation #3 */
10342+ unsigned int sethi, bajmpl, nop;
10343+
10344+ err = get_user(sethi, (unsigned int *)regs->pc);
10345+ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
10346+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
10347+
10348+ if (err)
10349+ break;
10350+
10351+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10352+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
10353+ nop == 0x01000000U)
10354+ {
10355+ unsigned int addr;
10356+
10357+ addr = (sethi & 0x003FFFFFU) << 10;
10358+ regs->u_regs[UREG_G1] = addr;
10359+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
10360+ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
10361+ else
10362+ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
10363+ regs->pc = addr;
10364+ regs->npc = addr+4;
10365+ return 2;
10366+ }
10367+ } while (0);
10368+
10369+ do { /* PaX: unpatched PLT emulation step 1 */
10370+ unsigned int sethi, ba, nop;
10371+
10372+ err = get_user(sethi, (unsigned int *)regs->pc);
10373+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
10374+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
10375+
10376+ if (err)
10377+ break;
10378+
10379+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10380+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
10381+ nop == 0x01000000U)
10382+ {
10383+ unsigned int addr, save, call;
10384+
10385+ if ((ba & 0xFFC00000U) == 0x30800000U)
10386+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
10387+ else
10388+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
10389+
10390+ err = get_user(save, (unsigned int *)addr);
10391+ err |= get_user(call, (unsigned int *)(addr+4));
10392+ err |= get_user(nop, (unsigned int *)(addr+8));
10393+ if (err)
10394+ break;
10395+
10396+#ifdef CONFIG_PAX_DLRESOLVE
10397+ if (save == 0x9DE3BFA8U &&
10398+ (call & 0xC0000000U) == 0x40000000U &&
10399+ nop == 0x01000000U)
10400+ {
10401+ struct vm_area_struct *vma;
10402+ unsigned long call_dl_resolve;
10403+
10404+ down_read(&current->mm->mmap_sem);
10405+ call_dl_resolve = current->mm->call_dl_resolve;
10406+ up_read(&current->mm->mmap_sem);
10407+ if (likely(call_dl_resolve))
10408+ goto emulate;
10409+
10410+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
10411+
10412+ down_write(&current->mm->mmap_sem);
10413+ if (current->mm->call_dl_resolve) {
10414+ call_dl_resolve = current->mm->call_dl_resolve;
10415+ up_write(&current->mm->mmap_sem);
10416+ if (vma)
10417+ kmem_cache_free(vm_area_cachep, vma);
10418+ goto emulate;
10419+ }
10420+
10421+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
10422+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
10423+ up_write(&current->mm->mmap_sem);
10424+ if (vma)
10425+ kmem_cache_free(vm_area_cachep, vma);
10426+ return 1;
10427+ }
10428+
10429+ if (pax_insert_vma(vma, call_dl_resolve)) {
10430+ up_write(&current->mm->mmap_sem);
10431+ kmem_cache_free(vm_area_cachep, vma);
10432+ return 1;
10433+ }
10434+
10435+ current->mm->call_dl_resolve = call_dl_resolve;
10436+ up_write(&current->mm->mmap_sem);
10437+
10438+emulate:
10439+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
10440+ regs->pc = call_dl_resolve;
10441+ regs->npc = addr+4;
10442+ return 3;
10443+ }
10444+#endif
10445+
10446+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
10447+ if ((save & 0xFFC00000U) == 0x05000000U &&
10448+ (call & 0xFFFFE000U) == 0x85C0A000U &&
10449+ nop == 0x01000000U)
10450+ {
10451+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
10452+ regs->u_regs[UREG_G2] = addr + 4;
10453+ addr = (save & 0x003FFFFFU) << 10;
10454+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
10455+ regs->pc = addr;
10456+ regs->npc = addr+4;
10457+ return 3;
10458+ }
10459+ }
10460+ } while (0);
10461+
10462+ do { /* PaX: unpatched PLT emulation step 2 */
10463+ unsigned int save, call, nop;
10464+
10465+ err = get_user(save, (unsigned int *)(regs->pc-4));
10466+ err |= get_user(call, (unsigned int *)regs->pc);
10467+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
10468+ if (err)
10469+ break;
10470+
10471+ if (save == 0x9DE3BFA8U &&
10472+ (call & 0xC0000000U) == 0x40000000U &&
10473+ nop == 0x01000000U)
10474+ {
10475+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
10476+
10477+ regs->u_regs[UREG_RETPC] = regs->pc;
10478+ regs->pc = dl_resolve;
10479+ regs->npc = dl_resolve+4;
10480+ return 3;
10481+ }
10482+ } while (0);
10483+#endif
10484+
10485+ return 1;
10486+}
10487+
10488+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
10489+{
10490+ unsigned long i;
10491+
10492+ printk(KERN_ERR "PAX: bytes at PC: ");
10493+ for (i = 0; i < 8; i++) {
10494+ unsigned int c;
10495+ if (get_user(c, (unsigned int *)pc+i))
10496+ printk(KERN_CONT "???????? ");
10497+ else
10498+ printk(KERN_CONT "%08x ", c);
10499+ }
10500+ printk("\n");
10501+}
10502+#endif
10503+
10504 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
10505 int text_fault)
10506 {
10507@@ -230,6 +504,24 @@ good_area:
10508 if (!(vma->vm_flags & VM_WRITE))
10509 goto bad_area;
10510 } else {
10511+
10512+#ifdef CONFIG_PAX_PAGEEXEC
10513+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
10514+ up_read(&mm->mmap_sem);
10515+ switch (pax_handle_fetch_fault(regs)) {
10516+
10517+#ifdef CONFIG_PAX_EMUPLT
10518+ case 2:
10519+ case 3:
10520+ return;
10521+#endif
10522+
10523+ }
10524+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
10525+ do_group_exit(SIGKILL);
10526+ }
10527+#endif
10528+
10529 /* Allow reads even for write-only mappings */
10530 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
10531 goto bad_area;
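
For context on the large fault_32.c addition above: under CONFIG_PAX_PAGEEXEC an instruction fetch from a non-executable page faults, and pax_handle_fetch_fault() decodes the words at the fault PC to recognize the handful of PLT trampoline shapes the sparc toolchain emits (patched sethi/jmpl sequences, or an unpatched save/call into the dynamic linker). Recognized trampolines are emulated by updating pc/npc and the relevant globals; anything else is treated as an exploit attempt and killed after pax_report_fault(). With CONFIG_PAX_DLRESOLVE the handler additionally maps one executable page via pax_insert_vma(), whose fault callback materializes a lone save instruction (0x9DE3BFA8) to stand in for the resolver call site. The bit-twiddling in the matchers is plain sparc instruction decoding; a standalone sketch using the same masks (helper names are ours):

    #include <stdint.h>
    #include <stdio.h>

    /* The masks and opcodes are the ones matched in the hunks above. */
    static int is_sethi_g1(uint32_t insn) { return (insn & 0xFFC00000U) == 0x03000000U; } /* sethi %hi(x), %g1 */
    static int is_jmpl_g1(uint32_t insn)  { return (insn & 0xFFFFE000U) == 0x81C06000U; } /* jmpl %g1+simm13 */

    /* sethi places a 22-bit immediate into bits [31:10] of its target */
    static uint32_t sethi_imm22(uint32_t insn) { return (insn & 0x003FFFFFU) << 10; }

    /* jmpl carries a sign-extended 13-bit displacement; the or/xor/add
     * dance is the patch's branch-free sign extension */
    static uint32_t jmpl_simm13(uint32_t insn)
    {
        return ((insn | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U;
    }

    int main(void)
    {
        uint32_t sethi = 0x03000123U, jmpl = 0x81C06008U; /* made-up encodings */
        if (is_sethi_g1(sethi) && is_jmpl_g1(jmpl))
            printf("emulated target = %#x\n", sethi_imm22(sethi) + jmpl_simm13(jmpl));
        return 0;
    }
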
10532diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
10533index 5062ff3..e0b75f3 100644
10534--- a/arch/sparc/mm/fault_64.c
10535+++ b/arch/sparc/mm/fault_64.c
10536@@ -21,6 +21,9 @@
10537 #include <linux/kprobes.h>
10538 #include <linux/kdebug.h>
10539 #include <linux/percpu.h>
10540+#include <linux/slab.h>
10541+#include <linux/pagemap.h>
10542+#include <linux/compiler.h>
10543
10544 #include <asm/page.h>
10545 #include <asm/pgtable.h>
10546@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
10547 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
10548 regs->tpc);
10549 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
10550- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
10551+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
10552 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
10553 dump_stack();
10554 unhandled_fault(regs->tpc, current, regs);
10555@@ -270,6 +273,466 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
10556 show_regs(regs);
10557 }
10558
10559+#ifdef CONFIG_PAX_PAGEEXEC
10560+#ifdef CONFIG_PAX_DLRESOLVE
10561+static void pax_emuplt_close(struct vm_area_struct *vma)
10562+{
10563+ vma->vm_mm->call_dl_resolve = 0UL;
10564+}
10565+
10566+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
10567+{
10568+ unsigned int *kaddr;
10569+
10570+ vmf->page = alloc_page(GFP_HIGHUSER);
10571+ if (!vmf->page)
10572+ return VM_FAULT_OOM;
10573+
10574+ kaddr = kmap(vmf->page);
10575+ memset(kaddr, 0, PAGE_SIZE);
10576+ kaddr[0] = 0x9DE3BFA8U; /* save */
10577+ flush_dcache_page(vmf->page);
10578+ kunmap(vmf->page);
10579+ return VM_FAULT_MAJOR;
10580+}
10581+
10582+static const struct vm_operations_struct pax_vm_ops = {
10583+ .close = pax_emuplt_close,
10584+ .fault = pax_emuplt_fault
10585+};
10586+
10587+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
10588+{
10589+ int ret;
10590+
10591+ INIT_LIST_HEAD(&vma->anon_vma_chain);
10592+ vma->vm_mm = current->mm;
10593+ vma->vm_start = addr;
10594+ vma->vm_end = addr + PAGE_SIZE;
10595+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
10596+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
10597+ vma->vm_ops = &pax_vm_ops;
10598+
10599+ ret = insert_vm_struct(current->mm, vma);
10600+ if (ret)
10601+ return ret;
10602+
10603+ ++current->mm->total_vm;
10604+ return 0;
10605+}
10606+#endif
10607+
10608+/*
10609+ * PaX: decide what to do with offenders (regs->tpc = fault address)
10610+ *
10611+ * returns 1 when task should be killed
10612+ * 2 when patched PLT trampoline was detected
10613+ * 3 when unpatched PLT trampoline was detected
10614+ */
10615+static int pax_handle_fetch_fault(struct pt_regs *regs)
10616+{
10617+
10618+#ifdef CONFIG_PAX_EMUPLT
10619+ int err;
10620+
10621+ do { /* PaX: patched PLT emulation #1 */
10622+ unsigned int sethi1, sethi2, jmpl;
10623+
10624+ err = get_user(sethi1, (unsigned int *)regs->tpc);
10625+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
10626+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
10627+
10628+ if (err)
10629+ break;
10630+
10631+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
10632+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
10633+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
10634+ {
10635+ unsigned long addr;
10636+
10637+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
10638+ addr = regs->u_regs[UREG_G1];
10639+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
10640+
10641+ if (test_thread_flag(TIF_32BIT))
10642+ addr &= 0xFFFFFFFFUL;
10643+
10644+ regs->tpc = addr;
10645+ regs->tnpc = addr+4;
10646+ return 2;
10647+ }
10648+ } while (0);
10649+
10650+ do { /* PaX: patched PLT emulation #2 */
10651+ unsigned int ba;
10652+
10653+ err = get_user(ba, (unsigned int *)regs->tpc);
10654+
10655+ if (err)
10656+ break;
10657+
10658+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
10659+ unsigned long addr;
10660+
10661+ if ((ba & 0xFFC00000U) == 0x30800000U)
10662+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
10663+ else
10664+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
10665+
10666+ if (test_thread_flag(TIF_32BIT))
10667+ addr &= 0xFFFFFFFFUL;
10668+
10669+ regs->tpc = addr;
10670+ regs->tnpc = addr+4;
10671+ return 2;
10672+ }
10673+ } while (0);
10674+
10675+ do { /* PaX: patched PLT emulation #3 */
10676+ unsigned int sethi, bajmpl, nop;
10677+
10678+ err = get_user(sethi, (unsigned int *)regs->tpc);
10679+ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
10680+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
10681+
10682+ if (err)
10683+ break;
10684+
10685+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10686+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
10687+ nop == 0x01000000U)
10688+ {
10689+ unsigned long addr;
10690+
10691+ addr = (sethi & 0x003FFFFFU) << 10;
10692+ regs->u_regs[UREG_G1] = addr;
10693+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
10694+ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
10695+ else
10696+ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
10697+
10698+ if (test_thread_flag(TIF_32BIT))
10699+ addr &= 0xFFFFFFFFUL;
10700+
10701+ regs->tpc = addr;
10702+ regs->tnpc = addr+4;
10703+ return 2;
10704+ }
10705+ } while (0);
10706+
10707+ do { /* PaX: patched PLT emulation #4 */
10708+ unsigned int sethi, mov1, call, mov2;
10709+
10710+ err = get_user(sethi, (unsigned int *)regs->tpc);
10711+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
10712+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
10713+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
10714+
10715+ if (err)
10716+ break;
10717+
10718+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10719+ mov1 == 0x8210000FU &&
10720+ (call & 0xC0000000U) == 0x40000000U &&
10721+ mov2 == 0x9E100001U)
10722+ {
10723+ unsigned long addr;
10724+
10725+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
10726+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
10727+
10728+ if (test_thread_flag(TIF_32BIT))
10729+ addr &= 0xFFFFFFFFUL;
10730+
10731+ regs->tpc = addr;
10732+ regs->tnpc = addr+4;
10733+ return 2;
10734+ }
10735+ } while (0);
10736+
10737+ do { /* PaX: patched PLT emulation #5 */
10738+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
10739+
10740+ err = get_user(sethi, (unsigned int *)regs->tpc);
10741+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
10742+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
10743+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
10744+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
10745+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
10746+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
10747+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
10748+
10749+ if (err)
10750+ break;
10751+
10752+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10753+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
10754+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
10755+ (or1 & 0xFFFFE000U) == 0x82106000U &&
10756+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
10757+ sllx == 0x83287020U &&
10758+ jmpl == 0x81C04005U &&
10759+ nop == 0x01000000U)
10760+ {
10761+ unsigned long addr;
10762+
10763+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
10764+ regs->u_regs[UREG_G1] <<= 32;
10765+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
10766+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
10767+ regs->tpc = addr;
10768+ regs->tnpc = addr+4;
10769+ return 2;
10770+ }
10771+ } while (0);
10772+
10773+ do { /* PaX: patched PLT emulation #6 */
10774+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
10775+
10776+ err = get_user(sethi, (unsigned int *)regs->tpc);
10777+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
10778+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
10779+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
10780+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
10781+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
10782+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
10783+
10784+ if (err)
10785+ break;
10786+
10787+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10788+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
10789+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
10790+ sllx == 0x83287020U &&
10791+ (or & 0xFFFFE000U) == 0x8A116000U &&
10792+ jmpl == 0x81C04005U &&
10793+ nop == 0x01000000U)
10794+ {
10795+ unsigned long addr;
10796+
10797+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
10798+ regs->u_regs[UREG_G1] <<= 32;
10799+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
10800+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
10801+ regs->tpc = addr;
10802+ regs->tnpc = addr+4;
10803+ return 2;
10804+ }
10805+ } while (0);
10806+
10807+ do { /* PaX: unpatched PLT emulation step 1 */
10808+ unsigned int sethi, ba, nop;
10809+
10810+ err = get_user(sethi, (unsigned int *)regs->tpc);
10811+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
10812+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
10813+
10814+ if (err)
10815+ break;
10816+
10817+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10818+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
10819+ nop == 0x01000000U)
10820+ {
10821+ unsigned long addr;
10822+ unsigned int save, call;
10823+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
10824+
10825+ if ((ba & 0xFFC00000U) == 0x30800000U)
10826+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
10827+ else
10828+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
10829+
10830+ if (test_thread_flag(TIF_32BIT))
10831+ addr &= 0xFFFFFFFFUL;
10832+
10833+ err = get_user(save, (unsigned int *)addr);
10834+ err |= get_user(call, (unsigned int *)(addr+4));
10835+ err |= get_user(nop, (unsigned int *)(addr+8));
10836+ if (err)
10837+ break;
10838+
10839+#ifdef CONFIG_PAX_DLRESOLVE
10840+ if (save == 0x9DE3BFA8U &&
10841+ (call & 0xC0000000U) == 0x40000000U &&
10842+ nop == 0x01000000U)
10843+ {
10844+ struct vm_area_struct *vma;
10845+ unsigned long call_dl_resolve;
10846+
10847+ down_read(&current->mm->mmap_sem);
10848+ call_dl_resolve = current->mm->call_dl_resolve;
10849+ up_read(&current->mm->mmap_sem);
10850+ if (likely(call_dl_resolve))
10851+ goto emulate;
10852+
10853+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
10854+
10855+ down_write(&current->mm->mmap_sem);
10856+ if (current->mm->call_dl_resolve) {
10857+ call_dl_resolve = current->mm->call_dl_resolve;
10858+ up_write(&current->mm->mmap_sem);
10859+ if (vma)
10860+ kmem_cache_free(vm_area_cachep, vma);
10861+ goto emulate;
10862+ }
10863+
10864+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
10865+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
10866+ up_write(&current->mm->mmap_sem);
10867+ if (vma)
10868+ kmem_cache_free(vm_area_cachep, vma);
10869+ return 1;
10870+ }
10871+
10872+ if (pax_insert_vma(vma, call_dl_resolve)) {
10873+ up_write(&current->mm->mmap_sem);
10874+ kmem_cache_free(vm_area_cachep, vma);
10875+ return 1;
10876+ }
10877+
10878+ current->mm->call_dl_resolve = call_dl_resolve;
10879+ up_write(&current->mm->mmap_sem);
10880+
10881+emulate:
10882+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
10883+ regs->tpc = call_dl_resolve;
10884+ regs->tnpc = addr+4;
10885+ return 3;
10886+ }
10887+#endif
10888+
10889+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
10890+ if ((save & 0xFFC00000U) == 0x05000000U &&
10891+ (call & 0xFFFFE000U) == 0x85C0A000U &&
10892+ nop == 0x01000000U)
10893+ {
10894+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
10895+ regs->u_regs[UREG_G2] = addr + 4;
10896+ addr = (save & 0x003FFFFFU) << 10;
10897+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
10898+
10899+ if (test_thread_flag(TIF_32BIT))
10900+ addr &= 0xFFFFFFFFUL;
10901+
10902+ regs->tpc = addr;
10903+ regs->tnpc = addr+4;
10904+ return 3;
10905+ }
10906+
10907+ /* PaX: 64-bit PLT stub */
10908+ err = get_user(sethi1, (unsigned int *)addr);
10909+ err |= get_user(sethi2, (unsigned int *)(addr+4));
10910+ err |= get_user(or1, (unsigned int *)(addr+8));
10911+ err |= get_user(or2, (unsigned int *)(addr+12));
10912+ err |= get_user(sllx, (unsigned int *)(addr+16));
10913+ err |= get_user(add, (unsigned int *)(addr+20));
10914+ err |= get_user(jmpl, (unsigned int *)(addr+24));
10915+ err |= get_user(nop, (unsigned int *)(addr+28));
10916+ if (err)
10917+ break;
10918+
10919+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
10920+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
10921+ (or1 & 0xFFFFE000U) == 0x88112000U &&
10922+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
10923+ sllx == 0x89293020U &&
10924+ add == 0x8A010005U &&
10925+ jmpl == 0x89C14000U &&
10926+ nop == 0x01000000U)
10927+ {
10928+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
10929+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
10930+ regs->u_regs[UREG_G4] <<= 32;
10931+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
10932+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
10933+ regs->u_regs[UREG_G4] = addr + 24;
10934+ addr = regs->u_regs[UREG_G5];
10935+ regs->tpc = addr;
10936+ regs->tnpc = addr+4;
10937+ return 3;
10938+ }
10939+ }
10940+ } while (0);
10941+
10942+#ifdef CONFIG_PAX_DLRESOLVE
10943+ do { /* PaX: unpatched PLT emulation step 2 */
10944+ unsigned int save, call, nop;
10945+
10946+ err = get_user(save, (unsigned int *)(regs->tpc-4));
10947+ err |= get_user(call, (unsigned int *)regs->tpc);
10948+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
10949+ if (err)
10950+ break;
10951+
10952+ if (save == 0x9DE3BFA8U &&
10953+ (call & 0xC0000000U) == 0x40000000U &&
10954+ nop == 0x01000000U)
10955+ {
10956+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
10957+
10958+ if (test_thread_flag(TIF_32BIT))
10959+ dl_resolve &= 0xFFFFFFFFUL;
10960+
10961+ regs->u_regs[UREG_RETPC] = regs->tpc;
10962+ regs->tpc = dl_resolve;
10963+ regs->tnpc = dl_resolve+4;
10964+ return 3;
10965+ }
10966+ } while (0);
10967+#endif
10968+
10969+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
10970+ unsigned int sethi, ba, nop;
10971+
10972+ err = get_user(sethi, (unsigned int *)regs->tpc);
10973+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
10974+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
10975+
10976+ if (err)
10977+ break;
10978+
10979+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10980+ (ba & 0xFFF00000U) == 0x30600000U &&
10981+ nop == 0x01000000U)
10982+ {
10983+ unsigned long addr;
10984+
10985+ addr = (sethi & 0x003FFFFFU) << 10;
10986+ regs->u_regs[UREG_G1] = addr;
10987+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
10988+
10989+ if (test_thread_flag(TIF_32BIT))
10990+ addr &= 0xFFFFFFFFUL;
10991+
10992+ regs->tpc = addr;
10993+ regs->tnpc = addr+4;
10994+ return 2;
10995+ }
10996+ } while (0);
10997+
10998+#endif
10999+
11000+ return 1;
11001+}
11002+
11003+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
11004+{
11005+ unsigned long i;
11006+
11007+ printk(KERN_ERR "PAX: bytes at PC: ");
11008+ for (i = 0; i < 8; i++) {
11009+ unsigned int c;
11010+ if (get_user(c, (unsigned int *)pc+i))
11011+ printk(KERN_CONT "???????? ");
11012+ else
11013+ printk(KERN_CONT "%08x ", c);
11014+ }
11015+ printk("\n");
11016+}
11017+#endif
11018+
11019 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
11020 {
11021 struct mm_struct *mm = current->mm;
11022@@ -341,6 +804,29 @@ retry:
11023 if (!vma)
11024 goto bad_area;
11025
11026+#ifdef CONFIG_PAX_PAGEEXEC
11027+ /* PaX: detect ITLB misses on non-exec pages */
11028+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
11029+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
11030+ {
11031+ if (address != regs->tpc)
11032+ goto good_area;
11033+
11034+ up_read(&mm->mmap_sem);
11035+ switch (pax_handle_fetch_fault(regs)) {
11036+
11037+#ifdef CONFIG_PAX_EMUPLT
11038+ case 2:
11039+ case 3:
11040+ return;
11041+#endif
11042+
11043+ }
11044+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
11045+ do_group_exit(SIGKILL);
11046+ }
11047+#endif
11048+
11049 /* Pure DTLB misses do not tell us whether the fault causing
11050 * load/store/atomic was a write or not, it only says that there
11051 * was no match. So in such a case we (carefully) read the
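
The fault_64.c emulation above mirrors the 32-bit version with two systematic differences visible in the hunks: the sign-extension masks widen to the full register (0xFFFFFFFFFFFFE000UL and friends) so displacements extend across 64 bits, and every computed target is truncated with addr &= 0xFFFFFFFFUL for TIF_32BIT (compat) tasks, whose address space ends at 4GB. It also matches extra trampoline shapes that only occur in 64-bit code, such as the sethi/or/sllx sequences that assemble a full 64-bit address in %g1/%g5. A small sketch of the two address fixups (illustration, our naming):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* 64-bit analogue of the simm13 reconstruction used in the hunks */
    static uint64_t simm13_64(uint32_t insn)
    {
        return ((insn | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL;
    }

    static uint64_t fixup_target(uint64_t base, uint32_t jmpl, int task_is_32bit)
    {
        uint64_t addr = base + simm13_64(jmpl);
        if (task_is_32bit)
            addr &= 0xFFFFFFFFUL; /* compat tasks live below 4GB */
        return addr;
    }

    int main(void)
    {
        printf("%#" PRIx64 "\n", fixup_target(0x100000000UL, 0x81C06008U, 0)); /* 0x100000008 */
        printf("%#" PRIx64 "\n", fixup_target(0x100000000UL, 0x81C06008U, 1)); /* 0x8 */
        return 0;
    }
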
11052diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
11053index d2b5944..d878f3c 100644
11054--- a/arch/sparc/mm/hugetlbpage.c
11055+++ b/arch/sparc/mm/hugetlbpage.c
11056@@ -28,7 +28,8 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
11057 unsigned long addr,
11058 unsigned long len,
11059 unsigned long pgoff,
11060- unsigned long flags)
11061+ unsigned long flags,
11062+ unsigned long offset)
11063 {
11064 unsigned long task_size = TASK_SIZE;
11065 struct vm_unmapped_area_info info;
11066@@ -38,15 +39,22 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
11067
11068 info.flags = 0;
11069 info.length = len;
11070- info.low_limit = TASK_UNMAPPED_BASE;
11071+ info.low_limit = mm->mmap_base;
11072 info.high_limit = min(task_size, VA_EXCLUDE_START);
11073 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
11074 info.align_offset = 0;
11075+ info.threadstack_offset = offset;
11076 addr = vm_unmapped_area(&info);
11077
11078 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
11079 VM_BUG_ON(addr != -ENOMEM);
11080 info.low_limit = VA_EXCLUDE_END;
11081+
11082+#ifdef CONFIG_PAX_RANDMMAP
11083+ if (mm->pax_flags & MF_PAX_RANDMMAP)
11084+ info.low_limit += mm->delta_mmap;
11085+#endif
11086+
11087 info.high_limit = task_size;
11088 addr = vm_unmapped_area(&info);
11089 }
11090@@ -58,7 +66,8 @@ static unsigned long
11091 hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
11092 const unsigned long len,
11093 const unsigned long pgoff,
11094- const unsigned long flags)
11095+ const unsigned long flags,
11096+ const unsigned long offset)
11097 {
11098 struct mm_struct *mm = current->mm;
11099 unsigned long addr = addr0;
11100@@ -73,6 +82,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
11101 info.high_limit = mm->mmap_base;
11102 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
11103 info.align_offset = 0;
11104+ info.threadstack_offset = offset;
11105 addr = vm_unmapped_area(&info);
11106
11107 /*
11108@@ -85,6 +95,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
11109 VM_BUG_ON(addr != -ENOMEM);
11110 info.flags = 0;
11111 info.low_limit = TASK_UNMAPPED_BASE;
11112+
11113+#ifdef CONFIG_PAX_RANDMMAP
11114+ if (mm->pax_flags & MF_PAX_RANDMMAP)
11115+ info.low_limit += mm->delta_mmap;
11116+#endif
11117+
11118 info.high_limit = STACK_TOP32;
11119 addr = vm_unmapped_area(&info);
11120 }
11121@@ -99,6 +115,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
11122 struct mm_struct *mm = current->mm;
11123 struct vm_area_struct *vma;
11124 unsigned long task_size = TASK_SIZE;
11125+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
11126
11127 if (test_thread_flag(TIF_32BIT))
11128 task_size = STACK_TOP32;
11129@@ -114,19 +131,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
11130 return addr;
11131 }
11132
11133+#ifdef CONFIG_PAX_RANDMMAP
11134+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
11135+#endif
11136+
11137 if (addr) {
11138 addr = ALIGN(addr, HPAGE_SIZE);
11139 vma = find_vma(mm, addr);
11140- if (task_size - len >= addr &&
11141- (!vma || addr + len <= vma->vm_start))
11142+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
11143 return addr;
11144 }
11145 if (mm->get_unmapped_area == arch_get_unmapped_area)
11146 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
11147- pgoff, flags);
11148+ pgoff, flags, offset);
11149 else
11150 return hugetlb_get_unmapped_area_topdown(file, addr, len,
11151- pgoff, flags);
11152+ pgoff, flags, offset);
11153 }
11154
11155 pte_t *huge_pte_alloc(struct mm_struct *mm,
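
The hugetlbpage.c changes above plug sparc's hugepage mapping search into facilities added elsewhere in this patch: gr_rand_threadstack_offset() and info.threadstack_offset thread a per-task random gap through vm_unmapped_area(), PAX_RANDMMAP shifts the bottom-up search floor by mm->delta_mmap, and the open-coded "!vma || addr + len <= vma->vm_start" overlap test becomes check_heap_stack_gap(), which also enforces a guard gap. A simplified model of that last difference (our naming, not the patch's helper):

    #include <stdbool.h>
    #include <stdio.h>

    struct vma { unsigned long start, end; };

    /* Stock test: only reject overlap with the next mapping.
     * Gap-aware test: additionally demand 'gap' bytes of slack. */
    static bool fits(const struct vma *next, unsigned long addr,
                     unsigned long len, unsigned long gap)
    {
        return !next || addr + len + gap <= next->start;
    }

    int main(void)
    {
        struct vma next = { 0x40000000UL, 0x40001000UL };
        printf("no gap: %d\n", fits(&next, 0x3fffe000UL, 0x1000, 0));      /* 1 */
        printf("8K gap: %d\n", fits(&next, 0x3fffe000UL, 0x1000, 0x2000)); /* 0 */
        return 0;
    }
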
11156diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
11157index f4500c6..889656c 100644
11158--- a/arch/tile/include/asm/atomic_64.h
11159+++ b/arch/tile/include/asm/atomic_64.h
11160@@ -143,6 +143,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
11161
11162 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
11163
11164+#define atomic64_read_unchecked(v) atomic64_read(v)
11165+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
11166+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
11167+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
11168+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
11169+#define atomic64_inc_unchecked(v) atomic64_inc(v)
11170+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
11171+#define atomic64_dec_unchecked(v) atomic64_dec(v)
11172+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
11173+
11174 /* Atomic dec and inc don't implement barrier, so provide them if needed. */
11175 #define smp_mb__before_atomic_dec() smp_mb()
11176 #define smp_mb__after_atomic_dec() smp_mb()
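
The block of *_unchecked defines above shows the pattern this patch applies on architectures without REFCOUNT instrumentation: generic code is converted to call atomic*_unchecked() wherever wrapping is intentional, and arches lacking an overflow trap simply alias those names to the plain operations (compare the hand-written sparc variants earlier in this section). A toy illustration of the aliasing side:

    #include <stdio.h>

    /* Stand-in for the real atomic op; the point is only the aliasing. */
    static long atomic64_add_return(long i, long *v) { return *v += i; }

    /* On arches without overflow checking, "unchecked" is the same op. */
    #define atomic64_add_return_unchecked(i, v) atomic64_add_return((i), (v))

    int main(void)
    {
        long v = 1;
        printf("%ld\n", atomic64_add_return_unchecked(2, &v)); /* 3 */
        return 0;
    }
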
11177diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
11178index a9a5299..0fce79e 100644
11179--- a/arch/tile/include/asm/cache.h
11180+++ b/arch/tile/include/asm/cache.h
11181@@ -15,11 +15,12 @@
11182 #ifndef _ASM_TILE_CACHE_H
11183 #define _ASM_TILE_CACHE_H
11184
11185+#include <linux/const.h>
11186 #include <arch/chip.h>
11187
11188 /* bytes per L1 data cache line */
11189 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
11190-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
11191+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
11192
11193 /* bytes per L2 cache line */
11194 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
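
The cache.h hunks here and in the um/unicore32 headers below all make the same change: L1_CACHE_BYTES becomes _AC(1,UL) << L1_CACHE_SHIFT, so the constant is unsigned long in C (keeping alignment masks full register width) while still expanding to a bare 1 in assembly, where a UL suffix would not parse. linux/const.h implements this with token pasting; a simplified reproduction for illustration:

    #include <stdio.h>

    /* Simplified reproduction of linux/const.h */
    #ifdef __ASSEMBLY__
    #define _AC(X, Y)  X        /* assembler: drop the C type suffix */
    #else
    #define __AC(X, Y) (X##Y)   /* C: paste the suffix on */
    #define _AC(X, Y)  __AC(X, Y)
    #endif

    #define L1_CACHE_SHIFT 5
    #define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)

    int main(void)
    {
        /* the mask is computed in unsigned long, not int */
        unsigned long mask = ~(L1_CACHE_BYTES - 1);
        printf("sizeof=%zu mask=%#lx\n", sizeof(L1_CACHE_BYTES), mask);
        return 0;
    }
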
11195diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
11196index 8a082bc..7a6bf87 100644
11197--- a/arch/tile/include/asm/uaccess.h
11198+++ b/arch/tile/include/asm/uaccess.h
11199@@ -408,9 +408,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
11200 const void __user *from,
11201 unsigned long n)
11202 {
11203- int sz = __compiletime_object_size(to);
11204+ size_t sz = __compiletime_object_size(to);
11205
11206- if (likely(sz == -1 || sz >= n))
11207+ if (likely(sz == (size_t)-1 || sz >= n))
11208 n = _copy_from_user(to, from, n);
11209 else
11210 copy_from_user_overflow();
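
The uaccess.h change above is a type-correctness fix repeated across architectures in this patch: __compiletime_object_size() yields (size_t)-1 when the object size is unknown, and storing it in a signed int both truncates large sizes and mixes signedness in the sz >= n comparison. A contrived demonstration of the truncation hazard on a 64-bit host:

    #include <stddef.h>
    #include <stdio.h>

    int main(void)
    {
        size_t obj_size = (size_t)1 << 32; /* contrived 4GB object */
        size_t n = 8;                      /* requested copy length */

        int sz_int = (int)obj_size;        /* truncates (typically to 0) */
        size_t sz = obj_size;

        printf("int:    %s\n", (sz_int == -1 || (size_t)sz_int >= n)
                               ? "copy" : "flag overflow");  /* spurious flag */
        printf("size_t: %s\n", (sz == (size_t)-1 || sz >= n)
                               ? "copy" : "flag overflow");  /* correct: copy */
        return 0;
    }
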
11211diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
11212index 650ccff..45fe2d6 100644
11213--- a/arch/tile/mm/hugetlbpage.c
11214+++ b/arch/tile/mm/hugetlbpage.c
11215@@ -239,6 +239,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
11216 info.high_limit = TASK_SIZE;
11217 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
11218 info.align_offset = 0;
11219+ info.threadstack_offset = 0;
11220 return vm_unmapped_area(&info);
11221 }
11222
11223@@ -256,6 +257,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
11224 info.high_limit = current->mm->mmap_base;
11225 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
11226 info.align_offset = 0;
11227+ info.threadstack_offset = 0;
11228 addr = vm_unmapped_area(&info);
11229
11230 /*
11231diff --git a/arch/um/Makefile b/arch/um/Makefile
11232index 133f7de..1d6f2f1 100644
11233--- a/arch/um/Makefile
11234+++ b/arch/um/Makefile
11235@@ -62,6 +62,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
11236 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
11237 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
11238
11239+ifdef CONSTIFY_PLUGIN
11240+USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
11241+endif
11242+
11243 #This will adjust *FLAGS accordingly to the platform.
11244 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
11245
11246diff --git a/arch/um/defconfig b/arch/um/defconfig
11247index 08107a7..ab22afe 100644
11248--- a/arch/um/defconfig
11249+++ b/arch/um/defconfig
11250@@ -51,7 +51,6 @@ CONFIG_X86_CMPXCHG=y
11251 CONFIG_X86_L1_CACHE_SHIFT=5
11252 CONFIG_X86_XADD=y
11253 CONFIG_X86_PPRO_FENCE=y
11254-CONFIG_X86_WP_WORKS_OK=y
11255 CONFIG_X86_INVLPG=y
11256 CONFIG_X86_BSWAP=y
11257 CONFIG_X86_POPAD_OK=y
11258diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
11259index 19e1bdd..3665b77 100644
11260--- a/arch/um/include/asm/cache.h
11261+++ b/arch/um/include/asm/cache.h
11262@@ -1,6 +1,7 @@
11263 #ifndef __UM_CACHE_H
11264 #define __UM_CACHE_H
11265
11266+#include <linux/const.h>
11267
11268 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
11269 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
11270@@ -12,6 +13,6 @@
11271 # define L1_CACHE_SHIFT 5
11272 #endif
11273
11274-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
11275+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
11276
11277 #endif
11278diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
11279index 2e0a6b1..a64d0f5 100644
11280--- a/arch/um/include/asm/kmap_types.h
11281+++ b/arch/um/include/asm/kmap_types.h
11282@@ -8,6 +8,6 @@
11283
11284 /* No more #include "asm/arch/kmap_types.h" ! */
11285
11286-#define KM_TYPE_NR 14
11287+#define KM_TYPE_NR 15
11288
11289 #endif
11290diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
11291index 5ff53d9..5850cdf 100644
11292--- a/arch/um/include/asm/page.h
11293+++ b/arch/um/include/asm/page.h
11294@@ -14,6 +14,9 @@
11295 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
11296 #define PAGE_MASK (~(PAGE_SIZE-1))
11297
11298+#define ktla_ktva(addr) (addr)
11299+#define ktva_ktla(addr) (addr)
11300+
11301 #ifndef __ASSEMBLY__
11302
11303 struct page;
11304diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
11305index 0032f92..cd151e0 100644
11306--- a/arch/um/include/asm/pgtable-3level.h
11307+++ b/arch/um/include/asm/pgtable-3level.h
11308@@ -58,6 +58,7 @@
11309 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
11310 #define pud_populate(mm, pud, pmd) \
11311 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
11312+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
11313
11314 #ifdef CONFIG_64BIT
11315 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
11316diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
11317index bbcef52..6a2a483 100644
11318--- a/arch/um/kernel/process.c
11319+++ b/arch/um/kernel/process.c
11320@@ -367,22 +367,6 @@ int singlestepping(void * t)
11321 return 2;
11322 }
11323
11324-/*
11325- * Only x86 and x86_64 have an arch_align_stack().
11326- * All other arches have "#define arch_align_stack(x) (x)"
11327- * in their asm/system.h
11328- * As this is included in UML from asm-um/system-generic.h,
11329- * we can use it to behave as the subarch does.
11330- */
11331-#ifndef arch_align_stack
11332-unsigned long arch_align_stack(unsigned long sp)
11333-{
11334- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
11335- sp -= get_random_int() % 8192;
11336- return sp & ~0xf;
11337-}
11338-#endif
11339-
11340 unsigned long get_wchan(struct task_struct *p)
11341 {
11342 unsigned long stack_page, sp, ip;
11343diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
11344index ad8f795..2c7eec6 100644
11345--- a/arch/unicore32/include/asm/cache.h
11346+++ b/arch/unicore32/include/asm/cache.h
11347@@ -12,8 +12,10 @@
11348 #ifndef __UNICORE_CACHE_H__
11349 #define __UNICORE_CACHE_H__
11350
11351-#define L1_CACHE_SHIFT (5)
11352-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
11353+#include <linux/const.h>
11354+
11355+#define L1_CACHE_SHIFT 5
11356+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
11357
11358 /*
11359 * Memory returned by kmalloc() may be used for DMA, so we must make
11360diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
11361index fe120da..24177f7 100644
11362--- a/arch/x86/Kconfig
11363+++ b/arch/x86/Kconfig
11364@@ -239,7 +239,7 @@ config X86_HT
11365
11366 config X86_32_LAZY_GS
11367 def_bool y
11368- depends on X86_32 && !CC_STACKPROTECTOR
11369+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
11370
11371 config ARCH_HWEIGHT_CFLAGS
11372 string
11373@@ -1073,6 +1073,7 @@ config MICROCODE_EARLY
11374
11375 config X86_MSR
11376 tristate "/dev/cpu/*/msr - Model-specific register support"
11377+ depends on !GRKERNSEC_KMEM
11378 ---help---
11379 This device gives privileged processes access to the x86
11380 Model-Specific Registers (MSRs). It is a character device with
11381@@ -1096,7 +1097,7 @@ choice
11382
11383 config NOHIGHMEM
11384 bool "off"
11385- depends on !X86_NUMAQ
11386+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
11387 ---help---
11388 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
11389 However, the address space of 32-bit x86 processors is only 4
11390@@ -1133,7 +1134,7 @@ config NOHIGHMEM
11391
11392 config HIGHMEM4G
11393 bool "4GB"
11394- depends on !X86_NUMAQ
11395+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
11396 ---help---
11397 Select this if you have a 32-bit processor and between 1 and 4
11398 gigabytes of physical RAM.
11399@@ -1186,7 +1187,7 @@ config PAGE_OFFSET
11400 hex
11401 default 0xB0000000 if VMSPLIT_3G_OPT
11402 default 0x80000000 if VMSPLIT_2G
11403- default 0x78000000 if VMSPLIT_2G_OPT
11404+ default 0x70000000 if VMSPLIT_2G_OPT
11405 default 0x40000000 if VMSPLIT_1G
11406 default 0xC0000000
11407 depends on X86_32
11408@@ -1584,6 +1585,7 @@ config SECCOMP
11409
11410 config CC_STACKPROTECTOR
11411 bool "Enable -fstack-protector buffer overflow detection"
11412+ depends on X86_64 || !PAX_MEMORY_UDEREF
11413 ---help---
11414 This option turns on the -fstack-protector GCC feature. This
11415 feature puts, at the beginning of functions, a canary value on
11416@@ -1703,6 +1705,8 @@ config X86_NEED_RELOCS
11417 config PHYSICAL_ALIGN
11418 hex "Alignment value to which kernel should be aligned" if X86_32
11419 default "0x1000000"
11420+ range 0x200000 0x1000000 if PAX_KERNEXEC && X86_PAE
11421+ range 0x400000 0x1000000 if PAX_KERNEXEC && !X86_PAE
11422 range 0x2000 0x1000000
11423 ---help---
11424 This value puts the alignment restrictions on physical address
11425@@ -1778,9 +1782,10 @@ config DEBUG_HOTPLUG_CPU0
11426 If unsure, say N.
11427
11428 config COMPAT_VDSO
11429- def_bool y
11430+ def_bool n
11431 prompt "Compat VDSO support"
11432 depends on X86_32 || IA32_EMULATION
11433+ depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
11434 ---help---
11435 Map the 32-bit VDSO to the predictable old-style address too.
11436
11437diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
11438index c026cca..14657ae 100644
11439--- a/arch/x86/Kconfig.cpu
11440+++ b/arch/x86/Kconfig.cpu
11441@@ -319,7 +319,7 @@ config X86_PPRO_FENCE
11442
11443 config X86_F00F_BUG
11444 def_bool y
11445- depends on M586MMX || M586TSC || M586 || M486
11446+ depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC
11447
11448 config X86_INVD_BUG
11449 def_bool y
11450@@ -327,7 +327,7 @@ config X86_INVD_BUG
11451
11452 config X86_ALIGNMENT_16
11453 def_bool y
11454- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11455+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11456
11457 config X86_INTEL_USERCOPY
11458 def_bool y
11459@@ -373,7 +373,7 @@ config X86_CMPXCHG64
11460 # generates cmov.
11461 config X86_CMOV
11462 def_bool y
11463- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
11464+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
11465
11466 config X86_MINIMUM_CPU_FAMILY
11467 int
11468diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
11469index c198b7e..63eea60 100644
11470--- a/arch/x86/Kconfig.debug
11471+++ b/arch/x86/Kconfig.debug
11472@@ -84,7 +84,7 @@ config X86_PTDUMP
11473 config DEBUG_RODATA
11474 bool "Write protect kernel read-only data structures"
11475 default y
11476- depends on DEBUG_KERNEL
11477+ depends on DEBUG_KERNEL && BROKEN
11478 ---help---
11479 Mark the kernel read-only data as write-protected in the pagetables,
11480 in order to catch accidental (and incorrect) writes to such const
11481@@ -102,7 +102,7 @@ config DEBUG_RODATA_TEST
11482
11483 config DEBUG_SET_MODULE_RONX
11484 bool "Set loadable kernel module data as NX and text as RO"
11485- depends on MODULES
11486+ depends on MODULES && BROKEN
11487 ---help---
11488 This option helps catch unintended modifications to loadable
11489 kernel module's text and read-only data. It also prevents execution
11490diff --git a/arch/x86/Makefile b/arch/x86/Makefile
11491index 5c47726..8c4fa67 100644
11492--- a/arch/x86/Makefile
11493+++ b/arch/x86/Makefile
11494@@ -54,6 +54,7 @@ else
11495 UTS_MACHINE := x86_64
11496 CHECKFLAGS += -D__x86_64__ -m64
11497
11498+ biarch := $(call cc-option,-m64)
11499 KBUILD_AFLAGS += -m64
11500 KBUILD_CFLAGS += -m64
11501
11502@@ -234,3 +235,12 @@ define archhelp
11503 echo ' FDARGS="..." arguments for the booted kernel'
11504 echo ' FDINITRD=file initrd for the booted kernel'
11505 endef
11506+
11507+define OLD_LD
11508+
11509+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
11510+*** Please upgrade your binutils to 2.18 or newer
11511+endef
11512+
11513+archprepare:
11514+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
11515diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
11516index 379814b..add62ce 100644
11517--- a/arch/x86/boot/Makefile
11518+++ b/arch/x86/boot/Makefile
11519@@ -65,6 +65,9 @@ KBUILD_CFLAGS := $(USERINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
11520 $(call cc-option, -fno-stack-protector) \
11521 $(call cc-option, -mpreferred-stack-boundary=2)
11522 KBUILD_CFLAGS += $(call cc-option, -m32)
11523+ifdef CONSTIFY_PLUGIN
11524+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
11525+endif
11526 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
11527 GCOV_PROFILE := n
11528
11529diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
11530index 878e4b9..20537ab 100644
11531--- a/arch/x86/boot/bitops.h
11532+++ b/arch/x86/boot/bitops.h
11533@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
11534 u8 v;
11535 const u32 *p = (const u32 *)addr;
11536
11537- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
11538+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
11539 return v;
11540 }
11541
11542@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
11543
11544 static inline void set_bit(int nr, void *addr)
11545 {
11546- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
11547+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
11548 }
11549
11550 #endif /* BOOT_BITOPS_H */
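
The asm to asm volatile conversions above (and the matching ones in boot.h and cpucheck.c further down) address a real compiler hazard: GCC treats an asm with outputs as a pure function of its inputs, so it may common-subexpression-eliminate two identical statements or delete one whose result looks unused, which is wrong when the asm reads machine state the compiler cannot see (flags, %ds, %cr0, cpuid, string instructions). volatile forces each instance to be emitted and executed. A standalone x86 illustration (compile at -O2 and compare the generated code):

    #include <stdint.h>
    #include <stdio.h>

    /* Without volatile the compiler may fold repeated reads into one. */
    static inline uint64_t rdtsc_plain(void)
    {
        uint32_t lo, hi;
        asm("rdtsc" : "=a" (lo), "=d" (hi));
        return ((uint64_t)hi << 32) | lo;
    }

    /* With volatile every call re-executes the instruction. */
    static inline uint64_t rdtsc_volatile(void)
    {
        uint32_t lo, hi;
        asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
        return ((uint64_t)hi << 32) | lo;
    }

    int main(void)
    {
        uint64_t a = rdtsc_plain(),    b = rdtsc_plain();    /* may be CSE'd: delta 0 */
        uint64_t c = rdtsc_volatile(), d = rdtsc_volatile(); /* always re-read */
        printf("plain delta: %llu, volatile delta: %llu\n",
               (unsigned long long)(b - a), (unsigned long long)(d - c));
        return 0;
    }
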
11551diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
11552index 5b75319..331a4ca 100644
11553--- a/arch/x86/boot/boot.h
11554+++ b/arch/x86/boot/boot.h
11555@@ -85,7 +85,7 @@ static inline void io_delay(void)
11556 static inline u16 ds(void)
11557 {
11558 u16 seg;
11559- asm("movw %%ds,%0" : "=rm" (seg));
11560+ asm volatile("movw %%ds,%0" : "=rm" (seg));
11561 return seg;
11562 }
11563
11564@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
11565 static inline int memcmp(const void *s1, const void *s2, size_t len)
11566 {
11567 u8 diff;
11568- asm("repe; cmpsb; setnz %0"
11569+ asm volatile("repe; cmpsb; setnz %0"
11570 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
11571 return diff;
11572 }
11573diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
11574index 5ef205c..342191d 100644
11575--- a/arch/x86/boot/compressed/Makefile
11576+++ b/arch/x86/boot/compressed/Makefile
11577@@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
11578 KBUILD_CFLAGS += $(cflags-y)
11579 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
11580 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
11581+ifdef CONSTIFY_PLUGIN
11582+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
11583+endif
11584
11585 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
11586 GCOV_PROFILE := n
11587diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
11588index d606463..b887794 100644
11589--- a/arch/x86/boot/compressed/eboot.c
11590+++ b/arch/x86/boot/compressed/eboot.c
11591@@ -150,7 +150,6 @@ again:
11592 *addr = max_addr;
11593 }
11594
11595-free_pool:
11596 efi_call_phys1(sys_table->boottime->free_pool, map);
11597
11598 fail:
11599@@ -214,7 +213,6 @@ static efi_status_t low_alloc(unsigned long size, unsigned long align,
11600 if (i == map_size / desc_size)
11601 status = EFI_NOT_FOUND;
11602
11603-free_pool:
11604 efi_call_phys1(sys_table->boottime->free_pool, map);
11605 fail:
11606 return status;
11607diff --git a/arch/x86/boot/compressed/efi_stub_32.S b/arch/x86/boot/compressed/efi_stub_32.S
11608index a53440e..c3dbf1e 100644
11609--- a/arch/x86/boot/compressed/efi_stub_32.S
11610+++ b/arch/x86/boot/compressed/efi_stub_32.S
11611@@ -46,16 +46,13 @@ ENTRY(efi_call_phys)
11612 * parameter 2, ..., param n. To make things easy, we save the return
11613 * address of efi_call_phys in a global variable.
11614 */
11615- popl %ecx
11616- movl %ecx, saved_return_addr(%edx)
11617- /* get the function pointer into ECX*/
11618- popl %ecx
11619- movl %ecx, efi_rt_function_ptr(%edx)
11620+ popl saved_return_addr(%edx)
11621+ popl efi_rt_function_ptr(%edx)
11622
11623 /*
11624 * 3. Call the physical function.
11625 */
11626- call *%ecx
11627+ call *efi_rt_function_ptr(%edx)
11628
11629 /*
11630 * 4. Balance the stack. And because EAX contain the return value,
11631@@ -67,15 +64,12 @@ ENTRY(efi_call_phys)
11632 1: popl %edx
11633 subl $1b, %edx
11634
11635- movl efi_rt_function_ptr(%edx), %ecx
11636- pushl %ecx
11637+ pushl efi_rt_function_ptr(%edx)
11638
11639 /*
11640 * 10. Push the saved return address onto the stack and return.
11641 */
11642- movl saved_return_addr(%edx), %ecx
11643- pushl %ecx
11644- ret
11645+ jmpl *saved_return_addr(%edx)
11646 ENDPROC(efi_call_phys)
11647 .previous
11648
11649diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
11650index 1e3184f..0d11e2e 100644
11651--- a/arch/x86/boot/compressed/head_32.S
11652+++ b/arch/x86/boot/compressed/head_32.S
11653@@ -118,7 +118,7 @@ preferred_addr:
11654 notl %eax
11655 andl %eax, %ebx
11656 #else
11657- movl $LOAD_PHYSICAL_ADDR, %ebx
11658+ movl $____LOAD_PHYSICAL_ADDR, %ebx
11659 #endif
11660
11661 /* Target address to relocate to for decompression */
11662@@ -204,7 +204,7 @@ relocated:
11663 * and where it was actually loaded.
11664 */
11665 movl %ebp, %ebx
11666- subl $LOAD_PHYSICAL_ADDR, %ebx
11667+ subl $____LOAD_PHYSICAL_ADDR, %ebx
11668 jz 2f /* Nothing to be done if loaded at compiled addr. */
11669 /*
11670 * Process relocations.
11671@@ -212,8 +212,7 @@ relocated:
11672
11673 1: subl $4, %edi
11674 movl (%edi), %ecx
11675- testl %ecx, %ecx
11676- jz 2f
11677+ jecxz 2f
11678 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
11679 jmp 1b
11680 2:
11681diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
11682index 16f24e6..47491a3 100644
11683--- a/arch/x86/boot/compressed/head_64.S
11684+++ b/arch/x86/boot/compressed/head_64.S
11685@@ -97,7 +97,7 @@ ENTRY(startup_32)
11686 notl %eax
11687 andl %eax, %ebx
11688 #else
11689- movl $LOAD_PHYSICAL_ADDR, %ebx
11690+ movl $____LOAD_PHYSICAL_ADDR, %ebx
11691 #endif
11692
11693 /* Target address to relocate to for decompression */
11694@@ -272,7 +272,7 @@ preferred_addr:
11695 notq %rax
11696 andq %rax, %rbp
11697 #else
11698- movq $LOAD_PHYSICAL_ADDR, %rbp
11699+ movq $____LOAD_PHYSICAL_ADDR, %rbp
11700 #endif
11701
11702 /* Target address to relocate to for decompression */
11703@@ -363,8 +363,8 @@ gdt:
11704 .long gdt
11705 .word 0
11706 .quad 0x0000000000000000 /* NULL descriptor */
11707- .quad 0x00af9a000000ffff /* __KERNEL_CS */
11708- .quad 0x00cf92000000ffff /* __KERNEL_DS */
11709+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
11710+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
11711 .quad 0x0080890000000000 /* TS descriptor */
11712 .quad 0x0000000000000000 /* TS continued */
11713 gdt_end:
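
[annotation] The GDT tweak above changes exactly one bit per descriptor: 0x9a -> 0x9b and 0x92 -> 0x93 pre-set the "accessed" bit in the type field. With it already set, the CPU never has to write the descriptor back on first load of the selector, which matters if the GDT is mapped read-only (consistent with KERNEXEC's read-only treatment of kernel structures; the patch itself gives no comment). A decoding sketch:

#include <stdio.h>
#include <stdint.h>

static void decode(const char *name, uint64_t desc)
{
        unsigned int access = (desc >> 40) & 0xff;      /* P | DPL | S | type */

        printf("%-16s access=%02x accessed-bit=%u\n", name, access, access & 1);
}

int main(void)
{
        decode("__KERNEL_CS old", 0x00af9a000000ffffULL);
        decode("__KERNEL_CS new", 0x00af9b000000ffffULL);
        decode("__KERNEL_DS old", 0x00cf92000000ffffULL);
        decode("__KERNEL_DS new", 0x00cf93000000ffffULL);
        return 0;
}
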
11714diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
11715index 7cb56c6..d382d84 100644
11716--- a/arch/x86/boot/compressed/misc.c
11717+++ b/arch/x86/boot/compressed/misc.c
11718@@ -303,7 +303,7 @@ static void parse_elf(void *output)
11719 case PT_LOAD:
11720 #ifdef CONFIG_RELOCATABLE
11721 dest = output;
11722- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
11723+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
11724 #else
11725 dest = (void *)(phdr->p_paddr);
11726 #endif
11727@@ -354,7 +354,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
11728 error("Destination address too large");
11729 #endif
11730 #ifndef CONFIG_RELOCATABLE
11731- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
11732+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
11733 error("Wrong destination address");
11734 #endif
11735
11736diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
11737index 4d3ff03..e4972ff 100644
11738--- a/arch/x86/boot/cpucheck.c
11739+++ b/arch/x86/boot/cpucheck.c
11740@@ -74,7 +74,7 @@ static int has_fpu(void)
11741 u16 fcw = -1, fsw = -1;
11742 u32 cr0;
11743
11744- asm("movl %%cr0,%0" : "=r" (cr0));
11745+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
11746 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
11747 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
11748 asm volatile("movl %0,%%cr0" : : "r" (cr0));
11749@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
11750 {
11751 u32 f0, f1;
11752
11753- asm("pushfl ; "
11754+ asm volatile("pushfl ; "
11755 "pushfl ; "
11756 "popl %0 ; "
11757 "movl %0,%1 ; "
11758@@ -115,7 +115,7 @@ static void get_flags(void)
11759 set_bit(X86_FEATURE_FPU, cpu.flags);
11760
11761 if (has_eflag(X86_EFLAGS_ID)) {
11762- asm("cpuid"
11763+ asm volatile("cpuid"
11764 : "=a" (max_intel_level),
11765 "=b" (cpu_vendor[0]),
11766 "=d" (cpu_vendor[1]),
11767@@ -124,7 +124,7 @@ static void get_flags(void)
11768
11769 if (max_intel_level >= 0x00000001 &&
11770 max_intel_level <= 0x0000ffff) {
11771- asm("cpuid"
11772+ asm volatile("cpuid"
11773 : "=a" (tfms),
11774 "=c" (cpu.flags[4]),
11775 "=d" (cpu.flags[0])
11776@@ -136,7 +136,7 @@ static void get_flags(void)
11777 cpu.model += ((tfms >> 16) & 0xf) << 4;
11778 }
11779
11780- asm("cpuid"
11781+ asm volatile("cpuid"
11782 : "=a" (max_amd_level)
11783 : "a" (0x80000000)
11784 : "ebx", "ecx", "edx");
11785@@ -144,7 +144,7 @@ static void get_flags(void)
11786 if (max_amd_level >= 0x80000001 &&
11787 max_amd_level <= 0x8000ffff) {
11788 u32 eax = 0x80000001;
11789- asm("cpuid"
11790+ asm volatile("cpuid"
11791 : "+a" (eax),
11792 "=c" (cpu.flags[6]),
11793 "=d" (cpu.flags[1])
11794@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
11795 u32 ecx = MSR_K7_HWCR;
11796 u32 eax, edx;
11797
11798- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
11799+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
11800 eax &= ~(1 << 15);
11801- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
11802+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
11803
11804 get_flags(); /* Make sure it really did something */
11805 err = check_flags();
11806@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
11807 u32 ecx = MSR_VIA_FCR;
11808 u32 eax, edx;
11809
11810- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
11811+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
11812 eax |= (1<<1)|(1<<7);
11813- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
11814+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
11815
11816 set_bit(X86_FEATURE_CX8, cpu.flags);
11817 err = check_flags();
11818@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
11819 u32 eax, edx;
11820 u32 level = 1;
11821
11822- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
11823- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
11824- asm("cpuid"
11825+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
11826+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
11827+ asm volatile("cpuid"
11828 : "+a" (level), "=d" (cpu.flags[0])
11829 : : "ecx", "ebx");
11830- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
11831+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
11832
11833 err = check_flags();
11834 }
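
[annotation] All the asm -> asm volatile changes in cpucheck.c share one rationale: an asm statement with outputs but no volatile is assumed to be a pure function of its inputs, so GCC may CSE it, hoist it, or delete it if the outputs look unused. cpuid, rdmsr and wrmsr read and write hidden machine state, so that assumption is unsafe. A user-space sketch of the bug class, using rdtsc since rdmsr is privileged:

#include <stdio.h>

static inline unsigned long long rdtsc(void)
{
        unsigned int lo, hi;

        /* Without volatile the compiler may assume two executions with the
         * same (empty) inputs yield the same outputs and fold them. */
        asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
        return ((unsigned long long)hi << 32) | lo;
}

int main(void)
{
        unsigned long long a = rdtsc();
        unsigned long long b = rdtsc();

        printf("delta = %llu cycles\n", b - a); /* 0 if the asm were folded */
        return 0;
}
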
11835diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
11836index 9ec06a1..2c25e79 100644
11837--- a/arch/x86/boot/header.S
11838+++ b/arch/x86/boot/header.S
11839@@ -409,10 +409,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
11840 # single linked list of
11841 # struct setup_data
11842
11843-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
11844+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
11845
11846 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
11847+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11848+#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
11849+#else
11850 #define VO_INIT_SIZE (VO__end - VO__text)
11851+#endif
11852 #if ZO_INIT_SIZE > VO_INIT_SIZE
11853 #define INIT_SIZE ZO_INIT_SIZE
11854 #else
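
[annotation] The ____LOAD_PHYSICAL_ADDR spelling used throughout the boot-stub hunks (head_32.S, head_64.S, misc.c, and header.S here) is not defined in this excerpt; the definition lives elsewhere in the patch. The apparent intent is that the decompression stub keeps using the raw link-time constant under the quadruple-underscore name while LOAD_PHYSICAL_ADDR proper may be redefined for KERNEXEC. A hypothetical sketch - the Kconfig values are stand-ins, and the macro body mirrors the stock kernel's align-and-round definition:

#include <stdio.h>

#define CONFIG_PHYSICAL_START   0x1000000       /* illustrative values */
#define CONFIG_PHYSICAL_ALIGN   0x200000

/* The raw, link-time constant the boot stub keeps using: */
#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
                                + (CONFIG_PHYSICAL_ALIGN - 1)) \
                                & ~(CONFIG_PHYSICAL_ALIGN - 1))

int main(void)
{
        printf("____LOAD_PHYSICAL_ADDR = %#x\n", ____LOAD_PHYSICAL_ADDR);
        return 0;
}
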
11855diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
11856index db75d07..8e6d0af 100644
11857--- a/arch/x86/boot/memory.c
11858+++ b/arch/x86/boot/memory.c
11859@@ -19,7 +19,7 @@
11860
11861 static int detect_memory_e820(void)
11862 {
11863- int count = 0;
11864+ unsigned int count = 0;
11865 struct biosregs ireg, oreg;
11866 struct e820entry *desc = boot_params.e820_map;
11867 static struct e820entry buf; /* static so it is zeroed */
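
[annotation] Flipping count to unsigned is one instance of a pattern this patch applies broadly (get_entry() in video.c just below gets the same treatment): counters that index fixed-size arrays become unsigned so a negative value cannot slip through a signed bound check and index backwards. A toy illustration of the bug class, not the kernel code:

#include <stdio.h>

#define E820MAX 128     /* signed bound, as in a naive check */

int main(void)
{
        int count = -1;                 /* imagine a wrapped counter */

        if (count < E820MAX)            /* -1 < 128: the check passes... */
                printf("signed: index %d accepted (out of bounds)\n", count);

        unsigned int ucount = (unsigned int)count;
        if (ucount < E820MAX)           /* 0xffffffff < 128: false */
                puts("unsigned: accepted");
        else
                puts("unsigned: the same check now rejects the wrap");
        return 0;
}
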
11868diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
11869index 11e8c6e..fdbb1ed 100644
11870--- a/arch/x86/boot/video-vesa.c
11871+++ b/arch/x86/boot/video-vesa.c
11872@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
11873
11874 boot_params.screen_info.vesapm_seg = oreg.es;
11875 boot_params.screen_info.vesapm_off = oreg.di;
11876+ boot_params.screen_info.vesapm_size = oreg.cx;
11877 }
11878
11879 /*
11880diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
11881index 43eda28..5ab5fdb 100644
11882--- a/arch/x86/boot/video.c
11883+++ b/arch/x86/boot/video.c
11884@@ -96,7 +96,7 @@ static void store_mode_params(void)
11885 static unsigned int get_entry(void)
11886 {
11887 char entry_buf[4];
11888- int i, len = 0;
11889+ unsigned int i, len = 0;
11890 int key;
11891 unsigned int v;
11892
11893diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
11894index 9105655..5e37f27 100644
11895--- a/arch/x86/crypto/aes-x86_64-asm_64.S
11896+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
11897@@ -8,6 +8,8 @@
11898 * including this sentence is retained in full.
11899 */
11900
11901+#include <asm/alternative-asm.h>
11902+
11903 .extern crypto_ft_tab
11904 .extern crypto_it_tab
11905 .extern crypto_fl_tab
11906@@ -70,6 +72,8 @@
11907 je B192; \
11908 leaq 32(r9),r9;
11909
11910+#define ret pax_force_retaddr 0, 1; ret
11911+
11912 #define epilogue(FUNC,r1,r2,r3,r4,r5,r6,r7,r8,r9) \
11913 movq r1,r2; \
11914 movq r3,r4; \
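
[annotation] The `#define ret pax_force_retaddr 0, 1; ret` line is a preprocessor trick: .S files go through cpp, so redefining the ret token instruments every return site in the rest of the file at once, instead of patching each one by hand as the sibling crypto files below do. A plain-C analogue of the same self-referential redefinition (safe from recursion because a macro name is not re-expanded inside its own expansion; illustration only - redefining a keyword is a hack):

#include <stdio.h>

#define trace() fprintf(stderr, "leaving %s\n", __func__)
#define return trace(); return

static int twice(int x)
{
        return 2 * x;   /* expands to: trace(); return 2 * x; */
}

int main(void)
{
        printf("%d\n", twice(21));
        return 0;
}
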
11915diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
11916index 477e9d7..3ab339f 100644
11917--- a/arch/x86/crypto/aesni-intel_asm.S
11918+++ b/arch/x86/crypto/aesni-intel_asm.S
11919@@ -31,6 +31,7 @@
11920
11921 #include <linux/linkage.h>
11922 #include <asm/inst.h>
11923+#include <asm/alternative-asm.h>
11924
11925 #ifdef __x86_64__
11926 .data
11927@@ -1441,6 +1442,7 @@ _return_T_done_decrypt:
11928 pop %r14
11929 pop %r13
11930 pop %r12
11931+ pax_force_retaddr 0, 1
11932 ret
11933 ENDPROC(aesni_gcm_dec)
11934
11935@@ -1705,6 +1707,7 @@ _return_T_done_encrypt:
11936 pop %r14
11937 pop %r13
11938 pop %r12
11939+ pax_force_retaddr 0, 1
11940 ret
11941 ENDPROC(aesni_gcm_enc)
11942
11943@@ -1722,6 +1725,7 @@ _key_expansion_256a:
11944 pxor %xmm1, %xmm0
11945 movaps %xmm0, (TKEYP)
11946 add $0x10, TKEYP
11947+ pax_force_retaddr_bts
11948 ret
11949 ENDPROC(_key_expansion_128)
11950 ENDPROC(_key_expansion_256a)
11951@@ -1748,6 +1752,7 @@ _key_expansion_192a:
11952 shufps $0b01001110, %xmm2, %xmm1
11953 movaps %xmm1, 0x10(TKEYP)
11954 add $0x20, TKEYP
11955+ pax_force_retaddr_bts
11956 ret
11957 ENDPROC(_key_expansion_192a)
11958
11959@@ -1768,6 +1773,7 @@ _key_expansion_192b:
11960
11961 movaps %xmm0, (TKEYP)
11962 add $0x10, TKEYP
11963+ pax_force_retaddr_bts
11964 ret
11965 ENDPROC(_key_expansion_192b)
11966
11967@@ -1781,6 +1787,7 @@ _key_expansion_256b:
11968 pxor %xmm1, %xmm2
11969 movaps %xmm2, (TKEYP)
11970 add $0x10, TKEYP
11971+ pax_force_retaddr_bts
11972 ret
11973 ENDPROC(_key_expansion_256b)
11974
11975@@ -1894,6 +1901,7 @@ ENTRY(aesni_set_key)
11976 #ifndef __x86_64__
11977 popl KEYP
11978 #endif
11979+ pax_force_retaddr 0, 1
11980 ret
11981 ENDPROC(aesni_set_key)
11982
11983@@ -1916,6 +1924,7 @@ ENTRY(aesni_enc)
11984 popl KLEN
11985 popl KEYP
11986 #endif
11987+ pax_force_retaddr 0, 1
11988 ret
11989 ENDPROC(aesni_enc)
11990
11991@@ -1974,6 +1983,7 @@ _aesni_enc1:
11992 AESENC KEY STATE
11993 movaps 0x70(TKEYP), KEY
11994 AESENCLAST KEY STATE
11995+ pax_force_retaddr_bts
11996 ret
11997 ENDPROC(_aesni_enc1)
11998
11999@@ -2083,6 +2093,7 @@ _aesni_enc4:
12000 AESENCLAST KEY STATE2
12001 AESENCLAST KEY STATE3
12002 AESENCLAST KEY STATE4
12003+ pax_force_retaddr_bts
12004 ret
12005 ENDPROC(_aesni_enc4)
12006
12007@@ -2106,6 +2117,7 @@ ENTRY(aesni_dec)
12008 popl KLEN
12009 popl KEYP
12010 #endif
12011+ pax_force_retaddr 0, 1
12012 ret
12013 ENDPROC(aesni_dec)
12014
12015@@ -2164,6 +2176,7 @@ _aesni_dec1:
12016 AESDEC KEY STATE
12017 movaps 0x70(TKEYP), KEY
12018 AESDECLAST KEY STATE
12019+ pax_force_retaddr_bts
12020 ret
12021 ENDPROC(_aesni_dec1)
12022
12023@@ -2273,6 +2286,7 @@ _aesni_dec4:
12024 AESDECLAST KEY STATE2
12025 AESDECLAST KEY STATE3
12026 AESDECLAST KEY STATE4
12027+ pax_force_retaddr_bts
12028 ret
12029 ENDPROC(_aesni_dec4)
12030
12031@@ -2331,6 +2345,7 @@ ENTRY(aesni_ecb_enc)
12032 popl KEYP
12033 popl LEN
12034 #endif
12035+ pax_force_retaddr 0, 1
12036 ret
12037 ENDPROC(aesni_ecb_enc)
12038
12039@@ -2390,6 +2405,7 @@ ENTRY(aesni_ecb_dec)
12040 popl KEYP
12041 popl LEN
12042 #endif
12043+ pax_force_retaddr 0, 1
12044 ret
12045 ENDPROC(aesni_ecb_dec)
12046
12047@@ -2432,6 +2448,7 @@ ENTRY(aesni_cbc_enc)
12048 popl LEN
12049 popl IVP
12050 #endif
12051+ pax_force_retaddr 0, 1
12052 ret
12053 ENDPROC(aesni_cbc_enc)
12054
12055@@ -2523,6 +2540,7 @@ ENTRY(aesni_cbc_dec)
12056 popl LEN
12057 popl IVP
12058 #endif
12059+ pax_force_retaddr 0, 1
12060 ret
12061 ENDPROC(aesni_cbc_dec)
12062
12063@@ -2550,6 +2568,7 @@ _aesni_inc_init:
12064 mov $1, TCTR_LOW
12065 MOVQ_R64_XMM TCTR_LOW INC
12066 MOVQ_R64_XMM CTR TCTR_LOW
12067+ pax_force_retaddr_bts
12068 ret
12069 ENDPROC(_aesni_inc_init)
12070
12071@@ -2579,6 +2598,7 @@ _aesni_inc:
12072 .Linc_low:
12073 movaps CTR, IV
12074 PSHUFB_XMM BSWAP_MASK IV
12075+ pax_force_retaddr_bts
12076 ret
12077 ENDPROC(_aesni_inc)
12078
12079@@ -2640,6 +2660,7 @@ ENTRY(aesni_ctr_enc)
12080 .Lctr_enc_ret:
12081 movups IV, (IVP)
12082 .Lctr_enc_just_ret:
12083+ pax_force_retaddr 0, 1
12084 ret
12085 ENDPROC(aesni_ctr_enc)
12086
12087@@ -2766,6 +2787,7 @@ ENTRY(aesni_xts_crypt8)
12088 pxor INC, STATE4
12089 movdqu STATE4, 0x70(OUTP)
12090
12091+ pax_force_retaddr 0, 1
12092 ret
12093 ENDPROC(aesni_xts_crypt8)
12094
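
[annotation] Every pax_force_retaddr sprinkled before the rets above sets bit 63 of the saved return address just before it is consumed. The point, as I read the KERNEXEC design: genuine x86-64 kernel text addresses already have the top bits set, so they pass through unchanged, while an overwritten return address pointing into userland becomes non-canonical and faults instead of being followed. Arithmetic sketch:

#include <stdio.h>
#include <stdint.h>

#define KERNEXEC_MASK 0x8000000000000000ULL

static void show(const char *what, uint64_t ret)
{
        printf("%-7s %016llx -> %016llx\n", what,
               (unsigned long long)ret,
               (unsigned long long)(ret | KERNEXEC_MASK));
}

int main(void)
{
        show("kernel:", 0xffffffff81234567ULL); /* unchanged */
        show("user:", 0x00007f0012345678ULL);   /* now non-canonical: faults */
        return 0;
}
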
12095diff --git a/arch/x86/crypto/blowfish-avx2-asm_64.S b/arch/x86/crypto/blowfish-avx2-asm_64.S
12096index 784452e..46982c7 100644
12097--- a/arch/x86/crypto/blowfish-avx2-asm_64.S
12098+++ b/arch/x86/crypto/blowfish-avx2-asm_64.S
12099@@ -221,6 +221,7 @@ __blowfish_enc_blk32:
12100
12101 write_block(RXl, RXr);
12102
12103+ pax_force_retaddr 0, 1
12104 ret;
12105 ENDPROC(__blowfish_enc_blk32)
12106
12107@@ -250,6 +251,7 @@ __blowfish_dec_blk32:
12108
12109 write_block(RXl, RXr);
12110
12111+ pax_force_retaddr 0, 1
12112 ret;
12113 ENDPROC(__blowfish_dec_blk32)
12114
12115@@ -284,6 +286,7 @@ ENTRY(blowfish_ecb_enc_32way)
12116
12117 vzeroupper;
12118
12119+ pax_force_retaddr 0, 1
12120 ret;
12121 ENDPROC(blowfish_ecb_enc_32way)
12122
12123@@ -318,6 +321,7 @@ ENTRY(blowfish_ecb_dec_32way)
12124
12125 vzeroupper;
12126
12127+ pax_force_retaddr 0, 1
12128 ret;
12129 ENDPROC(blowfish_ecb_dec_32way)
12130
12131@@ -365,6 +369,7 @@ ENTRY(blowfish_cbc_dec_32way)
12132
12133 vzeroupper;
12134
12135+ pax_force_retaddr 0, 1
12136 ret;
12137 ENDPROC(blowfish_cbc_dec_32way)
12138
12139@@ -445,5 +450,6 @@ ENTRY(blowfish_ctr_32way)
12140
12141 vzeroupper;
12142
12143+ pax_force_retaddr 0, 1
12144 ret;
12145 ENDPROC(blowfish_ctr_32way)
12146diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
12147index 246c670..4d1ed00 100644
12148--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
12149+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
12150@@ -21,6 +21,7 @@
12151 */
12152
12153 #include <linux/linkage.h>
12154+#include <asm/alternative-asm.h>
12155
12156 .file "blowfish-x86_64-asm.S"
12157 .text
12158@@ -149,9 +150,11 @@ ENTRY(__blowfish_enc_blk)
12159 jnz .L__enc_xor;
12160
12161 write_block();
12162+ pax_force_retaddr 0, 1
12163 ret;
12164 .L__enc_xor:
12165 xor_block();
12166+ pax_force_retaddr 0, 1
12167 ret;
12168 ENDPROC(__blowfish_enc_blk)
12169
12170@@ -183,6 +186,7 @@ ENTRY(blowfish_dec_blk)
12171
12172 movq %r11, %rbp;
12173
12174+ pax_force_retaddr 0, 1
12175 ret;
12176 ENDPROC(blowfish_dec_blk)
12177
12178@@ -334,6 +338,7 @@ ENTRY(__blowfish_enc_blk_4way)
12179
12180 popq %rbx;
12181 popq %rbp;
12182+ pax_force_retaddr 0, 1
12183 ret;
12184
12185 .L__enc_xor4:
12186@@ -341,6 +346,7 @@ ENTRY(__blowfish_enc_blk_4way)
12187
12188 popq %rbx;
12189 popq %rbp;
12190+ pax_force_retaddr 0, 1
12191 ret;
12192 ENDPROC(__blowfish_enc_blk_4way)
12193
12194@@ -375,5 +381,6 @@ ENTRY(blowfish_dec_blk_4way)
12195 popq %rbx;
12196 popq %rbp;
12197
12198+ pax_force_retaddr 0, 1
12199 ret;
12200 ENDPROC(blowfish_dec_blk_4way)
12201diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
12202index ce71f92..2dd5b1e 100644
12203--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
12204+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
12205@@ -16,6 +16,7 @@
12206 */
12207
12208 #include <linux/linkage.h>
12209+#include <asm/alternative-asm.h>
12210
12211 #define CAMELLIA_TABLE_BYTE_LEN 272
12212
12213@@ -191,6 +192,7 @@ roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
12214 roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
12215 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15,
12216 %rcx, (%r9));
12217+ pax_force_retaddr_bts
12218 ret;
12219 ENDPROC(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
12220
12221@@ -199,6 +201,7 @@ roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
12222 roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3,
12223 %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11,
12224 %rax, (%r9));
12225+ pax_force_retaddr_bts
12226 ret;
12227 ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
12228
12229@@ -780,6 +783,7 @@ __camellia_enc_blk16:
12230 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
12231 %xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax));
12232
12233+ pax_force_retaddr_bts
12234 ret;
12235
12236 .align 8
12237@@ -865,6 +869,7 @@ __camellia_dec_blk16:
12238 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
12239 %xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax));
12240
12241+ pax_force_retaddr_bts
12242 ret;
12243
12244 .align 8
12245@@ -904,6 +909,7 @@ ENTRY(camellia_ecb_enc_16way)
12246 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
12247 %xmm8, %rsi);
12248
12249+ pax_force_retaddr 0, 1
12250 ret;
12251 ENDPROC(camellia_ecb_enc_16way)
12252
12253@@ -932,6 +938,7 @@ ENTRY(camellia_ecb_dec_16way)
12254 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
12255 %xmm8, %rsi);
12256
12257+ pax_force_retaddr 0, 1
12258 ret;
12259 ENDPROC(camellia_ecb_dec_16way)
12260
12261@@ -981,6 +988,7 @@ ENTRY(camellia_cbc_dec_16way)
12262 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
12263 %xmm8, %rsi);
12264
12265+ pax_force_retaddr 0, 1
12266 ret;
12267 ENDPROC(camellia_cbc_dec_16way)
12268
12269@@ -1092,6 +1100,7 @@ ENTRY(camellia_ctr_16way)
12270 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
12271 %xmm8, %rsi);
12272
12273+ pax_force_retaddr 0, 1
12274 ret;
12275 ENDPROC(camellia_ctr_16way)
12276
12277@@ -1234,6 +1243,7 @@ camellia_xts_crypt_16way:
12278 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
12279 %xmm8, %rsi);
12280
12281+ pax_force_retaddr 0, 1
12282 ret;
12283 ENDPROC(camellia_xts_crypt_16way)
12284
12285diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
12286index 91a1878..bcf340a 100644
12287--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
12288+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
12289@@ -11,6 +11,7 @@
12290 */
12291
12292 #include <linux/linkage.h>
12293+#include <asm/alternative-asm.h>
12294
12295 #define CAMELLIA_TABLE_BYTE_LEN 272
12296
12297@@ -212,6 +213,7 @@ roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
12298 roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
12299 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15,
12300 %rcx, (%r9));
12301+ pax_force_retaddr_bts
12302 ret;
12303 ENDPROC(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
12304
12305@@ -220,6 +222,7 @@ roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
12306 roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3,
12307 %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11,
12308 %rax, (%r9));
12309+ pax_force_retaddr_bts
12310 ret;
12311 ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
12312
12313@@ -802,6 +805,7 @@ __camellia_enc_blk32:
12314 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
12315 %ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax));
12316
12317+ pax_force_retaddr_bts
12318 ret;
12319
12320 .align 8
12321@@ -887,6 +891,7 @@ __camellia_dec_blk32:
12322 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
12323 %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax));
12324
12325+ pax_force_retaddr_bts
12326 ret;
12327
12328 .align 8
12329@@ -930,6 +935,7 @@ ENTRY(camellia_ecb_enc_32way)
12330
12331 vzeroupper;
12332
12333+ pax_force_retaddr 0, 1
12334 ret;
12335 ENDPROC(camellia_ecb_enc_32way)
12336
12337@@ -962,6 +968,7 @@ ENTRY(camellia_ecb_dec_32way)
12338
12339 vzeroupper;
12340
12341+ pax_force_retaddr 0, 1
12342 ret;
12343 ENDPROC(camellia_ecb_dec_32way)
12344
12345@@ -1028,6 +1035,7 @@ ENTRY(camellia_cbc_dec_32way)
12346
12347 vzeroupper;
12348
12349+ pax_force_retaddr 0, 1
12350 ret;
12351 ENDPROC(camellia_cbc_dec_32way)
12352
12353@@ -1166,6 +1174,7 @@ ENTRY(camellia_ctr_32way)
12354
12355 vzeroupper;
12356
12357+ pax_force_retaddr 0, 1
12358 ret;
12359 ENDPROC(camellia_ctr_32way)
12360
12361@@ -1331,6 +1340,7 @@ camellia_xts_crypt_32way:
12362
12363 vzeroupper;
12364
12365+ pax_force_retaddr 0, 1
12366 ret;
12367 ENDPROC(camellia_xts_crypt_32way)
12368
12369diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
12370index 310319c..ce174a4 100644
12371--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
12372+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
12373@@ -21,6 +21,7 @@
12374 */
12375
12376 #include <linux/linkage.h>
12377+#include <asm/alternative-asm.h>
12378
12379 .file "camellia-x86_64-asm_64.S"
12380 .text
12381@@ -228,12 +229,14 @@ ENTRY(__camellia_enc_blk)
12382 enc_outunpack(mov, RT1);
12383
12384 movq RRBP, %rbp;
12385+ pax_force_retaddr 0, 1
12386 ret;
12387
12388 .L__enc_xor:
12389 enc_outunpack(xor, RT1);
12390
12391 movq RRBP, %rbp;
12392+ pax_force_retaddr 0, 1
12393 ret;
12394 ENDPROC(__camellia_enc_blk)
12395
12396@@ -272,6 +275,7 @@ ENTRY(camellia_dec_blk)
12397 dec_outunpack();
12398
12399 movq RRBP, %rbp;
12400+ pax_force_retaddr 0, 1
12401 ret;
12402 ENDPROC(camellia_dec_blk)
12403
12404@@ -463,6 +467,7 @@ ENTRY(__camellia_enc_blk_2way)
12405
12406 movq RRBP, %rbp;
12407 popq %rbx;
12408+ pax_force_retaddr 0, 1
12409 ret;
12410
12411 .L__enc2_xor:
12412@@ -470,6 +475,7 @@ ENTRY(__camellia_enc_blk_2way)
12413
12414 movq RRBP, %rbp;
12415 popq %rbx;
12416+ pax_force_retaddr 0, 1
12417 ret;
12418 ENDPROC(__camellia_enc_blk_2way)
12419
12420@@ -510,5 +516,6 @@ ENTRY(camellia_dec_blk_2way)
12421
12422 movq RRBP, %rbp;
12423 movq RXOR, %rbx;
12424+ pax_force_retaddr 0, 1
12425 ret;
12426 ENDPROC(camellia_dec_blk_2way)
12427diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
12428index c35fd5d..c1ee236 100644
12429--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
12430+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
12431@@ -24,6 +24,7 @@
12432 */
12433
12434 #include <linux/linkage.h>
12435+#include <asm/alternative-asm.h>
12436
12437 .file "cast5-avx-x86_64-asm_64.S"
12438
12439@@ -281,6 +282,7 @@ __cast5_enc_blk16:
12440 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
12441 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
12442
12443+ pax_force_retaddr 0, 1
12444 ret;
12445 ENDPROC(__cast5_enc_blk16)
12446
12447@@ -352,6 +354,7 @@ __cast5_dec_blk16:
12448 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
12449 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
12450
12451+ pax_force_retaddr 0, 1
12452 ret;
12453
12454 .L__skip_dec:
12455@@ -388,6 +391,7 @@ ENTRY(cast5_ecb_enc_16way)
12456 vmovdqu RR4, (6*4*4)(%r11);
12457 vmovdqu RL4, (7*4*4)(%r11);
12458
12459+ pax_force_retaddr
12460 ret;
12461 ENDPROC(cast5_ecb_enc_16way)
12462
12463@@ -420,6 +424,7 @@ ENTRY(cast5_ecb_dec_16way)
12464 vmovdqu RR4, (6*4*4)(%r11);
12465 vmovdqu RL4, (7*4*4)(%r11);
12466
12467+ pax_force_retaddr
12468 ret;
12469 ENDPROC(cast5_ecb_dec_16way)
12470
12471@@ -469,6 +474,7 @@ ENTRY(cast5_cbc_dec_16way)
12472
12473 popq %r12;
12474
12475+ pax_force_retaddr
12476 ret;
12477 ENDPROC(cast5_cbc_dec_16way)
12478
12479@@ -542,5 +548,6 @@ ENTRY(cast5_ctr_16way)
12480
12481 popq %r12;
12482
12483+ pax_force_retaddr
12484 ret;
12485 ENDPROC(cast5_ctr_16way)
12486diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
12487index e3531f8..18ded3a 100644
12488--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
12489+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
12490@@ -24,6 +24,7 @@
12491 */
12492
12493 #include <linux/linkage.h>
12494+#include <asm/alternative-asm.h>
12495 #include "glue_helper-asm-avx.S"
12496
12497 .file "cast6-avx-x86_64-asm_64.S"
12498@@ -295,6 +296,7 @@ __cast6_enc_blk8:
12499 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
12500 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
12501
12502+ pax_force_retaddr 0, 1
12503 ret;
12504 ENDPROC(__cast6_enc_blk8)
12505
12506@@ -340,6 +342,7 @@ __cast6_dec_blk8:
12507 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
12508 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
12509
12510+ pax_force_retaddr 0, 1
12511 ret;
12512 ENDPROC(__cast6_dec_blk8)
12513
12514@@ -358,6 +361,7 @@ ENTRY(cast6_ecb_enc_8way)
12515
12516 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
12517
12518+ pax_force_retaddr
12519 ret;
12520 ENDPROC(cast6_ecb_enc_8way)
12521
12522@@ -376,6 +380,7 @@ ENTRY(cast6_ecb_dec_8way)
12523
12524 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
12525
12526+ pax_force_retaddr
12527 ret;
12528 ENDPROC(cast6_ecb_dec_8way)
12529
12530@@ -399,6 +404,7 @@ ENTRY(cast6_cbc_dec_8way)
12531
12532 popq %r12;
12533
12534+ pax_force_retaddr
12535 ret;
12536 ENDPROC(cast6_cbc_dec_8way)
12537
12538@@ -424,6 +430,7 @@ ENTRY(cast6_ctr_8way)
12539
12540 popq %r12;
12541
12542+ pax_force_retaddr
12543 ret;
12544 ENDPROC(cast6_ctr_8way)
12545
12546@@ -446,6 +453,7 @@ ENTRY(cast6_xts_enc_8way)
12547 /* dst <= regs xor IVs(in dst) */
12548 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
12549
12550+ pax_force_retaddr
12551 ret;
12552 ENDPROC(cast6_xts_enc_8way)
12553
12554@@ -468,5 +476,6 @@ ENTRY(cast6_xts_dec_8way)
12555 /* dst <= regs xor IVs(in dst) */
12556 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
12557
12558+ pax_force_retaddr
12559 ret;
12560 ENDPROC(cast6_xts_dec_8way)
12561diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
12562index dbc4339..3d868c5 100644
12563--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
12564+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
12565@@ -45,6 +45,7 @@
12566
12567 #include <asm/inst.h>
12568 #include <linux/linkage.h>
12569+#include <asm/alternative-asm.h>
12570
12571 ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
12572
12573@@ -312,6 +313,7 @@ do_return:
12574 popq %rsi
12575 popq %rdi
12576 popq %rbx
12577+ pax_force_retaddr 0, 1
12578 ret
12579
12580 ################################################################
12581diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
12582index 586f41a..d02851e 100644
12583--- a/arch/x86/crypto/ghash-clmulni-intel_asm.S
12584+++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
12585@@ -18,6 +18,7 @@
12586
12587 #include <linux/linkage.h>
12588 #include <asm/inst.h>
12589+#include <asm/alternative-asm.h>
12590
12591 .data
12592
12593@@ -93,6 +94,7 @@ __clmul_gf128mul_ble:
12594 psrlq $1, T2
12595 pxor T2, T1
12596 pxor T1, DATA
12597+ pax_force_retaddr
12598 ret
12599 ENDPROC(__clmul_gf128mul_ble)
12600
12601@@ -105,6 +107,7 @@ ENTRY(clmul_ghash_mul)
12602 call __clmul_gf128mul_ble
12603 PSHUFB_XMM BSWAP DATA
12604 movups DATA, (%rdi)
12605+ pax_force_retaddr
12606 ret
12607 ENDPROC(clmul_ghash_mul)
12608
12609@@ -132,6 +135,7 @@ ENTRY(clmul_ghash_update)
12610 PSHUFB_XMM BSWAP DATA
12611 movups DATA, (%rdi)
12612 .Lupdate_just_ret:
12613+ pax_force_retaddr
12614 ret
12615 ENDPROC(clmul_ghash_update)
12616
12617@@ -157,5 +161,6 @@ ENTRY(clmul_ghash_setkey)
12618 pand .Lpoly, %xmm1
12619 pxor %xmm1, %xmm0
12620 movups %xmm0, (%rdi)
12621+ pax_force_retaddr
12622 ret
12623 ENDPROC(clmul_ghash_setkey)
12624diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
12625index 9279e0b..9270820 100644
12626--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
12627+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
12628@@ -1,4 +1,5 @@
12629 #include <linux/linkage.h>
12630+#include <asm/alternative-asm.h>
12631
12632 # enter salsa20_encrypt_bytes
12633 ENTRY(salsa20_encrypt_bytes)
12634@@ -789,6 +790,7 @@ ENTRY(salsa20_encrypt_bytes)
12635 add %r11,%rsp
12636 mov %rdi,%rax
12637 mov %rsi,%rdx
12638+ pax_force_retaddr 0, 1
12639 ret
12640 # bytesatleast65:
12641 ._bytesatleast65:
12642@@ -889,6 +891,7 @@ ENTRY(salsa20_keysetup)
12643 add %r11,%rsp
12644 mov %rdi,%rax
12645 mov %rsi,%rdx
12646+ pax_force_retaddr
12647 ret
12648 ENDPROC(salsa20_keysetup)
12649
12650@@ -914,5 +917,6 @@ ENTRY(salsa20_ivsetup)
12651 add %r11,%rsp
12652 mov %rdi,%rax
12653 mov %rsi,%rdx
12654+ pax_force_retaddr
12655 ret
12656 ENDPROC(salsa20_ivsetup)
12657diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
12658index 2f202f4..d9164d6 100644
12659--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
12660+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
12661@@ -24,6 +24,7 @@
12662 */
12663
12664 #include <linux/linkage.h>
12665+#include <asm/alternative-asm.h>
12666 #include "glue_helper-asm-avx.S"
12667
12668 .file "serpent-avx-x86_64-asm_64.S"
12669@@ -618,6 +619,7 @@ __serpent_enc_blk8_avx:
12670 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
12671 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
12672
12673+ pax_force_retaddr
12674 ret;
12675 ENDPROC(__serpent_enc_blk8_avx)
12676
12677@@ -672,6 +674,7 @@ __serpent_dec_blk8_avx:
12678 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
12679 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
12680
12681+ pax_force_retaddr
12682 ret;
12683 ENDPROC(__serpent_dec_blk8_avx)
12684
12685@@ -688,6 +691,7 @@ ENTRY(serpent_ecb_enc_8way_avx)
12686
12687 store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
12688
12689+ pax_force_retaddr
12690 ret;
12691 ENDPROC(serpent_ecb_enc_8way_avx)
12692
12693@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_8way_avx)
12694
12695 store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
12696
12697+ pax_force_retaddr
12698 ret;
12699 ENDPROC(serpent_ecb_dec_8way_avx)
12700
12701@@ -720,6 +725,7 @@ ENTRY(serpent_cbc_dec_8way_avx)
12702
12703 store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
12704
12705+ pax_force_retaddr
12706 ret;
12707 ENDPROC(serpent_cbc_dec_8way_avx)
12708
12709@@ -738,6 +744,7 @@ ENTRY(serpent_ctr_8way_avx)
12710
12711 store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
12712
12713+ pax_force_retaddr
12714 ret;
12715 ENDPROC(serpent_ctr_8way_avx)
12716
12717@@ -758,6 +765,7 @@ ENTRY(serpent_xts_enc_8way_avx)
12718 /* dst <= regs xor IVs(in dst) */
12719 store_xts_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
12720
12721+ pax_force_retaddr
12722 ret;
12723 ENDPROC(serpent_xts_enc_8way_avx)
12724
12725@@ -778,5 +786,6 @@ ENTRY(serpent_xts_dec_8way_avx)
12726 /* dst <= regs xor IVs(in dst) */
12727 store_xts_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
12728
12729+ pax_force_retaddr
12730 ret;
12731 ENDPROC(serpent_xts_dec_8way_avx)
12732diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S
12733index b222085..abd483c 100644
12734--- a/arch/x86/crypto/serpent-avx2-asm_64.S
12735+++ b/arch/x86/crypto/serpent-avx2-asm_64.S
12736@@ -15,6 +15,7 @@
12737 */
12738
12739 #include <linux/linkage.h>
12740+#include <asm/alternative-asm.h>
12741 #include "glue_helper-asm-avx2.S"
12742
12743 .file "serpent-avx2-asm_64.S"
12744@@ -610,6 +611,7 @@ __serpent_enc_blk16:
12745 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
12746 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
12747
12748+ pax_force_retaddr
12749 ret;
12750 ENDPROC(__serpent_enc_blk16)
12751
12752@@ -664,6 +666,7 @@ __serpent_dec_blk16:
12753 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
12754 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
12755
12756+ pax_force_retaddr
12757 ret;
12758 ENDPROC(__serpent_dec_blk16)
12759
12760@@ -684,6 +687,7 @@ ENTRY(serpent_ecb_enc_16way)
12761
12762 vzeroupper;
12763
12764+ pax_force_retaddr
12765 ret;
12766 ENDPROC(serpent_ecb_enc_16way)
12767
12768@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_16way)
12769
12770 vzeroupper;
12771
12772+ pax_force_retaddr
12773 ret;
12774 ENDPROC(serpent_ecb_dec_16way)
12775
12776@@ -725,6 +730,7 @@ ENTRY(serpent_cbc_dec_16way)
12777
12778 vzeroupper;
12779
12780+ pax_force_retaddr
12781 ret;
12782 ENDPROC(serpent_cbc_dec_16way)
12783
12784@@ -748,6 +754,7 @@ ENTRY(serpent_ctr_16way)
12785
12786 vzeroupper;
12787
12788+ pax_force_retaddr
12789 ret;
12790 ENDPROC(serpent_ctr_16way)
12791
12792@@ -772,6 +779,7 @@ ENTRY(serpent_xts_enc_16way)
12793
12794 vzeroupper;
12795
12796+ pax_force_retaddr
12797 ret;
12798 ENDPROC(serpent_xts_enc_16way)
12799
12800@@ -796,5 +804,6 @@ ENTRY(serpent_xts_dec_16way)
12801
12802 vzeroupper;
12803
12804+ pax_force_retaddr
12805 ret;
12806 ENDPROC(serpent_xts_dec_16way)
12807diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
12808index acc066c..1559cc4 100644
12809--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
12810+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
12811@@ -25,6 +25,7 @@
12812 */
12813
12814 #include <linux/linkage.h>
12815+#include <asm/alternative-asm.h>
12816
12817 .file "serpent-sse2-x86_64-asm_64.S"
12818 .text
12819@@ -690,12 +691,14 @@ ENTRY(__serpent_enc_blk_8way)
12820 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
12821 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
12822
12823+ pax_force_retaddr
12824 ret;
12825
12826 .L__enc_xor8:
12827 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
12828 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
12829
12830+ pax_force_retaddr
12831 ret;
12832 ENDPROC(__serpent_enc_blk_8way)
12833
12834@@ -750,5 +753,6 @@ ENTRY(serpent_dec_blk_8way)
12835 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
12836 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
12837
12838+ pax_force_retaddr
12839 ret;
12840 ENDPROC(serpent_dec_blk_8way)
12841diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
12842index a410950..3356d42 100644
12843--- a/arch/x86/crypto/sha1_ssse3_asm.S
12844+++ b/arch/x86/crypto/sha1_ssse3_asm.S
12845@@ -29,6 +29,7 @@
12846 */
12847
12848 #include <linux/linkage.h>
12849+#include <asm/alternative-asm.h>
12850
12851 #define CTX %rdi // arg1
12852 #define BUF %rsi // arg2
12853@@ -104,6 +105,7 @@
12854 pop %r12
12855 pop %rbp
12856 pop %rbx
12857+ pax_force_retaddr 0, 1
12858 ret
12859
12860 ENDPROC(\name)
12861diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S
12862index 642f156..4ab07b9 100644
12863--- a/arch/x86/crypto/sha256-avx-asm.S
12864+++ b/arch/x86/crypto/sha256-avx-asm.S
12865@@ -49,6 +49,7 @@
12866
12867 #ifdef CONFIG_AS_AVX
12868 #include <linux/linkage.h>
12869+#include <asm/alternative-asm.h>
12870
12871 ## assume buffers not aligned
12872 #define VMOVDQ vmovdqu
12873@@ -460,6 +461,7 @@ done_hash:
12874 popq %r13
12875 popq %rbp
12876 popq %rbx
12877+ pax_force_retaddr 0, 1
12878 ret
12879 ENDPROC(sha256_transform_avx)
12880
12881diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
12882index 9e86944..2e7f95a 100644
12883--- a/arch/x86/crypto/sha256-avx2-asm.S
12884+++ b/arch/x86/crypto/sha256-avx2-asm.S
12885@@ -50,6 +50,7 @@
12886
12887 #ifdef CONFIG_AS_AVX2
12888 #include <linux/linkage.h>
12889+#include <asm/alternative-asm.h>
12890
12891 ## assume buffers not aligned
12892 #define VMOVDQ vmovdqu
12893@@ -720,6 +721,7 @@ done_hash:
12894 popq %r12
12895 popq %rbp
12896 popq %rbx
12897+ pax_force_retaddr 0, 1
12898 ret
12899 ENDPROC(sha256_transform_rorx)
12900
12901diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S
12902index f833b74..c36ed14 100644
12903--- a/arch/x86/crypto/sha256-ssse3-asm.S
12904+++ b/arch/x86/crypto/sha256-ssse3-asm.S
12905@@ -47,6 +47,7 @@
12906 ########################################################################
12907
12908 #include <linux/linkage.h>
12909+#include <asm/alternative-asm.h>
12910
12911 ## assume buffers not aligned
12912 #define MOVDQ movdqu
12913@@ -471,6 +472,7 @@ done_hash:
12914 popq %rbp
12915 popq %rbx
12916
12917+ pax_force_retaddr 0, 1
12918 ret
12919 ENDPROC(sha256_transform_ssse3)
12920
12921diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S
12922index 974dde9..4533d34 100644
12923--- a/arch/x86/crypto/sha512-avx-asm.S
12924+++ b/arch/x86/crypto/sha512-avx-asm.S
12925@@ -49,6 +49,7 @@
12926
12927 #ifdef CONFIG_AS_AVX
12928 #include <linux/linkage.h>
12929+#include <asm/alternative-asm.h>
12930
12931 .text
12932
12933@@ -364,6 +365,7 @@ updateblock:
12934 mov frame_RSPSAVE(%rsp), %rsp
12935
12936 nowork:
12937+ pax_force_retaddr 0, 1
12938 ret
12939 ENDPROC(sha512_transform_avx)
12940
12941diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
12942index 568b961..061ef1d 100644
12943--- a/arch/x86/crypto/sha512-avx2-asm.S
12944+++ b/arch/x86/crypto/sha512-avx2-asm.S
12945@@ -51,6 +51,7 @@
12946
12947 #ifdef CONFIG_AS_AVX2
12948 #include <linux/linkage.h>
12949+#include <asm/alternative-asm.h>
12950
12951 .text
12952
12953@@ -678,6 +679,7 @@ done_hash:
12954
12955 # Restore Stack Pointer
12956 mov frame_RSPSAVE(%rsp), %rsp
12957+ pax_force_retaddr 0, 1
12958 ret
12959 ENDPROC(sha512_transform_rorx)
12960
12961diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S
12962index fb56855..e23914f 100644
12963--- a/arch/x86/crypto/sha512-ssse3-asm.S
12964+++ b/arch/x86/crypto/sha512-ssse3-asm.S
12965@@ -48,6 +48,7 @@
12966 ########################################################################
12967
12968 #include <linux/linkage.h>
12969+#include <asm/alternative-asm.h>
12970
12971 .text
12972
12973@@ -363,6 +364,7 @@ updateblock:
12974 mov frame_RSPSAVE(%rsp), %rsp
12975
12976 nowork:
12977+ pax_force_retaddr 0, 1
12978 ret
12979 ENDPROC(sha512_transform_ssse3)
12980
12981diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
12982index 0505813..63b1d00 100644
12983--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
12984+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
12985@@ -24,6 +24,7 @@
12986 */
12987
12988 #include <linux/linkage.h>
12989+#include <asm/alternative-asm.h>
12990 #include "glue_helper-asm-avx.S"
12991
12992 .file "twofish-avx-x86_64-asm_64.S"
12993@@ -284,6 +285,7 @@ __twofish_enc_blk8:
12994 outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
12995 outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
12996
12997+ pax_force_retaddr 0, 1
12998 ret;
12999 ENDPROC(__twofish_enc_blk8)
13000
13001@@ -324,6 +326,7 @@ __twofish_dec_blk8:
13002 outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
13003 outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
13004
13005+ pax_force_retaddr 0, 1
13006 ret;
13007 ENDPROC(__twofish_dec_blk8)
13008
13009@@ -342,6 +345,7 @@ ENTRY(twofish_ecb_enc_8way)
13010
13011 store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
13012
13013+ pax_force_retaddr 0, 1
13014 ret;
13015 ENDPROC(twofish_ecb_enc_8way)
13016
13017@@ -360,6 +364,7 @@ ENTRY(twofish_ecb_dec_8way)
13018
13019 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13020
13021+ pax_force_retaddr 0, 1
13022 ret;
13023 ENDPROC(twofish_ecb_dec_8way)
13024
13025@@ -383,6 +388,7 @@ ENTRY(twofish_cbc_dec_8way)
13026
13027 popq %r12;
13028
13029+ pax_force_retaddr 0, 1
13030 ret;
13031 ENDPROC(twofish_cbc_dec_8way)
13032
13033@@ -408,6 +414,7 @@ ENTRY(twofish_ctr_8way)
13034
13035 popq %r12;
13036
13037+ pax_force_retaddr 0, 1
13038 ret;
13039 ENDPROC(twofish_ctr_8way)
13040
13041@@ -430,6 +437,7 @@ ENTRY(twofish_xts_enc_8way)
13042 /* dst <= regs xor IVs(in dst) */
13043 store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
13044
13045+ pax_force_retaddr 0, 1
13046 ret;
13047 ENDPROC(twofish_xts_enc_8way)
13048
13049@@ -452,5 +460,6 @@ ENTRY(twofish_xts_dec_8way)
13050 /* dst <= regs xor IVs(in dst) */
13051 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13052
13053+ pax_force_retaddr 0, 1
13054 ret;
13055 ENDPROC(twofish_xts_dec_8way)
13056diff --git a/arch/x86/crypto/twofish-avx2-asm_64.S b/arch/x86/crypto/twofish-avx2-asm_64.S
13057index e1a83b9..33006b9 100644
13058--- a/arch/x86/crypto/twofish-avx2-asm_64.S
13059+++ b/arch/x86/crypto/twofish-avx2-asm_64.S
13060@@ -11,6 +11,7 @@
13061 */
13062
13063 #include <linux/linkage.h>
13064+#include <asm/alternative-asm.h>
13065 #include "glue_helper-asm-avx2.S"
13066
13067 .file "twofish-avx2-asm_64.S"
13068@@ -422,6 +423,7 @@ __twofish_enc_blk16:
13069 outunpack_enc16(RA, RB, RC, RD);
13070 write_blocks16(RA, RB, RC, RD);
13071
13072+ pax_force_retaddr_bts
13073 ret;
13074 ENDPROC(__twofish_enc_blk16)
13075
13076@@ -454,6 +456,7 @@ __twofish_dec_blk16:
13077 outunpack_dec16(RA, RB, RC, RD);
13078 write_blocks16(RA, RB, RC, RD);
13079
13080+ pax_force_retaddr_bts
13081 ret;
13082 ENDPROC(__twofish_dec_blk16)
13083
13084@@ -476,6 +479,7 @@ ENTRY(twofish_ecb_enc_16way)
13085 popq %r12;
13086 vzeroupper;
13087
13088+ pax_force_retaddr 0, 1
13089 ret;
13090 ENDPROC(twofish_ecb_enc_16way)
13091
13092@@ -498,6 +502,7 @@ ENTRY(twofish_ecb_dec_16way)
13093 popq %r12;
13094 vzeroupper;
13095
13096+ pax_force_retaddr 0, 1
13097 ret;
13098 ENDPROC(twofish_ecb_dec_16way)
13099
13100@@ -521,6 +526,7 @@ ENTRY(twofish_cbc_dec_16way)
13101 popq %r12;
13102 vzeroupper;
13103
13104+ pax_force_retaddr 0, 1
13105 ret;
13106 ENDPROC(twofish_cbc_dec_16way)
13107
13108@@ -546,6 +552,7 @@ ENTRY(twofish_ctr_16way)
13109 popq %r12;
13110 vzeroupper;
13111
13112+ pax_force_retaddr 0, 1
13113 ret;
13114 ENDPROC(twofish_ctr_16way)
13115
13116@@ -574,6 +581,7 @@ twofish_xts_crypt_16way:
13117 popq %r12;
13118 vzeroupper;
13119
13120+ pax_force_retaddr 0, 1
13121 ret;
13122 ENDPROC(twofish_xts_crypt_16way)
13123
13124diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
13125index 1c3b7ce..b365c5e 100644
13126--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
13127+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
13128@@ -21,6 +21,7 @@
13129 */
13130
13131 #include <linux/linkage.h>
13132+#include <asm/alternative-asm.h>
13133
13134 .file "twofish-x86_64-asm-3way.S"
13135 .text
13136@@ -258,6 +259,7 @@ ENTRY(__twofish_enc_blk_3way)
13137 popq %r13;
13138 popq %r14;
13139 popq %r15;
13140+ pax_force_retaddr 0, 1
13141 ret;
13142
13143 .L__enc_xor3:
13144@@ -269,6 +271,7 @@ ENTRY(__twofish_enc_blk_3way)
13145 popq %r13;
13146 popq %r14;
13147 popq %r15;
13148+ pax_force_retaddr 0, 1
13149 ret;
13150 ENDPROC(__twofish_enc_blk_3way)
13151
13152@@ -308,5 +311,6 @@ ENTRY(twofish_dec_blk_3way)
13153 popq %r13;
13154 popq %r14;
13155 popq %r15;
13156+ pax_force_retaddr 0, 1
13157 ret;
13158 ENDPROC(twofish_dec_blk_3way)
13159diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
13160index a039d21..29e7615 100644
13161--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
13162+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
13163@@ -22,6 +22,7 @@
13164
13165 #include <linux/linkage.h>
13166 #include <asm/asm-offsets.h>
13167+#include <asm/alternative-asm.h>
13168
13169 #define a_offset 0
13170 #define b_offset 4
13171@@ -265,6 +266,7 @@ ENTRY(twofish_enc_blk)
13172
13173 popq R1
13174 movq $1,%rax
13175+ pax_force_retaddr 0, 1
13176 ret
13177 ENDPROC(twofish_enc_blk)
13178
13179@@ -317,5 +319,6 @@ ENTRY(twofish_dec_blk)
13180
13181 popq R1
13182 movq $1,%rax
13183+ pax_force_retaddr 0, 1
13184 ret
13185 ENDPROC(twofish_dec_blk)
13186diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
13187index 52ff81c..98af645 100644
13188--- a/arch/x86/ia32/ia32_aout.c
13189+++ b/arch/x86/ia32/ia32_aout.c
13190@@ -159,6 +159,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
13191 unsigned long dump_start, dump_size;
13192 struct user32 dump;
13193
13194+ memset(&dump, 0, sizeof(dump));
13195+
13196 fs = get_fs();
13197 set_fs(KERNEL_DS);
13198 has_dumped = 1;
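
[annotation] The added memset is an infoleak fix: dump is a large on-stack struct whose untouched members and padding bytes would otherwise be written to the core file verbatim, leaking stale kernel stack contents. A user-space sketch of the leak class (struct and values are illustrative):

#include <stdio.h>
#include <string.h>

struct dump_like {
        char magic;     /* 7 bytes of padding follow on LP64 */
        long pc;
};

static void fill(struct dump_like *d)
{
        memset(d, 0, sizeof(*d));       /* the grsecurity-style fix; without
                                           it the padding bytes keep whatever
                                           was on the stack */
        d->magic = 'u';
        d->pc = 0x1234;
}

int main(void)
{
        struct dump_like d;

        fill(&d);
        for (size_t i = 0; i < sizeof(d); i++)  /* what a dump would copy */
                printf("%02x ", ((unsigned char *)&d)[i]);
        putchar('\n');
        return 0;
}
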
13199diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
13200index cf1a471..5ba2673 100644
13201--- a/arch/x86/ia32/ia32_signal.c
13202+++ b/arch/x86/ia32/ia32_signal.c
13203@@ -340,7 +340,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
13204 sp -= frame_size;
13205 /* Align the stack pointer according to the i386 ABI,
13206 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
13207- sp = ((sp + 4) & -16ul) - 4;
13208+ sp = ((sp - 12) & -16ul) - 4;
13209 return (void __user *) sp;
13210 }
13211
13212@@ -398,7 +398,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
13213 * These are actually not used anymore, but left because some
13214 * gdb versions depend on them as a marker.
13215 */
13216- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
13217+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
13218 } put_user_catch(err);
13219
13220 if (err)
13221@@ -440,7 +440,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
13222 0xb8,
13223 __NR_ia32_rt_sigreturn,
13224 0x80cd,
13225- 0,
13226+ 0
13227 };
13228
13229 frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate);
13230@@ -459,20 +459,22 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
13231 else
13232 put_user_ex(0, &frame->uc.uc_flags);
13233 put_user_ex(0, &frame->uc.uc_link);
13234- err |= __compat_save_altstack(&frame->uc.uc_stack, regs->sp);
13235+ __compat_save_altstack_ex(&frame->uc.uc_stack, regs->sp);
13236
13237 if (ksig->ka.sa.sa_flags & SA_RESTORER)
13238 restorer = ksig->ka.sa.sa_restorer;
13239+ else if (current->mm->context.vdso)
13240+ /* Return stub is in 32bit vsyscall page */
13241+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
13242 else
13243- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
13244- rt_sigreturn);
13245+ restorer = &frame->retcode;
13246 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
13247
13248 /*
13249 * Not actually used anymore, but left because some gdb
13250 * versions need it.
13251 */
13252- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
13253+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
13254 } put_user_catch(err);
13255
13256 err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
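
[annotation] The subtlest change above is the get_sigframe() rounding. Both expressions preserve the i386 ABI entry invariant ((sp + 4) & 15) == 0, but algebraically ((sp - 12) & -16ul) == ((sp + 4) & -16ul) - 16 for every sp, so the patched form always settles one full 16-byte slot lower - in particular strictly below the incoming sp, where the old form could land exactly at it. The motivation is my inference; the patch gives no comment. A quick verifier:

#include <assert.h>
#include <stdio.h>

int main(void)
{
        for (unsigned long sp = 0xffffd000UL; sp < 0xffffd040UL; sp++) {
                unsigned long oldsp = ((sp + 4) & -16ul) - 4;
                unsigned long newsp = ((sp - 12) & -16ul) - 4;

                assert(((oldsp + 4) & 15) == 0);        /* ABI kept by both */
                assert(((newsp + 4) & 15) == 0);
                assert(newsp == oldsp - 16);            /* always a slot lower */
                assert(newsp < sp && oldsp <= sp);      /* old can hit sp */
        }
        puts("invariants hold");
        return 0;
}
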
13257diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
13258index 474dc1b..9297c58 100644
13259--- a/arch/x86/ia32/ia32entry.S
13260+++ b/arch/x86/ia32/ia32entry.S
13261@@ -15,8 +15,10 @@
13262 #include <asm/irqflags.h>
13263 #include <asm/asm.h>
13264 #include <asm/smap.h>
13265+#include <asm/pgtable.h>
13266 #include <linux/linkage.h>
13267 #include <linux/err.h>
13268+#include <asm/alternative-asm.h>
13269
13270 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
13271 #include <linux/elf-em.h>
13272@@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit)
13273 ENDPROC(native_irq_enable_sysexit)
13274 #endif
13275
13276+ .macro pax_enter_kernel_user
13277+ pax_set_fptr_mask
13278+#ifdef CONFIG_PAX_MEMORY_UDEREF
13279+ call pax_enter_kernel_user
13280+#endif
13281+ .endm
13282+
13283+ .macro pax_exit_kernel_user
13284+#ifdef CONFIG_PAX_MEMORY_UDEREF
13285+ call pax_exit_kernel_user
13286+#endif
13287+#ifdef CONFIG_PAX_RANDKSTACK
13288+ pushq %rax
13289+ pushq %r11
13290+ call pax_randomize_kstack
13291+ popq %r11
13292+ popq %rax
13293+#endif
13294+ .endm
13295+
13296+ .macro pax_erase_kstack
13297+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13298+ call pax_erase_kstack
13299+#endif
13300+ .endm
13301+
13302 /*
13303 * 32bit SYSENTER instruction entry.
13304 *
13305@@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target)
13306 CFI_REGISTER rsp,rbp
13307 SWAPGS_UNSAFE_STACK
13308 movq PER_CPU_VAR(kernel_stack), %rsp
13309- addq $(KERNEL_STACK_OFFSET),%rsp
13310- /*
13311- * No need to follow this irqs on/off section: the syscall
13312- * disabled irqs, here we enable it straight after entry:
13313- */
13314- ENABLE_INTERRUPTS(CLBR_NONE)
13315 movl %ebp,%ebp /* zero extension */
13316 pushq_cfi $__USER32_DS
13317 /*CFI_REL_OFFSET ss,0*/
13318@@ -135,24 +157,49 @@ ENTRY(ia32_sysenter_target)
13319 CFI_REL_OFFSET rsp,0
13320 pushfq_cfi
13321 /*CFI_REL_OFFSET rflags,0*/
13322- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
13323- CFI_REGISTER rip,r10
13324+ orl $X86_EFLAGS_IF,(%rsp)
13325+ GET_THREAD_INFO(%r11)
13326+ movl TI_sysenter_return(%r11), %r11d
13327+ CFI_REGISTER rip,r11
13328 pushq_cfi $__USER32_CS
13329 /*CFI_REL_OFFSET cs,0*/
13330 movl %eax, %eax
13331- pushq_cfi %r10
13332+ pushq_cfi %r11
13333 CFI_REL_OFFSET rip,0
13334 pushq_cfi %rax
13335 cld
13336 SAVE_ARGS 0,1,0
13337+ pax_enter_kernel_user
13338+
13339+#ifdef CONFIG_PAX_RANDKSTACK
13340+ pax_erase_kstack
13341+#endif
13342+
13343+ /*
13344+ * No need to follow this irqs on/off section: the syscall
13345+ * disabled irqs, here we enable it straight after entry:
13346+ */
13347+ ENABLE_INTERRUPTS(CLBR_NONE)
13348 /* no need to do an access_ok check here because rbp has been
13349 32bit zero extended */
13350+
13351+#ifdef CONFIG_PAX_MEMORY_UDEREF
13352+ addq pax_user_shadow_base,%rbp
13353+ ASM_PAX_OPEN_USERLAND
13354+#endif
13355+
13356 ASM_STAC
13357 1: movl (%rbp),%ebp
13358 _ASM_EXTABLE(1b,ia32_badarg)
13359 ASM_CLAC
13360- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13361- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13362+
13363+#ifdef CONFIG_PAX_MEMORY_UDEREF
13364+ ASM_PAX_CLOSE_USERLAND
13365+#endif
13366+
13367+ GET_THREAD_INFO(%r11)
13368+ orl $TS_COMPAT,TI_status(%r11)
13369+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
13370 CFI_REMEMBER_STATE
13371 jnz sysenter_tracesys
13372 cmpq $(IA32_NR_syscalls-1),%rax
13373@@ -162,12 +209,15 @@ sysenter_do_call:
13374 sysenter_dispatch:
13375 call *ia32_sys_call_table(,%rax,8)
13376 movq %rax,RAX-ARGOFFSET(%rsp)
13377+ GET_THREAD_INFO(%r11)
13378 DISABLE_INTERRUPTS(CLBR_NONE)
13379 TRACE_IRQS_OFF
13380- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13381+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
13382 jnz sysexit_audit
13383 sysexit_from_sys_call:
13384- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13385+ pax_exit_kernel_user
13386+ pax_erase_kstack
13387+ andl $~TS_COMPAT,TI_status(%r11)
13388 /* clear IF, that popfq doesn't enable interrupts early */
13389 andl $~0x200,EFLAGS-R11(%rsp)
13390 movl RIP-R11(%rsp),%edx /* User %eip */
13391@@ -193,6 +243,9 @@ sysexit_from_sys_call:
13392 movl %eax,%esi /* 2nd arg: syscall number */
13393 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
13394 call __audit_syscall_entry
13395+
13396+ pax_erase_kstack
13397+
13398 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
13399 cmpq $(IA32_NR_syscalls-1),%rax
13400 ja ia32_badsys
13401@@ -204,7 +257,7 @@ sysexit_from_sys_call:
13402 .endm
13403
13404 .macro auditsys_exit exit
13405- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13406+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
13407 jnz ia32_ret_from_sys_call
13408 TRACE_IRQS_ON
13409 ENABLE_INTERRUPTS(CLBR_NONE)
13410@@ -215,11 +268,12 @@ sysexit_from_sys_call:
13411 1: setbe %al /* 1 if error, 0 if not */
13412 movzbl %al,%edi /* zero-extend that into %edi */
13413 call __audit_syscall_exit
13414+ GET_THREAD_INFO(%r11)
13415 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
13416 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
13417 DISABLE_INTERRUPTS(CLBR_NONE)
13418 TRACE_IRQS_OFF
13419- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13420+ testl %edi,TI_flags(%r11)
13421 jz \exit
13422 CLEAR_RREGS -ARGOFFSET
13423 jmp int_with_check
13424@@ -237,7 +291,7 @@ sysexit_audit:
13425
13426 sysenter_tracesys:
13427 #ifdef CONFIG_AUDITSYSCALL
13428- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13429+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
13430 jz sysenter_auditsys
13431 #endif
13432 SAVE_REST
13433@@ -249,6 +303,9 @@ sysenter_tracesys:
13434 RESTORE_REST
13435 cmpq $(IA32_NR_syscalls-1),%rax
13436 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
13437+
13438+ pax_erase_kstack
13439+
13440 jmp sysenter_do_call
13441 CFI_ENDPROC
13442 ENDPROC(ia32_sysenter_target)
13443@@ -276,19 +333,25 @@ ENDPROC(ia32_sysenter_target)
13444 ENTRY(ia32_cstar_target)
13445 CFI_STARTPROC32 simple
13446 CFI_SIGNAL_FRAME
13447- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
13448+ CFI_DEF_CFA rsp,0
13449 CFI_REGISTER rip,rcx
13450 /*CFI_REGISTER rflags,r11*/
13451 SWAPGS_UNSAFE_STACK
13452 movl %esp,%r8d
13453 CFI_REGISTER rsp,r8
13454 movq PER_CPU_VAR(kernel_stack),%rsp
13455+ SAVE_ARGS 8*6,0,0
13456+ pax_enter_kernel_user
13457+
13458+#ifdef CONFIG_PAX_RANDKSTACK
13459+ pax_erase_kstack
13460+#endif
13461+
13462 /*
13463 * No need to follow this irqs on/off section: the syscall
13464 * disabled irqs and here we enable it straight after entry:
13465 */
13466 ENABLE_INTERRUPTS(CLBR_NONE)
13467- SAVE_ARGS 8,0,0
13468 movl %eax,%eax /* zero extension */
13469 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
13470 movq %rcx,RIP-ARGOFFSET(%rsp)
13471@@ -304,12 +367,25 @@ ENTRY(ia32_cstar_target)
13472 /* no need to do an access_ok check here because r8 has been
13473 32bit zero extended */
13474 /* hardware stack frame is complete now */
13475+
13476+#ifdef CONFIG_PAX_MEMORY_UDEREF
13477+ ASM_PAX_OPEN_USERLAND
13478+ movq pax_user_shadow_base,%r8
13479+ addq RSP-ARGOFFSET(%rsp),%r8
13480+#endif
13481+
13482 ASM_STAC
13483 1: movl (%r8),%r9d
13484 _ASM_EXTABLE(1b,ia32_badarg)
13485 ASM_CLAC
13486- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13487- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13488+
13489+#ifdef CONFIG_PAX_MEMORY_UDEREF
13490+ ASM_PAX_CLOSE_USERLAND
13491+#endif
13492+
13493+ GET_THREAD_INFO(%r11)
13494+ orl $TS_COMPAT,TI_status(%r11)
13495+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
13496 CFI_REMEMBER_STATE
13497 jnz cstar_tracesys
13498 cmpq $IA32_NR_syscalls-1,%rax
13499@@ -319,12 +395,15 @@ cstar_do_call:
13500 cstar_dispatch:
13501 call *ia32_sys_call_table(,%rax,8)
13502 movq %rax,RAX-ARGOFFSET(%rsp)
13503+ GET_THREAD_INFO(%r11)
13504 DISABLE_INTERRUPTS(CLBR_NONE)
13505 TRACE_IRQS_OFF
13506- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13507+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
13508 jnz sysretl_audit
13509 sysretl_from_sys_call:
13510- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13511+ pax_exit_kernel_user
13512+ pax_erase_kstack
13513+ andl $~TS_COMPAT,TI_status(%r11)
13514 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
13515 movl RIP-ARGOFFSET(%rsp),%ecx
13516 CFI_REGISTER rip,rcx
13517@@ -352,7 +431,7 @@ sysretl_audit:
13518
13519 cstar_tracesys:
13520 #ifdef CONFIG_AUDITSYSCALL
13521- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13522+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
13523 jz cstar_auditsys
13524 #endif
13525 xchgl %r9d,%ebp
13526@@ -366,11 +445,19 @@ cstar_tracesys:
13527 xchgl %ebp,%r9d
13528 cmpq $(IA32_NR_syscalls-1),%rax
13529 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
13530+
13531+ pax_erase_kstack
13532+
13533 jmp cstar_do_call
13534 END(ia32_cstar_target)
13535
13536 ia32_badarg:
13537 ASM_CLAC
13538+
13539+#ifdef CONFIG_PAX_MEMORY_UDEREF
13540+ ASM_PAX_CLOSE_USERLAND
13541+#endif
13542+
13543 movq $-EFAULT,%rax
13544 jmp ia32_sysret
13545 CFI_ENDPROC
13546@@ -407,19 +494,26 @@ ENTRY(ia32_syscall)
13547 CFI_REL_OFFSET rip,RIP-RIP
13548 PARAVIRT_ADJUST_EXCEPTION_FRAME
13549 SWAPGS
13550- /*
13551- * No need to follow this irqs on/off section: the syscall
13552- * disabled irqs and here we enable it straight after entry:
13553- */
13554- ENABLE_INTERRUPTS(CLBR_NONE)
13555 movl %eax,%eax
13556 pushq_cfi %rax
13557 cld
13558 /* note the registers are not zero extended to the sf.
13559 this could be a problem. */
13560 SAVE_ARGS 0,1,0
13561- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13562- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13563+ pax_enter_kernel_user
13564+
13565+#ifdef CONFIG_PAX_RANDKSTACK
13566+ pax_erase_kstack
13567+#endif
13568+
13569+ /*
13570+ * No need to follow this irqs on/off section: the syscall
13571+ * disabled irqs and here we enable it straight after entry:
13572+ */
13573+ ENABLE_INTERRUPTS(CLBR_NONE)
13574+ GET_THREAD_INFO(%r11)
13575+ orl $TS_COMPAT,TI_status(%r11)
13576+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
13577 jnz ia32_tracesys
13578 cmpq $(IA32_NR_syscalls-1),%rax
13579 ja ia32_badsys
13580@@ -442,6 +536,9 @@ ia32_tracesys:
13581 RESTORE_REST
13582 cmpq $(IA32_NR_syscalls-1),%rax
13583 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
13584+
13585+ pax_erase_kstack
13586+
13587 jmp ia32_do_call
13588 END(ia32_syscall)
13589
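
[annotation] The ia32entry.S changes wire three PaX features into every 32-bit syscall: UDEREF opens and closes the userland shadow mapping around user accesses, RANDKSTACK re-randomizes the kernel stack pointer on exit, and pax_erase_kstack scrubs the stack so a later uninitialized-read bug can only leak poison. (The TI_flags accesses also move from THREAD_INFO(%rsp,...) arithmetic to GET_THREAD_INFO(%r11), since the stack-offset assumptions change.) A toy model of the stack-erase idea, not the kernel implementation:

#include <stdio.h>

#define STACK_WORDS 16
#define POISON 0xdeadbeefUL

static unsigned long kstack[STACK_WORDS];       /* stand-in kernel stack */

static void syscall_a(void)
{
        kstack[3] = 0x5ec2e75UL;        /* "secret" left behind on the stack */
}

static unsigned long syscall_b(void)
{
        return kstack[3];               /* bug: reads a stale stack slot */
}

static void erase_kstack(void)
{
        for (int i = 0; i < STACK_WORDS; i++)
                kstack[i] = POISON;
}

int main(void)
{
        syscall_a();
        printf("no erase:   %#lx\n", syscall_b());      /* leaks the secret */
        syscall_a();
        erase_kstack();
        printf("with erase: %#lx\n", syscall_b());      /* only poison */
        return 0;
}
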
13590diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
13591index 8e0ceec..af13504 100644
13592--- a/arch/x86/ia32/sys_ia32.c
13593+++ b/arch/x86/ia32/sys_ia32.c
13594@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
13595 */
13596 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
13597 {
13598- typeof(ubuf->st_uid) uid = 0;
13599- typeof(ubuf->st_gid) gid = 0;
13600+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
13601+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
13602 SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
13603 SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
13604 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
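
[annotation] typeof's operand is never evaluated, so both spellings of the cp_stat64() declarations are equivalent at runtime; going through a null struct pointer (the offsetof idiom) merely avoids writing a dereference of the __user pointer ubuf, which I take to be for the benefit of the patch's static checking. A demonstration that the idiom is well-formed (typeof is a GCC/Clang extension):

#include <stdio.h>

struct stat64_like {
        unsigned int st_uid;
        unsigned int st_gid;
};

int main(void)
{
        /* Unevaluated operand: no object is dereferenced here. */
        typeof(((struct stat64_like *)0)->st_uid) uid = 0;

        printf("sizeof(uid) = %zu\n", sizeof(uid));     /* 4 */
        return (int)uid;
}
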
13605diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
13606index 372231c..a5aa1a1 100644
13607--- a/arch/x86/include/asm/alternative-asm.h
13608+++ b/arch/x86/include/asm/alternative-asm.h
13609@@ -18,6 +18,45 @@
13610 .endm
13611 #endif
13612
13613+#ifdef KERNEXEC_PLUGIN
13614+ .macro pax_force_retaddr_bts rip=0
13615+ btsq $63,\rip(%rsp)
13616+ .endm
13617+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
13618+ .macro pax_force_retaddr rip=0, reload=0
13619+ btsq $63,\rip(%rsp)
13620+ .endm
13621+ .macro pax_force_fptr ptr
13622+ btsq $63,\ptr
13623+ .endm
13624+ .macro pax_set_fptr_mask
13625+ .endm
13626+#endif
13627+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
13628+ .macro pax_force_retaddr rip=0, reload=0
13629+ .if \reload
13630+ pax_set_fptr_mask
13631+ .endif
13632+ orq %r10,\rip(%rsp)
13633+ .endm
13634+ .macro pax_force_fptr ptr
13635+ orq %r10,\ptr
13636+ .endm
13637+ .macro pax_set_fptr_mask
13638+ movabs $0x8000000000000000,%r10
13639+ .endm
13640+#endif
13641+#else
13642+ .macro pax_force_retaddr rip=0, reload=0
13643+ .endm
13644+ .macro pax_force_fptr ptr
13645+ .endm
13646+ .macro pax_force_retaddr_bts rip=0
13647+ .endm
13648+ .macro pax_set_fptr_mask
13649+ .endm
13650+#endif
13651+
13652 .macro altinstruction_entry orig alt feature orig_len alt_len
13653 .long \orig - .
13654 .long \alt - .
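The new pax_force_retaddr/pax_force_fptr macros implement the KERNEXEC plugin's return-address and function-pointer masking: setting bit 63 turns any userland address (bits 63..47 clear) into a non-canonical one, so a hijacked return or indirect call through such a value faults instead of executing attacker-controlled userland code, while kernel-text addresses already have bit 63 set and pass through unchanged. Two encodings are offered: btsq $63 (no scratch register) and orq %r10 with the mask preloaded once by pax_set_fptr_mask. The effect, modeled in C (illustrative only):

#include <stdint.h>

#define PAX_RETADDR_MASK (1ULL << 63)   /* the movabs $0x8000000000000000 */

/* OR-ing bit 63 in leaves a kernel address intact and makes a smuggled
 * userland address non-canonical, so dereferencing/jumping to it faults. */
static inline uint64_t pax_mask_addr(uint64_t addr)
{
        return addr | PAX_RETADDR_MASK; /* btsq $63 / orq %r10 equivalent */
}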
13655diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
13656index 58ed6d9..f1cbe58 100644
13657--- a/arch/x86/include/asm/alternative.h
13658+++ b/arch/x86/include/asm/alternative.h
13659@@ -105,7 +105,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
13660 ".pushsection .discard,\"aw\",@progbits\n" \
13661 DISCARD_ENTRY(1) \
13662 ".popsection\n" \
13663- ".pushsection .altinstr_replacement, \"ax\"\n" \
13664+ ".pushsection .altinstr_replacement, \"a\"\n" \
13665 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
13666 ".popsection"
13667
13668@@ -119,7 +119,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
13669 DISCARD_ENTRY(1) \
13670 DISCARD_ENTRY(2) \
13671 ".popsection\n" \
13672- ".pushsection .altinstr_replacement, \"ax\"\n" \
13673+ ".pushsection .altinstr_replacement, \"a\"\n" \
13674 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
13675 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
13676 ".popsection"
13677diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
13678index 3388034..050f0b9 100644
13679--- a/arch/x86/include/asm/apic.h
13680+++ b/arch/x86/include/asm/apic.h
13681@@ -44,7 +44,7 @@ static inline void generic_apic_probe(void)
13682
13683 #ifdef CONFIG_X86_LOCAL_APIC
13684
13685-extern unsigned int apic_verbosity;
13686+extern int apic_verbosity;
13687 extern int local_apic_timer_c2_ok;
13688
13689 extern int disable_apic;
13690diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
13691index 20370c6..a2eb9b0 100644
13692--- a/arch/x86/include/asm/apm.h
13693+++ b/arch/x86/include/asm/apm.h
13694@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
13695 __asm__ __volatile__(APM_DO_ZERO_SEGS
13696 "pushl %%edi\n\t"
13697 "pushl %%ebp\n\t"
13698- "lcall *%%cs:apm_bios_entry\n\t"
13699+ "lcall *%%ss:apm_bios_entry\n\t"
13700 "setc %%al\n\t"
13701 "popl %%ebp\n\t"
13702 "popl %%edi\n\t"
13703@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
13704 __asm__ __volatile__(APM_DO_ZERO_SEGS
13705 "pushl %%edi\n\t"
13706 "pushl %%ebp\n\t"
13707- "lcall *%%cs:apm_bios_entry\n\t"
13708+ "lcall *%%ss:apm_bios_entry\n\t"
13709 "setc %%bl\n\t"
13710 "popl %%ebp\n\t"
13711 "popl %%edi\n\t"
13712diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
13713index 722aa3b..3a0bb27 100644
13714--- a/arch/x86/include/asm/atomic.h
13715+++ b/arch/x86/include/asm/atomic.h
13716@@ -22,7 +22,18 @@
13717 */
13718 static inline int atomic_read(const atomic_t *v)
13719 {
13720- return (*(volatile int *)&(v)->counter);
13721+ return (*(volatile const int *)&(v)->counter);
13722+}
13723+
13724+/**
13725+ * atomic_read_unchecked - read atomic variable
13726+ * @v: pointer of type atomic_unchecked_t
13727+ *
13728+ * Atomically reads the value of @v.
13729+ */
13730+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
13731+{
13732+ return (*(volatile const int *)&(v)->counter);
13733 }
13734
13735 /**
13736@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
13737 }
13738
13739 /**
13740+ * atomic_set_unchecked - set atomic variable
13741+ * @v: pointer of type atomic_unchecked_t
13742+ * @i: required value
13743+ *
13744+ * Atomically sets the value of @v to @i.
13745+ */
13746+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
13747+{
13748+ v->counter = i;
13749+}
13750+
13751+/**
13752 * atomic_add - add integer to atomic variable
13753 * @i: integer value to add
13754 * @v: pointer of type atomic_t
13755@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
13756 */
13757 static inline void atomic_add(int i, atomic_t *v)
13758 {
13759- asm volatile(LOCK_PREFIX "addl %1,%0"
13760+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
13761+
13762+#ifdef CONFIG_PAX_REFCOUNT
13763+ "jno 0f\n"
13764+ LOCK_PREFIX "subl %1,%0\n"
13765+ "int $4\n0:\n"
13766+ _ASM_EXTABLE(0b, 0b)
13767+#endif
13768+
13769+ : "+m" (v->counter)
13770+ : "ir" (i));
13771+}
13772+
13773+/**
13774+ * atomic_add_unchecked - add integer to atomic variable
13775+ * @i: integer value to add
13776+ * @v: pointer of type atomic_unchecked_t
13777+ *
13778+ * Atomically adds @i to @v.
13779+ */
13780+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
13781+{
13782+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
13783 : "+m" (v->counter)
13784 : "ir" (i));
13785 }
13786@@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
13787 */
13788 static inline void atomic_sub(int i, atomic_t *v)
13789 {
13790- asm volatile(LOCK_PREFIX "subl %1,%0"
13791+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
13792+
13793+#ifdef CONFIG_PAX_REFCOUNT
13794+ "jno 0f\n"
13795+ LOCK_PREFIX "addl %1,%0\n"
13796+ "int $4\n0:\n"
13797+ _ASM_EXTABLE(0b, 0b)
13798+#endif
13799+
13800+ : "+m" (v->counter)
13801+ : "ir" (i));
13802+}
13803+
13804+/**
13805+ * atomic_sub_unchecked - subtract integer from atomic variable
13806+ * @i: integer value to subtract
13807+ * @v: pointer of type atomic_unchecked_t
13808+ *
13809+ * Atomically subtracts @i from @v.
13810+ */
13811+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
13812+{
13813+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
13814 : "+m" (v->counter)
13815 : "ir" (i));
13816 }
13817@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
13818 {
13819 unsigned char c;
13820
13821- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
13822+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
13823+
13824+#ifdef CONFIG_PAX_REFCOUNT
13825+ "jno 0f\n"
13826+ LOCK_PREFIX "addl %2,%0\n"
13827+ "int $4\n0:\n"
13828+ _ASM_EXTABLE(0b, 0b)
13829+#endif
13830+
13831+ "sete %1\n"
13832 : "+m" (v->counter), "=qm" (c)
13833 : "ir" (i) : "memory");
13834 return c;
13835@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
13836 */
13837 static inline void atomic_inc(atomic_t *v)
13838 {
13839- asm volatile(LOCK_PREFIX "incl %0"
13840+ asm volatile(LOCK_PREFIX "incl %0\n"
13841+
13842+#ifdef CONFIG_PAX_REFCOUNT
13843+ "jno 0f\n"
13844+ LOCK_PREFIX "decl %0\n"
13845+ "int $4\n0:\n"
13846+ _ASM_EXTABLE(0b, 0b)
13847+#endif
13848+
13849+ : "+m" (v->counter));
13850+}
13851+
13852+/**
13853+ * atomic_inc_unchecked - increment atomic variable
13854+ * @v: pointer of type atomic_unchecked_t
13855+ *
13856+ * Atomically increments @v by 1.
13857+ */
13858+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
13859+{
13860+ asm volatile(LOCK_PREFIX "incl %0\n"
13861 : "+m" (v->counter));
13862 }
13863
13864@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
13865 */
13866 static inline void atomic_dec(atomic_t *v)
13867 {
13868- asm volatile(LOCK_PREFIX "decl %0"
13869+ asm volatile(LOCK_PREFIX "decl %0\n"
13870+
13871+#ifdef CONFIG_PAX_REFCOUNT
13872+ "jno 0f\n"
13873+ LOCK_PREFIX "incl %0\n"
13874+ "int $4\n0:\n"
13875+ _ASM_EXTABLE(0b, 0b)
13876+#endif
13877+
13878+ : "+m" (v->counter));
13879+}
13880+
13881+/**
13882+ * atomic_dec_unchecked - decrement atomic variable
13883+ * @v: pointer of type atomic_unchecked_t
13884+ *
13885+ * Atomically decrements @v by 1.
13886+ */
13887+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
13888+{
13889+ asm volatile(LOCK_PREFIX "decl %0\n"
13890 : "+m" (v->counter));
13891 }
13892
13893@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
13894 {
13895 unsigned char c;
13896
13897- asm volatile(LOCK_PREFIX "decl %0; sete %1"
13898+ asm volatile(LOCK_PREFIX "decl %0\n"
13899+
13900+#ifdef CONFIG_PAX_REFCOUNT
13901+ "jno 0f\n"
13902+ LOCK_PREFIX "incl %0\n"
13903+ "int $4\n0:\n"
13904+ _ASM_EXTABLE(0b, 0b)
13905+#endif
13906+
13907+ "sete %1\n"
13908 : "+m" (v->counter), "=qm" (c)
13909 : : "memory");
13910 return c != 0;
13911@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
13912 {
13913 unsigned char c;
13914
13915- asm volatile(LOCK_PREFIX "incl %0; sete %1"
13916+ asm volatile(LOCK_PREFIX "incl %0\n"
13917+
13918+#ifdef CONFIG_PAX_REFCOUNT
13919+ "jno 0f\n"
13920+ LOCK_PREFIX "decl %0\n"
13921+ "int $4\n0:\n"
13922+ _ASM_EXTABLE(0b, 0b)
13923+#endif
13924+
13925+ "sete %1\n"
13926+ : "+m" (v->counter), "=qm" (c)
13927+ : : "memory");
13928+ return c != 0;
13929+}
13930+
13931+/**
13932+ * atomic_inc_and_test_unchecked - increment and test
13933+ * @v: pointer of type atomic_unchecked_t
13934+ *
13935+ * Atomically increments @v by 1
13936+ * and returns true if the result is zero, or false for all
13937+ * other cases.
13938+ */
13939+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
13940+{
13941+ unsigned char c;
13942+
13943+ asm volatile(LOCK_PREFIX "incl %0\n"
13944+ "sete %1\n"
13945 : "+m" (v->counter), "=qm" (c)
13946 : : "memory");
13947 return c != 0;
13948@@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
13949 {
13950 unsigned char c;
13951
13952- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
13953+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
13954+
13955+#ifdef CONFIG_PAX_REFCOUNT
13956+ "jno 0f\n"
13957+ LOCK_PREFIX "subl %2,%0\n"
13958+ "int $4\n0:\n"
13959+ _ASM_EXTABLE(0b, 0b)
13960+#endif
13961+
13962+ "sets %1\n"
13963 : "+m" (v->counter), "=qm" (c)
13964 : "ir" (i) : "memory");
13965 return c;
13966@@ -172,6 +334,18 @@ static inline int atomic_add_negative(int i, atomic_t *v)
13967 */
13968 static inline int atomic_add_return(int i, atomic_t *v)
13969 {
13970+ return i + xadd_check_overflow(&v->counter, i);
13971+}
13972+
13973+/**
13974+ * atomic_add_return_unchecked - add integer and return
13975+ * @i: integer value to add
13976+ * @v: pointer of type atomic_unchecked_t
13977+ *
13978+ * Atomically adds @i to @v and returns @i + @v
13979+ */
13980+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
13981+{
13982 return i + xadd(&v->counter, i);
13983 }
13984
13985@@ -188,6 +362,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
13986 }
13987
13988 #define atomic_inc_return(v) (atomic_add_return(1, v))
13989+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
13990+{
13991+ return atomic_add_return_unchecked(1, v);
13992+}
13993 #define atomic_dec_return(v) (atomic_sub_return(1, v))
13994
13995 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
13996@@ -195,11 +373,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
13997 return cmpxchg(&v->counter, old, new);
13998 }
13999
14000+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
14001+{
14002+ return cmpxchg(&v->counter, old, new);
14003+}
14004+
14005 static inline int atomic_xchg(atomic_t *v, int new)
14006 {
14007 return xchg(&v->counter, new);
14008 }
14009
14010+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
14011+{
14012+ return xchg(&v->counter, new);
14013+}
14014+
14015 /**
14016 * __atomic_add_unless - add unless the number is already a given value
14017 * @v: pointer of type atomic_t
14018@@ -211,12 +399,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
14019 */
14020 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
14021 {
14022- int c, old;
14023+ int c, old, new;
14024 c = atomic_read(v);
14025 for (;;) {
14026- if (unlikely(c == (u)))
14027+ if (unlikely(c == u))
14028 break;
14029- old = atomic_cmpxchg((v), c, c + (a));
14030+
14031+ asm volatile("addl %2,%0\n"
14032+
14033+#ifdef CONFIG_PAX_REFCOUNT
14034+ "jno 0f\n"
14035+ "subl %2,%0\n"
14036+ "int $4\n0:\n"
14037+ _ASM_EXTABLE(0b, 0b)
14038+#endif
14039+
14040+ : "=r" (new)
14041+ : "0" (c), "ir" (a));
14042+
14043+ old = atomic_cmpxchg(v, c, new);
14044 if (likely(old == c))
14045 break;
14046 c = old;
14047@@ -225,6 +426,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
14048 }
14049
14050 /**
14051+ * atomic_inc_not_zero_hint - increment if not null
14052+ * @v: pointer of type atomic_t
14053+ * @hint: probable value of the atomic before the increment
14054+ *
14055+ * This version of atomic_inc_not_zero() gives a hint of probable
14056+ * value of the atomic. This helps processor to not read the memory
14057+ * before doing the atomic read/modify/write cycle, lowering
14058+ * number of bus transactions on some arches.
14059+ *
14060+ * Returns: 0 if increment was not done, 1 otherwise.
14061+ */
14062+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
14063+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
14064+{
14065+ int val, c = hint, new;
14066+
14067+ /* sanity test, should be removed by compiler if hint is a constant */
14068+ if (!hint)
14069+ return __atomic_add_unless(v, 1, 0);
14070+
14071+ do {
14072+ asm volatile("incl %0\n"
14073+
14074+#ifdef CONFIG_PAX_REFCOUNT
14075+ "jno 0f\n"
14076+ "decl %0\n"
14077+ "int $4\n0:\n"
14078+ _ASM_EXTABLE(0b, 0b)
14079+#endif
14080+
14081+ : "=r" (new)
14082+ : "0" (c));
14083+
14084+ val = atomic_cmpxchg(v, c, new);
14085+ if (val == c)
14086+ return 1;
14087+ c = val;
14088+ } while (c);
14089+
14090+ return 0;
14091+}
14092+
14093+/**
14094 * atomic_inc_short - increment of a short integer
14095 * @v: pointer to type int
14096 *
14097@@ -253,14 +497,37 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
14098 #endif
14099
14100 /* These are x86-specific, used by some header files */
14101-#define atomic_clear_mask(mask, addr) \
14102- asm volatile(LOCK_PREFIX "andl %0,%1" \
14103- : : "r" (~(mask)), "m" (*(addr)) : "memory")
14104+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
14105+{
14106+ asm volatile(LOCK_PREFIX "andl %1,%0"
14107+ : "+m" (v->counter)
14108+ : "r" (~(mask))
14109+ : "memory");
14110+}
14111
14112-#define atomic_set_mask(mask, addr) \
14113- asm volatile(LOCK_PREFIX "orl %0,%1" \
14114- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
14115- : "memory")
14116+static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
14117+{
14118+ asm volatile(LOCK_PREFIX "andl %1,%0"
14119+ : "+m" (v->counter)
14120+ : "r" (~(mask))
14121+ : "memory");
14122+}
14123+
14124+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
14125+{
14126+ asm volatile(LOCK_PREFIX "orl %1,%0"
14127+ : "+m" (v->counter)
14128+ : "r" (mask)
14129+ : "memory");
14130+}
14131+
14132+static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
14133+{
14134+ asm volatile(LOCK_PREFIX "orl %1,%0"
14135+ : "+m" (v->counter)
14136+ : "r" (mask)
14137+ : "memory");
14138+}
14139
14140 /* Atomic operations are already serializing on x86 */
14141 #define smp_mb__before_atomic_dec() barrier()
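The recurring "jno 0f; <inverse op>; int $4; 0:" sequence throughout this file is the PAX_REFCOUNT overflow check: after each locked arithmetic operation the overflow flag is tested, and on signed overflow the operation is undone and int $4 raises the overflow exception, whose handler can log and kill the offender (the _ASM_EXTABLE(0b, 0b) entry lets execution resume in place). Counters that are allowed to wrap, such as statistics, use the parallel atomic_unchecked_t API, which keeps the plain instruction. A user-space model of both variants, assuming these semantics:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { volatile int counter; } atomic_model_t;

static void atomic_inc_checked(atomic_model_t *v)
{
        int old = __atomic_fetch_add(&v->counter, 1, __ATOMIC_SEQ_CST);

        if (old == INT_MAX) {                    /* increment overflowed */
                __atomic_fetch_sub(&v->counter, 1, __ATOMIC_SEQ_CST); /* undo */
                fprintf(stderr, "refcount overflow\n"); /* int $4 stand-in */
                abort();
        }
}

static void atomic_inc_unchecked_model(atomic_model_t *v)
{
        __atomic_fetch_add(&v->counter, 1, __ATOMIC_SEQ_CST); /* may wrap */
}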
14142diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
14143index b154de7..aadebd8 100644
14144--- a/arch/x86/include/asm/atomic64_32.h
14145+++ b/arch/x86/include/asm/atomic64_32.h
14146@@ -12,6 +12,14 @@ typedef struct {
14147 u64 __aligned(8) counter;
14148 } atomic64_t;
14149
14150+#ifdef CONFIG_PAX_REFCOUNT
14151+typedef struct {
14152+ u64 __aligned(8) counter;
14153+} atomic64_unchecked_t;
14154+#else
14155+typedef atomic64_t atomic64_unchecked_t;
14156+#endif
14157+
14158 #define ATOMIC64_INIT(val) { (val) }
14159
14160 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
14161@@ -37,21 +45,31 @@ typedef struct {
14162 ATOMIC64_DECL_ONE(sym##_386)
14163
14164 ATOMIC64_DECL_ONE(add_386);
14165+ATOMIC64_DECL_ONE(add_unchecked_386);
14166 ATOMIC64_DECL_ONE(sub_386);
14167+ATOMIC64_DECL_ONE(sub_unchecked_386);
14168 ATOMIC64_DECL_ONE(inc_386);
14169+ATOMIC64_DECL_ONE(inc_unchecked_386);
14170 ATOMIC64_DECL_ONE(dec_386);
14171+ATOMIC64_DECL_ONE(dec_unchecked_386);
14172 #endif
14173
14174 #define alternative_atomic64(f, out, in...) \
14175 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
14176
14177 ATOMIC64_DECL(read);
14178+ATOMIC64_DECL(read_unchecked);
14179 ATOMIC64_DECL(set);
14180+ATOMIC64_DECL(set_unchecked);
14181 ATOMIC64_DECL(xchg);
14182 ATOMIC64_DECL(add_return);
14183+ATOMIC64_DECL(add_return_unchecked);
14184 ATOMIC64_DECL(sub_return);
14185+ATOMIC64_DECL(sub_return_unchecked);
14186 ATOMIC64_DECL(inc_return);
14187+ATOMIC64_DECL(inc_return_unchecked);
14188 ATOMIC64_DECL(dec_return);
14189+ATOMIC64_DECL(dec_return_unchecked);
14190 ATOMIC64_DECL(dec_if_positive);
14191 ATOMIC64_DECL(inc_not_zero);
14192 ATOMIC64_DECL(add_unless);
14193@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
14194 }
14195
14196 /**
14197+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
14198+ * @v: pointer to type atomic64_unchecked_t
14199+ * @o: expected value
14200+ * @n: new value
14201+ *
14202+ * Atomically sets @v to @n if it was equal to @o and returns
14203+ * the old value.
14204+ */
14205+
14206+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
14207+{
14208+ return cmpxchg64(&v->counter, o, n);
14209+}
14210+
14211+/**
14212 * atomic64_xchg - xchg atomic64 variable
14213 * @v: pointer to type atomic64_t
14214 * @n: value to assign
14215@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
14216 }
14217
14218 /**
14219+ * atomic64_set_unchecked - set atomic64 variable
14220+ * @v: pointer to type atomic64_unchecked_t
14221+ * @n: value to assign
14222+ *
14223+ * Atomically sets the value of @v to @n.
14224+ */
14225+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
14226+{
14227+ unsigned high = (unsigned)(i >> 32);
14228+ unsigned low = (unsigned)i;
14229+ alternative_atomic64(set, /* no output */,
14230+ "S" (v), "b" (low), "c" (high)
14231+ : "eax", "edx", "memory");
14232+}
14233+
14234+/**
14235 * atomic64_read - read atomic64 variable
14236 * @v: pointer to type atomic64_t
14237 *
14238@@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
14239 }
14240
14241 /**
14242+ * atomic64_read_unchecked - read atomic64 variable
14243+ * @v: pointer to type atomic64_unchecked_t
14244+ *
14245+ * Atomically reads the value of @v and returns it.
14246+ */
14247+static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
14248+{
14249+ long long r;
14250+ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
14251+ return r;
14252+ }
14253+
14254+/**
14255 * atomic64_add_return - add and return
14256 * @i: integer value to add
14257 * @v: pointer to type atomic64_t
14258@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
14259 return i;
14260 }
14261
14262+/**
14263+ * atomic64_add_return_unchecked - add and return
14264+ * @i: integer value to add
14265+ * @v: pointer to type atomic64_unchecked_t
14266+ *
14267+ * Atomically adds @i to @v and returns @i + *@v
14268+ */
14269+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
14270+{
14271+ alternative_atomic64(add_return_unchecked,
14272+ ASM_OUTPUT2("+A" (i), "+c" (v)),
14273+ ASM_NO_INPUT_CLOBBER("memory"));
14274+ return i;
14275+}
14276+
14277 /*
14278 * Other variants with different arithmetic operators:
14279 */
14280@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
14281 return a;
14282 }
14283
14284+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
14285+{
14286+ long long a;
14287+ alternative_atomic64(inc_return_unchecked, "=&A" (a),
14288+ "S" (v) : "memory", "ecx");
14289+ return a;
14290+}
14291+
14292 static inline long long atomic64_dec_return(atomic64_t *v)
14293 {
14294 long long a;
14295@@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
14296 }
14297
14298 /**
14299+ * atomic64_add_unchecked - add integer to atomic64 variable
14300+ * @i: integer value to add
14301+ * @v: pointer to type atomic64_unchecked_t
14302+ *
14303+ * Atomically adds @i to @v.
14304+ */
14305+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
14306+{
14307+ __alternative_atomic64(add_unchecked, add_return_unchecked,
14308+ ASM_OUTPUT2("+A" (i), "+c" (v)),
14309+ ASM_NO_INPUT_CLOBBER("memory"));
14310+ return i;
14311+}
14312+
14313+/**
14314 * atomic64_sub - subtract the atomic64 variable
14315 * @i: integer value to subtract
14316 * @v: pointer to type atomic64_t
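On 32-bit x86 the 64-bit atomics are out-of-line routines selected at boot (a cmpxchg8b-based implementation versus a 386 fallback) through the alternative_atomic64() trampoline, so this file cannot simply add inline asm: every _unchecked operation needs its own declared symbol pair next to the checked one, which is what the ATOMIC64_DECL additions above provide. A rough model of that boot-time selection, with illustrative names (the kernel patches call sites rather than using a function pointer):

#include <stdbool.h>
#include <stdint.h>

typedef int64_t (*add_return_fn)(int64_t, volatile int64_t *);

static int64_t add_return_cx8(int64_t i, volatile int64_t *v)
{
        return __atomic_add_fetch(v, i, __ATOMIC_SEQ_CST); /* cmpxchg8b path */
}

static int64_t add_return_386(int64_t i, volatile int64_t *v)
{
        int64_t r = *v + i;     /* pre-cx8 CPUs: no 64-bit atomicity */
        *v = r;
        return r;
}

static add_return_fn atomic64_add_return_impl;

static void atomic64_pick_impl(bool cpu_has_cx8)
{
        atomic64_add_return_impl = cpu_has_cx8 ? add_return_cx8
                                               : add_return_386;
}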
14317diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
14318index 0e1cbfc..5623683 100644
14319--- a/arch/x86/include/asm/atomic64_64.h
14320+++ b/arch/x86/include/asm/atomic64_64.h
14321@@ -18,7 +18,19 @@
14322 */
14323 static inline long atomic64_read(const atomic64_t *v)
14324 {
14325- return (*(volatile long *)&(v)->counter);
14326+ return (*(volatile const long *)&(v)->counter);
14327+}
14328+
14329+/**
14330+ * atomic64_read_unchecked - read atomic64 variable
14331+ * @v: pointer of type atomic64_unchecked_t
14332+ *
14333+ * Atomically reads the value of @v.
14334+ * Doesn't imply a read memory barrier.
14335+ */
14336+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
14337+{
14338+ return (*(volatile const long *)&(v)->counter);
14339 }
14340
14341 /**
14342@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
14343 }
14344
14345 /**
14346+ * atomic64_set_unchecked - set atomic64 variable
14347+ * @v: pointer to type atomic64_unchecked_t
14348+ * @i: required value
14349+ *
14350+ * Atomically sets the value of @v to @i.
14351+ */
14352+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
14353+{
14354+ v->counter = i;
14355+}
14356+
14357+/**
14358 * atomic64_add - add integer to atomic64 variable
14359 * @i: integer value to add
14360 * @v: pointer to type atomic64_t
14361@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
14362 */
14363 static inline void atomic64_add(long i, atomic64_t *v)
14364 {
14365+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
14366+
14367+#ifdef CONFIG_PAX_REFCOUNT
14368+ "jno 0f\n"
14369+ LOCK_PREFIX "subq %1,%0\n"
14370+ "int $4\n0:\n"
14371+ _ASM_EXTABLE(0b, 0b)
14372+#endif
14373+
14374+ : "=m" (v->counter)
14375+ : "er" (i), "m" (v->counter));
14376+}
14377+
14378+/**
14379+ * atomic64_add_unchecked - add integer to atomic64 variable
14380+ * @i: integer value to add
14381+ * @v: pointer to type atomic64_unchecked_t
14382+ *
14383+ * Atomically adds @i to @v.
14384+ */
14385+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
14386+{
14387 asm volatile(LOCK_PREFIX "addq %1,%0"
14388 : "=m" (v->counter)
14389 : "er" (i), "m" (v->counter));
14390@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
14391 */
14392 static inline void atomic64_sub(long i, atomic64_t *v)
14393 {
14394- asm volatile(LOCK_PREFIX "subq %1,%0"
14395+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
14396+
14397+#ifdef CONFIG_PAX_REFCOUNT
14398+ "jno 0f\n"
14399+ LOCK_PREFIX "addq %1,%0\n"
14400+ "int $4\n0:\n"
14401+ _ASM_EXTABLE(0b, 0b)
14402+#endif
14403+
14404+ : "=m" (v->counter)
14405+ : "er" (i), "m" (v->counter));
14406+}
14407+
14408+/**
14409+ * atomic64_sub_unchecked - subtract the atomic64 variable
14410+ * @i: integer value to subtract
14411+ * @v: pointer to type atomic64_unchecked_t
14412+ *
14413+ * Atomically subtracts @i from @v.
14414+ */
14415+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
14416+{
14417+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
14418 : "=m" (v->counter)
14419 : "er" (i), "m" (v->counter));
14420 }
14421@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
14422 {
14423 unsigned char c;
14424
14425- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
14426+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
14427+
14428+#ifdef CONFIG_PAX_REFCOUNT
14429+ "jno 0f\n"
14430+ LOCK_PREFIX "addq %2,%0\n"
14431+ "int $4\n0:\n"
14432+ _ASM_EXTABLE(0b, 0b)
14433+#endif
14434+
14435+ "sete %1\n"
14436 : "=m" (v->counter), "=qm" (c)
14437 : "er" (i), "m" (v->counter) : "memory");
14438 return c;
14439@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
14440 */
14441 static inline void atomic64_inc(atomic64_t *v)
14442 {
14443+ asm volatile(LOCK_PREFIX "incq %0\n"
14444+
14445+#ifdef CONFIG_PAX_REFCOUNT
14446+ "jno 0f\n"
14447+ LOCK_PREFIX "decq %0\n"
14448+ "int $4\n0:\n"
14449+ _ASM_EXTABLE(0b, 0b)
14450+#endif
14451+
14452+ : "=m" (v->counter)
14453+ : "m" (v->counter));
14454+}
14455+
14456+/**
14457+ * atomic64_inc_unchecked - increment atomic64 variable
14458+ * @v: pointer to type atomic64_unchecked_t
14459+ *
14460+ * Atomically increments @v by 1.
14461+ */
14462+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
14463+{
14464 asm volatile(LOCK_PREFIX "incq %0"
14465 : "=m" (v->counter)
14466 : "m" (v->counter));
14467@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
14468 */
14469 static inline void atomic64_dec(atomic64_t *v)
14470 {
14471- asm volatile(LOCK_PREFIX "decq %0"
14472+ asm volatile(LOCK_PREFIX "decq %0\n"
14473+
14474+#ifdef CONFIG_PAX_REFCOUNT
14475+ "jno 0f\n"
14476+ LOCK_PREFIX "incq %0\n"
14477+ "int $4\n0:\n"
14478+ _ASM_EXTABLE(0b, 0b)
14479+#endif
14480+
14481+ : "=m" (v->counter)
14482+ : "m" (v->counter));
14483+}
14484+
14485+/**
14486+ * atomic64_dec_unchecked - decrement atomic64 variable
14487+ * @v: pointer to type atomic64_unchecked_t
14488+ *
14489+ * Atomically decrements @v by 1.
14490+ */
14491+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
14492+{
14493+ asm volatile(LOCK_PREFIX "decq %0\n"
14494 : "=m" (v->counter)
14495 : "m" (v->counter));
14496 }
14497@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
14498 {
14499 unsigned char c;
14500
14501- asm volatile(LOCK_PREFIX "decq %0; sete %1"
14502+ asm volatile(LOCK_PREFIX "decq %0\n"
14503+
14504+#ifdef CONFIG_PAX_REFCOUNT
14505+ "jno 0f\n"
14506+ LOCK_PREFIX "incq %0\n"
14507+ "int $4\n0:\n"
14508+ _ASM_EXTABLE(0b, 0b)
14509+#endif
14510+
14511+ "sete %1\n"
14512 : "=m" (v->counter), "=qm" (c)
14513 : "m" (v->counter) : "memory");
14514 return c != 0;
14515@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
14516 {
14517 unsigned char c;
14518
14519- asm volatile(LOCK_PREFIX "incq %0; sete %1"
14520+ asm volatile(LOCK_PREFIX "incq %0\n"
14521+
14522+#ifdef CONFIG_PAX_REFCOUNT
14523+ "jno 0f\n"
14524+ LOCK_PREFIX "decq %0\n"
14525+ "int $4\n0:\n"
14526+ _ASM_EXTABLE(0b, 0b)
14527+#endif
14528+
14529+ "sete %1\n"
14530 : "=m" (v->counter), "=qm" (c)
14531 : "m" (v->counter) : "memory");
14532 return c != 0;
14533@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
14534 {
14535 unsigned char c;
14536
14537- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
14538+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
14539+
14540+#ifdef CONFIG_PAX_REFCOUNT
14541+ "jno 0f\n"
14542+ LOCK_PREFIX "subq %2,%0\n"
14543+ "int $4\n0:\n"
14544+ _ASM_EXTABLE(0b, 0b)
14545+#endif
14546+
14547+ "sets %1\n"
14548 : "=m" (v->counter), "=qm" (c)
14549 : "er" (i), "m" (v->counter) : "memory");
14550 return c;
14551@@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
14552 */
14553 static inline long atomic64_add_return(long i, atomic64_t *v)
14554 {
14555+ return i + xadd_check_overflow(&v->counter, i);
14556+}
14557+
14558+/**
14559+ * atomic64_add_return_unchecked - add and return
14560+ * @i: integer value to add
14561+ * @v: pointer to type atomic64_unchecked_t
14562+ *
14563+ * Atomically adds @i to @v and returns @i + @v
14564+ */
14565+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
14566+{
14567 return i + xadd(&v->counter, i);
14568 }
14569
14570@@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
14571 }
14572
14573 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
14574+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
14575+{
14576+ return atomic64_add_return_unchecked(1, v);
14577+}
14578 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
14579
14580 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
14581@@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
14582 return cmpxchg(&v->counter, old, new);
14583 }
14584
14585+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
14586+{
14587+ return cmpxchg(&v->counter, old, new);
14588+}
14589+
14590 static inline long atomic64_xchg(atomic64_t *v, long new)
14591 {
14592 return xchg(&v->counter, new);
14593@@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
14594 */
14595 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
14596 {
14597- long c, old;
14598+ long c, old, new;
14599 c = atomic64_read(v);
14600 for (;;) {
14601- if (unlikely(c == (u)))
14602+ if (unlikely(c == u))
14603 break;
14604- old = atomic64_cmpxchg((v), c, c + (a));
14605+
14606+ asm volatile("add %2,%0\n"
14607+
14608+#ifdef CONFIG_PAX_REFCOUNT
14609+ "jno 0f\n"
14610+ "sub %2,%0\n"
14611+ "int $4\n0:\n"
14612+ _ASM_EXTABLE(0b, 0b)
14613+#endif
14614+
14615+ : "=r" (new)
14616+ : "0" (c), "ir" (a));
14617+
14618+ old = atomic64_cmpxchg(v, c, new);
14619 if (likely(old == c))
14620 break;
14621 c = old;
14622 }
14623- return c != (u);
14624+ return c != u;
14625 }
14626
14627 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
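The reworked atomic64_add_unless() shows why the overflow check cannot simply wrap the cmpxchg: the candidate value must be overflow-checked while it is still private, so the patch computes new = c + a in a register under the jno/int $4 guard and only then publishes it via cmpxchg. Equivalent logic with compiler builtins (a sketch, not the kernel code):

#include <stdbool.h>

/* Returns true if *v was changed, i.e. the old value differed from u. */
static bool add_unless_checked(volatile long *v, long a, long u)
{
        long c = __atomic_load_n(v, __ATOMIC_SEQ_CST);

        for (;;) {
                long new_val;

                if (c == u)
                        return false;
                if (__builtin_add_overflow(c, a, &new_val))
                        __builtin_trap();        /* plays the int $4 role */
                if (__atomic_compare_exchange_n(v, &c, new_val, false,
                                                __ATOMIC_SEQ_CST,
                                                __ATOMIC_SEQ_CST))
                        return true;             /* c is reloaded on failure */
        }
}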
14628diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
14629index 6dfd019..28e188d 100644
14630--- a/arch/x86/include/asm/bitops.h
14631+++ b/arch/x86/include/asm/bitops.h
14632@@ -40,7 +40,7 @@
14633 * a mask operation on a byte.
14634 */
14635 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
14636-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
14637+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
14638 #define CONST_MASK(nr) (1 << ((nr) & 7))
14639
14640 /**
14641@@ -486,7 +486,7 @@ static inline int fls(int x)
14642 * at position 64.
14643 */
14644 #ifdef CONFIG_X86_64
14645-static __always_inline int fls64(__u64 x)
14646+static __always_inline long fls64(__u64 x)
14647 {
14648 int bitpos = -1;
14649 /*
14650diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
14651index 4fa687a..60f2d39 100644
14652--- a/arch/x86/include/asm/boot.h
14653+++ b/arch/x86/include/asm/boot.h
14654@@ -6,10 +6,15 @@
14655 #include <uapi/asm/boot.h>
14656
14657 /* Physical address where kernel should be loaded. */
14658-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
14659+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
14660 + (CONFIG_PHYSICAL_ALIGN - 1)) \
14661 & ~(CONFIG_PHYSICAL_ALIGN - 1))
14662
14663+#ifndef __ASSEMBLY__
14664+extern unsigned char __LOAD_PHYSICAL_ADDR[];
14665+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
14666+#endif
14667+
14668 /* Minimum kernel alignment, as a power of two */
14669 #ifdef CONFIG_X86_64
14670 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
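Turning LOAD_PHYSICAL_ADDR from an integer constant into the address of an extern array is a linker-symbol trick: the value is presumably assigned in the linker script (assembly keeps using the raw ____LOAD_PHYSICAL_ADDR arithmetic), so C code sees a link-time constant rather than a compile-time integer expression. The consumption pattern, with an illustrative script line that is an assumption, not taken from the patch:

/* linker script (illustrative):  __LOAD_PHYSICAL_ADDR = 0x1000000; */

extern unsigned char __LOAD_PHYSICAL_ADDR[];  /* no storage: only an address */

static unsigned long load_phys_addr(void)
{
        /* The symbol's *address* is the value the script assigned to it. */
        return (unsigned long)__LOAD_PHYSICAL_ADDR;
}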
14671diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
14672index 48f99f1..d78ebf9 100644
14673--- a/arch/x86/include/asm/cache.h
14674+++ b/arch/x86/include/asm/cache.h
14675@@ -5,12 +5,13 @@
14676
14677 /* L1 cache line size */
14678 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
14679-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
14680+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
14681
14682 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
14683+#define __read_only __attribute__((__section__(".data..read_only")))
14684
14685 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
14686-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
14687+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
14688
14689 #ifdef CONFIG_X86_VSMP
14690 #ifdef CONFIG_SMP
14691diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
14692index 9863ee3..4a1f8e1 100644
14693--- a/arch/x86/include/asm/cacheflush.h
14694+++ b/arch/x86/include/asm/cacheflush.h
14695@@ -27,7 +27,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
14696 unsigned long pg_flags = pg->flags & _PGMT_MASK;
14697
14698 if (pg_flags == _PGMT_DEFAULT)
14699- return -1;
14700+ return ~0UL;
14701 else if (pg_flags == _PGMT_WC)
14702 return _PAGE_CACHE_WC;
14703 else if (pg_flags == _PGMT_UC_MINUS)
14704diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
14705index 46fc474..b02b0f9 100644
14706--- a/arch/x86/include/asm/checksum_32.h
14707+++ b/arch/x86/include/asm/checksum_32.h
14708@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
14709 int len, __wsum sum,
14710 int *src_err_ptr, int *dst_err_ptr);
14711
14712+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
14713+ int len, __wsum sum,
14714+ int *src_err_ptr, int *dst_err_ptr);
14715+
14716+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
14717+ int len, __wsum sum,
14718+ int *src_err_ptr, int *dst_err_ptr);
14719+
14720 /*
14721 * Note: when you get a NULL pointer exception here this means someone
14722 * passed in an incorrect kernel address to one of these functions.
14723@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
14724 int *err_ptr)
14725 {
14726 might_sleep();
14727- return csum_partial_copy_generic((__force void *)src, dst,
14728+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
14729 len, sum, err_ptr, NULL);
14730 }
14731
14732@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
14733 {
14734 might_sleep();
14735 if (access_ok(VERIFY_WRITE, dst, len))
14736- return csum_partial_copy_generic(src, (__force void *)dst,
14737+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
14738 len, sum, NULL, err_ptr);
14739
14740 if (len)
14741diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
14742index d47786a..ce1b05d 100644
14743--- a/arch/x86/include/asm/cmpxchg.h
14744+++ b/arch/x86/include/asm/cmpxchg.h
14745@@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
14746 __compiletime_error("Bad argument size for cmpxchg");
14747 extern void __xadd_wrong_size(void)
14748 __compiletime_error("Bad argument size for xadd");
14749+extern void __xadd_check_overflow_wrong_size(void)
14750+ __compiletime_error("Bad argument size for xadd_check_overflow");
14751 extern void __add_wrong_size(void)
14752 __compiletime_error("Bad argument size for add");
14753+extern void __add_check_overflow_wrong_size(void)
14754+ __compiletime_error("Bad argument size for add_check_overflow");
14755
14756 /*
14757 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
14758@@ -67,6 +71,34 @@ extern void __add_wrong_size(void)
14759 __ret; \
14760 })
14761
14762+#define __xchg_op_check_overflow(ptr, arg, op, lock) \
14763+ ({ \
14764+ __typeof__ (*(ptr)) __ret = (arg); \
14765+ switch (sizeof(*(ptr))) { \
14766+ case __X86_CASE_L: \
14767+ asm volatile (lock #op "l %0, %1\n" \
14768+ "jno 0f\n" \
14769+ "mov %0,%1\n" \
14770+ "int $4\n0:\n" \
14771+ _ASM_EXTABLE(0b, 0b) \
14772+ : "+r" (__ret), "+m" (*(ptr)) \
14773+ : : "memory", "cc"); \
14774+ break; \
14775+ case __X86_CASE_Q: \
14776+ asm volatile (lock #op "q %q0, %1\n" \
14777+ "jno 0f\n" \
14778+ "mov %0,%1\n" \
14779+ "int $4\n0:\n" \
14780+ _ASM_EXTABLE(0b, 0b) \
14781+ : "+r" (__ret), "+m" (*(ptr)) \
14782+ : : "memory", "cc"); \
14783+ break; \
14784+ default: \
14785+ __ ## op ## _check_overflow_wrong_size(); \
14786+ } \
14787+ __ret; \
14788+ })
14789+
14790 /*
14791 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
14792 * Since this is generally used to protect other memory information, we
14793@@ -167,6 +199,9 @@ extern void __add_wrong_size(void)
14794 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
14795 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
14796
14797+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
14798+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
14799+
14800 #define __add(ptr, inc, lock) \
14801 ({ \
14802 __typeof__ (*(ptr)) __ret = (inc); \
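__xchg_op_check_overflow() is the building block behind xadd_check_overflow(), which the patched atomic*_add_return() implementations call: a locked xadd followed by the overflow test; on overflow the pre-add value still held in the register is stored back ("mov %0,%1") before trapping, and unsupported operand sizes fail at build time through the __compiletime_error stubs. A user-space model of the 32-bit case, assuming these semantics:

/* Fetch-and-add that rolls back and traps on signed overflow. */
static int xadd_check_overflow_model(volatile int *p, int inc)
{
        int old = __atomic_fetch_add(p, inc, __ATOMIC_SEQ_CST);
        int sum;

        if (__builtin_add_overflow(old, inc, &sum)) {
                *p = old;               /* the "mov %0,%1" rollback */
                __builtin_trap();       /* the "int $4" report */
        }
        return old;                     /* xadd yields the previous value */
}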
14803diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
14804index 59c6c40..5e0b22c 100644
14805--- a/arch/x86/include/asm/compat.h
14806+++ b/arch/x86/include/asm/compat.h
14807@@ -41,7 +41,7 @@ typedef s64 __attribute__((aligned(4))) compat_s64;
14808 typedef u32 compat_uint_t;
14809 typedef u32 compat_ulong_t;
14810 typedef u64 __attribute__((aligned(4))) compat_u64;
14811-typedef u32 compat_uptr_t;
14812+typedef u32 __user compat_uptr_t;
14813
14814 struct compat_timespec {
14815 compat_time_t tv_sec;
14816diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
14817index e99ac27..10d834e 100644
14818--- a/arch/x86/include/asm/cpufeature.h
14819+++ b/arch/x86/include/asm/cpufeature.h
14820@@ -203,7 +203,7 @@
14821 #define X86_FEATURE_DECODEASSISTS (8*32+12) /* AMD Decode Assists support */
14822 #define X86_FEATURE_PAUSEFILTER (8*32+13) /* AMD filtered pause intercept */
14823 #define X86_FEATURE_PFTHRESHOLD (8*32+14) /* AMD pause filter threshold */
14824-
14825+#define X86_FEATURE_STRONGUDEREF (8*32+31) /* PaX PCID based strong UDEREF */
14826
14827 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
14828 #define X86_FEATURE_FSGSBASE (9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
14829@@ -211,7 +211,7 @@
14830 #define X86_FEATURE_BMI1 (9*32+ 3) /* 1st group bit manipulation extensions */
14831 #define X86_FEATURE_HLE (9*32+ 4) /* Hardware Lock Elision */
14832 #define X86_FEATURE_AVX2 (9*32+ 5) /* AVX2 instructions */
14833-#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Protection */
14834+#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Prevention */
14835 #define X86_FEATURE_BMI2 (9*32+ 8) /* 2nd group bit manipulation extensions */
14836 #define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */
14837 #define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */
14838@@ -353,6 +353,7 @@ extern const char * const x86_power_flags[32];
14839 #undef cpu_has_centaur_mcr
14840 #define cpu_has_centaur_mcr 0
14841
14842+#define cpu_has_pcid boot_cpu_has(X86_FEATURE_PCID)
14843 #endif /* CONFIG_X86_64 */
14844
14845 #if __GNUC__ >= 4
14846@@ -394,7 +395,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
14847 ".section .discard,\"aw\",@progbits\n"
14848 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
14849 ".previous\n"
14850- ".section .altinstr_replacement,\"ax\"\n"
14851+ ".section .altinstr_replacement,\"a\"\n"
14852 "3: movb $1,%0\n"
14853 "4:\n"
14854 ".previous\n"
14855diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
14856index 8bf1c06..b6ae785 100644
14857--- a/arch/x86/include/asm/desc.h
14858+++ b/arch/x86/include/asm/desc.h
14859@@ -4,6 +4,7 @@
14860 #include <asm/desc_defs.h>
14861 #include <asm/ldt.h>
14862 #include <asm/mmu.h>
14863+#include <asm/pgtable.h>
14864
14865 #include <linux/smp.h>
14866 #include <linux/percpu.h>
14867@@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
14868
14869 desc->type = (info->read_exec_only ^ 1) << 1;
14870 desc->type |= info->contents << 2;
14871+ desc->type |= info->seg_not_present ^ 1;
14872
14873 desc->s = 1;
14874 desc->dpl = 0x3;
14875@@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
14876 }
14877
14878 extern struct desc_ptr idt_descr;
14879-extern gate_desc idt_table[];
14880 extern struct desc_ptr nmi_idt_descr;
14881-extern gate_desc nmi_idt_table[];
14882-
14883-struct gdt_page {
14884- struct desc_struct gdt[GDT_ENTRIES];
14885-} __attribute__((aligned(PAGE_SIZE)));
14886-
14887-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
14888+extern gate_desc idt_table[256];
14889+extern gate_desc nmi_idt_table[256];
14890
14891+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
14892 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
14893 {
14894- return per_cpu(gdt_page, cpu).gdt;
14895+ return cpu_gdt_table[cpu];
14896 }
14897
14898 #ifdef CONFIG_X86_64
14899@@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
14900 unsigned long base, unsigned dpl, unsigned flags,
14901 unsigned short seg)
14902 {
14903- gate->a = (seg << 16) | (base & 0xffff);
14904- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
14905+ gate->gate.offset_low = base;
14906+ gate->gate.seg = seg;
14907+ gate->gate.reserved = 0;
14908+ gate->gate.type = type;
14909+ gate->gate.s = 0;
14910+ gate->gate.dpl = dpl;
14911+ gate->gate.p = 1;
14912+ gate->gate.offset_high = base >> 16;
14913 }
14914
14915 #endif
14916@@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
14917
14918 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
14919 {
14920+ pax_open_kernel();
14921 memcpy(&idt[entry], gate, sizeof(*gate));
14922+ pax_close_kernel();
14923 }
14924
14925 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
14926 {
14927+ pax_open_kernel();
14928 memcpy(&ldt[entry], desc, 8);
14929+ pax_close_kernel();
14930 }
14931
14932 static inline void
14933@@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
14934 default: size = sizeof(*gdt); break;
14935 }
14936
14937+ pax_open_kernel();
14938 memcpy(&gdt[entry], desc, size);
14939+ pax_close_kernel();
14940 }
14941
14942 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
14943@@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
14944
14945 static inline void native_load_tr_desc(void)
14946 {
14947+ pax_open_kernel();
14948 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
14949+ pax_close_kernel();
14950 }
14951
14952 static inline void native_load_gdt(const struct desc_ptr *dtr)
14953@@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
14954 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
14955 unsigned int i;
14956
14957+ pax_open_kernel();
14958 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
14959 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
14960+ pax_close_kernel();
14961 }
14962
14963 #define _LDT_empty(info) \
14964@@ -287,7 +300,7 @@ static inline void load_LDT(mm_context_t *pc)
14965 preempt_enable();
14966 }
14967
14968-static inline unsigned long get_desc_base(const struct desc_struct *desc)
14969+static inline unsigned long __intentional_overflow(-1) get_desc_base(const struct desc_struct *desc)
14970 {
14971 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
14972 }
14973@@ -311,7 +324,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
14974 }
14975
14976 #ifdef CONFIG_X86_64
14977-static inline void set_nmi_gate(int gate, void *addr)
14978+static inline void set_nmi_gate(int gate, const void *addr)
14979 {
14980 gate_desc s;
14981
14982@@ -320,7 +333,7 @@ static inline void set_nmi_gate(int gate, void *addr)
14983 }
14984 #endif
14985
14986-static inline void _set_gate(int gate, unsigned type, void *addr,
14987+static inline void _set_gate(int gate, unsigned type, const void *addr,
14988 unsigned dpl, unsigned ist, unsigned seg)
14989 {
14990 gate_desc s;
14991@@ -339,7 +352,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
14992 * Pentium F0 0F bugfix can have resulted in the mapped
14993 * IDT being write-protected.
14994 */
14995-static inline void set_intr_gate(unsigned int n, void *addr)
14996+static inline void set_intr_gate(unsigned int n, const void *addr)
14997 {
14998 BUG_ON((unsigned)n > 0xFF);
14999 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
15000@@ -369,19 +382,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
15001 /*
15002 * This routine sets up an interrupt gate at directory privilege level 3.
15003 */
15004-static inline void set_system_intr_gate(unsigned int n, void *addr)
15005+static inline void set_system_intr_gate(unsigned int n, const void *addr)
15006 {
15007 BUG_ON((unsigned)n > 0xFF);
15008 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
15009 }
15010
15011-static inline void set_system_trap_gate(unsigned int n, void *addr)
15012+static inline void set_system_trap_gate(unsigned int n, const void *addr)
15013 {
15014 BUG_ON((unsigned)n > 0xFF);
15015 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
15016 }
15017
15018-static inline void set_trap_gate(unsigned int n, void *addr)
15019+static inline void set_trap_gate(unsigned int n, const void *addr)
15020 {
15021 BUG_ON((unsigned)n > 0xFF);
15022 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
15023@@ -390,19 +403,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
15024 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
15025 {
15026 BUG_ON((unsigned)n > 0xFF);
15027- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
15028+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
15029 }
15030
15031-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
15032+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
15033 {
15034 BUG_ON((unsigned)n > 0xFF);
15035 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
15036 }
15037
15038-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
15039+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
15040 {
15041 BUG_ON((unsigned)n > 0xFF);
15042 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
15043 }
15044
15045+#ifdef CONFIG_X86_32
15046+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
15047+{
15048+ struct desc_struct d;
15049+
15050+ if (likely(limit))
15051+ limit = (limit - 1UL) >> PAGE_SHIFT;
15052+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
15053+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
15054+}
15055+#endif
15056+
15057 #endif /* _ASM_X86_DESC_H */
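The descriptor-table helpers gain pax_open_kernel()/pax_close_kernel() bracketing because, with KERNEXEC, the GDT and IDT (now fixed-size arrays rather than a per-cpu gdt_page) are kept in read-only memory; every legitimate write briefly lifts the protection. The pairing is analogous to toggling page protection around a write in user space, though the kernel manipulates CR0.WP or the page tables rather than calling mprotect:

#include <string.h>
#include <sys/mman.h>

/* User-space analogy only: dst_page is a page-aligned, normally read-only
 * region standing in for the GDT/IDT. */
static void write_protected_entry(void *dst_page, size_t page_size,
                                  void *entry, const void *desc, size_t len)
{
        mprotect(dst_page, page_size, PROT_READ | PROT_WRITE); /* pax_open_kernel() */
        memcpy(entry, desc, len);
        mprotect(dst_page, page_size, PROT_READ);              /* pax_close_kernel() */
}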
15058diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
15059index 278441f..b95a174 100644
15060--- a/arch/x86/include/asm/desc_defs.h
15061+++ b/arch/x86/include/asm/desc_defs.h
15062@@ -31,6 +31,12 @@ struct desc_struct {
15063 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
15064 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
15065 };
15066+ struct {
15067+ u16 offset_low;
15068+ u16 seg;
15069+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
15070+ unsigned offset_high: 16;
15071+ } gate;
15072 };
15073 } __attribute__((packed));
15074
15075diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
15076index ced283a..ffe04cc 100644
15077--- a/arch/x86/include/asm/div64.h
15078+++ b/arch/x86/include/asm/div64.h
15079@@ -39,7 +39,7 @@
15080 __mod; \
15081 })
15082
15083-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
15084+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
15085 {
15086 union {
15087 u64 v64;
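The __intentional_overflow(-1) annotation seen here (and on get_desc_base() above) marks functions whose arithmetic is allowed to wrap, so the size_overflow GCC plugin skips instrumenting them. A sketch of how such a marker is typically defined, with a no-op fallback when the plugin is absent (assumed; the actual definition lives elsewhere in this patch):

/* Sketch: plugin-consumed attribute with a no-op fallback. */
#ifdef SIZE_OVERFLOW_PLUGIN
#define __intentional_overflow(...) \
        __attribute__((intentional_overflow(__VA_ARGS__)))
#else
#define __intentional_overflow(...)
#endif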
15088diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
15089index 9c999c1..3860cb8 100644
15090--- a/arch/x86/include/asm/elf.h
15091+++ b/arch/x86/include/asm/elf.h
15092@@ -243,7 +243,25 @@ extern int force_personality32;
15093 the loader. We need to make sure that it is out of the way of the program
15094 that it will "exec", and that there is sufficient room for the brk. */
15095
15096+#ifdef CONFIG_PAX_SEGMEXEC
15097+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
15098+#else
15099 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
15100+#endif
15101+
15102+#ifdef CONFIG_PAX_ASLR
15103+#ifdef CONFIG_X86_32
15104+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
15105+
15106+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
15107+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
15108+#else
15109+#define PAX_ELF_ET_DYN_BASE 0x400000UL
15110+
15111+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
15112+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
15113+#endif
15114+#endif
15115
15116 /* This yields a mask that user programs can use to figure out what
15117 instruction set this CPU supports. This could be done in user space,
15118@@ -296,16 +314,12 @@ do { \
15119
15120 #define ARCH_DLINFO \
15121 do { \
15122- if (vdso_enabled) \
15123- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
15124- (unsigned long)current->mm->context.vdso); \
15125+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
15126 } while (0)
15127
15128 #define ARCH_DLINFO_X32 \
15129 do { \
15130- if (vdso_enabled) \
15131- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
15132- (unsigned long)current->mm->context.vdso); \
15133+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
15134 } while (0)
15135
15136 #define AT_SYSINFO 32
15137@@ -320,7 +334,7 @@ else \
15138
15139 #endif /* !CONFIG_X86_32 */
15140
15141-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
15142+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
15143
15144 #define VDSO_ENTRY \
15145 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
15146@@ -336,9 +350,6 @@ extern int x32_setup_additional_pages(struct linux_binprm *bprm,
15147 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
15148 #define compat_arch_setup_additional_pages syscall32_setup_pages
15149
15150-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
15151-#define arch_randomize_brk arch_randomize_brk
15152-
15153 /*
15154 * True on X86_32 or when emulating IA32 on X86_64
15155 */
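The PAX_DELTA_*_LEN macros give the number of random bits PaX ASLR applies to the mmap and stack bases. On x86_64 (non-compat), assuming TASK_SIZE_MAX_SHIFT of 47 and PAGE_SHIFT of 12, the expression 47 - 12 - 3 yields 32 bits of entropy; 32-bit tasks get 16 bits, or 15 under SEGMEXEC, whose address-space split halves the range. A toy illustration of how an N-bit, page-granular delta shifts a base (the entropy source here is illustrative; the kernel uses its own RNG):

#include <stdint.h>
#include <stdlib.h>

#define PAGE_SHIFT 12
#define DELTA_MMAP_LEN (47 - PAGE_SHIFT - 3)   /* 32 random bits */

static uint64_t randomize_base(uint64_t base)
{
        uint64_t r = ((uint64_t)rand() << 31) ^ (uint64_t)rand(); /* toy only */
        uint64_t delta = (r & ((1ULL << DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;

        return base + delta;
}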
15156diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
15157index 75ce3f4..882e801 100644
15158--- a/arch/x86/include/asm/emergency-restart.h
15159+++ b/arch/x86/include/asm/emergency-restart.h
15160@@ -13,6 +13,6 @@ enum reboot_type {
15161
15162 extern enum reboot_type reboot_type;
15163
15164-extern void machine_emergency_restart(void);
15165+extern void machine_emergency_restart(void) __noreturn;
15166
15167 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
15168diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
15169index e25cc33..7d3ec01 100644
15170--- a/arch/x86/include/asm/fpu-internal.h
15171+++ b/arch/x86/include/asm/fpu-internal.h
15172@@ -126,8 +126,11 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
15173 #define user_insn(insn, output, input...) \
15174 ({ \
15175 int err; \
15176+ pax_open_userland(); \
15177 asm volatile(ASM_STAC "\n" \
15178- "1:" #insn "\n\t" \
15179+ "1:" \
15180+ __copyuser_seg \
15181+ #insn "\n\t" \
15182 "2: " ASM_CLAC "\n" \
15183 ".section .fixup,\"ax\"\n" \
15184 "3: movl $-1,%[err]\n" \
15185@@ -136,6 +139,7 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
15186 _ASM_EXTABLE(1b, 3b) \
15187 : [err] "=r" (err), output \
15188 : "0"(0), input); \
15189+ pax_close_userland(); \
15190 err; \
15191 })
15192
15193@@ -300,7 +304,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
15194 "emms\n\t" /* clear stack tags */
15195 "fildl %P[addr]", /* set F?P to defined value */
15196 X86_FEATURE_FXSAVE_LEAK,
15197- [addr] "m" (tsk->thread.fpu.has_fpu));
15198+ [addr] "m" (init_tss[raw_smp_processor_id()].x86_tss.sp0));
15199
15200 return fpu_restore_checking(&tsk->thread.fpu);
15201 }
15202diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
15203index be27ba1..04a8801 100644
15204--- a/arch/x86/include/asm/futex.h
15205+++ b/arch/x86/include/asm/futex.h
15206@@ -12,6 +12,7 @@
15207 #include <asm/smap.h>
15208
15209 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
15210+ typecheck(u32 __user *, uaddr); \
15211 asm volatile("\t" ASM_STAC "\n" \
15212 "1:\t" insn "\n" \
15213 "2:\t" ASM_CLAC "\n" \
15214@@ -20,15 +21,16 @@
15215 "\tjmp\t2b\n" \
15216 "\t.previous\n" \
15217 _ASM_EXTABLE(1b, 3b) \
15218- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
15219+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr)) \
15220 : "i" (-EFAULT), "0" (oparg), "1" (0))
15221
15222 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
15223+ typecheck(u32 __user *, uaddr); \
15224 asm volatile("\t" ASM_STAC "\n" \
15225 "1:\tmovl %2, %0\n" \
15226 "\tmovl\t%0, %3\n" \
15227 "\t" insn "\n" \
15228- "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
15229+ "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \
15230 "\tjnz\t1b\n" \
15231 "3:\t" ASM_CLAC "\n" \
15232 "\t.section .fixup,\"ax\"\n" \
15233@@ -38,7 +40,7 @@
15234 _ASM_EXTABLE(1b, 4b) \
15235 _ASM_EXTABLE(2b, 4b) \
15236 : "=&a" (oldval), "=&r" (ret), \
15237- "+m" (*uaddr), "=&r" (tem) \
15238+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
15239 : "r" (oparg), "i" (-EFAULT), "1" (0))
15240
15241 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
15242@@ -57,12 +59,13 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
15243
15244 pagefault_disable();
15245
15246+ pax_open_userland();
15247 switch (op) {
15248 case FUTEX_OP_SET:
15249- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
15250+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
15251 break;
15252 case FUTEX_OP_ADD:
15253- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
15254+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
15255 uaddr, oparg);
15256 break;
15257 case FUTEX_OP_OR:
15258@@ -77,6 +80,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
15259 default:
15260 ret = -ENOSYS;
15261 }
15262+ pax_close_userland();
15263
15264 pagefault_enable();
15265
15266@@ -115,18 +119,20 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
15267 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
15268 return -EFAULT;
15269
15270+ pax_open_userland();
15271 asm volatile("\t" ASM_STAC "\n"
15272- "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
15273+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
15274 "2:\t" ASM_CLAC "\n"
15275 "\t.section .fixup, \"ax\"\n"
15276 "3:\tmov %3, %0\n"
15277 "\tjmp 2b\n"
15278 "\t.previous\n"
15279 _ASM_EXTABLE(1b, 3b)
15280- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
15281+ : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
15282 : "i" (-EFAULT), "r" (newval), "1" (oldval)
15283 : "memory"
15284 );
15285+ pax_close_userland();
15286
15287 *uval = oldval;
15288 return ret;
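The futex macros pick up three related changes: typecheck(u32 __user *, uaddr) enforces at compile time that callers really pass a user pointer; __copyuser_seg prefixes the accesses with the userland segment override UDEREF uses on i386; and ____m()/pax_open_userland() bracket the access window. The typecheck() idiom itself is the stock kernel macro from include/linux/typecheck.h:

/* Comparing addresses of two differently-typed objects triggers a compiler
 * warning, so a type mismatch is caught at build time at zero runtime cost. */
#define typecheck(type, x) \
({      type __dummy; \
        typeof(x) __dummy2; \
        (void)(&__dummy == &__dummy2); \
        1; \
})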
15289diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
15290index 1da97ef..9c2ebff 100644
15291--- a/arch/x86/include/asm/hw_irq.h
15292+++ b/arch/x86/include/asm/hw_irq.h
15293@@ -148,8 +148,8 @@ extern void setup_ioapic_dest(void);
15294 extern void enable_IO_APIC(void);
15295
15296 /* Statistics */
15297-extern atomic_t irq_err_count;
15298-extern atomic_t irq_mis_count;
15299+extern atomic_unchecked_t irq_err_count;
15300+extern atomic_unchecked_t irq_mis_count;
15301
15302 /* EISA */
15303 extern void eisa_set_level_irq(unsigned int irq);
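
irq_err_count and irq_mis_count become atomic_unchecked_t because they are pure statistics that may legitimately wrap: under PAX_REFCOUNT the regular atomic_t operations (patched elsewhere in this diff) trap on signed overflow, so counters where wraparound is harmless are opted out via the _unchecked variants. A minimal illustrative pairing, not the patch's full definitions:

/* Illustrative sketch only: the checked atomic_inc() carries the
 * PAX_REFCOUNT overflow trap; the unchecked variant is a plain
 * locked increment with no "jno"/"int $4" tail. */
typedef struct { int counter; } atomic_unchecked_t;

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	asm volatile("lock; incl %0"	/* wraparound allowed */
		     : "+m" (v->counter));
}
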
15304diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
15305index a203659..9889f1c 100644
15306--- a/arch/x86/include/asm/i8259.h
15307+++ b/arch/x86/include/asm/i8259.h
15308@@ -62,7 +62,7 @@ struct legacy_pic {
15309 void (*init)(int auto_eoi);
15310 int (*irq_pending)(unsigned int irq);
15311 void (*make_irq)(unsigned int irq);
15312-};
15313+} __do_const;
15314
15315 extern struct legacy_pic *legacy_pic;
15316 extern struct legacy_pic null_legacy_pic;
15317diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
15318index d8e8eef..1765f78 100644
15319--- a/arch/x86/include/asm/io.h
15320+++ b/arch/x86/include/asm/io.h
15321@@ -51,12 +51,12 @@ static inline void name(type val, volatile void __iomem *addr) \
15322 "m" (*(volatile type __force *)addr) barrier); }
15323
15324 build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
15325-build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
15326-build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
15327+build_mmio_read(__intentional_overflow(-1) readw, "w", unsigned short, "=r", :"memory")
15328+build_mmio_read(__intentional_overflow(-1) readl, "l", unsigned int, "=r", :"memory")
15329
15330 build_mmio_read(__readb, "b", unsigned char, "=q", )
15331-build_mmio_read(__readw, "w", unsigned short, "=r", )
15332-build_mmio_read(__readl, "l", unsigned int, "=r", )
15333+build_mmio_read(__intentional_overflow(-1) __readw, "w", unsigned short, "=r", )
15334+build_mmio_read(__intentional_overflow(-1) __readl, "l", unsigned int, "=r", )
15335
15336 build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
15337 build_mmio_write(writew, "w", unsigned short, "r", :"memory")
15338@@ -184,7 +184,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
15339 return ioremap_nocache(offset, size);
15340 }
15341
15342-extern void iounmap(volatile void __iomem *addr);
15343+extern void iounmap(const volatile void __iomem *addr);
15344
15345 extern void set_iounmap_nonlazy(void);
15346
15347@@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
15348
15349 #include <linux/vmalloc.h>
15350
15351+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
15352+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
15353+{
15354+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
15355+}
15356+
15357+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
15358+{
15359+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
15360+}
15361+
15362 /*
15363 * Convert a virtual cached pointer to an uncached pointer
15364 */
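
The new valid_phys_addr_range()/valid_mmap_phys_addr_range() helpers cap /dev/mem-style accesses at the CPU's reported physical address width instead of accepting any offset: the last page frame touched must lie below 1 << (x86_phys_bits - PAGE_SHIFT). A standalone sketch of the same check, with PAGE_SHIFT fixed at 12 for illustration:

/* Sketch mirroring the patch's check; e.g. 36 physical bits and
 * 4 KiB pages give a PFN limit of 1 << 24, i.e. 64 GiB. */
static int phys_range_ok(unsigned long long addr, unsigned long long count,
			 unsigned int phys_bits)
{
	unsigned long long last_pfn = (addr + count + 4096 - 1) >> 12;
	return last_pfn < (1ULL << (phys_bits - 12));
}
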
15365diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
15366index bba3cf8..06bc8da 100644
15367--- a/arch/x86/include/asm/irqflags.h
15368+++ b/arch/x86/include/asm/irqflags.h
15369@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
15370 sti; \
15371 sysexit
15372
15373+#define GET_CR0_INTO_RDI mov %cr0, %rdi
15374+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
15375+#define GET_CR3_INTO_RDI mov %cr3, %rdi
15376+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
15377+
15378 #else
15379 #define INTERRUPT_RETURN iret
15380 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
15381diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
15382index 5a6d287..f815789 100644
15383--- a/arch/x86/include/asm/kprobes.h
15384+++ b/arch/x86/include/asm/kprobes.h
15385@@ -38,13 +38,8 @@ typedef u8 kprobe_opcode_t;
15386 #define RELATIVEJUMP_SIZE 5
15387 #define RELATIVECALL_OPCODE 0xe8
15388 #define RELATIVE_ADDR_SIZE 4
15389-#define MAX_STACK_SIZE 64
15390-#define MIN_STACK_SIZE(ADDR) \
15391- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
15392- THREAD_SIZE - (unsigned long)(ADDR))) \
15393- ? (MAX_STACK_SIZE) \
15394- : (((unsigned long)current_thread_info()) + \
15395- THREAD_SIZE - (unsigned long)(ADDR)))
15396+#define MAX_STACK_SIZE 64UL
15397+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
15398
15399 #define flush_insn_slot(p) do { } while (0)
15400
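
MIN_STACK_SIZE() decides how many bytes of stack to snapshot around a probed instruction; the rewrite clamps against current->thread.sp0 (the top of the kernel stack in this patch's layout) instead of recomputing the top from current_thread_info() + THREAD_SIZE, and types MAX_STACK_SIZE as UL so min() compares like types. Written out as a function, the clamp is simply:

/* Equivalent clamp (sketch), assuming sp0 is the stack top. */
#define MAX_STACK_SIZE 64UL
static unsigned long min_stack_size(unsigned long sp0, unsigned long addr)
{
	unsigned long room = sp0 - addr;	/* bytes up to the stack top */
	return room < MAX_STACK_SIZE ? room : MAX_STACK_SIZE;
}
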
15401diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
15402index 2d89e39..baee879 100644
15403--- a/arch/x86/include/asm/local.h
15404+++ b/arch/x86/include/asm/local.h
15405@@ -10,33 +10,97 @@ typedef struct {
15406 atomic_long_t a;
15407 } local_t;
15408
15409+typedef struct {
15410+ atomic_long_unchecked_t a;
15411+} local_unchecked_t;
15412+
15413 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
15414
15415 #define local_read(l) atomic_long_read(&(l)->a)
15416+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
15417 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
15418+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
15419
15420 static inline void local_inc(local_t *l)
15421 {
15422- asm volatile(_ASM_INC "%0"
15423+ asm volatile(_ASM_INC "%0\n"
15424+
15425+#ifdef CONFIG_PAX_REFCOUNT
15426+ "jno 0f\n"
15427+ _ASM_DEC "%0\n"
15428+ "int $4\n0:\n"
15429+ _ASM_EXTABLE(0b, 0b)
15430+#endif
15431+
15432+ : "+m" (l->a.counter));
15433+}
15434+
15435+static inline void local_inc_unchecked(local_unchecked_t *l)
15436+{
15437+ asm volatile(_ASM_INC "%0\n"
15438 : "+m" (l->a.counter));
15439 }
15440
15441 static inline void local_dec(local_t *l)
15442 {
15443- asm volatile(_ASM_DEC "%0"
15444+ asm volatile(_ASM_DEC "%0\n"
15445+
15446+#ifdef CONFIG_PAX_REFCOUNT
15447+ "jno 0f\n"
15448+ _ASM_INC "%0\n"
15449+ "int $4\n0:\n"
15450+ _ASM_EXTABLE(0b, 0b)
15451+#endif
15452+
15453+ : "+m" (l->a.counter));
15454+}
15455+
15456+static inline void local_dec_unchecked(local_unchecked_t *l)
15457+{
15458+ asm volatile(_ASM_DEC "%0\n"
15459 : "+m" (l->a.counter));
15460 }
15461
15462 static inline void local_add(long i, local_t *l)
15463 {
15464- asm volatile(_ASM_ADD "%1,%0"
15465+ asm volatile(_ASM_ADD "%1,%0\n"
15466+
15467+#ifdef CONFIG_PAX_REFCOUNT
15468+ "jno 0f\n"
15469+ _ASM_SUB "%1,%0\n"
15470+ "int $4\n0:\n"
15471+ _ASM_EXTABLE(0b, 0b)
15472+#endif
15473+
15474+ : "+m" (l->a.counter)
15475+ : "ir" (i));
15476+}
15477+
15478+static inline void local_add_unchecked(long i, local_unchecked_t *l)
15479+{
15480+ asm volatile(_ASM_ADD "%1,%0\n"
15481 : "+m" (l->a.counter)
15482 : "ir" (i));
15483 }
15484
15485 static inline void local_sub(long i, local_t *l)
15486 {
15487- asm volatile(_ASM_SUB "%1,%0"
15488+ asm volatile(_ASM_SUB "%1,%0\n"
15489+
15490+#ifdef CONFIG_PAX_REFCOUNT
15491+ "jno 0f\n"
15492+ _ASM_ADD "%1,%0\n"
15493+ "int $4\n0:\n"
15494+ _ASM_EXTABLE(0b, 0b)
15495+#endif
15496+
15497+ : "+m" (l->a.counter)
15498+ : "ir" (i));
15499+}
15500+
15501+static inline void local_sub_unchecked(long i, local_unchecked_t *l)
15502+{
15503+ asm volatile(_ASM_SUB "%1,%0\n"
15504 : "+m" (l->a.counter)
15505 : "ir" (i));
15506 }
15507@@ -54,7 +118,16 @@ static inline int local_sub_and_test(long i, local_t *l)
15508 {
15509 unsigned char c;
15510
15511- asm volatile(_ASM_SUB "%2,%0; sete %1"
15512+ asm volatile(_ASM_SUB "%2,%0\n"
15513+
15514+#ifdef CONFIG_PAX_REFCOUNT
15515+ "jno 0f\n"
15516+ _ASM_ADD "%2,%0\n"
15517+ "int $4\n0:\n"
15518+ _ASM_EXTABLE(0b, 0b)
15519+#endif
15520+
15521+ "sete %1\n"
15522 : "+m" (l->a.counter), "=qm" (c)
15523 : "ir" (i) : "memory");
15524 return c;
15525@@ -72,7 +145,16 @@ static inline int local_dec_and_test(local_t *l)
15526 {
15527 unsigned char c;
15528
15529- asm volatile(_ASM_DEC "%0; sete %1"
15530+ asm volatile(_ASM_DEC "%0\n"
15531+
15532+#ifdef CONFIG_PAX_REFCOUNT
15533+ "jno 0f\n"
15534+ _ASM_INC "%0\n"
15535+ "int $4\n0:\n"
15536+ _ASM_EXTABLE(0b, 0b)
15537+#endif
15538+
15539+ "sete %1\n"
15540 : "+m" (l->a.counter), "=qm" (c)
15541 : : "memory");
15542 return c != 0;
15543@@ -90,7 +172,16 @@ static inline int local_inc_and_test(local_t *l)
15544 {
15545 unsigned char c;
15546
15547- asm volatile(_ASM_INC "%0; sete %1"
15548+ asm volatile(_ASM_INC "%0\n"
15549+
15550+#ifdef CONFIG_PAX_REFCOUNT
15551+ "jno 0f\n"
15552+ _ASM_DEC "%0\n"
15553+ "int $4\n0:\n"
15554+ _ASM_EXTABLE(0b, 0b)
15555+#endif
15556+
15557+ "sete %1\n"
15558 : "+m" (l->a.counter), "=qm" (c)
15559 : : "memory");
15560 return c != 0;
15561@@ -109,7 +200,16 @@ static inline int local_add_negative(long i, local_t *l)
15562 {
15563 unsigned char c;
15564
15565- asm volatile(_ASM_ADD "%2,%0; sets %1"
15566+ asm volatile(_ASM_ADD "%2,%0\n"
15567+
15568+#ifdef CONFIG_PAX_REFCOUNT
15569+ "jno 0f\n"
15570+ _ASM_SUB "%2,%0\n"
15571+ "int $4\n0:\n"
15572+ _ASM_EXTABLE(0b, 0b)
15573+#endif
15574+
15575+ "sets %1\n"
15576 : "+m" (l->a.counter), "=qm" (c)
15577 : "ir" (i) : "memory");
15578 return c;
15579@@ -125,6 +225,30 @@ static inline int local_add_negative(long i, local_t *l)
15580 static inline long local_add_return(long i, local_t *l)
15581 {
15582 long __i = i;
15583+ asm volatile(_ASM_XADD "%0, %1\n"
15584+
15585+#ifdef CONFIG_PAX_REFCOUNT
15586+ "jno 0f\n"
15587+ _ASM_MOV "%0,%1\n"
15588+ "int $4\n0:\n"
15589+ _ASM_EXTABLE(0b, 0b)
15590+#endif
15591+
15592+ : "+r" (i), "+m" (l->a.counter)
15593+ : : "memory");
15594+ return i + __i;
15595+}
15596+
15597+/**
15598+ * local_add_return_unchecked - add and return
15599+ * @i: integer value to add
15600+ * @l: pointer to type local_unchecked_t
15601+ *
15602+ * Atomically adds @i to @l and returns @i + @l
15603+ */
15604+static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
15605+{
15606+ long __i = i;
15607 asm volatile(_ASM_XADD "%0, %1;"
15608 : "+r" (i), "+m" (l->a.counter)
15609 : : "memory");
15610@@ -141,6 +265,8 @@ static inline long local_sub_return(long i, local_t *l)
15611
15612 #define local_cmpxchg(l, o, n) \
15613 (cmpxchg_local(&((l)->a.counter), (o), (n)))
15614+#define local_cmpxchg_unchecked(l, o, n) \
15615+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
15616 /* Always has a lock prefix */
15617 #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
15618
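
This file shows the PAX_REFCOUNT pattern in its purest form: every arithmetic instruction grows a four-part tail. "jno 0f" skips ahead when the operation did not set the overflow flag; otherwise the inverse instruction undoes the wrap, "int $4" raises the overflow exception (#OF) for PaX's handler, and the _ASM_EXTABLE entry resumes execution at label 0. The local_*_unchecked variants keep the original wrapping semantics for callers that want them. A minimal standalone x86-64 analog of the checked increment (in the kernel, vector 4 is handled by PaX; from userspace "int $4" is blocked by the gate's DPL and arrives as SIGSEGV):

static inline void checked_inc(long *v)
{
	asm volatile("incq %0\n\t"
		     "jno 0f\n\t"	/* OF clear: no overflow      */
		     "decq %0\n\t"	/* undo the wrapped increment */
		     "int $4\n"		/* raise #OF for the handler  */
		     "0:"
		     : "+m" (*v));
}
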
15619diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
15620new file mode 100644
15621index 0000000..2bfd3ba
15622--- /dev/null
15623+++ b/arch/x86/include/asm/mman.h
15624@@ -0,0 +1,15 @@
15625+#ifndef _X86_MMAN_H
15626+#define _X86_MMAN_H
15627+
15628+#include <uapi/asm/mman.h>
15629+
15630+#ifdef __KERNEL__
15631+#ifndef __ASSEMBLY__
15632+#ifdef CONFIG_X86_32
15633+#define arch_mmap_check i386_mmap_check
15634+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
15635+#endif
15636+#endif
15637+#endif
15638+
15639+#endif /* X86_MMAN_H */
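
The new asm/mman.h wires the generic arch_mmap_check() hook to i386_mmap_check() on 32-bit, where (elsewhere in this patch) it enforces the SEGMEXEC/PAGEEXEC address-space constraints before a mapping is created. A sketch of the generic hook plumbing on the consumer side, with do_mmap_checked() as a hypothetical stand-in for the real mmap path:

/* Arches that do not define arch_mmap_check() get a no-op; a non-zero
 * return from the hook aborts the mmap before any VMA work is done. */
#ifndef arch_mmap_check
#define arch_mmap_check(addr, len, flags)	(0)
#endif

static long do_mmap_checked(unsigned long addr, unsigned long len,
			    unsigned long flags)
{
	long error = arch_mmap_check(addr, len, flags);
	if (error)
		return error;
	/* ... proceed to build the mapping ... */
	return 0;
}
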
15640diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
15641index 5f55e69..e20bfb1 100644
15642--- a/arch/x86/include/asm/mmu.h
15643+++ b/arch/x86/include/asm/mmu.h
15644@@ -9,7 +9,7 @@
15645 * we put the segment information here.
15646 */
15647 typedef struct {
15648- void *ldt;
15649+ struct desc_struct *ldt;
15650 int size;
15651
15652 #ifdef CONFIG_X86_64
15653@@ -18,7 +18,19 @@ typedef struct {
15654 #endif
15655
15656 struct mutex lock;
15657- void *vdso;
15658+ unsigned long vdso;
15659+
15660+#ifdef CONFIG_X86_32
15661+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
15662+ unsigned long user_cs_base;
15663+ unsigned long user_cs_limit;
15664+
15665+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
15666+ cpumask_t cpu_user_cs_mask;
15667+#endif
15668+
15669+#endif
15670+#endif
15671 } mm_context_t;
15672
15673 #ifdef CONFIG_SMP
15674diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
15675index cdbf367..4c73c9e 100644
15676--- a/arch/x86/include/asm/mmu_context.h
15677+++ b/arch/x86/include/asm/mmu_context.h
15678@@ -24,6 +24,20 @@ void destroy_context(struct mm_struct *mm);
15679
15680 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
15681 {
15682+
15683+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15684+ if (!(static_cpu_has(X86_FEATURE_PCID))) {
15685+ unsigned int i;
15686+ pgd_t *pgd;
15687+
15688+ pax_open_kernel();
15689+ pgd = get_cpu_pgd(smp_processor_id(), kernel);
15690+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
15691+ set_pgd_batched(pgd+i, native_make_pgd(0));
15692+ pax_close_kernel();
15693+ }
15694+#endif
15695+
15696 #ifdef CONFIG_SMP
15697 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
15698 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
15699@@ -34,16 +48,55 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
15700 struct task_struct *tsk)
15701 {
15702 unsigned cpu = smp_processor_id();
15703+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
15704+ int tlbstate = TLBSTATE_OK;
15705+#endif
15706
15707 if (likely(prev != next)) {
15708 #ifdef CONFIG_SMP
15709+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
15710+ tlbstate = this_cpu_read(cpu_tlbstate.state);
15711+#endif
15712 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
15713 this_cpu_write(cpu_tlbstate.active_mm, next);
15714 #endif
15715 cpumask_set_cpu(cpu, mm_cpumask(next));
15716
15717 /* Re-load page tables */
15718+#ifdef CONFIG_PAX_PER_CPU_PGD
15719+ pax_open_kernel();
15720+
15721+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15722+ if (static_cpu_has(X86_FEATURE_PCID))
15723+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
15724+ else
15725+#endif
15726+
15727+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
15728+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
15729+ pax_close_kernel();
15730+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
15731+
15732+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15733+ if (static_cpu_has(X86_FEATURE_PCID)) {
15734+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
15735+ unsigned long descriptor[2];
15736+ descriptor[0] = PCID_USER;
15737+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
15738+ } else {
15739+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
15740+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
15741+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
15742+ else
15743+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
15744+ }
15745+ } else
15746+#endif
15747+
15748+ load_cr3(get_cpu_pgd(cpu, kernel));
15749+#else
15750 load_cr3(next->pgd);
15751+#endif
15752
15753 /* stop flush ipis for the previous mm */
15754 cpumask_clear_cpu(cpu, mm_cpumask(prev));
15755@@ -53,9 +106,63 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
15756 */
15757 if (unlikely(prev->context.ldt != next->context.ldt))
15758 load_LDT_nolock(&next->context);
15759- }
15760+
15761+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
15762+ if (!(__supported_pte_mask & _PAGE_NX)) {
15763+ smp_mb__before_clear_bit();
15764+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
15765+ smp_mb__after_clear_bit();
15766+ cpu_set(cpu, next->context.cpu_user_cs_mask);
15767+ }
15768+#endif
15769+
15770+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
15771+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
15772+ prev->context.user_cs_limit != next->context.user_cs_limit))
15773+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
15774 #ifdef CONFIG_SMP
15775+ else if (unlikely(tlbstate != TLBSTATE_OK))
15776+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
15777+#endif
15778+#endif
15779+
15780+ }
15781 else {
15782+
15783+#ifdef CONFIG_PAX_PER_CPU_PGD
15784+ pax_open_kernel();
15785+
15786+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15787+ if (static_cpu_has(X86_FEATURE_PCID))
15788+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
15789+ else
15790+#endif
15791+
15792+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
15793+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
15794+ pax_close_kernel();
15795+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
15796+
15797+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15798+ if (static_cpu_has(X86_FEATURE_PCID)) {
15799+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
15800+ unsigned long descriptor[2];
15801+ descriptor[0] = PCID_USER;
15802+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
15803+ } else {
15804+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
15805+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
15806+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
15807+ else
15808+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
15809+ }
15810+ } else
15811+#endif
15812+
15813+ load_cr3(get_cpu_pgd(cpu, kernel));
15814+#endif
15815+
15816+#ifdef CONFIG_SMP
15817 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
15818 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
15819
15820@@ -64,11 +171,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
15821 * tlb flush IPI delivery. We must reload CR3
15822 * to make sure to use no freed page tables.
15823 */
15824+
15825+#ifndef CONFIG_PAX_PER_CPU_PGD
15826 load_cr3(next->pgd);
15827+#endif
15828+
15829 load_LDT_nolock(&next->context);
15830+
15831+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
15832+ if (!(__supported_pte_mask & _PAGE_NX))
15833+ cpu_set(cpu, next->context.cpu_user_cs_mask);
15834+#endif
15835+
15836+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
15837+#ifdef CONFIG_PAX_PAGEEXEC
15838+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
15839+#endif
15840+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
15841+#endif
15842+
15843 }
15844+#endif
15845 }
15846-#endif
15847 }
15848
15849 #define activate_mm(prev, next) \
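
The switch_mm() rewrite is the core of PAX_PER_CPU_PGD/UDEREF: each CPU keeps its own kernel/user PGD pair, the incoming mm's user entries are cloned into the per-CPU copy under pax_open_kernel(), and on PCID-capable hardware user translations are tagged PCID_USER so they can be invalidated (or retained, via PCID_NOFLUSH) independently of kernel ones. Where INVPCID exists, a single user context is flushed without a CR3 round trip. The 16-byte memory operand assumed by that inline asm has this layout (per the Intel SDM):

/* INVPCID descriptor: two u64s. desc[0] = PCID (low 12 bits used),
 * desc[1] = linear address, consumed only by type 0 (single-address).
 * Type 1, used above, drops one PCID's non-global translations. */
unsigned long desc[2] = { PCID_USER, 0 };
asm volatile(".byte 0x66,0x0f,0x38,0x82,0x02"	/* invpcid (%rdx),%rax */
	     : : "d" (&desc), "a" (INVPCID_SINGLE_CONTEXT) : "memory");
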
15850diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
15851index e3b7819..b257c64 100644
15852--- a/arch/x86/include/asm/module.h
15853+++ b/arch/x86/include/asm/module.h
15854@@ -5,6 +5,7 @@
15855
15856 #ifdef CONFIG_X86_64
15857 /* X86_64 does not define MODULE_PROC_FAMILY */
15858+#define MODULE_PROC_FAMILY ""
15859 #elif defined CONFIG_M486
15860 #define MODULE_PROC_FAMILY "486 "
15861 #elif defined CONFIG_M586
15862@@ -57,8 +58,20 @@
15863 #error unknown processor family
15864 #endif
15865
15866-#ifdef CONFIG_X86_32
15867-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
15868+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
15869+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
15870+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
15871+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
15872+#else
15873+#define MODULE_PAX_KERNEXEC ""
15874 #endif
15875
15876+#ifdef CONFIG_PAX_MEMORY_UDEREF
15877+#define MODULE_PAX_UDEREF "UDEREF "
15878+#else
15879+#define MODULE_PAX_UDEREF ""
15880+#endif
15881+
15882+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
15883+
15884 #endif /* _ASM_X86_MODULE_H */
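
MODULE_ARCH_VERMAGIC now also encodes the active KERNEXEC method and UDEREF state (and x86-64 gains an empty MODULE_PROC_FAMILY so the concatenation works there too). Because vermagic strings must match exactly at load time, a module built without the same PaX options is refused instead of being loaded into an incompatibly instrumented kernel. For example, the expansion on x86-64 with KERNEXEC(BTS) and UDEREF enabled:

/* MODULE_PROC_FAMILY  -> ""                       */
/* MODULE_PAX_KERNEXEC -> "KERNEXEC_BTS "          */
/* MODULE_PAX_UDEREF   -> "UDEREF "                */
/* vermagic fragment   -> "KERNEXEC_BTS UDEREF "   */
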
15885diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
15886index 86f9301..b365cda 100644
15887--- a/arch/x86/include/asm/nmi.h
15888+++ b/arch/x86/include/asm/nmi.h
15889@@ -40,11 +40,11 @@ struct nmiaction {
15890 nmi_handler_t handler;
15891 unsigned long flags;
15892 const char *name;
15893-};
15894+} __do_const;
15895
15896 #define register_nmi_handler(t, fn, fg, n, init...) \
15897 ({ \
15898- static struct nmiaction init fn##_na = { \
15899+ static const struct nmiaction init fn##_na = { \
15900 .handler = (fn), \
15901 .name = (n), \
15902 .flags = (fg), \
15903@@ -52,7 +52,7 @@ struct nmiaction {
15904 __register_nmi_handler((t), &fn##_na); \
15905 })
15906
15907-int __register_nmi_handler(unsigned int, struct nmiaction *);
15908+int __register_nmi_handler(unsigned int, const struct nmiaction *);
15909
15910 void unregister_nmi_handler(unsigned int, const char *);
15911
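
__do_const is a grsecurity constify-plugin annotation: structs so marked (typically ones holding only function pointers and initialized once) have all instances forced into read-only memory, closing off handler-pointer overwrites, which is why register_nmi_handler() and __register_nmi_handler() switch to const struct nmiaction. A registration site then effectively compiles to something like this sketch (the "watchdog" names are illustrative, not from this patch):

/* The descriptor is const and lands in .rodata. */
static const struct nmiaction watchdog_na = {
	.handler = watchdog_nmi_handler,	/* hypothetical handler */
	.name    = "watchdog",
	.flags   = NMI_FLAG_FIRST,
};
/* __register_nmi_handler(NMI_LOCAL, &watchdog_na); */
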
15912diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
15913index c878924..21f4889 100644
15914--- a/arch/x86/include/asm/page.h
15915+++ b/arch/x86/include/asm/page.h
15916@@ -52,6 +52,7 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
15917 __phys_addr_symbol(__phys_reloc_hide((unsigned long)(x)))
15918
15919 #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
15920+#define __early_va(x) ((void *)((unsigned long)(x)+__START_KERNEL_map - phys_base))
15921
15922 #define __boot_va(x) __va(x)
15923 #define __boot_pa(x) __pa(x)
15924diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
15925index 0f1ddee..e2fc3d1 100644
15926--- a/arch/x86/include/asm/page_64.h
15927+++ b/arch/x86/include/asm/page_64.h
15928@@ -7,9 +7,9 @@
15929
15930 /* duplicated to the one in bootmem.h */
15931 extern unsigned long max_pfn;
15932-extern unsigned long phys_base;
15933+extern const unsigned long phys_base;
15934
15935-static inline unsigned long __phys_addr_nodebug(unsigned long x)
15936+static inline unsigned long __intentional_overflow(-1) __phys_addr_nodebug(unsigned long x)
15937 {
15938 unsigned long y = x - __START_KERNEL_map;
15939
15940diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
15941index cfdc9ee..3f7b5d6 100644
15942--- a/arch/x86/include/asm/paravirt.h
15943+++ b/arch/x86/include/asm/paravirt.h
15944@@ -560,7 +560,7 @@ static inline pmd_t __pmd(pmdval_t val)
15945 return (pmd_t) { ret };
15946 }
15947
15948-static inline pmdval_t pmd_val(pmd_t pmd)
15949+static inline __intentional_overflow(-1) pmdval_t pmd_val(pmd_t pmd)
15950 {
15951 pmdval_t ret;
15952
15953@@ -626,6 +626,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
15954 val);
15955 }
15956
15957+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
15958+{
15959+ pgdval_t val = native_pgd_val(pgd);
15960+
15961+ if (sizeof(pgdval_t) > sizeof(long))
15962+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
15963+ val, (u64)val >> 32);
15964+ else
15965+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
15966+ val);
15967+}
15968+
15969 static inline void pgd_clear(pgd_t *pgdp)
15970 {
15971 set_pgd(pgdp, __pgd(0));
15972@@ -710,6 +722,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
15973 pv_mmu_ops.set_fixmap(idx, phys, flags);
15974 }
15975
15976+#ifdef CONFIG_PAX_KERNEXEC
15977+static inline unsigned long pax_open_kernel(void)
15978+{
15979+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
15980+}
15981+
15982+static inline unsigned long pax_close_kernel(void)
15983+{
15984+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
15985+}
15986+#else
15987+static inline unsigned long pax_open_kernel(void) { return 0; }
15988+static inline unsigned long pax_close_kernel(void) { return 0; }
15989+#endif
15990+
15991 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
15992
15993 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
15994@@ -926,7 +953,7 @@ extern void default_banner(void);
15995
15996 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
15997 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
15998-#define PARA_INDIRECT(addr) *%cs:addr
15999+#define PARA_INDIRECT(addr) *%ss:addr
16000 #endif
16001
16002 #define INTERRUPT_RETURN \
16003@@ -1001,6 +1028,21 @@ extern void default_banner(void);
16004 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
16005 CLBR_NONE, \
16006 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
16007+
16008+#define GET_CR0_INTO_RDI \
16009+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
16010+ mov %rax,%rdi
16011+
16012+#define SET_RDI_INTO_CR0 \
16013+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
16014+
16015+#define GET_CR3_INTO_RDI \
16016+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
16017+ mov %rax,%rdi
16018+
16019+#define SET_RDI_INTO_CR3 \
16020+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
16021+
16022 #endif /* CONFIG_X86_32 */
16023
16024 #endif /* __ASSEMBLY__ */
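
Two additions ride through the paravirt layer here: set_pgd_batched() becomes a pv_mmu_ops hook (used by the per-CPU-PGD code when rewriting a run of PGD slots), and pax_open_kernel()/pax_close_kernel() become PVOPs because under a hypervisor CR0.WP cannot simply be toggled with a raw write_cr0(). The GET_CR0_INTO_RDI-style macros give 64-bit assembly the same indirection. Natively, the new hooks would be filled with the implementations defined later in this patch, roughly this initializer fragment for pv_mmu_ops (a sketch; the .pax_* members exist only under CONFIG_PAX_KERNEXEC):

	.set_pgd_batched  = native_set_pgd_batched,
#ifdef CONFIG_PAX_KERNEXEC
	.pax_open_kernel  = native_pax_open_kernel,
	.pax_close_kernel = native_pax_close_kernel,
#endif
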
16025diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
16026index 0db1fca..52310cc 100644
16027--- a/arch/x86/include/asm/paravirt_types.h
16028+++ b/arch/x86/include/asm/paravirt_types.h
16029@@ -84,7 +84,7 @@ struct pv_init_ops {
16030 */
16031 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
16032 unsigned long addr, unsigned len);
16033-};
16034+} __no_const;
16035
16036
16037 struct pv_lazy_ops {
16038@@ -98,7 +98,7 @@ struct pv_time_ops {
16039 unsigned long long (*sched_clock)(void);
16040 unsigned long long (*steal_clock)(int cpu);
16041 unsigned long (*get_tsc_khz)(void);
16042-};
16043+} __no_const;
16044
16045 struct pv_cpu_ops {
16046 /* hooks for various privileged instructions */
16047@@ -192,7 +192,7 @@ struct pv_cpu_ops {
16048
16049 void (*start_context_switch)(struct task_struct *prev);
16050 void (*end_context_switch)(struct task_struct *next);
16051-};
16052+} __no_const;
16053
16054 struct pv_irq_ops {
16055 /*
16056@@ -223,7 +223,7 @@ struct pv_apic_ops {
16057 unsigned long start_eip,
16058 unsigned long start_esp);
16059 #endif
16060-};
16061+} __no_const;
16062
16063 struct pv_mmu_ops {
16064 unsigned long (*read_cr2)(void);
16065@@ -313,6 +313,7 @@ struct pv_mmu_ops {
16066 struct paravirt_callee_save make_pud;
16067
16068 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
16069+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
16070 #endif /* PAGETABLE_LEVELS == 4 */
16071 #endif /* PAGETABLE_LEVELS >= 3 */
16072
16073@@ -324,6 +325,12 @@ struct pv_mmu_ops {
16074 an mfn. We can tell which is which from the index. */
16075 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
16076 phys_addr_t phys, pgprot_t flags);
16077+
16078+#ifdef CONFIG_PAX_KERNEXEC
16079+ unsigned long (*pax_open_kernel)(void);
16080+ unsigned long (*pax_close_kernel)(void);
16081+#endif
16082+
16083 };
16084
16085 struct arch_spinlock;
16086@@ -334,7 +341,7 @@ struct pv_lock_ops {
16087 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
16088 int (*spin_trylock)(struct arch_spinlock *lock);
16089 void (*spin_unlock)(struct arch_spinlock *lock);
16090-};
16091+} __no_const;
16092
16093 /* This contains all the paravirt structures: we get a convenient
16094 * number for each function using the offset which we use to indicate
16095diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
16096index b4389a4..7024269 100644
16097--- a/arch/x86/include/asm/pgalloc.h
16098+++ b/arch/x86/include/asm/pgalloc.h
16099@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
16100 pmd_t *pmd, pte_t *pte)
16101 {
16102 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
16103+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
16104+}
16105+
16106+static inline void pmd_populate_user(struct mm_struct *mm,
16107+ pmd_t *pmd, pte_t *pte)
16108+{
16109+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
16110 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
16111 }
16112
16113@@ -99,12 +106,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
16114
16115 #ifdef CONFIG_X86_PAE
16116 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
16117+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
16118+{
16119+ pud_populate(mm, pudp, pmd);
16120+}
16121 #else /* !CONFIG_X86_PAE */
16122 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
16123 {
16124 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
16125 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
16126 }
16127+
16128+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
16129+{
16130+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
16131+ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
16132+}
16133 #endif /* CONFIG_X86_PAE */
16134
16135 #if PAGETABLE_LEVELS > 3
16136@@ -114,6 +131,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
16137 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
16138 }
16139
16140+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
16141+{
16142+ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
16143+ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
16144+}
16145+
16146 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
16147 {
16148 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
16149diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
16150index f2b489c..4f7e2e5 100644
16151--- a/arch/x86/include/asm/pgtable-2level.h
16152+++ b/arch/x86/include/asm/pgtable-2level.h
16153@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
16154
16155 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
16156 {
16157+ pax_open_kernel();
16158 *pmdp = pmd;
16159+ pax_close_kernel();
16160 }
16161
16162 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
16163diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
16164index 4cc9f2b..5fd9226 100644
16165--- a/arch/x86/include/asm/pgtable-3level.h
16166+++ b/arch/x86/include/asm/pgtable-3level.h
16167@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
16168
16169 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
16170 {
16171+ pax_open_kernel();
16172 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
16173+ pax_close_kernel();
16174 }
16175
16176 static inline void native_set_pud(pud_t *pudp, pud_t pud)
16177 {
16178+ pax_open_kernel();
16179 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
16180+ pax_close_kernel();
16181 }
16182
16183 /*
16184diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
16185index 1e67223..92a9585 100644
16186--- a/arch/x86/include/asm/pgtable.h
16187+++ b/arch/x86/include/asm/pgtable.h
16188@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
16189
16190 #ifndef __PAGETABLE_PUD_FOLDED
16191 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
16192+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
16193 #define pgd_clear(pgd) native_pgd_clear(pgd)
16194 #endif
16195
16196@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
16197
16198 #define arch_end_context_switch(prev) do {} while(0)
16199
16200+#define pax_open_kernel() native_pax_open_kernel()
16201+#define pax_close_kernel() native_pax_close_kernel()
16202 #endif /* CONFIG_PARAVIRT */
16203
16204+#define __HAVE_ARCH_PAX_OPEN_KERNEL
16205+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
16206+
16207+#ifdef CONFIG_PAX_KERNEXEC
16208+static inline unsigned long native_pax_open_kernel(void)
16209+{
16210+ unsigned long cr0;
16211+
16212+ preempt_disable();
16213+ barrier();
16214+ cr0 = read_cr0() ^ X86_CR0_WP;
16215+ BUG_ON(cr0 & X86_CR0_WP);
16216+ write_cr0(cr0);
16217+ return cr0 ^ X86_CR0_WP;
16218+}
16219+
16220+static inline unsigned long native_pax_close_kernel(void)
16221+{
16222+ unsigned long cr0;
16223+
16224+ cr0 = read_cr0() ^ X86_CR0_WP;
16225+ BUG_ON(!(cr0 & X86_CR0_WP));
16226+ write_cr0(cr0);
16227+ barrier();
16228+ preempt_enable_no_resched();
16229+ return cr0 ^ X86_CR0_WP;
16230+}
16231+#else
16232+static inline unsigned long native_pax_open_kernel(void) { return 0; }
16233+static inline unsigned long native_pax_close_kernel(void) { return 0; }
16234+#endif
16235+
16236 /*
16237 * The following only work if pte_present() is true.
16238 * Undefined behaviour if not..
16239 */
16240+static inline int pte_user(pte_t pte)
16241+{
16242+ return pte_val(pte) & _PAGE_USER;
16243+}
16244+
16245 static inline int pte_dirty(pte_t pte)
16246 {
16247 return pte_flags(pte) & _PAGE_DIRTY;
16248@@ -147,6 +187,11 @@ static inline unsigned long pud_pfn(pud_t pud)
16249 return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
16250 }
16251
16252+static inline unsigned long pgd_pfn(pgd_t pgd)
16253+{
16254+ return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
16255+}
16256+
16257 #define pte_page(pte) pfn_to_page(pte_pfn(pte))
16258
16259 static inline int pmd_large(pmd_t pte)
16260@@ -200,9 +245,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
16261 return pte_clear_flags(pte, _PAGE_RW);
16262 }
16263
16264+static inline pte_t pte_mkread(pte_t pte)
16265+{
16266+ return __pte(pte_val(pte) | _PAGE_USER);
16267+}
16268+
16269 static inline pte_t pte_mkexec(pte_t pte)
16270 {
16271- return pte_clear_flags(pte, _PAGE_NX);
16272+#ifdef CONFIG_X86_PAE
16273+ if (__supported_pte_mask & _PAGE_NX)
16274+ return pte_clear_flags(pte, _PAGE_NX);
16275+ else
16276+#endif
16277+ return pte_set_flags(pte, _PAGE_USER);
16278+}
16279+
16280+static inline pte_t pte_exprotect(pte_t pte)
16281+{
16282+#ifdef CONFIG_X86_PAE
16283+ if (__supported_pte_mask & _PAGE_NX)
16284+ return pte_set_flags(pte, _PAGE_NX);
16285+ else
16286+#endif
16287+ return pte_clear_flags(pte, _PAGE_USER);
16288 }
16289
16290 static inline pte_t pte_mkdirty(pte_t pte)
16291@@ -394,6 +459,16 @@ pte_t *populate_extra_pte(unsigned long vaddr);
16292 #endif
16293
16294 #ifndef __ASSEMBLY__
16295+
16296+#ifdef CONFIG_PAX_PER_CPU_PGD
16297+extern pgd_t cpu_pgd[NR_CPUS][2][PTRS_PER_PGD];
16298+enum cpu_pgd_type {kernel = 0, user = 1};
16299+static inline pgd_t *get_cpu_pgd(unsigned int cpu, enum cpu_pgd_type type)
16300+{
16301+ return cpu_pgd[cpu][type];
16302+}
16303+#endif
16304+
16305 #include <linux/mm_types.h>
16306 #include <linux/log2.h>
16307
16308@@ -529,7 +604,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
16309 * Currently stuck as a macro due to indirect forward reference to
16310 * linux/mmzone.h's __section_mem_map_addr() definition:
16311 */
16312-#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
16313+#define pud_page(pud) pfn_to_page((pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT)
16314
16315 /* Find an entry in the second-level page table.. */
16316 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
16317@@ -569,7 +644,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
16318 * Currently stuck as a macro due to indirect forward reference to
16319 * linux/mmzone.h's __section_mem_map_addr() definition:
16320 */
16321-#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
16322+#define pgd_page(pgd) pfn_to_page((pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT)
16323
16324 /* to find an entry in a page-table-directory. */
16325 static inline unsigned long pud_index(unsigned long address)
16326@@ -584,7 +659,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
16327
16328 static inline int pgd_bad(pgd_t pgd)
16329 {
16330- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
16331+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
16332 }
16333
16334 static inline int pgd_none(pgd_t pgd)
16335@@ -607,7 +682,12 @@ static inline int pgd_none(pgd_t pgd)
16336 * pgd_offset() returns a (pgd_t *)
16337 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
16338 */
16339-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
16340+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
16341+
16342+#ifdef CONFIG_PAX_PER_CPU_PGD
16343+#define pgd_offset_cpu(cpu, type, address) (get_cpu_pgd(cpu, type) + pgd_index(address))
16344+#endif
16345+
16346 /*
16347 * a shortcut which implies the use of the kernel's pgd, instead
16348 * of a process's
16349@@ -618,6 +698,23 @@ static inline int pgd_none(pgd_t pgd)
16350 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
16351 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
16352
16353+#ifdef CONFIG_X86_32
16354+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
16355+#else
16356+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
16357+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
16358+
16359+#ifdef CONFIG_PAX_MEMORY_UDEREF
16360+#ifdef __ASSEMBLY__
16361+#define pax_user_shadow_base pax_user_shadow_base(%rip)
16362+#else
16363+extern unsigned long pax_user_shadow_base;
16364+extern pgdval_t clone_pgd_mask;
16365+#endif
16366+#endif
16367+
16368+#endif
16369+
16370 #ifndef __ASSEMBLY__
16371
16372 extern int direct_gbpages;
16373@@ -784,11 +881,24 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
16374 * dst and src can be on the same page, but the range must not overlap,
16375 * and must not cross a page boundary.
16376 */
16377-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
16378+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
16379 {
16380- memcpy(dst, src, count * sizeof(pgd_t));
16381+ pax_open_kernel();
16382+ while (count--)
16383+ *dst++ = *src++;
16384+ pax_close_kernel();
16385 }
16386
16387+#ifdef CONFIG_PAX_PER_CPU_PGD
16388+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
16389+#endif
16390+
16391+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
16392+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
16393+#else
16394+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
16395+#endif
16396+
16397 #define PTE_SHIFT ilog2(PTRS_PER_PTE)
16398 static inline int page_level_shift(enum pg_level level)
16399 {
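
native_pax_open_kernel()/native_pax_close_kernel() implement KERNEXEC's write window: disable preemption, XOR CR0.WP off (the BUG_ON catches unbalanced nesting), perform the privileged write, then restore WP and re-enable preemption. Everything else in this patch that pokes read-only page-table or kernel data uses the same bracket, including the rewritten clone_pgd_range(), which now copies entry by entry because the destination per-CPU PGD is itself mapped read-only. The canonical usage pattern, as seen throughout the patch:

pax_open_kernel();	/* CR0.WP = 0, preemption off */
*pmdp = pmd;		/* write a page-table page that is mapped RO */
pax_close_kernel();	/* CR0.WP = 1, preemption back on */
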
16400diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
16401index 9ee3221..b979c6b 100644
16402--- a/arch/x86/include/asm/pgtable_32.h
16403+++ b/arch/x86/include/asm/pgtable_32.h
16404@@ -25,9 +25,6 @@
16405 struct mm_struct;
16406 struct vm_area_struct;
16407
16408-extern pgd_t swapper_pg_dir[1024];
16409-extern pgd_t initial_page_table[1024];
16410-
16411 static inline void pgtable_cache_init(void) { }
16412 static inline void check_pgt_cache(void) { }
16413 void paging_init(void);
16414@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
16415 # include <asm/pgtable-2level.h>
16416 #endif
16417
16418+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
16419+extern pgd_t initial_page_table[PTRS_PER_PGD];
16420+#ifdef CONFIG_X86_PAE
16421+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
16422+#endif
16423+
16424 #if defined(CONFIG_HIGHPTE)
16425 #define pte_offset_map(dir, address) \
16426 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
16427@@ -62,12 +65,17 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
16428 /* Clear a kernel PTE and flush it from the TLB */
16429 #define kpte_clear_flush(ptep, vaddr) \
16430 do { \
16431+ pax_open_kernel(); \
16432 pte_clear(&init_mm, (vaddr), (ptep)); \
16433+ pax_close_kernel(); \
16434 __flush_tlb_one((vaddr)); \
16435 } while (0)
16436
16437 #endif /* !__ASSEMBLY__ */
16438
16439+#define HAVE_ARCH_UNMAPPED_AREA
16440+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
16441+
16442 /*
16443 * kern_addr_valid() is (1) for FLATMEM and (0) for
16444 * SPARSEMEM and DISCONTIGMEM
16445diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
16446index ed5903b..c7fe163 100644
16447--- a/arch/x86/include/asm/pgtable_32_types.h
16448+++ b/arch/x86/include/asm/pgtable_32_types.h
16449@@ -8,7 +8,7 @@
16450 */
16451 #ifdef CONFIG_X86_PAE
16452 # include <asm/pgtable-3level_types.h>
16453-# define PMD_SIZE (1UL << PMD_SHIFT)
16454+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
16455 # define PMD_MASK (~(PMD_SIZE - 1))
16456 #else
16457 # include <asm/pgtable-2level_types.h>
16458@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
16459 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
16460 #endif
16461
16462+#ifdef CONFIG_PAX_KERNEXEC
16463+#ifndef __ASSEMBLY__
16464+extern unsigned char MODULES_EXEC_VADDR[];
16465+extern unsigned char MODULES_EXEC_END[];
16466+#endif
16467+#include <asm/boot.h>
16468+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
16469+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
16470+#else
16471+#define ktla_ktva(addr) (addr)
16472+#define ktva_ktla(addr) (addr)
16473+#endif
16474+
16475 #define MODULES_VADDR VMALLOC_START
16476 #define MODULES_END VMALLOC_END
16477 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
16478diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
16479index e22c1db..23a625a 100644
16480--- a/arch/x86/include/asm/pgtable_64.h
16481+++ b/arch/x86/include/asm/pgtable_64.h
16482@@ -16,10 +16,14 @@
16483
16484 extern pud_t level3_kernel_pgt[512];
16485 extern pud_t level3_ident_pgt[512];
16486+extern pud_t level3_vmalloc_start_pgt[512];
16487+extern pud_t level3_vmalloc_end_pgt[512];
16488+extern pud_t level3_vmemmap_pgt[512];
16489+extern pud_t level2_vmemmap_pgt[512];
16490 extern pmd_t level2_kernel_pgt[512];
16491 extern pmd_t level2_fixmap_pgt[512];
16492-extern pmd_t level2_ident_pgt[512];
16493-extern pgd_t init_level4_pgt[];
16494+extern pmd_t level2_ident_pgt[512*2];
16495+extern pgd_t init_level4_pgt[512];
16496
16497 #define swapper_pg_dir init_level4_pgt
16498
16499@@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
16500
16501 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
16502 {
16503+ pax_open_kernel();
16504 *pmdp = pmd;
16505+ pax_close_kernel();
16506 }
16507
16508 static inline void native_pmd_clear(pmd_t *pmd)
16509@@ -97,7 +103,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
16510
16511 static inline void native_set_pud(pud_t *pudp, pud_t pud)
16512 {
16513+ pax_open_kernel();
16514 *pudp = pud;
16515+ pax_close_kernel();
16516 }
16517
16518 static inline void native_pud_clear(pud_t *pud)
16519@@ -107,6 +115,13 @@ static inline void native_pud_clear(pud_t *pud)
16520
16521 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
16522 {
16523+ pax_open_kernel();
16524+ *pgdp = pgd;
16525+ pax_close_kernel();
16526+}
16527+
16528+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
16529+{
16530 *pgdp = pgd;
16531 }
16532
16533diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
16534index 2d88344..4679fc3 100644
16535--- a/arch/x86/include/asm/pgtable_64_types.h
16536+++ b/arch/x86/include/asm/pgtable_64_types.h
16537@@ -61,6 +61,11 @@ typedef struct { pteval_t pte; } pte_t;
16538 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
16539 #define MODULES_END _AC(0xffffffffff000000, UL)
16540 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
16541+#define MODULES_EXEC_VADDR MODULES_VADDR
16542+#define MODULES_EXEC_END MODULES_END
16543+
16544+#define ktla_ktva(addr) (addr)
16545+#define ktva_ktla(addr) (addr)
16546
16547 #define EARLY_DYNAMIC_PAGE_TABLES 64
16548
16549diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
16550index e642300..0ef8f31 100644
16551--- a/arch/x86/include/asm/pgtable_types.h
16552+++ b/arch/x86/include/asm/pgtable_types.h
16553@@ -16,13 +16,12 @@
16554 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
16555 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
16556 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
16557-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
16558+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
16559 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
16560 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
16561 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
16562-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
16563-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
16564-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
16565+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
16566+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
16567 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
16568
16569 /* If _PAGE_BIT_PRESENT is clear, we use these: */
16570@@ -40,7 +39,6 @@
16571 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
16572 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
16573 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
16574-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
16575 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
16576 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
16577 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
16578@@ -57,8 +55,10 @@
16579
16580 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
16581 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
16582-#else
16583+#elif defined(CONFIG_KMEMCHECK)
16584 #define _PAGE_NX (_AT(pteval_t, 0))
16585+#else
16586+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
16587 #endif
16588
16589 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
16590@@ -116,6 +116,9 @@
16591 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
16592 _PAGE_ACCESSED)
16593
16594+#define PAGE_READONLY_NOEXEC PAGE_READONLY
16595+#define PAGE_SHARED_NOEXEC PAGE_SHARED
16596+
16597 #define __PAGE_KERNEL_EXEC \
16598 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
16599 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
16600@@ -126,7 +129,7 @@
16601 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
16602 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
16603 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
16604-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
16605+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
16606 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
16607 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
16608 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
16609@@ -188,8 +191,8 @@
16610 * bits are combined, this will allow user to access the high address mapped
16611 * VDSO in the presence of CONFIG_COMPAT_VDSO
16612 */
16613-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
16614-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
16615+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
16616+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
16617 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
16618 #endif
16619
16620@@ -227,7 +230,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
16621 {
16622 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
16623 }
16624+#endif
16625
16626+#if PAGETABLE_LEVELS == 3
16627+#include <asm-generic/pgtable-nopud.h>
16628+#endif
16629+
16630+#if PAGETABLE_LEVELS == 2
16631+#include <asm-generic/pgtable-nopmd.h>
16632+#endif
16633+
16634+#ifndef __ASSEMBLY__
16635 #if PAGETABLE_LEVELS > 3
16636 typedef struct { pudval_t pud; } pud_t;
16637
16638@@ -241,8 +254,6 @@ static inline pudval_t native_pud_val(pud_t pud)
16639 return pud.pud;
16640 }
16641 #else
16642-#include <asm-generic/pgtable-nopud.h>
16643-
16644 static inline pudval_t native_pud_val(pud_t pud)
16645 {
16646 return native_pgd_val(pud.pgd);
16647@@ -262,8 +273,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
16648 return pmd.pmd;
16649 }
16650 #else
16651-#include <asm-generic/pgtable-nopmd.h>
16652-
16653 static inline pmdval_t native_pmd_val(pmd_t pmd)
16654 {
16655 return native_pgd_val(pmd.pud.pgd);
16656@@ -303,7 +312,6 @@ typedef struct page *pgtable_t;
16657
16658 extern pteval_t __supported_pte_mask;
16659 extern void set_nx(void);
16660-extern int nx_enabled;
16661
16662 #define pgprot_writecombine pgprot_writecombine
16663 extern pgprot_t pgprot_writecombine(pgprot_t prot);
16664diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
16665index 22224b3..b3a2f90 100644
16666--- a/arch/x86/include/asm/processor.h
16667+++ b/arch/x86/include/asm/processor.h
16668@@ -198,9 +198,21 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
16669 : "memory");
16670 }
16671
16672+/* invpcid (%rdx),%rax */
16673+#define __ASM_INVPCID ".byte 0x66,0x0f,0x38,0x82,0x02"
16674+
16675+#define INVPCID_SINGLE_ADDRESS 0UL
16676+#define INVPCID_SINGLE_CONTEXT 1UL
16677+#define INVPCID_ALL_GLOBAL 2UL
16678+#define INVPCID_ALL_MONGLOBAL 3UL
16679+
16680+#define PCID_KERNEL 0UL
16681+#define PCID_USER 1UL
16682+#define PCID_NOFLUSH (1UL << 63)
16683+
16684 static inline void load_cr3(pgd_t *pgdir)
16685 {
16686- write_cr3(__pa(pgdir));
16687+ write_cr3(__pa(pgdir) | PCID_KERNEL);
16688 }
16689
16690 #ifdef CONFIG_X86_32
16691@@ -282,7 +294,7 @@ struct tss_struct {
16692
16693 } ____cacheline_aligned;
16694
16695-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
16696+extern struct tss_struct init_tss[NR_CPUS];
16697
16698 /*
16699 * Save the original ist values for checking stack pointers during debugging
16700@@ -452,6 +464,7 @@ struct thread_struct {
16701 unsigned short ds;
16702 unsigned short fsindex;
16703 unsigned short gsindex;
16704+ unsigned short ss;
16705 #endif
16706 #ifdef CONFIG_X86_32
16707 unsigned long ip;
16708@@ -552,29 +565,8 @@ static inline void load_sp0(struct tss_struct *tss,
16709 extern unsigned long mmu_cr4_features;
16710 extern u32 *trampoline_cr4_features;
16711
16712-static inline void set_in_cr4(unsigned long mask)
16713-{
16714- unsigned long cr4;
16715-
16716- mmu_cr4_features |= mask;
16717- if (trampoline_cr4_features)
16718- *trampoline_cr4_features = mmu_cr4_features;
16719- cr4 = read_cr4();
16720- cr4 |= mask;
16721- write_cr4(cr4);
16722-}
16723-
16724-static inline void clear_in_cr4(unsigned long mask)
16725-{
16726- unsigned long cr4;
16727-
16728- mmu_cr4_features &= ~mask;
16729- if (trampoline_cr4_features)
16730- *trampoline_cr4_features = mmu_cr4_features;
16731- cr4 = read_cr4();
16732- cr4 &= ~mask;
16733- write_cr4(cr4);
16734-}
16735+extern void set_in_cr4(unsigned long mask);
16736+extern void clear_in_cr4(unsigned long mask);
16737
16738 typedef struct {
16739 unsigned long seg;
16740@@ -823,11 +815,18 @@ static inline void spin_lock_prefetch(const void *x)
16741 */
16742 #define TASK_SIZE PAGE_OFFSET
16743 #define TASK_SIZE_MAX TASK_SIZE
16744+
16745+#ifdef CONFIG_PAX_SEGMEXEC
16746+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
16747+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
16748+#else
16749 #define STACK_TOP TASK_SIZE
16750-#define STACK_TOP_MAX STACK_TOP
16751+#endif
16752+
16753+#define STACK_TOP_MAX TASK_SIZE
16754
16755 #define INIT_THREAD { \
16756- .sp0 = sizeof(init_stack) + (long)&init_stack, \
16757+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
16758 .vm86_info = NULL, \
16759 .sysenter_cs = __KERNEL_CS, \
16760 .io_bitmap_ptr = NULL, \
16761@@ -841,7 +840,7 @@ static inline void spin_lock_prefetch(const void *x)
16762 */
16763 #define INIT_TSS { \
16764 .x86_tss = { \
16765- .sp0 = sizeof(init_stack) + (long)&init_stack, \
16766+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
16767 .ss0 = __KERNEL_DS, \
16768 .ss1 = __KERNEL_CS, \
16769 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
16770@@ -852,11 +851,7 @@ static inline void spin_lock_prefetch(const void *x)
16771 extern unsigned long thread_saved_pc(struct task_struct *tsk);
16772
16773 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
16774-#define KSTK_TOP(info) \
16775-({ \
16776- unsigned long *__ptr = (unsigned long *)(info); \
16777- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
16778-})
16779+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
16780
16781 /*
16782 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
16783@@ -871,7 +866,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
16784 #define task_pt_regs(task) \
16785 ({ \
16786 struct pt_regs *__regs__; \
16787- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
16788+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
16789 __regs__ - 1; \
16790 })
16791
16792@@ -881,13 +876,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
16793 /*
16794 * User space process size. 47bits minus one guard page.
16795 */
16796-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
16797+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
16798
16799 /* This decides where the kernel will search for a free chunk of vm
16800 * space during mmap's.
16801 */
16802 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
16803- 0xc0000000 : 0xFFFFe000)
16804+ 0xc0000000 : 0xFFFFf000)
16805
16806 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
16807 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
16808@@ -898,11 +893,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
16809 #define STACK_TOP_MAX TASK_SIZE_MAX
16810
16811 #define INIT_THREAD { \
16812- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
16813+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
16814 }
16815
16816 #define INIT_TSS { \
16817- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
16818+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
16819 }
16820
16821 /*
16822@@ -930,6 +925,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
16823 */
16824 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
16825
16826+#ifdef CONFIG_PAX_SEGMEXEC
16827+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
16828+#endif
16829+
16830 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
16831
16832 /* Get/set a process' ability to use the timestamp counter instruction */
16833@@ -942,7 +941,8 @@ extern int set_tsc_mode(unsigned int val);
16834 extern u16 amd_get_nb_id(int cpu);
16835
16836 struct aperfmperf {
16837- u64 aperf, mperf;
16838+ u64 aperf __intentional_overflow(0);
16839+ u64 mperf __intentional_overflow(0);
16840 };
16841
16842 static inline void get_aperfmperf(struct aperfmperf *am)
16843@@ -970,7 +970,7 @@ unsigned long calc_aperfmperf_ratio(struct aperfmperf *old,
16844 return ratio;
16845 }
16846
16847-extern unsigned long arch_align_stack(unsigned long sp);
16848+#define arch_align_stack(x) ((x) & ~0xfUL)
16849 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
16850
16851 void default_idle(void);
16852@@ -980,6 +980,6 @@ bool xen_set_default_idle(void);
16853 #define xen_set_default_idle 0
16854 #endif
16855
16856-void stop_this_cpu(void *dummy);
16857+void stop_this_cpu(void *dummy) __noreturn;
16858
16859 #endif /* _ASM_X86_PROCESSOR_H */
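
The PCID plumbing lands in processor.h: with process-context IDs, CR3 bits 11:0 carry the context ID and setting bit 63 (PCID_NOFLUSH) makes the CR3 write keep existing translations instead of flushing. load_cr3() now always tags the kernel PGD with PCID_KERNEL (0), and the UDEREF code pairs it with PCID_USER (1) for the shadow user PGD. The three CR3 values juggled in switch_mm() earlier in this patch are therefore:

/* CR3 values under UDEREF+PCID (sketch): */
unsigned long cr3_user   = __pa(user_pgd)   | PCID_USER;   /* tag 1 */
unsigned long cr3_kernel = __pa(kernel_pgd) | PCID_KERNEL; /* tag 0, flushes */
unsigned long cr3_fast   = __pa(kernel_pgd) | PCID_KERNEL | PCID_NOFLUSH;
						/* tag 0, keeps the TLB */
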
16860diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
16861index 942a086..6c26446 100644
16862--- a/arch/x86/include/asm/ptrace.h
16863+++ b/arch/x86/include/asm/ptrace.h
16864@@ -85,28 +85,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
16865 }
16866
16867 /*
16868- * user_mode_vm(regs) determines whether a register set came from user mode.
16869+ * user_mode(regs) determines whether a register set came from user mode.
16870 * This is true if V8086 mode was enabled OR if the register set was from
16871 * protected mode with RPL-3 CS value. This tricky test checks that with
16872 * one comparison. Many places in the kernel can bypass this full check
16873- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
16874+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
16875+ * be used.
16876 */
16877-static inline int user_mode(struct pt_regs *regs)
16878+static inline int user_mode_novm(struct pt_regs *regs)
16879 {
16880 #ifdef CONFIG_X86_32
16881 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
16882 #else
16883- return !!(regs->cs & 3);
16884+ return !!(regs->cs & SEGMENT_RPL_MASK);
16885 #endif
16886 }
16887
16888-static inline int user_mode_vm(struct pt_regs *regs)
16889+static inline int user_mode(struct pt_regs *regs)
16890 {
16891 #ifdef CONFIG_X86_32
16892 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
16893 USER_RPL;
16894 #else
16895- return user_mode(regs);
16896+ return user_mode_novm(regs);
16897 #endif
16898 }
16899
16900@@ -122,15 +123,16 @@ static inline int v8086_mode(struct pt_regs *regs)
16901 #ifdef CONFIG_X86_64
16902 static inline bool user_64bit_mode(struct pt_regs *regs)
16903 {
16904+ unsigned long cs = regs->cs & 0xffff;
16905 #ifndef CONFIG_PARAVIRT
16906 /*
16907 * On non-paravirt systems, this is the only long mode CPL 3
16908 * selector. We do not allow long mode selectors in the LDT.
16909 */
16910- return regs->cs == __USER_CS;
16911+ return cs == __USER_CS;
16912 #else
16913 /* Headers are too twisted for this to go in paravirt.h. */
16914- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
16915+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
16916 #endif
16917 }
16918
16919@@ -181,9 +183,11 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
16920 * Traps from the kernel do not save sp and ss.
16921 * Use the helper function to retrieve sp.
16922 */
16923- if (offset == offsetof(struct pt_regs, sp) &&
16924- regs->cs == __KERNEL_CS)
16925- return kernel_stack_pointer(regs);
16926+ if (offset == offsetof(struct pt_regs, sp)) {
16927+ unsigned long cs = regs->cs & 0xffff;
16928+ if (cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS)
16929+ return kernel_stack_pointer(regs);
16930+ }
16931 #endif
16932 return *(unsigned long *)((unsigned long)regs + offset);
16933 }
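
[annotation] The ptrace.h hunk swaps the naming of the two user-mode predicates: the full check (ring-3 CS or a set VM flag) becomes user_mode(), and the cheaper RPL-only check becomes user_mode_novm(), so callers that forget about V8086 frames now get the safe test by default. The underlying selector arithmetic, as a sketch with the standard x86 constants spelled out:

#define SEGMENT_RPL_MASK 0x3UL        /* low two selector bits: requested privilege level */
#define USER_RPL         0x3UL        /* ring 3 */
#define X86_VM_MASK      0x00020000UL /* EFLAGS.VM: virtual-8086 mode */

/* mirrors the 32-bit user_mode() above: true for a ring-3 CS or a v8086 frame */
static inline int came_from_user(unsigned long cs, unsigned long flags)
{
    return ((cs & SEGMENT_RPL_MASK) | (flags & X86_VM_MASK)) >= USER_RPL;
}
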
16934diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
16935index 9c6b890..5305f53 100644
16936--- a/arch/x86/include/asm/realmode.h
16937+++ b/arch/x86/include/asm/realmode.h
16938@@ -22,16 +22,14 @@ struct real_mode_header {
16939 #endif
16940 /* APM/BIOS reboot */
16941 u32 machine_real_restart_asm;
16942-#ifdef CONFIG_X86_64
16943 u32 machine_real_restart_seg;
16944-#endif
16945 };
16946
16947 /* This must match data at trampoline_32/64.S */
16948 struct trampoline_header {
16949 #ifdef CONFIG_X86_32
16950 u32 start;
16951- u16 gdt_pad;
16952+ u16 boot_cs;
16953 u16 gdt_limit;
16954 u32 gdt_base;
16955 #else
16956diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
16957index a82c4f1..ac45053 100644
16958--- a/arch/x86/include/asm/reboot.h
16959+++ b/arch/x86/include/asm/reboot.h
16960@@ -6,13 +6,13 @@
16961 struct pt_regs;
16962
16963 struct machine_ops {
16964- void (*restart)(char *cmd);
16965- void (*halt)(void);
16966- void (*power_off)(void);
16967+ void (* __noreturn restart)(char *cmd);
16968+ void (* __noreturn halt)(void);
16969+ void (* __noreturn power_off)(void);
16970 void (*shutdown)(void);
16971 void (*crash_shutdown)(struct pt_regs *);
16972- void (*emergency_restart)(void);
16973-};
16974+ void (* __noreturn emergency_restart)(void);
16975+} __no_const;
16976
16977 extern struct machine_ops machine_ops;
16978
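[annotation] Annotating the machine_ops members __noreturn documents that restart/halt/power_off never come back to the caller and lets the compiler drop dead code after the calls; __no_const is a PaX marker telling the patch's constify gcc plugin to leave this particular structure writable, since it is assigned at runtime. A sketch of the plain-GCC part, with __no_const omitted because it only exists under the plugin:

#define __noreturn __attribute__((noreturn))

struct machine_ops_sketch {
    void (* __noreturn restart)(char *cmd);  /* callee is promised never to return */
    void (*shutdown)(void);                  /* ordinary callback, may return */
};
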
16979diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
16980index cad82c9..2e5c5c1 100644
16981--- a/arch/x86/include/asm/rwsem.h
16982+++ b/arch/x86/include/asm/rwsem.h
16983@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
16984 {
16985 asm volatile("# beginning down_read\n\t"
16986 LOCK_PREFIX _ASM_INC "(%1)\n\t"
16987+
16988+#ifdef CONFIG_PAX_REFCOUNT
16989+ "jno 0f\n"
16990+ LOCK_PREFIX _ASM_DEC "(%1)\n"
16991+ "int $4\n0:\n"
16992+ _ASM_EXTABLE(0b, 0b)
16993+#endif
16994+
16995 /* adds 0x00000001 */
16996 " jns 1f\n"
16997 " call call_rwsem_down_read_failed\n"
16998@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
16999 "1:\n\t"
17000 " mov %1,%2\n\t"
17001 " add %3,%2\n\t"
17002+
17003+#ifdef CONFIG_PAX_REFCOUNT
17004+ "jno 0f\n"
17005+ "sub %3,%2\n"
17006+ "int $4\n0:\n"
17007+ _ASM_EXTABLE(0b, 0b)
17008+#endif
17009+
17010 " jle 2f\n\t"
17011 LOCK_PREFIX " cmpxchg %2,%0\n\t"
17012 " jnz 1b\n\t"
17013@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
17014 long tmp;
17015 asm volatile("# beginning down_write\n\t"
17016 LOCK_PREFIX " xadd %1,(%2)\n\t"
17017+
17018+#ifdef CONFIG_PAX_REFCOUNT
17019+ "jno 0f\n"
17020+ "mov %1,(%2)\n"
17021+ "int $4\n0:\n"
17022+ _ASM_EXTABLE(0b, 0b)
17023+#endif
17024+
17025 /* adds 0xffff0001, returns the old value */
17026 " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
17027 /* was the active mask 0 before? */
17028@@ -155,6 +179,14 @@ static inline void __up_read(struct rw_semaphore *sem)
17029 long tmp;
17030 asm volatile("# beginning __up_read\n\t"
17031 LOCK_PREFIX " xadd %1,(%2)\n\t"
17032+
17033+#ifdef CONFIG_PAX_REFCOUNT
17034+ "jno 0f\n"
17035+ "mov %1,(%2)\n"
17036+ "int $4\n0:\n"
17037+ _ASM_EXTABLE(0b, 0b)
17038+#endif
17039+
17040 /* subtracts 1, returns the old value */
17041 " jns 1f\n\t"
17042 " call call_rwsem_wake\n" /* expects old value in %edx */
17043@@ -173,6 +205,14 @@ static inline void __up_write(struct rw_semaphore *sem)
17044 long tmp;
17045 asm volatile("# beginning __up_write\n\t"
17046 LOCK_PREFIX " xadd %1,(%2)\n\t"
17047+
17048+#ifdef CONFIG_PAX_REFCOUNT
17049+ "jno 0f\n"
17050+ "mov %1,(%2)\n"
17051+ "int $4\n0:\n"
17052+ _ASM_EXTABLE(0b, 0b)
17053+#endif
17054+
17055 /* subtracts 0xffff0001, returns the old value */
17056 " jns 1f\n\t"
17057 " call call_rwsem_wake\n" /* expects old value in %edx */
17058@@ -190,6 +230,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
17059 {
17060 asm volatile("# beginning __downgrade_write\n\t"
17061 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
17062+
17063+#ifdef CONFIG_PAX_REFCOUNT
17064+ "jno 0f\n"
17065+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
17066+ "int $4\n0:\n"
17067+ _ASM_EXTABLE(0b, 0b)
17068+#endif
17069+
17070 /*
17071 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
17072 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
17073@@ -208,7 +256,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
17074 */
17075 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
17076 {
17077- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
17078+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
17079+
17080+#ifdef CONFIG_PAX_REFCOUNT
17081+ "jno 0f\n"
17082+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
17083+ "int $4\n0:\n"
17084+ _ASM_EXTABLE(0b, 0b)
17085+#endif
17086+
17087 : "+m" (sem->count)
17088 : "er" (delta));
17089 }
17090@@ -218,7 +274,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
17091 */
17092 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
17093 {
17094- return delta + xadd(&sem->count, delta);
17095+ return delta + xadd_check_overflow(&sem->count, delta);
17096 }
17097
17098 #endif /* __KERNEL__ */
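
[annotation] Every rwsem hunk applies the same PAX_REFCOUNT pattern: perform the locked arithmetic, then jno past a fixup that undoes the operation and executes int $4, raising the overflow exception so the kernel can treat a wrapped count as an attack. Reduced to a self-contained form (the real code also registers the trapping instruction in the exception table via _ASM_EXTABLE, omitted here):

/* the jno / undo / int $4 idiom from the hunks above, on a bare counter */
static inline void refcount_inc_trap(int *counter)
{
    asm volatile("lock incl %0\n\t"
                 "jno 1f\n\t"        /* no signed overflow: fall through */
                 "lock decl %0\n\t"  /* overflow: undo the increment     */
                 "int $4\n"          /* raise #OF for the kernel handler */
                 "1:\n"
                 : "+m" (*counter) : : "memory");
}
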
17099diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
17100index c48a950..bc40804 100644
17101--- a/arch/x86/include/asm/segment.h
17102+++ b/arch/x86/include/asm/segment.h
17103@@ -64,10 +64,15 @@
17104 * 26 - ESPFIX small SS
17105 * 27 - per-cpu [ offset to per-cpu data area ]
17106 * 28 - stack_canary-20 [ for stack protector ]
17107- * 29 - unused
17108- * 30 - unused
17109+ * 29 - PCI BIOS CS
17110+ * 30 - PCI BIOS DS
17111 * 31 - TSS for double fault handler
17112 */
17113+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
17114+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
17115+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
17116+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
17117+
17118 #define GDT_ENTRY_TLS_MIN 6
17119 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
17120
17121@@ -79,6 +84,8 @@
17122
17123 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
17124
17125+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
17126+
17127 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
17128
17129 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
17130@@ -104,6 +111,12 @@
17131 #define __KERNEL_STACK_CANARY 0
17132 #endif
17133
17134+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
17135+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
17136+
17137+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
17138+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
17139+
17140 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
17141
17142 /*
17143@@ -141,7 +154,7 @@
17144 */
17145
17146 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
17147-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
17148+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
17149
17150
17151 #else
17152@@ -165,6 +178,8 @@
17153 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
17154 #define __USER32_DS __USER_DS
17155
17156+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
17157+
17158 #define GDT_ENTRY_TSS 8 /* needs two entries */
17159 #define GDT_ENTRY_LDT 10 /* needs two entries */
17160 #define GDT_ENTRY_TLS_MIN 12
17161@@ -173,6 +188,8 @@
17162 #define GDT_ENTRY_PER_CPU 15 /* Abused to load per CPU data from limit */
17163 #define __PER_CPU_SEG (GDT_ENTRY_PER_CPU * 8 + 3)
17164
17165+#define GDT_ENTRY_UDEREF_KERNEL_DS 16
17166+
17167 /* TLS indexes for 64bit - hardcoded in arch_prctl */
17168 #define FS_TLS 0
17169 #define GS_TLS 1
17170@@ -180,12 +197,14 @@
17171 #define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3)
17172 #define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3)
17173
17174-#define GDT_ENTRIES 16
17175+#define GDT_ENTRIES 17
17176
17177 #endif
17178
17179 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
17180+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
17181 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
17182+#define __UDEREF_KERNEL_DS (GDT_ENTRY_UDEREF_KERNEL_DS*8)
17183 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
17184 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
17185 #ifndef CONFIG_PARAVIRT
17186@@ -265,7 +284,7 @@ static inline unsigned long get_limit(unsigned long segment)
17187 {
17188 unsigned long __limit;
17189 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
17190- return __limit + 1;
17191+ return __limit;
17192 }
17193
17194 #endif /* !__ASSEMBLY__ */
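
[annotation] The new descriptors (the KERNEXEC alternate kernel CS, UDEREF kernel DS, and PCI BIOS CS/DS) all follow the usual selector encoding: GDT index * 8 with the low two bits carrying the RPL, which is why user selectors end in +3 and GDT_ENTRIES grows to 17 on 64-bit. The get_limit() change drops the +1 because lsl already yields the offset of the last valid byte, not the segment size. Selector math in miniature (the index value is hypothetical):

/* selector layout: [index:13][table:1][rpl:2] */
#define SEL(index, rpl) (((index) << 3) | (rpl))

enum { EXAMPLE_GDT_INDEX = 7 };                                     /* hypothetical */
static const unsigned short kernel_sel = SEL(EXAMPLE_GDT_INDEX, 0); /* ring 0 */
static const unsigned short user_sel   = SEL(EXAMPLE_GDT_INDEX, 3); /* ring 3 */
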
17195diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
17196index 8d3120f..352b440 100644
17197--- a/arch/x86/include/asm/smap.h
17198+++ b/arch/x86/include/asm/smap.h
17199@@ -25,11 +25,40 @@
17200
17201 #include <asm/alternative-asm.h>
17202
17203+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17204+#define ASM_PAX_OPEN_USERLAND \
17205+ 661: jmp 663f; \
17206+ .pushsection .altinstr_replacement, "a" ; \
17207+ 662: pushq %rax; nop; \
17208+ .popsection ; \
17209+ .pushsection .altinstructions, "a" ; \
17210+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
17211+ .popsection ; \
17212+ call __pax_open_userland; \
17213+ popq %rax; \
17214+ 663:
17215+
17216+#define ASM_PAX_CLOSE_USERLAND \
17217+ 661: jmp 663f; \
17218+ .pushsection .altinstr_replacement, "a" ; \
17219+ 662: pushq %rax; nop; \
17220+ .popsection; \
17221+ .pushsection .altinstructions, "a" ; \
17222+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
17223+ .popsection; \
17224+ call __pax_close_userland; \
17225+ popq %rax; \
17226+ 663:
17227+#else
17228+#define ASM_PAX_OPEN_USERLAND
17229+#define ASM_PAX_CLOSE_USERLAND
17230+#endif
17231+
17232 #ifdef CONFIG_X86_SMAP
17233
17234 #define ASM_CLAC \
17235 661: ASM_NOP3 ; \
17236- .pushsection .altinstr_replacement, "ax" ; \
17237+ .pushsection .altinstr_replacement, "a" ; \
17238 662: __ASM_CLAC ; \
17239 .popsection ; \
17240 .pushsection .altinstructions, "a" ; \
17241@@ -38,7 +67,7 @@
17242
17243 #define ASM_STAC \
17244 661: ASM_NOP3 ; \
17245- .pushsection .altinstr_replacement, "ax" ; \
17246+ .pushsection .altinstr_replacement, "a" ; \
17247 662: __ASM_STAC ; \
17248 .popsection ; \
17249 .pushsection .altinstructions, "a" ; \
17250@@ -56,6 +85,37 @@
17251
17252 #include <asm/alternative.h>
17253
17254+#define __HAVE_ARCH_PAX_OPEN_USERLAND
17255+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
17256+
17257+extern void __pax_open_userland(void);
17258+static __always_inline unsigned long pax_open_userland(void)
17259+{
17260+
17261+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17262+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[open]", X86_FEATURE_STRONGUDEREF)
17263+ :
17264+ : [open] "i" (__pax_open_userland)
17265+ : "memory", "rax");
17266+#endif
17267+
17268+ return 0;
17269+}
17270+
17271+extern void __pax_close_userland(void);
17272+static __always_inline unsigned long pax_close_userland(void)
17273+{
17274+
17275+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17276+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[close]", X86_FEATURE_STRONGUDEREF)
17277+ :
17278+ : [close] "i" (__pax_close_userland)
17279+ : "memory", "rax");
17280+#endif
17281+
17282+ return 0;
17283+}
17284+
17285 #ifdef CONFIG_X86_SMAP
17286
17287 static __always_inline void clac(void)
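
[annotation] ASM_PAX_OPEN_USERLAND/ASM_PAX_CLOSE_USERLAND reuse the alternatives machinery already used by ASM_STAC/ASM_CLAC: the default text is a short jmp over the sequence, and when X86_FEATURE_STRONGUDEREF is detected at boot, the patcher swaps it so the __pax_{open,close}_userland calls actually run. The .altinstr_replacement section flag also changes from "ax" to "a", since replacement bytes are only copied from, never executed in place. For orientation, a sketch of the unconditional SMAP toggles that ASM_STAC/ASM_CLAC patch in:

#define __always_inline inline __attribute__((always_inline))

static __always_inline void stac_raw(void)
{
    asm volatile("stac" ::: "memory");  /* permit kernel access to user pages */
}

static __always_inline void clac_raw(void)
{
    asm volatile("clac" ::: "memory");  /* forbid kernel access to user pages */
}
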
17288diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
17289index b073aae..39f9bdd 100644
17290--- a/arch/x86/include/asm/smp.h
17291+++ b/arch/x86/include/asm/smp.h
17292@@ -36,7 +36,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
17293 /* cpus sharing the last level cache: */
17294 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
17295 DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
17296-DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
17297+DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
17298
17299 static inline struct cpumask *cpu_sibling_mask(int cpu)
17300 {
17301@@ -79,7 +79,7 @@ struct smp_ops {
17302
17303 void (*send_call_func_ipi)(const struct cpumask *mask);
17304 void (*send_call_func_single_ipi)(int cpu);
17305-};
17306+} __no_const;
17307
17308 /* Globals due to paravirt */
17309 extern void set_cpu_sibling_map(int cpu);
17310@@ -191,14 +191,8 @@ extern unsigned disabled_cpus __cpuinitdata;
17311 extern int safe_smp_processor_id(void);
17312
17313 #elif defined(CONFIG_X86_64_SMP)
17314-#define raw_smp_processor_id() (this_cpu_read(cpu_number))
17315-
17316-#define stack_smp_processor_id() \
17317-({ \
17318- struct thread_info *ti; \
17319- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
17320- ti->cpu; \
17321-})
17322+#define raw_smp_processor_id() (this_cpu_read(cpu_number))
17323+#define stack_smp_processor_id() raw_smp_processor_id()
17324 #define safe_smp_processor_id() smp_processor_id()
17325
17326 #endif
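
[annotation] The deleted stack_smp_processor_id() found thread_info by masking %rsp with CURRENT_MASK, which only works while thread_info lives at the base of the kernel stack; since this patch relocates thread_info (see the thread_info.h hunk below), both helpers collapse to the per-cpu read. The removed lookup, for reference (8 KiB kernel stacks, the 3.10-era x86-64 size, are assumed here):

#define THREAD_SIZE (2UL * 4096)  /* assumed: 8 KiB stacks as in 3.10 x86-64 */

/* the old trick the hunk deletes: mask the stack pointer down to the
 * THREAD_SIZE-aligned base, where thread_info used to be stored */
static inline void *stack_base_thread_info(void)
{
    unsigned long ti;
    __asm__("andq %%rsp,%0" : "=r" (ti) : "0" (~(THREAD_SIZE - 1UL)));
    return (void *)ti;
}
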
17327diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
17328index 33692ea..350a534 100644
17329--- a/arch/x86/include/asm/spinlock.h
17330+++ b/arch/x86/include/asm/spinlock.h
17331@@ -172,6 +172,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
17332 static inline void arch_read_lock(arch_rwlock_t *rw)
17333 {
17334 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
17335+
17336+#ifdef CONFIG_PAX_REFCOUNT
17337+ "jno 0f\n"
17338+ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
17339+ "int $4\n0:\n"
17340+ _ASM_EXTABLE(0b, 0b)
17341+#endif
17342+
17343 "jns 1f\n"
17344 "call __read_lock_failed\n\t"
17345 "1:\n"
17346@@ -181,6 +189,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
17347 static inline void arch_write_lock(arch_rwlock_t *rw)
17348 {
17349 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
17350+
17351+#ifdef CONFIG_PAX_REFCOUNT
17352+ "jno 0f\n"
17353+ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
17354+ "int $4\n0:\n"
17355+ _ASM_EXTABLE(0b, 0b)
17356+#endif
17357+
17358 "jz 1f\n"
17359 "call __write_lock_failed\n\t"
17360 "1:\n"
17361@@ -210,13 +226,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
17362
17363 static inline void arch_read_unlock(arch_rwlock_t *rw)
17364 {
17365- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
17366+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
17367+
17368+#ifdef CONFIG_PAX_REFCOUNT
17369+ "jno 0f\n"
17370+ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
17371+ "int $4\n0:\n"
17372+ _ASM_EXTABLE(0b, 0b)
17373+#endif
17374+
17375 :"+m" (rw->lock) : : "memory");
17376 }
17377
17378 static inline void arch_write_unlock(arch_rwlock_t *rw)
17379 {
17380- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
17381+ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
17382+
17383+#ifdef CONFIG_PAX_REFCOUNT
17384+ "jno 0f\n"
17385+ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
17386+ "int $4\n0:\n"
17387+ _ASM_EXTABLE(0b, 0b)
17388+#endif
17389+
17390 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
17391 }
17392
17393diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
17394index 6a99859..03cb807 100644
17395--- a/arch/x86/include/asm/stackprotector.h
17396+++ b/arch/x86/include/asm/stackprotector.h
17397@@ -47,7 +47,7 @@
17398 * head_32 for boot CPU and setup_per_cpu_areas() for others.
17399 */
17400 #define GDT_STACK_CANARY_INIT \
17401- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
17402+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
17403
17404 /*
17405 * Initialize the stackprotector canary value.
17406@@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
17407
17408 static inline void load_stack_canary_segment(void)
17409 {
17410-#ifdef CONFIG_X86_32
17411+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
17412 asm volatile ("mov %0, %%gs" : : "r" (0));
17413 #endif
17414 }
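
[annotation] The canary descriptor change from 0x18 to 0x17 is a one-byte limit fix: for a byte-granular expand-up segment, the limit field encodes the offset of the last valid byte, so a 24-byte canary window needs limit 0x17; 0x18 would leave one byte past the window addressable. The arithmetic:

unsigned int canary_window = 24;         /* bytes covered by the segment */
unsigned int limit = canary_window - 1;  /* 0x17, as in the hunk above   */
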
17415diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
17416index 70bbe39..4ae2bd4 100644
17417--- a/arch/x86/include/asm/stacktrace.h
17418+++ b/arch/x86/include/asm/stacktrace.h
17419@@ -11,28 +11,20 @@
17420
17421 extern int kstack_depth_to_print;
17422
17423-struct thread_info;
17424+struct task_struct;
17425 struct stacktrace_ops;
17426
17427-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
17428- unsigned long *stack,
17429- unsigned long bp,
17430- const struct stacktrace_ops *ops,
17431- void *data,
17432- unsigned long *end,
17433- int *graph);
17434+typedef unsigned long walk_stack_t(struct task_struct *task,
17435+ void *stack_start,
17436+ unsigned long *stack,
17437+ unsigned long bp,
17438+ const struct stacktrace_ops *ops,
17439+ void *data,
17440+ unsigned long *end,
17441+ int *graph);
17442
17443-extern unsigned long
17444-print_context_stack(struct thread_info *tinfo,
17445- unsigned long *stack, unsigned long bp,
17446- const struct stacktrace_ops *ops, void *data,
17447- unsigned long *end, int *graph);
17448-
17449-extern unsigned long
17450-print_context_stack_bp(struct thread_info *tinfo,
17451- unsigned long *stack, unsigned long bp,
17452- const struct stacktrace_ops *ops, void *data,
17453- unsigned long *end, int *graph);
17454+extern walk_stack_t print_context_stack;
17455+extern walk_stack_t print_context_stack_bp;
17456
17457 /* Generic stack tracer with callbacks */
17458
17459@@ -40,7 +32,7 @@ struct stacktrace_ops {
17460 void (*address)(void *data, unsigned long address, int reliable);
17461 /* On negative return stop dumping */
17462 int (*stack)(void *data, char *name);
17463- walk_stack_t walk_stack;
17464+ walk_stack_t *walk_stack;
17465 };
17466
17467 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
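
[annotation] The walk_stack_t rework is a useful C idiom: the typedef becomes a function type instead of a pointer-to-function type, so "extern walk_stack_t print_context_stack;" declares the function itself (keeping its prototype in exactly one place) and the struct member is spelled explicitly as walk_stack_t *. The mechanism in miniature, with illustrative names:

typedef int handler_t(int arg);   /* a function type, not a pointer type  */

extern handler_t my_handler;      /* same as: extern int my_handler(int); */

struct ops {
    handler_t *fn;                /* explicit pointer to such a function  */
};

int my_handler(int arg) { return arg + 1; }
static struct ops example_ops = { .fn = my_handler };
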
17468diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
17469index 4ec45b3..a4f0a8a 100644
17470--- a/arch/x86/include/asm/switch_to.h
17471+++ b/arch/x86/include/asm/switch_to.h
17472@@ -108,7 +108,7 @@ do { \
17473 "call __switch_to\n\t" \
17474 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
17475 __switch_canary \
17476- "movq %P[thread_info](%%rsi),%%r8\n\t" \
17477+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
17478 "movq %%rax,%%rdi\n\t" \
17479 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
17480 "jnz ret_from_fork\n\t" \
17481@@ -119,7 +119,7 @@ do { \
17482 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
17483 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
17484 [_tif_fork] "i" (_TIF_FORK), \
17485- [thread_info] "i" (offsetof(struct task_struct, stack)), \
17486+ [thread_info] "m" (current_tinfo), \
17487 [current_task] "m" (current_task) \
17488 __switch_canary_iparam \
17489 : "memory", "cc" __EXTRA_CLOBBER)
17490diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
17491index a1df6e8..e002940 100644
17492--- a/arch/x86/include/asm/thread_info.h
17493+++ b/arch/x86/include/asm/thread_info.h
17494@@ -10,6 +10,7 @@
17495 #include <linux/compiler.h>
17496 #include <asm/page.h>
17497 #include <asm/types.h>
17498+#include <asm/percpu.h>
17499
17500 /*
17501 * low level task data that entry.S needs immediate access to
17502@@ -23,7 +24,6 @@ struct exec_domain;
17503 #include <linux/atomic.h>
17504
17505 struct thread_info {
17506- struct task_struct *task; /* main task structure */
17507 struct exec_domain *exec_domain; /* execution domain */
17508 __u32 flags; /* low level flags */
17509 __u32 status; /* thread synchronous flags */
17510@@ -33,19 +33,13 @@ struct thread_info {
17511 mm_segment_t addr_limit;
17512 struct restart_block restart_block;
17513 void __user *sysenter_return;
17514-#ifdef CONFIG_X86_32
17515- unsigned long previous_esp; /* ESP of the previous stack in
17516- case of nested (IRQ) stacks
17517- */
17518- __u8 supervisor_stack[0];
17519-#endif
17520+ unsigned long lowest_stack;
17521 unsigned int sig_on_uaccess_error:1;
17522 unsigned int uaccess_err:1; /* uaccess failed */
17523 };
17524
17525-#define INIT_THREAD_INFO(tsk) \
17526+#define INIT_THREAD_INFO \
17527 { \
17528- .task = &tsk, \
17529 .exec_domain = &default_exec_domain, \
17530 .flags = 0, \
17531 .cpu = 0, \
17532@@ -56,7 +50,7 @@ struct thread_info {
17533 }, \
17534 }
17535
17536-#define init_thread_info (init_thread_union.thread_info)
17537+#define init_thread_info (init_thread_union.stack)
17538 #define init_stack (init_thread_union.stack)
17539
17540 #else /* !__ASSEMBLY__ */
17541@@ -97,6 +91,7 @@ struct thread_info {
17542 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
17543 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
17544 #define TIF_X32 30 /* 32-bit native x86-64 binary */
17545+#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
17546
17547 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
17548 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
17549@@ -121,17 +116,18 @@ struct thread_info {
17550 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
17551 #define _TIF_ADDR32 (1 << TIF_ADDR32)
17552 #define _TIF_X32 (1 << TIF_X32)
17553+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
17554
17555 /* work to do in syscall_trace_enter() */
17556 #define _TIF_WORK_SYSCALL_ENTRY \
17557 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
17558 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
17559- _TIF_NOHZ)
17560+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
17561
17562 /* work to do in syscall_trace_leave() */
17563 #define _TIF_WORK_SYSCALL_EXIT \
17564 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
17565- _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
17566+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
17567
17568 /* work to do on interrupt/exception return */
17569 #define _TIF_WORK_MASK \
17570@@ -142,7 +138,7 @@ struct thread_info {
17571 /* work to do on any return to user space */
17572 #define _TIF_ALLWORK_MASK \
17573 ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
17574- _TIF_NOHZ)
17575+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
17576
17577 /* Only used for 64 bit */
17578 #define _TIF_DO_NOTIFY_MASK \
17579@@ -158,45 +154,40 @@ struct thread_info {
17580
17581 #define PREEMPT_ACTIVE 0x10000000
17582
17583-#ifdef CONFIG_X86_32
17584-
17585-#define STACK_WARN (THREAD_SIZE/8)
17586-/*
17587- * macros/functions for gaining access to the thread information structure
17588- *
17589- * preempt_count needs to be 1 initially, until the scheduler is functional.
17590- */
17591-#ifndef __ASSEMBLY__
17592-
17593-
17594-/* how to get the current stack pointer from C */
17595-register unsigned long current_stack_pointer asm("esp") __used;
17596-
17597-/* how to get the thread information struct from C */
17598-static inline struct thread_info *current_thread_info(void)
17599-{
17600- return (struct thread_info *)
17601- (current_stack_pointer & ~(THREAD_SIZE - 1));
17602-}
17603-
17604-#else /* !__ASSEMBLY__ */
17605-
17606+#ifdef __ASSEMBLY__
17607 /* how to get the thread information struct from ASM */
17608 #define GET_THREAD_INFO(reg) \
17609- movl $-THREAD_SIZE, reg; \
17610- andl %esp, reg
17611+ mov PER_CPU_VAR(current_tinfo), reg
17612
17613 /* use this one if reg already contains %esp */
17614-#define GET_THREAD_INFO_WITH_ESP(reg) \
17615- andl $-THREAD_SIZE, reg
17616+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
17617+#else
17618+/* how to get the thread information struct from C */
17619+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
17620+
17621+static __always_inline struct thread_info *current_thread_info(void)
17622+{
17623+ return this_cpu_read_stable(current_tinfo);
17624+}
17625+#endif
17626+
17627+#ifdef CONFIG_X86_32
17628+
17629+#define STACK_WARN (THREAD_SIZE/8)
17630+/*
17631+ * macros/functions for gaining access to the thread information structure
17632+ *
17633+ * preempt_count needs to be 1 initially, until the scheduler is functional.
17634+ */
17635+#ifndef __ASSEMBLY__
17636+
17637+/* how to get the current stack pointer from C */
17638+register unsigned long current_stack_pointer asm("esp") __used;
17639
17640 #endif
17641
17642 #else /* X86_32 */
17643
17644-#include <asm/percpu.h>
17645-#define KERNEL_STACK_OFFSET (5*8)
17646-
17647 /*
17648 * macros/functions for gaining access to the thread information structure
17649 * preempt_count needs to be 1 initially, until the scheduler is functional.
17650@@ -204,27 +195,8 @@ static inline struct thread_info *current_thread_info(void)
17651 #ifndef __ASSEMBLY__
17652 DECLARE_PER_CPU(unsigned long, kernel_stack);
17653
17654-static inline struct thread_info *current_thread_info(void)
17655-{
17656- struct thread_info *ti;
17657- ti = (void *)(this_cpu_read_stable(kernel_stack) +
17658- KERNEL_STACK_OFFSET - THREAD_SIZE);
17659- return ti;
17660-}
17661-
17662-#else /* !__ASSEMBLY__ */
17663-
17664-/* how to get the thread information struct from ASM */
17665-#define GET_THREAD_INFO(reg) \
17666- movq PER_CPU_VAR(kernel_stack),reg ; \
17667- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
17668-
17669-/*
17670- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
17671- * a certain register (to be used in assembler memory operands).
17672- */
17673-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
17674-
17675+/* how to get the current stack pointer from C */
17676+register unsigned long current_stack_pointer asm("rsp") __used;
17677 #endif
17678
17679 #endif /* !X86_32 */
17680@@ -283,5 +255,12 @@ static inline bool is_ia32_task(void)
17681 extern void arch_task_cache_init(void);
17682 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
17683 extern void arch_release_task_struct(struct task_struct *tsk);
17684+
17685+#define __HAVE_THREAD_FUNCTIONS
17686+#define task_thread_info(task) (&(task)->tinfo)
17687+#define task_stack_page(task) ((task)->stack)
17688+#define setup_thread_stack(p, org) do {} while (0)
17689+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
17690+
17691 #endif
17692 #endif /* _ASM_X86_THREAD_INFO_H */
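
[annotation] This is one of the patch's central restructurings: thread_info no longer sits at the bottom of the kernel stack, where a stack overflow overwrites it first, but is embedded in task_struct (task_thread_info(task) = &(task)->tinfo) and reached through the per-cpu current_tinfo pointer on both 32- and 64-bit. With the structure gone from the stack page, end_of_stack() points one word in, which is where the kernel's overflow sentinel lives; a sketch of that consequence (STACK_END_MAGIC is the kernel's usual value):

#define STACK_END_MAGIC 0x57AC6E9DUL

/* end_of_stack(p) above resolves to task_stack_page(p) + 1; the word it
 * names holds a sentinel whose corruption indicates a stack overflow */
static inline void set_stack_end_magic(unsigned long *stack_page)
{
    unsigned long *stackend = stack_page + 1;
    *stackend = STACK_END_MAGIC;
}
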
17693diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
17694index 50a7fc0..45844c0 100644
17695--- a/arch/x86/include/asm/tlbflush.h
17696+++ b/arch/x86/include/asm/tlbflush.h
17697@@ -17,18 +17,44 @@
17698
17699 static inline void __native_flush_tlb(void)
17700 {
17701+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
17702+ unsigned long descriptor[2];
17703+
17704+ descriptor[0] = PCID_KERNEL;
17705+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_MONGLOBAL) : "memory");
17706+ return;
17707+ }
17708+
17709+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17710+ if (static_cpu_has(X86_FEATURE_PCID)) {
17711+ unsigned int cpu = raw_get_cpu();
17712+
17713+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
17714+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
17715+ raw_put_cpu_no_resched();
17716+ return;
17717+ }
17718+#endif
17719+
17720 native_write_cr3(native_read_cr3());
17721 }
17722
17723 static inline void __native_flush_tlb_global_irq_disabled(void)
17724 {
17725- unsigned long cr4;
17726+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
17727+ unsigned long descriptor[2];
17728
17729- cr4 = native_read_cr4();
17730- /* clear PGE */
17731- native_write_cr4(cr4 & ~X86_CR4_PGE);
17732- /* write old PGE again and flush TLBs */
17733- native_write_cr4(cr4);
17734+ descriptor[0] = PCID_KERNEL;
17735+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_GLOBAL) : "memory");
17736+ } else {
17737+ unsigned long cr4;
17738+
17739+ cr4 = native_read_cr4();
17740+ /* clear PGE */
17741+ native_write_cr4(cr4 & ~X86_CR4_PGE);
17742+ /* write old PGE again and flush TLBs */
17743+ native_write_cr4(cr4);
17744+ }
17745 }
17746
17747 static inline void __native_flush_tlb_global(void)
17748@@ -49,6 +75,42 @@ static inline void __native_flush_tlb_global(void)
17749
17750 static inline void __native_flush_tlb_single(unsigned long addr)
17751 {
17752+
17753+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
17754+ unsigned long descriptor[2];
17755+
17756+ descriptor[0] = PCID_KERNEL;
17757+ descriptor[1] = addr;
17758+
17759+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17760+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) || addr >= TASK_SIZE_MAX) {
17761+ if (addr < TASK_SIZE_MAX)
17762+ descriptor[1] += pax_user_shadow_base;
17763+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
17764+ }
17765+
17766+ descriptor[0] = PCID_USER;
17767+ descriptor[1] = addr;
17768+#endif
17769+
17770+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
17771+ return;
17772+ }
17773+
17774+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17775+ if (static_cpu_has(X86_FEATURE_PCID)) {
17776+ unsigned int cpu = raw_get_cpu();
17777+
17778+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
17779+ asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
17780+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
17781+ raw_put_cpu_no_resched();
17782+
17783+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) && addr < TASK_SIZE_MAX)
17784+ addr += pax_user_shadow_base;
17785+ }
17786+#endif
17787+
17788 asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
17789 }
17790
17791diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
17792index 5ee2687..74590b9 100644
17793--- a/arch/x86/include/asm/uaccess.h
17794+++ b/arch/x86/include/asm/uaccess.h
17795@@ -7,6 +7,7 @@
17796 #include <linux/compiler.h>
17797 #include <linux/thread_info.h>
17798 #include <linux/string.h>
17799+#include <linux/sched.h>
17800 #include <asm/asm.h>
17801 #include <asm/page.h>
17802 #include <asm/smap.h>
17803@@ -29,7 +30,12 @@
17804
17805 #define get_ds() (KERNEL_DS)
17806 #define get_fs() (current_thread_info()->addr_limit)
17807+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17808+void __set_fs(mm_segment_t x);
17809+void set_fs(mm_segment_t x);
17810+#else
17811 #define set_fs(x) (current_thread_info()->addr_limit = (x))
17812+#endif
17813
17814 #define segment_eq(a, b) ((a).seg == (b).seg)
17815
17816@@ -77,8 +83,33 @@
17817 * checks that the pointer is in the user space range - after calling
17818 * this function, memory access functions may still return -EFAULT.
17819 */
17820-#define access_ok(type, addr, size) \
17821- (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
17822+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
17823+#define access_ok(type, addr, size) \
17824+({ \
17825+ long __size = size; \
17826+ unsigned long __addr = (unsigned long)addr; \
17827+ unsigned long __addr_ao = __addr & PAGE_MASK; \
17828+ unsigned long __end_ao = __addr + __size - 1; \
17829+ bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
17830+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
17831+ while(__addr_ao <= __end_ao) { \
17832+ char __c_ao; \
17833+ __addr_ao += PAGE_SIZE; \
17834+ if (__size > PAGE_SIZE) \
17835+ cond_resched(); \
17836+ if (__get_user(__c_ao, (char __user *)__addr)) \
17837+ break; \
17838+ if (type != VERIFY_WRITE) { \
17839+ __addr = __addr_ao; \
17840+ continue; \
17841+ } \
17842+ if (__put_user(__c_ao, (char __user *)__addr)) \
17843+ break; \
17844+ __addr = __addr_ao; \
17845+ } \
17846+ } \
17847+ __ret_ao; \
17848+})
17849
17850 /*
17851 * The exception table consists of pairs of addresses relative to the
17852@@ -165,10 +196,12 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
17853 register __inttype(*(ptr)) __val_gu asm("%edx"); \
17854 __chk_user_ptr(ptr); \
17855 might_fault(); \
17856+ pax_open_userland(); \
17857 asm volatile("call __get_user_%P3" \
17858 : "=a" (__ret_gu), "=r" (__val_gu) \
17859 : "0" (ptr), "i" (sizeof(*(ptr)))); \
17860 (x) = (__typeof__(*(ptr))) __val_gu; \
17861+ pax_close_userland(); \
17862 __ret_gu; \
17863 })
17864
17865@@ -176,13 +209,21 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
17866 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
17867 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
17868
17869-
17870+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17871+#define __copyuser_seg "gs;"
17872+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
17873+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
17874+#else
17875+#define __copyuser_seg
17876+#define __COPYUSER_SET_ES
17877+#define __COPYUSER_RESTORE_ES
17878+#endif
17879
17880 #ifdef CONFIG_X86_32
17881 #define __put_user_asm_u64(x, addr, err, errret) \
17882 asm volatile(ASM_STAC "\n" \
17883- "1: movl %%eax,0(%2)\n" \
17884- "2: movl %%edx,4(%2)\n" \
17885+ "1: "__copyuser_seg"movl %%eax,0(%2)\n" \
17886+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
17887 "3: " ASM_CLAC "\n" \
17888 ".section .fixup,\"ax\"\n" \
17889 "4: movl %3,%0\n" \
17890@@ -195,8 +236,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
17891
17892 #define __put_user_asm_ex_u64(x, addr) \
17893 asm volatile(ASM_STAC "\n" \
17894- "1: movl %%eax,0(%1)\n" \
17895- "2: movl %%edx,4(%1)\n" \
17896+ "1: "__copyuser_seg"movl %%eax,0(%1)\n" \
17897+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
17898 "3: " ASM_CLAC "\n" \
17899 _ASM_EXTABLE_EX(1b, 2b) \
17900 _ASM_EXTABLE_EX(2b, 3b) \
17901@@ -246,7 +287,8 @@ extern void __put_user_8(void);
17902 __typeof__(*(ptr)) __pu_val; \
17903 __chk_user_ptr(ptr); \
17904 might_fault(); \
17905- __pu_val = x; \
17906+ __pu_val = (x); \
17907+ pax_open_userland(); \
17908 switch (sizeof(*(ptr))) { \
17909 case 1: \
17910 __put_user_x(1, __pu_val, ptr, __ret_pu); \
17911@@ -264,6 +306,7 @@ extern void __put_user_8(void);
17912 __put_user_x(X, __pu_val, ptr, __ret_pu); \
17913 break; \
17914 } \
17915+ pax_close_userland(); \
17916 __ret_pu; \
17917 })
17918
17919@@ -344,8 +387,10 @@ do { \
17920 } while (0)
17921
17922 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
17923+do { \
17924+ pax_open_userland(); \
17925 asm volatile(ASM_STAC "\n" \
17926- "1: mov"itype" %2,%"rtype"1\n" \
17927+ "1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
17928 "2: " ASM_CLAC "\n" \
17929 ".section .fixup,\"ax\"\n" \
17930 "3: mov %3,%0\n" \
17931@@ -353,8 +398,10 @@ do { \
17932 " jmp 2b\n" \
17933 ".previous\n" \
17934 _ASM_EXTABLE(1b, 3b) \
17935- : "=r" (err), ltype(x) \
17936- : "m" (__m(addr)), "i" (errret), "0" (err))
17937+ : "=r" (err), ltype (x) \
17938+ : "m" (__m(addr)), "i" (errret), "0" (err)); \
17939+ pax_close_userland(); \
17940+} while (0)
17941
17942 #define __get_user_size_ex(x, ptr, size) \
17943 do { \
17944@@ -378,7 +425,7 @@ do { \
17945 } while (0)
17946
17947 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
17948- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
17949+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
17950 "2:\n" \
17951 _ASM_EXTABLE_EX(1b, 2b) \
17952 : ltype(x) : "m" (__m(addr)))
17953@@ -395,13 +442,24 @@ do { \
17954 int __gu_err; \
17955 unsigned long __gu_val; \
17956 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
17957- (x) = (__force __typeof__(*(ptr)))__gu_val; \
17958+ (x) = (__typeof__(*(ptr)))__gu_val; \
17959 __gu_err; \
17960 })
17961
17962 /* FIXME: this hack is definitely wrong -AK */
17963 struct __large_struct { unsigned long buf[100]; };
17964-#define __m(x) (*(struct __large_struct __user *)(x))
17965+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17966+#define ____m(x) \
17967+({ \
17968+ unsigned long ____x = (unsigned long)(x); \
17969+ if (____x < pax_user_shadow_base) \
17970+ ____x += pax_user_shadow_base; \
17971+ (typeof(x))____x; \
17972+})
17973+#else
17974+#define ____m(x) (x)
17975+#endif
17976+#define __m(x) (*(struct __large_struct __user *)____m(x))
17977
17978 /*
17979 * Tell gcc we read from memory instead of writing: this is because
17980@@ -409,8 +467,10 @@ struct __large_struct { unsigned long buf[100]; };
17981 * aliasing issues.
17982 */
17983 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
17984+do { \
17985+ pax_open_userland(); \
17986 asm volatile(ASM_STAC "\n" \
17987- "1: mov"itype" %"rtype"1,%2\n" \
17988+ "1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
17989 "2: " ASM_CLAC "\n" \
17990 ".section .fixup,\"ax\"\n" \
17991 "3: mov %3,%0\n" \
17992@@ -418,10 +478,12 @@ struct __large_struct { unsigned long buf[100]; };
17993 ".previous\n" \
17994 _ASM_EXTABLE(1b, 3b) \
17995 : "=r"(err) \
17996- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
17997+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err));\
17998+ pax_close_userland(); \
17999+} while (0)
18000
18001 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
18002- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
18003+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
18004 "2:\n" \
18005 _ASM_EXTABLE_EX(1b, 2b) \
18006 : : ltype(x), "m" (__m(addr)))
18007@@ -431,11 +493,13 @@ struct __large_struct { unsigned long buf[100]; };
18008 */
18009 #define uaccess_try do { \
18010 current_thread_info()->uaccess_err = 0; \
18011+ pax_open_userland(); \
18012 stac(); \
18013 barrier();
18014
18015 #define uaccess_catch(err) \
18016 clac(); \
18017+ pax_close_userland(); \
18018 (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \
18019 } while (0)
18020
18021@@ -460,8 +524,12 @@ struct __large_struct { unsigned long buf[100]; };
18022 * On error, the variable @x is set to zero.
18023 */
18024
18025+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18026+#define __get_user(x, ptr) get_user((x), (ptr))
18027+#else
18028 #define __get_user(x, ptr) \
18029 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
18030+#endif
18031
18032 /**
18033 * __put_user: - Write a simple value into user space, with less checking.
18034@@ -483,8 +551,12 @@ struct __large_struct { unsigned long buf[100]; };
18035 * Returns zero on success, or -EFAULT on error.
18036 */
18037
18038+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18039+#define __put_user(x, ptr) put_user((x), (ptr))
18040+#else
18041 #define __put_user(x, ptr) \
18042 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
18043+#endif
18044
18045 #define __get_user_unaligned __get_user
18046 #define __put_user_unaligned __put_user
18047@@ -502,7 +574,7 @@ struct __large_struct { unsigned long buf[100]; };
18048 #define get_user_ex(x, ptr) do { \
18049 unsigned long __gue_val; \
18050 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
18051- (x) = (__force __typeof__(*(ptr)))__gue_val; \
18052+ (x) = (__typeof__(*(ptr)))__gue_val; \
18053 } while (0)
18054
18055 #define put_user_try uaccess_try
18056@@ -519,8 +591,8 @@ strncpy_from_user(char *dst, const char __user *src, long count);
18057 extern __must_check long strlen_user(const char __user *str);
18058 extern __must_check long strnlen_user(const char __user *str, long n);
18059
18060-unsigned long __must_check clear_user(void __user *mem, unsigned long len);
18061-unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
18062+unsigned long __must_check clear_user(void __user *mem, unsigned long len) __size_overflow(2);
18063+unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
18064
18065 /*
18066 * movsl can be slow when source and dest are not both 8-byte aligned
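
[annotation] The expanded access_ok() above does more than a range check under UDEREF: when the range spans a page boundary, it walks it page by page, reading (and for VERIFY_WRITE, rewriting) one byte per page so every page is faulted in up front. Rendered as a plain kernel-context function (a sketch: __get_user/__put_user/cond_resched/PAGE_MASK are the kernel's own, and error handling is simplified; the real macro still returns the range-check result on top of this):

/* walk [addr, addr+size) one page at a time, touching a byte per page
 * so later accesses done with pagefaults disabled cannot fail */
static void prefault_user_range(int writable, unsigned long addr, long size)
{
    unsigned long page = addr & PAGE_MASK;
    unsigned long end  = addr + size - 1;
    char c;

    while (page <= end) {
        page += PAGE_SIZE;
        if (size > PAGE_SIZE)
            cond_resched();                      /* stay preemptible on long runs */
        if (__get_user(c, (char __user *)addr))  /* the read faults the page in   */
            break;
        if (writable && __put_user(c, (char __user *)addr))
            break;                               /* rewrite dirties it for writes */
        addr = page;                             /* first byte of the next page   */
    }
}
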
18067diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
18068index 7f760a9..04b1c65 100644
18069--- a/arch/x86/include/asm/uaccess_32.h
18070+++ b/arch/x86/include/asm/uaccess_32.h
18071@@ -11,15 +11,15 @@
18072 #include <asm/page.h>
18073
18074 unsigned long __must_check __copy_to_user_ll
18075- (void __user *to, const void *from, unsigned long n);
18076+ (void __user *to, const void *from, unsigned long n) __size_overflow(3);
18077 unsigned long __must_check __copy_from_user_ll
18078- (void *to, const void __user *from, unsigned long n);
18079+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
18080 unsigned long __must_check __copy_from_user_ll_nozero
18081- (void *to, const void __user *from, unsigned long n);
18082+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
18083 unsigned long __must_check __copy_from_user_ll_nocache
18084- (void *to, const void __user *from, unsigned long n);
18085+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
18086 unsigned long __must_check __copy_from_user_ll_nocache_nozero
18087- (void *to, const void __user *from, unsigned long n);
18088+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
18089
18090 /**
18091 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
18092@@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
18093 static __always_inline unsigned long __must_check
18094 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
18095 {
18096+ if ((long)n < 0)
18097+ return n;
18098+
18099+ check_object_size(from, n, true);
18100+
18101 if (__builtin_constant_p(n)) {
18102 unsigned long ret;
18103
18104@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
18105 __copy_to_user(void __user *to, const void *from, unsigned long n)
18106 {
18107 might_fault();
18108+
18109 return __copy_to_user_inatomic(to, from, n);
18110 }
18111
18112 static __always_inline unsigned long
18113 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
18114 {
18115+ if ((long)n < 0)
18116+ return n;
18117+
18118 /* Avoid zeroing the tail if the copy fails..
18119 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
18120 * but as the zeroing behaviour is only significant when n is not
18121@@ -137,6 +146,12 @@ static __always_inline unsigned long
18122 __copy_from_user(void *to, const void __user *from, unsigned long n)
18123 {
18124 might_fault();
18125+
18126+ if ((long)n < 0)
18127+ return n;
18128+
18129+ check_object_size(to, n, false);
18130+
18131 if (__builtin_constant_p(n)) {
18132 unsigned long ret;
18133
18134@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
18135 const void __user *from, unsigned long n)
18136 {
18137 might_fault();
18138+
18139+ if ((long)n < 0)
18140+ return n;
18141+
18142 if (__builtin_constant_p(n)) {
18143 unsigned long ret;
18144
18145@@ -181,15 +200,19 @@ static __always_inline unsigned long
18146 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
18147 unsigned long n)
18148 {
18149- return __copy_from_user_ll_nocache_nozero(to, from, n);
18150+ if ((long)n < 0)
18151+ return n;
18152+
18153+ return __copy_from_user_ll_nocache_nozero(to, from, n);
18154 }
18155
18156-unsigned long __must_check copy_to_user(void __user *to,
18157- const void *from, unsigned long n);
18158-unsigned long __must_check _copy_from_user(void *to,
18159- const void __user *from,
18160- unsigned long n);
18161-
18162+extern void copy_to_user_overflow(void)
18163+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
18164+ __compiletime_error("copy_to_user() buffer size is not provably correct")
18165+#else
18166+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
18167+#endif
18168+;
18169
18170 extern void copy_from_user_overflow(void)
18171 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
18172@@ -199,17 +222,60 @@ extern void copy_from_user_overflow(void)
18173 #endif
18174 ;
18175
18176-static inline unsigned long __must_check copy_from_user(void *to,
18177- const void __user *from,
18178- unsigned long n)
18179+/**
18180+ * copy_to_user: - Copy a block of data into user space.
18181+ * @to: Destination address, in user space.
18182+ * @from: Source address, in kernel space.
18183+ * @n: Number of bytes to copy.
18184+ *
18185+ * Context: User context only. This function may sleep.
18186+ *
18187+ * Copy data from kernel space to user space.
18188+ *
18189+ * Returns number of bytes that could not be copied.
18190+ * On success, this will be zero.
18191+ */
18192+static inline unsigned long __must_check
18193+copy_to_user(void __user *to, const void *from, unsigned long n)
18194 {
18195- int sz = __compiletime_object_size(to);
18196+ size_t sz = __compiletime_object_size(from);
18197
18198- if (likely(sz == -1 || sz >= n))
18199- n = _copy_from_user(to, from, n);
18200- else
18201+ if (unlikely(sz != (size_t)-1 && sz < n))
18202+ copy_to_user_overflow();
18203+ else if (access_ok(VERIFY_WRITE, to, n))
18204+ n = __copy_to_user(to, from, n);
18205+ return n;
18206+}
18207+
18208+/**
18209+ * copy_from_user: - Copy a block of data from user space.
18210+ * @to: Destination address, in kernel space.
18211+ * @from: Source address, in user space.
18212+ * @n: Number of bytes to copy.
18213+ *
18214+ * Context: User context only. This function may sleep.
18215+ *
18216+ * Copy data from user space to kernel space.
18217+ *
18218+ * Returns number of bytes that could not be copied.
18219+ * On success, this will be zero.
18220+ *
18221+ * If some data could not be copied, this function will pad the copied
18222+ * data to the requested size using zero bytes.
18223+ */
18224+static inline unsigned long __must_check
18225+copy_from_user(void *to, const void __user *from, unsigned long n)
18226+{
18227+ size_t sz = __compiletime_object_size(to);
18228+
18229+ check_object_size(to, n, false);
18230+
18231+ if (unlikely(sz != (size_t)-1 && sz < n))
18232 copy_from_user_overflow();
18233-
18234+ else if (access_ok(VERIFY_READ, from, n))
18235+ n = __copy_from_user(to, from, n);
18236+ else if ((long)n > 0)
18237+ memset(to, 0, n);
18238 return n;
18239 }
18240
18241diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
18242index 142810c..1dbe82f 100644
18243--- a/arch/x86/include/asm/uaccess_64.h
18244+++ b/arch/x86/include/asm/uaccess_64.h
18245@@ -10,6 +10,9 @@
18246 #include <asm/alternative.h>
18247 #include <asm/cpufeature.h>
18248 #include <asm/page.h>
18249+#include <asm/pgtable.h>
18250+
18251+#define set_fs(x) (current_thread_info()->addr_limit = (x))
18252
18253 /*
18254 * Copy To/From Userspace
18255@@ -17,13 +20,13 @@
18256
18257 /* Handles exceptions in both to and from, but doesn't do access_ok */
18258 __must_check unsigned long
18259-copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
18260+copy_user_enhanced_fast_string(void *to, const void *from, unsigned len) __size_overflow(3);
18261 __must_check unsigned long
18262-copy_user_generic_string(void *to, const void *from, unsigned len);
18263+copy_user_generic_string(void *to, const void *from, unsigned len) __size_overflow(3);
18264 __must_check unsigned long
18265-copy_user_generic_unrolled(void *to, const void *from, unsigned len);
18266+copy_user_generic_unrolled(void *to, const void *from, unsigned len) __size_overflow(3);
18267
18268-static __always_inline __must_check unsigned long
18269+static __always_inline __must_check __size_overflow(3) unsigned long
18270 copy_user_generic(void *to, const void *from, unsigned len)
18271 {
18272 unsigned ret;
18273@@ -41,142 +44,204 @@ copy_user_generic(void *to, const void *from, unsigned len)
18274 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
18275 "=d" (len)),
18276 "1" (to), "2" (from), "3" (len)
18277- : "memory", "rcx", "r8", "r9", "r10", "r11");
18278+ : "memory", "rcx", "r8", "r9", "r11");
18279 return ret;
18280 }
18281
18282+static __always_inline __must_check unsigned long
18283+__copy_to_user(void __user *to, const void *from, unsigned long len);
18284+static __always_inline __must_check unsigned long
18285+__copy_from_user(void *to, const void __user *from, unsigned long len);
18286 __must_check unsigned long
18287-_copy_to_user(void __user *to, const void *from, unsigned len);
18288-__must_check unsigned long
18289-_copy_from_user(void *to, const void __user *from, unsigned len);
18290-__must_check unsigned long
18291-copy_in_user(void __user *to, const void __user *from, unsigned len);
18292+copy_in_user(void __user *to, const void __user *from, unsigned long len);
18293+
18294+extern void copy_to_user_overflow(void)
18295+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
18296+ __compiletime_error("copy_to_user() buffer size is not provably correct")
18297+#else
18298+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
18299+#endif
18300+;
18301+
18302+extern void copy_from_user_overflow(void)
18303+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
18304+ __compiletime_error("copy_from_user() buffer size is not provably correct")
18305+#else
18306+ __compiletime_warning("copy_from_user() buffer size is not provably correct")
18307+#endif
18308+;
18309
18310 static inline unsigned long __must_check copy_from_user(void *to,
18311 const void __user *from,
18312 unsigned long n)
18313 {
18314- int sz = __compiletime_object_size(to);
18315-
18316 might_fault();
18317- if (likely(sz == -1 || sz >= n))
18318- n = _copy_from_user(to, from, n);
18319-#ifdef CONFIG_DEBUG_VM
18320- else
18321- WARN(1, "Buffer overflow detected!\n");
18322-#endif
18323+
18324+ check_object_size(to, n, false);
18325+
18326+ if (access_ok(VERIFY_READ, from, n))
18327+ n = __copy_from_user(to, from, n);
18328+ else if (n < INT_MAX)
18329+ memset(to, 0, n);
18330 return n;
18331 }
18332
18333 static __always_inline __must_check
18334-int copy_to_user(void __user *dst, const void *src, unsigned size)
18335+int copy_to_user(void __user *dst, const void *src, unsigned long size)
18336 {
18337 might_fault();
18338
18339- return _copy_to_user(dst, src, size);
18340+ if (access_ok(VERIFY_WRITE, dst, size))
18341+ size = __copy_to_user(dst, src, size);
18342+ return size;
18343 }
18344
18345 static __always_inline __must_check
18346-int __copy_from_user(void *dst, const void __user *src, unsigned size)
18347+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
18348 {
18349- int ret = 0;
18350+ size_t sz = __compiletime_object_size(dst);
18351+ unsigned ret = 0;
18352
18353 might_fault();
18354+
18355+ if (size > INT_MAX)
18356+ return size;
18357+
18358+ check_object_size(dst, size, false);
18359+
18360+#ifdef CONFIG_PAX_MEMORY_UDEREF
18361+ if (!__access_ok(VERIFY_READ, src, size))
18362+ return size;
18363+#endif
18364+
18365+ if (unlikely(sz != (size_t)-1 && sz < size)) {
18366+ copy_from_user_overflow();
18367+ return size;
18368+ }
18369+
18370 if (!__builtin_constant_p(size))
18371- return copy_user_generic(dst, (__force void *)src, size);
18372+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
18373 switch (size) {
18374- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
18375+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
18376 ret, "b", "b", "=q", 1);
18377 return ret;
18378- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
18379+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
18380 ret, "w", "w", "=r", 2);
18381 return ret;
18382- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
18383+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
18384 ret, "l", "k", "=r", 4);
18385 return ret;
18386- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
18387+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
18388 ret, "q", "", "=r", 8);
18389 return ret;
18390 case 10:
18391- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
18392+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
18393 ret, "q", "", "=r", 10);
18394 if (unlikely(ret))
18395 return ret;
18396 __get_user_asm(*(u16 *)(8 + (char *)dst),
18397- (u16 __user *)(8 + (char __user *)src),
18398+ (const u16 __user *)(8 + (const char __user *)src),
18399 ret, "w", "w", "=r", 2);
18400 return ret;
18401 case 16:
18402- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
18403+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
18404 ret, "q", "", "=r", 16);
18405 if (unlikely(ret))
18406 return ret;
18407 __get_user_asm(*(u64 *)(8 + (char *)dst),
18408- (u64 __user *)(8 + (char __user *)src),
18409+ (const u64 __user *)(8 + (const char __user *)src),
18410 ret, "q", "", "=r", 8);
18411 return ret;
18412 default:
18413- return copy_user_generic(dst, (__force void *)src, size);
18414+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
18415 }
18416 }
18417
18418 static __always_inline __must_check
18419-int __copy_to_user(void __user *dst, const void *src, unsigned size)
18420+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
18421 {
18422- int ret = 0;
18423+ size_t sz = __compiletime_object_size(src);
18424+ unsigned ret = 0;
18425
18426 might_fault();
18427+
18428+ if (size > INT_MAX)
18429+ return size;
18430+
18431+ check_object_size(src, size, true);
18432+
18433+#ifdef CONFIG_PAX_MEMORY_UDEREF
18434+ if (!__access_ok(VERIFY_WRITE, dst, size))
18435+ return size;
18436+#endif
18437+
18438+ if (unlikely(sz != (size_t)-1 && sz < size)) {
18439+ copy_to_user_overflow();
18440+ return size;
18441+ }
18442+
18443 if (!__builtin_constant_p(size))
18444- return copy_user_generic((__force void *)dst, src, size);
18445+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
18446 switch (size) {
18447- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
18448+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
18449 ret, "b", "b", "iq", 1);
18450 return ret;
18451- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
18452+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
18453 ret, "w", "w", "ir", 2);
18454 return ret;
18455- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
18456+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
18457 ret, "l", "k", "ir", 4);
18458 return ret;
18459- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
18460+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
18461 ret, "q", "", "er", 8);
18462 return ret;
18463 case 10:
18464- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
18465+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
18466 ret, "q", "", "er", 10);
18467 if (unlikely(ret))
18468 return ret;
18469 asm("":::"memory");
18470- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
18471+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
18472 ret, "w", "w", "ir", 2);
18473 return ret;
18474 case 16:
18475- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
18476+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
18477 ret, "q", "", "er", 16);
18478 if (unlikely(ret))
18479 return ret;
18480 asm("":::"memory");
18481- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
18482+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
18483 ret, "q", "", "er", 8);
18484 return ret;
18485 default:
18486- return copy_user_generic((__force void *)dst, src, size);
18487+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
18488 }
18489 }
18490
18491 static __always_inline __must_check
18492-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
18493+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
18494 {
18495- int ret = 0;
18496+ unsigned ret = 0;
18497
18498 might_fault();
18499+
18500+ if (size > INT_MAX)
18501+ return size;
18502+
18503+#ifdef CONFIG_PAX_MEMORY_UDEREF
18504+ if (!__access_ok(VERIFY_READ, src, size))
18505+ return size;
18506+ if (!__access_ok(VERIFY_WRITE, dst, size))
18507+ return size;
18508+#endif
18509+
18510 if (!__builtin_constant_p(size))
18511- return copy_user_generic((__force void *)dst,
18512- (__force void *)src, size);
18513+ return copy_user_generic((__force_kernel void *)____m(dst),
18514+ (__force_kernel const void *)____m(src), size);
18515 switch (size) {
18516 case 1: {
18517 u8 tmp;
18518- __get_user_asm(tmp, (u8 __user *)src,
18519+ __get_user_asm(tmp, (const u8 __user *)src,
18520 ret, "b", "b", "=q", 1);
18521 if (likely(!ret))
18522 __put_user_asm(tmp, (u8 __user *)dst,
18523@@ -185,7 +250,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
18524 }
18525 case 2: {
18526 u16 tmp;
18527- __get_user_asm(tmp, (u16 __user *)src,
18528+ __get_user_asm(tmp, (const u16 __user *)src,
18529 ret, "w", "w", "=r", 2);
18530 if (likely(!ret))
18531 __put_user_asm(tmp, (u16 __user *)dst,
18532@@ -195,7 +260,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
18533
18534 case 4: {
18535 u32 tmp;
18536- __get_user_asm(tmp, (u32 __user *)src,
18537+ __get_user_asm(tmp, (const u32 __user *)src,
18538 ret, "l", "k", "=r", 4);
18539 if (likely(!ret))
18540 __put_user_asm(tmp, (u32 __user *)dst,
18541@@ -204,7 +269,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
18542 }
18543 case 8: {
18544 u64 tmp;
18545- __get_user_asm(tmp, (u64 __user *)src,
18546+ __get_user_asm(tmp, (const u64 __user *)src,
18547 ret, "q", "", "=r", 8);
18548 if (likely(!ret))
18549 __put_user_asm(tmp, (u64 __user *)dst,
18550@@ -212,41 +277,72 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
18551 return ret;
18552 }
18553 default:
18554- return copy_user_generic((__force void *)dst,
18555- (__force void *)src, size);
18556+ return copy_user_generic((__force_kernel void *)____m(dst),
18557+ (__force_kernel const void *)____m(src), size);
18558 }
18559 }
18560
18561-static __must_check __always_inline int
18562-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
18563+static __must_check __always_inline unsigned long
18564+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
18565 {
18566- return copy_user_generic(dst, (__force const void *)src, size);
18567+ if (size > INT_MAX)
18568+ return size;
18569+
18570+#ifdef CONFIG_PAX_MEMORY_UDEREF
18571+ if (!__access_ok(VERIFY_READ, src, size))
18572+ return size;
18573+#endif
18574+
18575+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
18576 }
18577
18578-static __must_check __always_inline int
18579-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
18580+static __must_check __always_inline unsigned long
18581+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
18582 {
18583- return copy_user_generic((__force void *)dst, src, size);
18584+ if (size > INT_MAX)
18585+ return size;
18586+
18587+#ifdef CONFIG_PAX_MEMORY_UDEREF
18588+ if (!__access_ok(VERIFY_WRITE, dst, size))
18589+ return size;
18590+#endif
18591+
18592+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
18593 }
18594
18595-extern long __copy_user_nocache(void *dst, const void __user *src,
18596- unsigned size, int zerorest);
18597+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
18598+ unsigned long size, int zerorest) __size_overflow(3);
18599
18600-static inline int
18601-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
18602+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
18603 {
18604 might_sleep();
18605+
18606+ if (size > INT_MAX)
18607+ return size;
18608+
18609+#ifdef CONFIG_PAX_MEMORY_UDEREF
18610+ if (!__access_ok(VERIFY_READ, src, size))
18611+ return size;
18612+#endif
18613+
18614 return __copy_user_nocache(dst, src, size, 1);
18615 }
18616
18617-static inline int
18618-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
18619- unsigned size)
18620+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
18621+ unsigned long size)
18622 {
18623+ if (size > INT_MAX)
18624+ return size;
18625+
18626+#ifdef CONFIG_PAX_MEMORY_UDEREF
18627+ if (!__access_ok(VERIFY_READ, src, size))
18628+ return size;
18629+#endif
18630+
18631 return __copy_user_nocache(dst, src, size, 0);
18632 }
18633
18634-unsigned long
18635-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
18636+extern unsigned long
18637+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
18638
18639 #endif /* _ASM_X86_UACCESS_64_H */
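
The uaccess_64.h hunks above repeat one guard shape across the __copy_to_user/__copy_in_user/__copy_*_nocache family: widen the size type, reject anything over INT_MAX, range-check the user pointer under UDEREF, and signal failure by returning the full requested size (the copy-routine convention for "bytes not copied"). A minimal userspace sketch of that structure — range_ok() and checked_copy() are hypothetical stand-ins, not kernel APIs:

#include <limits.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for the kernel's __access_ok() range check;
 * the real predicate validates a userspace address range. */
static int range_ok(const void *ptr, size_t size)
{
    return ptr != NULL && size <= 4096;   /* toy bound for the sketch */
}

/* The guard shape the hunks add: refuse oversized requests, refuse
 * bad ranges, and report failure as "all `size` bytes left uncopied". */
static size_t checked_copy(void *dst, const void *src, size_t size)
{
    if (size > INT_MAX)         /* size_t is wider than the old int API */
        return size;
    if (!range_ok(dst, size))   /* UDEREF-style destination check */
        return size;
    memcpy(dst, src, size);     /* all guards passed: do the copy */
    return 0;                   /* zero bytes left uncopied */
}

int main(void)
{
    char out[16];
    printf("uncopied: %zu\n", checked_copy(out, "hello", 6));  /* 0 */
    printf("uncopied: %zu\n", checked_copy(NULL, "hello", 6)); /* 6 */
    return 0;
}

Returning `size` (rather than -EFAULT) keeps the widened functions compatible with callers that treat the return value as an uncopied-byte count.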
18640diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
18641index 5b238981..77fdd78 100644
18642--- a/arch/x86/include/asm/word-at-a-time.h
18643+++ b/arch/x86/include/asm/word-at-a-time.h
18644@@ -11,7 +11,7 @@
18645 * and shift, for example.
18646 */
18647 struct word_at_a_time {
18648- const unsigned long one_bits, high_bits;
18649+ unsigned long one_bits, high_bits;
18650 };
18651
18652 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
18653diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
18654index d8d9922..bf6cecb 100644
18655--- a/arch/x86/include/asm/x86_init.h
18656+++ b/arch/x86/include/asm/x86_init.h
18657@@ -129,7 +129,7 @@ struct x86_init_ops {
18658 struct x86_init_timers timers;
18659 struct x86_init_iommu iommu;
18660 struct x86_init_pci pci;
18661-};
18662+} __no_const;
18663
18664 /**
18665 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
18666@@ -140,7 +140,7 @@ struct x86_cpuinit_ops {
18667 void (*setup_percpu_clockev)(void);
18668 void (*early_percpu_clock_init)(void);
18669 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
18670-};
18671+} __no_const;
18672
18673 /**
18674 * struct x86_platform_ops - platform specific runtime functions
18675@@ -166,7 +166,7 @@ struct x86_platform_ops {
18676 void (*save_sched_clock_state)(void);
18677 void (*restore_sched_clock_state)(void);
18678 void (*apic_post_init)(void);
18679-};
18680+} __no_const;
18681
18682 struct pci_dev;
18683 struct msi_msg;
18684@@ -180,7 +180,7 @@ struct x86_msi_ops {
18685 void (*teardown_msi_irqs)(struct pci_dev *dev);
18686 void (*restore_msi_irqs)(struct pci_dev *dev, int irq);
18687 int (*setup_hpet_msi)(unsigned int irq, unsigned int id);
18688-};
18689+} __no_const;
18690
18691 struct IO_APIC_route_entry;
18692 struct io_apic_irq_attr;
18693@@ -201,7 +201,7 @@ struct x86_io_apic_ops {
18694 unsigned int destination, int vector,
18695 struct io_apic_irq_attr *attr);
18696 void (*eoi_ioapic_pin)(int apic, int pin, int vector);
18697-};
18698+} __no_const;
18699
18700 extern struct x86_init_ops x86_init;
18701 extern struct x86_cpuinit_ops x86_cpuinit;
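
The `__no_const` annotations added to the x86_init/x86_cpuinit/x86_platform/x86_msi/x86_io_apic ops structs mark tables the kernel must still write at run time; under the constification approach this patch set uses, function-pointer structs are otherwise made read-only wholesale. A small sketch of the distinction, with invented timer_ops names:

#include <stdio.h>

struct timer_ops {
    void (*setup)(void);
};

static void default_setup(void) { puts("default timer setup"); }
static void quirk_setup(void)   { puts("platform quirk applied"); }

/* Constified table: lives in .rodata, so any runtime store faults.
 * This is what blanket constification would do to these ops. */
static const struct timer_ops fixed_ops = { .setup = default_setup };

/* Table left writable -- the effect of __no_const -- because boot
 * code legitimately overrides members for the platform at hand. */
static struct timer_ops patchable_ops = { .setup = default_setup };

int main(void)
{
    patchable_ops.setup = quirk_setup;  /* fine: the struct is writable */
    /* fixed_ops.setup = quirk_setup;      rejected at compile time */
    patchable_ops.setup();
    fixed_ops.setup();
    return 0;
}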
18702diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
18703index 0415cda..3b22adc 100644
18704--- a/arch/x86/include/asm/xsave.h
18705+++ b/arch/x86/include/asm/xsave.h
18706@@ -70,8 +70,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
18707 if (unlikely(err))
18708 return -EFAULT;
18709
18710+ pax_open_userland();
18711 __asm__ __volatile__(ASM_STAC "\n"
18712- "1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
18713+ "1:"
18714+ __copyuser_seg
18715+ ".byte " REX_PREFIX "0x0f,0xae,0x27\n"
18716 "2: " ASM_CLAC "\n"
18717 ".section .fixup,\"ax\"\n"
18718 "3: movl $-1,%[err]\n"
18719@@ -81,18 +84,22 @@ static inline int xsave_user(struct xsave_struct __user *buf)
18720 : [err] "=r" (err)
18721 : "D" (buf), "a" (-1), "d" (-1), "0" (0)
18722 : "memory");
18723+ pax_close_userland();
18724 return err;
18725 }
18726
18727 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
18728 {
18729 int err;
18730- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
18731+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
18732 u32 lmask = mask;
18733 u32 hmask = mask >> 32;
18734
18735+ pax_open_userland();
18736 __asm__ __volatile__(ASM_STAC "\n"
18737- "1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
18738+ "1:"
18739+ __copyuser_seg
18740+ ".byte " REX_PREFIX "0x0f,0xae,0x2f\n"
18741 "2: " ASM_CLAC "\n"
18742 ".section .fixup,\"ax\"\n"
18743 "3: movl $-1,%[err]\n"
18744@@ -102,6 +109,7 @@ static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
18745 : [err] "=r" (err)
18746 : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
18747 : "memory"); /* memory required? */
18748+ pax_close_userland();
18749 return err;
18750 }
18751
18752diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
18753index bbae024..e1528f9 100644
18754--- a/arch/x86/include/uapi/asm/e820.h
18755+++ b/arch/x86/include/uapi/asm/e820.h
18756@@ -63,7 +63,7 @@ struct e820map {
18757 #define ISA_START_ADDRESS 0xa0000
18758 #define ISA_END_ADDRESS 0x100000
18759
18760-#define BIOS_BEGIN 0x000a0000
18761+#define BIOS_BEGIN 0x000c0000
18762 #define BIOS_END 0x00100000
18763
18764 #define BIOS_ROM_BASE 0xffe00000
18765diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
18766index 7bd3bd3..5dac791 100644
18767--- a/arch/x86/kernel/Makefile
18768+++ b/arch/x86/kernel/Makefile
18769@@ -22,7 +22,7 @@ obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
18770 obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
18771 obj-$(CONFIG_IRQ_WORK) += irq_work.o
18772 obj-y += probe_roms.o
18773-obj-$(CONFIG_X86_32) += i386_ksyms_32.o
18774+obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
18775 obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
18776 obj-y += syscall_$(BITS).o
18777 obj-$(CONFIG_X86_64) += vsyscall_64.o
18778diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
18779index 230c8ea..f915130 100644
18780--- a/arch/x86/kernel/acpi/boot.c
18781+++ b/arch/x86/kernel/acpi/boot.c
18782@@ -1361,7 +1361,7 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
18783 * If your system is blacklisted here, but you find that acpi=force
18784 * works for you, please contact linux-acpi@vger.kernel.org
18785 */
18786-static struct dmi_system_id __initdata acpi_dmi_table[] = {
18787+static const struct dmi_system_id __initconst acpi_dmi_table[] = {
18788 /*
18789 * Boxes that need ACPI disabled
18790 */
18791@@ -1436,7 +1436,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
18792 };
18793
18794 /* second table for DMI checks that should run after early-quirks */
18795-static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
18796+static const struct dmi_system_id __initconst acpi_dmi_table_late[] = {
18797 /*
18798 * HP laptops which use a DSDT reporting as HP/SB400/10000,
18799 * which includes some code which overrides all temperature
18800diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
18801index ec94e11..7fbbec0 100644
18802--- a/arch/x86/kernel/acpi/sleep.c
18803+++ b/arch/x86/kernel/acpi/sleep.c
18804@@ -88,8 +88,12 @@ int acpi_suspend_lowlevel(void)
18805 #else /* CONFIG_64BIT */
18806 #ifdef CONFIG_SMP
18807 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
18808+
18809+ pax_open_kernel();
18810 early_gdt_descr.address =
18811 (unsigned long)get_cpu_gdt_table(smp_processor_id());
18812+ pax_close_kernel();
18813+
18814 initial_gs = per_cpu_offset(smp_processor_id());
18815 #endif
18816 initial_code = (unsigned long)wakeup_long64;
18817diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
18818index d1daa66..59fecba 100644
18819--- a/arch/x86/kernel/acpi/wakeup_32.S
18820+++ b/arch/x86/kernel/acpi/wakeup_32.S
18821@@ -29,13 +29,11 @@ wakeup_pmode_return:
18822 # and restore the stack ... but you need gdt for this to work
18823 movl saved_context_esp, %esp
18824
18825- movl %cs:saved_magic, %eax
18826- cmpl $0x12345678, %eax
18827+ cmpl $0x12345678, saved_magic
18828 jne bogus_magic
18829
18830 # jump to place where we left off
18831- movl saved_eip, %eax
18832- jmp *%eax
18833+ jmp *(saved_eip)
18834
18835 bogus_magic:
18836 jmp bogus_magic
18837diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
18838index c15cf9a..0e63558 100644
18839--- a/arch/x86/kernel/alternative.c
18840+++ b/arch/x86/kernel/alternative.c
18841@@ -268,6 +268,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
18842 */
18843 for (a = start; a < end; a++) {
18844 instr = (u8 *)&a->instr_offset + a->instr_offset;
18845+
18846+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18847+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
18848+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
18849+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
18850+#endif
18851+
18852 replacement = (u8 *)&a->repl_offset + a->repl_offset;
18853 BUG_ON(a->replacementlen > a->instrlen);
18854 BUG_ON(a->instrlen > sizeof(insnbuf));
18855@@ -299,10 +306,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
18856 for (poff = start; poff < end; poff++) {
18857 u8 *ptr = (u8 *)poff + *poff;
18858
18859+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18860+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
18861+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
18862+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
18863+#endif
18864+
18865 if (!*poff || ptr < text || ptr >= text_end)
18866 continue;
18867 /* turn DS segment override prefix into lock prefix */
18868- if (*ptr == 0x3e)
18869+ if (*ktla_ktva(ptr) == 0x3e)
18870 text_poke(ptr, ((unsigned char []){0xf0}), 1);
18871 }
18872 mutex_unlock(&text_mutex);
18873@@ -317,10 +330,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
18874 for (poff = start; poff < end; poff++) {
18875 u8 *ptr = (u8 *)poff + *poff;
18876
18877+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18878+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
18879+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
18880+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
18881+#endif
18882+
18883 if (!*poff || ptr < text || ptr >= text_end)
18884 continue;
18885 /* turn lock prefix into DS segment override prefix */
18886- if (*ptr == 0xf0)
18887+ if (*ktla_ktva(ptr) == 0xf0)
18888 text_poke(ptr, ((unsigned char []){0x3E}), 1);
18889 }
18890 mutex_unlock(&text_mutex);
18891@@ -468,7 +487,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
18892
18893 BUG_ON(p->len > MAX_PATCH_LEN);
18894 /* prep the buffer with the original instructions */
18895- memcpy(insnbuf, p->instr, p->len);
18896+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
18897 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
18898 (unsigned long)p->instr, p->len);
18899
18900@@ -515,7 +534,7 @@ void __init alternative_instructions(void)
18901 if (!uniproc_patched || num_possible_cpus() == 1)
18902 free_init_pages("SMP alternatives",
18903 (unsigned long)__smp_locks,
18904- (unsigned long)__smp_locks_end);
18905+ PAGE_ALIGN((unsigned long)__smp_locks_end));
18906 #endif
18907
18908 apply_paravirt(__parainstructions, __parainstructions_end);
18909@@ -535,13 +554,17 @@ void __init alternative_instructions(void)
18910 * instructions. And on the local CPU you need to be protected against NMI or MCE
18911 * handlers seeing an inconsistent instruction while you patch.
18912 */
18913-void *__init_or_module text_poke_early(void *addr, const void *opcode,
18914+void *__kprobes text_poke_early(void *addr, const void *opcode,
18915 size_t len)
18916 {
18917 unsigned long flags;
18918 local_irq_save(flags);
18919- memcpy(addr, opcode, len);
18920+
18921+ pax_open_kernel();
18922+ memcpy(ktla_ktva(addr), opcode, len);
18923 sync_core();
18924+ pax_close_kernel();
18925+
18926 local_irq_restore(flags);
18927 /* Could also do a CLFLUSH here to speed up CPU recovery; but
18928 that causes hangs on some VIA CPUs. */
18929@@ -563,36 +586,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
18930 */
18931 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
18932 {
18933- unsigned long flags;
18934- char *vaddr;
18935+ unsigned char *vaddr = ktla_ktva(addr);
18936 struct page *pages[2];
18937- int i;
18938+ size_t i;
18939
18940 if (!core_kernel_text((unsigned long)addr)) {
18941- pages[0] = vmalloc_to_page(addr);
18942- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
18943+ pages[0] = vmalloc_to_page(vaddr);
18944+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
18945 } else {
18946- pages[0] = virt_to_page(addr);
18947+ pages[0] = virt_to_page(vaddr);
18948 WARN_ON(!PageReserved(pages[0]));
18949- pages[1] = virt_to_page(addr + PAGE_SIZE);
18950+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
18951 }
18952 BUG_ON(!pages[0]);
18953- local_irq_save(flags);
18954- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
18955- if (pages[1])
18956- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
18957- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
18958- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
18959- clear_fixmap(FIX_TEXT_POKE0);
18960- if (pages[1])
18961- clear_fixmap(FIX_TEXT_POKE1);
18962- local_flush_tlb();
18963- sync_core();
18964- /* Could also do a CLFLUSH here to speed up CPU recovery; but
18965- that causes hangs on some VIA CPUs. */
18966+ text_poke_early(addr, opcode, len);
18967 for (i = 0; i < len; i++)
18968- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
18969- local_irq_restore(flags);
18970+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
18971 return addr;
18972 }
18973
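
The alternatives_smp_lock()/alternatives_smp_unlock() hunks patch a single prefix byte: 0x3e (a harmless DS segment override) becomes 0xf0 (LOCK) when a second CPU comes online, and back again when it goes away; the changes above only wrap that existing logic in KERNEXEC address fix-ups and ktla_ktva() reads. A toy model of the byte toggle over a fake instruction stream:

#include <stdio.h>

/* Fake instruction stream: a DS override (0x3e) in front of a
 * cmpxchg, as emitted for SMP-patchable atomics. */
static unsigned char text[] = { 0x3e, 0x0f, 0xb1, 0x0a };

static void smp_lock(unsigned char *insn)
{
    if (*insn == 0x3e)
        *insn = 0xf0;   /* DS override -> LOCK prefix */
}

static void smp_unlock(unsigned char *insn)
{
    if (*insn == 0xf0)
        *insn = 0x3e;   /* LOCK prefix -> DS override */
}

int main(void)
{
    smp_lock(text);
    printf("patched prefix:  %#x\n", text[0]);   /* 0xf0 */
    smp_unlock(text);
    printf("restored prefix: %#x\n", text[0]);   /* 0x3e */
    return 0;
}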
18974diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
18975index 904611b..004dde6 100644
18976--- a/arch/x86/kernel/apic/apic.c
18977+++ b/arch/x86/kernel/apic/apic.c
18978@@ -189,7 +189,7 @@ int first_system_vector = 0xfe;
18979 /*
18980 * Debug level, exported for io_apic.c
18981 */
18982-unsigned int apic_verbosity;
18983+int apic_verbosity;
18984
18985 int pic_mode;
18986
18987@@ -1955,7 +1955,7 @@ void smp_error_interrupt(struct pt_regs *regs)
18988 apic_write(APIC_ESR, 0);
18989 v1 = apic_read(APIC_ESR);
18990 ack_APIC_irq();
18991- atomic_inc(&irq_err_count);
18992+ atomic_inc_unchecked(&irq_err_count);
18993
18994 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
18995 smp_processor_id(), v0 , v1);
18996diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
18997index 00c77cf..2dc6a2d 100644
18998--- a/arch/x86/kernel/apic/apic_flat_64.c
18999+++ b/arch/x86/kernel/apic/apic_flat_64.c
19000@@ -157,7 +157,7 @@ static int flat_probe(void)
19001 return 1;
19002 }
19003
19004-static struct apic apic_flat = {
19005+static struct apic apic_flat __read_only = {
19006 .name = "flat",
19007 .probe = flat_probe,
19008 .acpi_madt_oem_check = flat_acpi_madt_oem_check,
19009@@ -271,7 +271,7 @@ static int physflat_probe(void)
19010 return 0;
19011 }
19012
19013-static struct apic apic_physflat = {
19014+static struct apic apic_physflat __read_only = {
19015
19016 .name = "physical flat",
19017 .probe = physflat_probe,
19018diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
19019index e145f28..2752888 100644
19020--- a/arch/x86/kernel/apic/apic_noop.c
19021+++ b/arch/x86/kernel/apic/apic_noop.c
19022@@ -119,7 +119,7 @@ static void noop_apic_write(u32 reg, u32 v)
19023 WARN_ON_ONCE(cpu_has_apic && !disable_apic);
19024 }
19025
19026-struct apic apic_noop = {
19027+struct apic apic_noop __read_only = {
19028 .name = "noop",
19029 .probe = noop_probe,
19030 .acpi_madt_oem_check = NULL,
19031diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
19032index d50e364..543bee3 100644
19033--- a/arch/x86/kernel/apic/bigsmp_32.c
19034+++ b/arch/x86/kernel/apic/bigsmp_32.c
19035@@ -152,7 +152,7 @@ static int probe_bigsmp(void)
19036 return dmi_bigsmp;
19037 }
19038
19039-static struct apic apic_bigsmp = {
19040+static struct apic apic_bigsmp __read_only = {
19041
19042 .name = "bigsmp",
19043 .probe = probe_bigsmp,
19044diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
19045index 0874799..a7a7892 100644
19046--- a/arch/x86/kernel/apic/es7000_32.c
19047+++ b/arch/x86/kernel/apic/es7000_32.c
19048@@ -608,8 +608,7 @@ static int es7000_mps_oem_check_cluster(struct mpc_table *mpc, char *oem,
19049 return ret && es7000_apic_is_cluster();
19050 }
19051
19052-/* We've been warned by a false positive warning.Use __refdata to keep calm. */
19053-static struct apic __refdata apic_es7000_cluster = {
19054+static struct apic apic_es7000_cluster __read_only = {
19055
19056 .name = "es7000",
19057 .probe = probe_es7000,
19058@@ -675,7 +674,7 @@ static struct apic __refdata apic_es7000_cluster = {
19059 .x86_32_early_logical_apicid = es7000_early_logical_apicid,
19060 };
19061
19062-static struct apic __refdata apic_es7000 = {
19063+static struct apic apic_es7000 __read_only = {
19064
19065 .name = "es7000",
19066 .probe = probe_es7000,
19067diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
19068index 9ed796c..e930fe4 100644
19069--- a/arch/x86/kernel/apic/io_apic.c
19070+++ b/arch/x86/kernel/apic/io_apic.c
19071@@ -1060,7 +1060,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
19072 }
19073 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
19074
19075-void lock_vector_lock(void)
19076+void lock_vector_lock(void) __acquires(vector_lock)
19077 {
19078 /* Used so that the online set of cpus does not change
19079 * during assign_irq_vector.
19080@@ -1068,7 +1068,7 @@ void lock_vector_lock(void)
19081 raw_spin_lock(&vector_lock);
19082 }
19083
19084-void unlock_vector_lock(void)
19085+void unlock_vector_lock(void) __releases(vector_lock)
19086 {
19087 raw_spin_unlock(&vector_lock);
19088 }
19089@@ -2362,7 +2362,7 @@ static void ack_apic_edge(struct irq_data *data)
19090 ack_APIC_irq();
19091 }
19092
19093-atomic_t irq_mis_count;
19094+atomic_unchecked_t irq_mis_count;
19095
19096 #ifdef CONFIG_GENERIC_PENDING_IRQ
19097 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
19098@@ -2503,7 +2503,7 @@ static void ack_apic_level(struct irq_data *data)
19099 * at the cpu.
19100 */
19101 if (!(v & (1 << (i & 0x1f)))) {
19102- atomic_inc(&irq_mis_count);
19103+ atomic_inc_unchecked(&irq_mis_count);
19104
19105 eoi_ioapic_irq(irq, cfg);
19106 }
19107diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
19108index d661ee9..791fd33 100644
19109--- a/arch/x86/kernel/apic/numaq_32.c
19110+++ b/arch/x86/kernel/apic/numaq_32.c
19111@@ -455,8 +455,7 @@ static void numaq_setup_portio_remap(void)
19112 (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD);
19113 }
19114
19115-/* Use __refdata to keep false positive warning calm. */
19116-static struct apic __refdata apic_numaq = {
19117+static struct apic apic_numaq __read_only = {
19118
19119 .name = "NUMAQ",
19120 .probe = probe_numaq,
19121diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
19122index eb35ef9..f184a21 100644
19123--- a/arch/x86/kernel/apic/probe_32.c
19124+++ b/arch/x86/kernel/apic/probe_32.c
19125@@ -72,7 +72,7 @@ static int probe_default(void)
19126 return 1;
19127 }
19128
19129-static struct apic apic_default = {
19130+static struct apic apic_default __read_only = {
19131
19132 .name = "default",
19133 .probe = probe_default,
19134diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c
19135index 77c95c0..434f8a4 100644
19136--- a/arch/x86/kernel/apic/summit_32.c
19137+++ b/arch/x86/kernel/apic/summit_32.c
19138@@ -486,7 +486,7 @@ void setup_summit(void)
19139 }
19140 #endif
19141
19142-static struct apic apic_summit = {
19143+static struct apic apic_summit __read_only = {
19144
19145 .name = "summit",
19146 .probe = probe_summit,
19147diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
19148index c88baa4..757aee1 100644
19149--- a/arch/x86/kernel/apic/x2apic_cluster.c
19150+++ b/arch/x86/kernel/apic/x2apic_cluster.c
19151@@ -183,7 +183,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
19152 return notifier_from_errno(err);
19153 }
19154
19155-static struct notifier_block __refdata x2apic_cpu_notifier = {
19156+static struct notifier_block x2apic_cpu_notifier = {
19157 .notifier_call = update_clusterinfo,
19158 };
19159
19160@@ -235,7 +235,7 @@ static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
19161 cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
19162 }
19163
19164-static struct apic apic_x2apic_cluster = {
19165+static struct apic apic_x2apic_cluster __read_only = {
19166
19167 .name = "cluster x2apic",
19168 .probe = x2apic_cluster_probe,
19169diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
19170index 562a76d..a003c0f 100644
19171--- a/arch/x86/kernel/apic/x2apic_phys.c
19172+++ b/arch/x86/kernel/apic/x2apic_phys.c
19173@@ -89,7 +89,7 @@ static int x2apic_phys_probe(void)
19174 return apic == &apic_x2apic_phys;
19175 }
19176
19177-static struct apic apic_x2apic_phys = {
19178+static struct apic apic_x2apic_phys __read_only = {
19179
19180 .name = "physical x2apic",
19181 .probe = x2apic_phys_probe,
19182diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
19183index 794f6eb..67e1db2 100644
19184--- a/arch/x86/kernel/apic/x2apic_uv_x.c
19185+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
19186@@ -342,7 +342,7 @@ static int uv_probe(void)
19187 return apic == &apic_x2apic_uv_x;
19188 }
19189
19190-static struct apic __refdata apic_x2apic_uv_x = {
19191+static struct apic apic_x2apic_uv_x __read_only = {
19192
19193 .name = "UV large system",
19194 .probe = uv_probe,
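
Every apic driver struct in the hunks above gains `__read_only`, moving the dispatch table into memory that is sealed after early boot, so an attacker with a kernel write primitive cannot redirect its function pointers. A userspace model of the seal using mmap/mprotect — the kernel mechanism differs, this only illustrates the effect:

#include <stdio.h>
#include <sys/mman.h>

struct apic_like {
    const char *name;
    int (*probe)(void);
};

static int probe_default(void) { return 1; }

int main(void)
{
    /* "Init" phase: the table is still writable. */
    struct apic_like *apic = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (apic == MAP_FAILED)
        return 1;
    apic->name = "default";
    apic->probe = probe_default;

    /* Seal the page: from here on the struct behaves like __read_only. */
    if (mprotect(apic, 4096, PROT_READ))
        return 1;

    printf("%s probe -> %d\n", apic->name, apic->probe());
    /* apic->probe = NULL;   would now fault instead of corrupting the table */
    return 0;
}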
19195diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
19196index 53a4e27..038760a 100644
19197--- a/arch/x86/kernel/apm_32.c
19198+++ b/arch/x86/kernel/apm_32.c
19199@@ -433,7 +433,7 @@ static DEFINE_MUTEX(apm_mutex);
19200 * This is for buggy BIOSes that refer to (real mode) segment 0x40
19201 * even though they are called in protected mode.
19202 */
19203-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
19204+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
19205 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
19206
19207 static const char driver_version[] = "1.16ac"; /* no spaces */
19208@@ -611,7 +611,10 @@ static long __apm_bios_call(void *_call)
19209 BUG_ON(cpu != 0);
19210 gdt = get_cpu_gdt_table(cpu);
19211 save_desc_40 = gdt[0x40 / 8];
19212+
19213+ pax_open_kernel();
19214 gdt[0x40 / 8] = bad_bios_desc;
19215+ pax_close_kernel();
19216
19217 apm_irq_save(flags);
19218 APM_DO_SAVE_SEGS;
19219@@ -620,7 +623,11 @@ static long __apm_bios_call(void *_call)
19220 &call->esi);
19221 APM_DO_RESTORE_SEGS;
19222 apm_irq_restore(flags);
19223+
19224+ pax_open_kernel();
19225 gdt[0x40 / 8] = save_desc_40;
19226+ pax_close_kernel();
19227+
19228 put_cpu();
19229
19230 return call->eax & 0xff;
19231@@ -687,7 +694,10 @@ static long __apm_bios_call_simple(void *_call)
19232 BUG_ON(cpu != 0);
19233 gdt = get_cpu_gdt_table(cpu);
19234 save_desc_40 = gdt[0x40 / 8];
19235+
19236+ pax_open_kernel();
19237 gdt[0x40 / 8] = bad_bios_desc;
19238+ pax_close_kernel();
19239
19240 apm_irq_save(flags);
19241 APM_DO_SAVE_SEGS;
19242@@ -695,7 +705,11 @@ static long __apm_bios_call_simple(void *_call)
19243 &call->eax);
19244 APM_DO_RESTORE_SEGS;
19245 apm_irq_restore(flags);
19246+
19247+ pax_open_kernel();
19248 gdt[0x40 / 8] = save_desc_40;
19249+ pax_close_kernel();
19250+
19251 put_cpu();
19252 return error;
19253 }
19254@@ -2362,12 +2376,15 @@ static int __init apm_init(void)
19255 * code to that CPU.
19256 */
19257 gdt = get_cpu_gdt_table(0);
19258+
19259+ pax_open_kernel();
19260 set_desc_base(&gdt[APM_CS >> 3],
19261 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
19262 set_desc_base(&gdt[APM_CS_16 >> 3],
19263 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
19264 set_desc_base(&gdt[APM_DS >> 3],
19265 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
19266+ pax_close_kernel();
19267
19268 proc_create("apm", 0, NULL, &apm_file_ops);
19269
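
The APM hunks bracket every GDT descriptor store with pax_open_kernel()/pax_close_kernel(): under KERNEXEC the GDT sits in read-only memory, so each sanctioned update briefly opens a write window and closes it again. A rough userspace analogue of that open/write/close discipline, complementing the seal-once model sketched earlier (open_window()/close_window() are invented stand-ins):

#include <stdio.h>
#include <sys/mman.h>

static unsigned long *gdt_slot;   /* stands in for gdt[0x40 / 8] */

static void open_window(void)  { mprotect(gdt_slot, 4096, PROT_READ | PROT_WRITE); }
static void close_window(void) { mprotect(gdt_slot, 4096, PROT_READ); }

int main(void)
{
    gdt_slot = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (gdt_slot == MAP_FAILED)
        return 1;
    close_window();              /* steady state: read-only */

    open_window();               /* pax_open_kernel() */
    *gdt_slot = 0x4093;          /* install the substitute descriptor */
    close_window();              /* pax_close_kernel() */

    printf("descriptor word: %#lx\n", *gdt_slot);
    return 0;
}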
19270diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
19271index 2861082..6d4718e 100644
19272--- a/arch/x86/kernel/asm-offsets.c
19273+++ b/arch/x86/kernel/asm-offsets.c
19274@@ -33,6 +33,8 @@ void common(void) {
19275 OFFSET(TI_status, thread_info, status);
19276 OFFSET(TI_addr_limit, thread_info, addr_limit);
19277 OFFSET(TI_preempt_count, thread_info, preempt_count);
19278+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
19279+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
19280
19281 BLANK();
19282 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
19283@@ -53,8 +55,26 @@ void common(void) {
19284 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
19285 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
19286 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
19287+
19288+#ifdef CONFIG_PAX_KERNEXEC
19289+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
19290 #endif
19291
19292+#ifdef CONFIG_PAX_MEMORY_UDEREF
19293+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
19294+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
19295+#ifdef CONFIG_X86_64
19296+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
19297+#endif
19298+#endif
19299+
19300+#endif
19301+
19302+ BLANK();
19303+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
19304+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
19305+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
19306+
19307 #ifdef CONFIG_XEN
19308 BLANK();
19309 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
19310diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
19311index e7c798b..2b2019b 100644
19312--- a/arch/x86/kernel/asm-offsets_64.c
19313+++ b/arch/x86/kernel/asm-offsets_64.c
19314@@ -77,6 +77,7 @@ int main(void)
19315 BLANK();
19316 #undef ENTRY
19317
19318+ DEFINE(TSS_size, sizeof(struct tss_struct));
19319 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
19320 BLANK();
19321
19322diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
19323index b0684e4..22ccfd7 100644
19324--- a/arch/x86/kernel/cpu/Makefile
19325+++ b/arch/x86/kernel/cpu/Makefile
19326@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
19327 CFLAGS_REMOVE_perf_event.o = -pg
19328 endif
19329
19330-# Make sure load_percpu_segment has no stackprotector
19331-nostackp := $(call cc-option, -fno-stack-protector)
19332-CFLAGS_common.o := $(nostackp)
19333-
19334 obj-y := intel_cacheinfo.o scattered.o topology.o
19335 obj-y += proc.o capflags.o powerflags.o common.o
19336 obj-y += rdrand.o
19337diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
19338index 5013a48..0782c53 100644
19339--- a/arch/x86/kernel/cpu/amd.c
19340+++ b/arch/x86/kernel/cpu/amd.c
19341@@ -744,7 +744,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
19342 unsigned int size)
19343 {
19344 /* AMD errata T13 (order #21922) */
19345- if ((c->x86 == 6)) {
19346+ if (c->x86 == 6) {
19347 /* Duron Rev A0 */
19348 if (c->x86_model == 3 && c->x86_mask == 0)
19349 size = 64;
19350diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
19351index 22018f7..df77e23 100644
19352--- a/arch/x86/kernel/cpu/common.c
19353+++ b/arch/x86/kernel/cpu/common.c
19354@@ -88,60 +88,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
19355
19356 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
19357
19358-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
19359-#ifdef CONFIG_X86_64
19360- /*
19361- * We need valid kernel segments for data and code in long mode too
19362- * IRET will check the segment types kkeil 2000/10/28
19363- * Also sysret mandates a special GDT layout
19364- *
19365- * TLS descriptors are currently at a different place compared to i386.
19366- * Hopefully nobody expects them at a fixed place (Wine?)
19367- */
19368- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
19369- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
19370- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
19371- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
19372- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
19373- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
19374-#else
19375- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
19376- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
19377- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
19378- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
19379- /*
19380- * Segments used for calling PnP BIOS have byte granularity.
19381- * They code segments and data segments have fixed 64k limits,
19382- * the transfer segment sizes are set at run time.
19383- */
19384- /* 32-bit code */
19385- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
19386- /* 16-bit code */
19387- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
19388- /* 16-bit data */
19389- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
19390- /* 16-bit data */
19391- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
19392- /* 16-bit data */
19393- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
19394- /*
19395- * The APM segments have byte granularity and their bases
19396- * are set at run time. All have 64k limits.
19397- */
19398- /* 32-bit code */
19399- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
19400- /* 16-bit code */
19401- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
19402- /* data */
19403- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
19404-
19405- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
19406- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
19407- GDT_STACK_CANARY_INIT
19408-#endif
19409-} };
19410-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
19411-
19412 static int __init x86_xsave_setup(char *s)
19413 {
19414 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
19415@@ -288,6 +234,57 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
19416 set_in_cr4(X86_CR4_SMAP);
19417 }
19418
19419+#ifdef CONFIG_X86_64
19420+static __init int setup_disable_pcid(char *arg)
19421+{
19422+ setup_clear_cpu_cap(X86_FEATURE_PCID);
19423+
19424+#ifdef CONFIG_PAX_MEMORY_UDEREF
19425+ if (clone_pgd_mask != ~(pgdval_t)0UL)
19426+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
19427+#endif
19428+
19429+ return 1;
19430+}
19431+__setup("nopcid", setup_disable_pcid);
19432+
19433+static void setup_pcid(struct cpuinfo_x86 *c)
19434+{
19435+ if (!cpu_has(c, X86_FEATURE_PCID)) {
19436+
19437+#ifdef CONFIG_PAX_MEMORY_UDEREF
19438+ if (clone_pgd_mask != ~(pgdval_t)0UL) {
19439+ pax_open_kernel();
19440+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
19441+ pax_close_kernel();
19442+ printk("PAX: slow and weak UDEREF enabled\n");
19443+ } else
19444+ printk("PAX: UDEREF disabled\n");
19445+#endif
19446+
19447+ return;
19448+ }
19449+
19450+ printk("PAX: PCID detected\n");
19451+ set_in_cr4(X86_CR4_PCIDE);
19452+
19453+#ifdef CONFIG_PAX_MEMORY_UDEREF
19454+ pax_open_kernel();
19455+ clone_pgd_mask = ~(pgdval_t)0UL;
19456+ pax_close_kernel();
19457+ if (pax_user_shadow_base)
19458+ printk("PAX: weak UDEREF enabled\n");
19459+ else {
19460+ set_cpu_cap(c, X86_FEATURE_STRONGUDEREF);
19461+ printk("PAX: strong UDEREF enabled\n");
19462+ }
19463+#endif
19464+
19465+ if (cpu_has(c, X86_FEATURE_INVPCID))
19466+ printk("PAX: INVPCID detected\n");
19467+}
19468+#endif
19469+
19470 /*
19471 * Some CPU features depend on higher CPUID levels, which may not always
19472 * be available due to CPUID level capping or broken virtualization
19473@@ -386,7 +383,7 @@ void switch_to_new_gdt(int cpu)
19474 {
19475 struct desc_ptr gdt_descr;
19476
19477- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
19478+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
19479 gdt_descr.size = GDT_SIZE - 1;
19480 load_gdt(&gdt_descr);
19481 /* Reload the per-cpu base */
19482@@ -874,6 +871,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
19483 setup_smep(c);
19484 setup_smap(c);
19485
19486+#ifdef CONFIG_X86_64
19487+ setup_pcid(c);
19488+#endif
19489+
19490 /*
19491 * The vendor-specific functions might have changed features.
19492 * Now we do "generic changes."
19493@@ -882,6 +883,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
19494 /* Filter out anything that depends on CPUID levels we don't have */
19495 filter_cpuid_features(c, true);
19496
19497+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
19498+ setup_clear_cpu_cap(X86_FEATURE_SEP);
19499+#endif
19500+
19501 /* If the model name is still unset, do table lookup. */
19502 if (!c->x86_model_id[0]) {
19503 const char *p;
19504@@ -1069,10 +1074,12 @@ static __init int setup_disablecpuid(char *arg)
19505 }
19506 __setup("clearcpuid=", setup_disablecpuid);
19507
19508+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
19509+EXPORT_PER_CPU_SYMBOL(current_tinfo);
19510+
19511 #ifdef CONFIG_X86_64
19512 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
19513-struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1,
19514- (unsigned long) nmi_idt_table };
19515+struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) nmi_idt_table };
19516
19517 DEFINE_PER_CPU_FIRST(union irq_stack_union,
19518 irq_stack_union) __aligned(PAGE_SIZE);
19519@@ -1086,7 +1093,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
19520 EXPORT_PER_CPU_SYMBOL(current_task);
19521
19522 DEFINE_PER_CPU(unsigned long, kernel_stack) =
19523- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
19524+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
19525 EXPORT_PER_CPU_SYMBOL(kernel_stack);
19526
19527 DEFINE_PER_CPU(char *, irq_stack_ptr) =
19528@@ -1231,7 +1238,7 @@ void __cpuinit cpu_init(void)
19529 load_ucode_ap();
19530
19531 cpu = stack_smp_processor_id();
19532- t = &per_cpu(init_tss, cpu);
19533+ t = init_tss + cpu;
19534 oist = &per_cpu(orig_ist, cpu);
19535
19536 #ifdef CONFIG_NUMA
19537@@ -1257,7 +1264,7 @@ void __cpuinit cpu_init(void)
19538 switch_to_new_gdt(cpu);
19539 loadsegment(fs, 0);
19540
19541- load_idt((const struct desc_ptr *)&idt_descr);
19542+ load_idt(&idt_descr);
19543
19544 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
19545 syscall_init();
19546@@ -1266,7 +1273,6 @@ void __cpuinit cpu_init(void)
19547 wrmsrl(MSR_KERNEL_GS_BASE, 0);
19548 barrier();
19549
19550- x86_configure_nx();
19551 enable_x2apic();
19552
19553 /*
19554@@ -1318,7 +1324,7 @@ void __cpuinit cpu_init(void)
19555 {
19556 int cpu = smp_processor_id();
19557 struct task_struct *curr = current;
19558- struct tss_struct *t = &per_cpu(init_tss, cpu);
19559+ struct tss_struct *t = init_tss + cpu;
19560 struct thread_struct *thread = &curr->thread;
19561
19562 show_ucode_info_early();
19563diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
19564index 7c6f7d5..8cac382 100644
19565--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
19566+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
19567@@ -1017,6 +1017,22 @@ static struct attribute *default_attrs[] = {
19568 };
19569
19570 #ifdef CONFIG_AMD_NB
19571+static struct attribute *default_attrs_amd_nb[] = {
19572+ &type.attr,
19573+ &level.attr,
19574+ &coherency_line_size.attr,
19575+ &physical_line_partition.attr,
19576+ &ways_of_associativity.attr,
19577+ &number_of_sets.attr,
19578+ &size.attr,
19579+ &shared_cpu_map.attr,
19580+ &shared_cpu_list.attr,
19581+ NULL,
19582+ NULL,
19583+ NULL,
19584+ NULL
19585+};
19586+
19587 static struct attribute ** __cpuinit amd_l3_attrs(void)
19588 {
19589 static struct attribute **attrs;
19590@@ -1027,18 +1043,7 @@ static struct attribute ** __cpuinit amd_l3_attrs(void)
19591
19592 n = ARRAY_SIZE(default_attrs);
19593
19594- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
19595- n += 2;
19596-
19597- if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
19598- n += 1;
19599-
19600- attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
19601- if (attrs == NULL)
19602- return attrs = default_attrs;
19603-
19604- for (n = 0; default_attrs[n]; n++)
19605- attrs[n] = default_attrs[n];
19606+ attrs = default_attrs_amd_nb;
19607
19608 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
19609 attrs[n++] = &cache_disable_0.attr;
19610@@ -1089,6 +1094,13 @@ static struct kobj_type ktype_cache = {
19611 .default_attrs = default_attrs,
19612 };
19613
19614+#ifdef CONFIG_AMD_NB
19615+static struct kobj_type ktype_cache_amd_nb = {
19616+ .sysfs_ops = &sysfs_ops,
19617+ .default_attrs = default_attrs_amd_nb,
19618+};
19619+#endif
19620+
19621 static struct kobj_type ktype_percpu_entry = {
19622 .sysfs_ops = &sysfs_ops,
19623 };
19624@@ -1154,20 +1166,26 @@ static int __cpuinit cache_add_dev(struct device *dev)
19625 return retval;
19626 }
19627
19628+#ifdef CONFIG_AMD_NB
19629+ amd_l3_attrs();
19630+#endif
19631+
19632 for (i = 0; i < num_cache_leaves; i++) {
19633+ struct kobj_type *ktype;
19634+
19635 this_object = INDEX_KOBJECT_PTR(cpu, i);
19636 this_object->cpu = cpu;
19637 this_object->index = i;
19638
19639 this_leaf = CPUID4_INFO_IDX(cpu, i);
19640
19641- ktype_cache.default_attrs = default_attrs;
19642+ ktype = &ktype_cache;
19643 #ifdef CONFIG_AMD_NB
19644 if (this_leaf->base.nb)
19645- ktype_cache.default_attrs = amd_l3_attrs();
19646+ ktype = &ktype_cache_amd_nb;
19647 #endif
19648 retval = kobject_init_and_add(&(this_object->kobj),
19649- &ktype_cache,
19650+ ktype,
19651 per_cpu(ici_cache_kobject, cpu),
19652 "index%1lu", i);
19653 if (unlikely(retval)) {
19654@@ -1222,7 +1240,7 @@ static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
19655 return NOTIFY_OK;
19656 }
19657
19658-static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
19659+static struct notifier_block cacheinfo_cpu_notifier = {
19660 .notifier_call = cacheinfo_cpu_callback,
19661 };
19662
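
The intel_cacheinfo change replaces a kzalloc'd, runtime-built attribute array with a static default_attrs_amd_nb[] carrying spare NULL slots, which the optional AMD L3 entries fill in at init; the table itself never needs a writable heap allocation. A sketch of the spare-slot idea, with plain strings standing in for struct attribute pointers (cache_disable_1 is assumed by analogy with the cache_disable_0 entry visible in the hunk):

#include <stdio.h>

/* Static table with trailing NULL slots reserved for optional
 * entries, mirroring default_attrs_amd_nb[] above. */
static const char *amd_nb_attrs[12] = {
    "type", "level", "coherency_line_size", "size",
    "shared_cpu_map", "shared_cpu_list",
    NULL, NULL, NULL, NULL,      /* spare slots + terminator */
};

int main(void)
{
    int n = 6;                               /* first spare slot */
    amd_nb_attrs[n++] = "cache_disable_0";   /* L3 index disable */
    amd_nb_attrs[n++] = "cache_disable_1";

    for (int i = 0; amd_nb_attrs[i]; i++)
        printf("%s\n", amd_nb_attrs[i]);
    return 0;
}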
19663diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
19664index 9239504..b2471ce 100644
19665--- a/arch/x86/kernel/cpu/mcheck/mce.c
19666+++ b/arch/x86/kernel/cpu/mcheck/mce.c
19667@@ -45,6 +45,7 @@
19668 #include <asm/processor.h>
19669 #include <asm/mce.h>
19670 #include <asm/msr.h>
19671+#include <asm/local.h>
19672
19673 #include "mce-internal.h"
19674
19675@@ -246,7 +247,7 @@ static void print_mce(struct mce *m)
19676 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
19677 m->cs, m->ip);
19678
19679- if (m->cs == __KERNEL_CS)
19680+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
19681 print_symbol("{%s}", m->ip);
19682 pr_cont("\n");
19683 }
19684@@ -279,10 +280,10 @@ static void print_mce(struct mce *m)
19685
19686 #define PANIC_TIMEOUT 5 /* 5 seconds */
19687
19688-static atomic_t mce_paniced;
19689+static atomic_unchecked_t mce_paniced;
19690
19691 static int fake_panic;
19692-static atomic_t mce_fake_paniced;
19693+static atomic_unchecked_t mce_fake_paniced;
19694
19695 /* Panic in progress. Enable interrupts and wait for final IPI */
19696 static void wait_for_panic(void)
19697@@ -306,7 +307,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
19698 /*
19699 * Make sure only one CPU runs in machine check panic
19700 */
19701- if (atomic_inc_return(&mce_paniced) > 1)
19702+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
19703 wait_for_panic();
19704 barrier();
19705
19706@@ -314,7 +315,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
19707 console_verbose();
19708 } else {
19709 /* Don't log too much for fake panic */
19710- if (atomic_inc_return(&mce_fake_paniced) > 1)
19711+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
19712 return;
19713 }
19714 /* First print corrected ones that are still unlogged */
19715@@ -353,7 +354,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
19716 if (!fake_panic) {
19717 if (panic_timeout == 0)
19718 panic_timeout = mca_cfg.panic_timeout;
19719- panic(msg);
19720+ panic("%s", msg);
19721 } else
19722 pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
19723 }
19724@@ -683,7 +684,7 @@ static int mce_timed_out(u64 *t)
19725 * might have been modified by someone else.
19726 */
19727 rmb();
19728- if (atomic_read(&mce_paniced))
19729+ if (atomic_read_unchecked(&mce_paniced))
19730 wait_for_panic();
19731 if (!mca_cfg.monarch_timeout)
19732 goto out;
19733@@ -1654,7 +1655,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
19734 }
19735
19736 /* Call the installed machine check handler for this CPU setup. */
19737-void (*machine_check_vector)(struct pt_regs *, long error_code) =
19738+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
19739 unexpected_machine_check;
19740
19741 /*
19742@@ -1677,7 +1678,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
19743 return;
19744 }
19745
19746+ pax_open_kernel();
19747 machine_check_vector = do_machine_check;
19748+ pax_close_kernel();
19749
19750 __mcheck_cpu_init_generic();
19751 __mcheck_cpu_init_vendor(c);
19752@@ -1691,7 +1694,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
19753 */
19754
19755 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
19756-static int mce_chrdev_open_count; /* #times opened */
19757+static local_t mce_chrdev_open_count; /* #times opened */
19758 static int mce_chrdev_open_exclu; /* already open exclusive? */
19759
19760 static int mce_chrdev_open(struct inode *inode, struct file *file)
19761@@ -1699,7 +1702,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
19762 spin_lock(&mce_chrdev_state_lock);
19763
19764 if (mce_chrdev_open_exclu ||
19765- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
19766+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
19767 spin_unlock(&mce_chrdev_state_lock);
19768
19769 return -EBUSY;
19770@@ -1707,7 +1710,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
19771
19772 if (file->f_flags & O_EXCL)
19773 mce_chrdev_open_exclu = 1;
19774- mce_chrdev_open_count++;
19775+ local_inc(&mce_chrdev_open_count);
19776
19777 spin_unlock(&mce_chrdev_state_lock);
19778
19779@@ -1718,7 +1721,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
19780 {
19781 spin_lock(&mce_chrdev_state_lock);
19782
19783- mce_chrdev_open_count--;
19784+ local_dec(&mce_chrdev_open_count);
19785 mce_chrdev_open_exclu = 0;
19786
19787 spin_unlock(&mce_chrdev_state_lock);
19788@@ -2364,7 +2367,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
19789 return NOTIFY_OK;
19790 }
19791
19792-static struct notifier_block mce_cpu_notifier __cpuinitdata = {
19793+static struct notifier_block mce_cpu_notifier = {
19794 .notifier_call = mce_cpu_callback,
19795 };
19796
19797@@ -2374,7 +2377,7 @@ static __init void mce_init_banks(void)
19798
19799 for (i = 0; i < mca_cfg.banks; i++) {
19800 struct mce_bank *b = &mce_banks[i];
19801- struct device_attribute *a = &b->attr;
19802+ device_attribute_no_const *a = &b->attr;
19803
19804 sysfs_attr_init(&a->attr);
19805 a->attr.name = b->attrname;
19806@@ -2442,7 +2445,7 @@ struct dentry *mce_get_debugfs_dir(void)
19807 static void mce_reset(void)
19808 {
19809 cpu_missing = 0;
19810- atomic_set(&mce_fake_paniced, 0);
19811+ atomic_set_unchecked(&mce_fake_paniced, 0);
19812 atomic_set(&mce_executing, 0);
19813 atomic_set(&mce_callin, 0);
19814 atomic_set(&global_nwo, 0);
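
Two counter conversions run through the mce.c hunks: statistics that may legitimately wrap (mce_paniced, mce_fake_paniced, irq_err_count) move from atomic_t to atomic_unchecked_t, which PaX's REFCOUNT hardening exempts from overflow trapping, and the character-device open count becomes a local_t. A C11 model of the checked-versus-unchecked distinction — checked_inc() is an illustrative stand-in, not the kernel's implementation:

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int stat_counter = INT_MAX;  /* a statistic, free to wrap */

/* Model of the checked flavour: refuse to increment past INT_MAX
 * (the real hardening traps in the overflow path instead). */
static int checked_inc(atomic_int *v)
{
    int old = atomic_load(v);
    if (old == INT_MAX)
        return -1;
    return atomic_fetch_add(v, 1);
}

int main(void)
{
    if (checked_inc(&stat_counter) < 0)
        puts("checked increment: overflow trapped");
    atomic_fetch_add(&stat_counter, 1);   /* unchecked: wraps silently */
    printf("unchecked counter wrapped to %d\n", atomic_load(&stat_counter));
    return 0;
}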
19815diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
19816index 1c044b1..37a2a43 100644
19817--- a/arch/x86/kernel/cpu/mcheck/p5.c
19818+++ b/arch/x86/kernel/cpu/mcheck/p5.c
19819@@ -11,6 +11,7 @@
19820 #include <asm/processor.h>
19821 #include <asm/mce.h>
19822 #include <asm/msr.h>
19823+#include <asm/pgtable.h>
19824
19825 /* By default disabled */
19826 int mce_p5_enabled __read_mostly;
19827@@ -49,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
19828 if (!cpu_has(c, X86_FEATURE_MCE))
19829 return;
19830
19831+ pax_open_kernel();
19832 machine_check_vector = pentium_machine_check;
19833+ pax_close_kernel();
19834 /* Make sure the vector pointer is visible before we enable MCEs: */
19835 wmb();
19836
19837diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
19838index 47a1870..8c019a7 100644
19839--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
19840+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
19841@@ -288,7 +288,7 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb,
19842 return notifier_from_errno(err);
19843 }
19844
19845-static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata =
19846+static struct notifier_block thermal_throttle_cpu_notifier =
19847 {
19848 .notifier_call = thermal_throttle_cpu_callback,
19849 };
19850diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
19851index e9a701a..35317d6 100644
19852--- a/arch/x86/kernel/cpu/mcheck/winchip.c
19853+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
19854@@ -10,6 +10,7 @@
19855 #include <asm/processor.h>
19856 #include <asm/mce.h>
19857 #include <asm/msr.h>
19858+#include <asm/pgtable.h>
19859
19860 /* Machine check handler for WinChip C6: */
19861 static void winchip_machine_check(struct pt_regs *regs, long error_code)
19862@@ -23,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
19863 {
19864 u32 lo, hi;
19865
19866+ pax_open_kernel();
19867 machine_check_vector = winchip_machine_check;
19868+ pax_close_kernel();
19869 /* Make sure the vector pointer is visible before we enable MCEs: */
19870 wmb();
19871
19872diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
19873index ca22b73..9987afe 100644
19874--- a/arch/x86/kernel/cpu/mtrr/main.c
19875+++ b/arch/x86/kernel/cpu/mtrr/main.c
19876@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
19877 u64 size_or_mask, size_and_mask;
19878 static bool mtrr_aps_delayed_init;
19879
19880-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
19881+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
19882
19883 const struct mtrr_ops *mtrr_if;
19884
19885diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
19886index df5e41f..816c719 100644
19887--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
19888+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
19889@@ -25,7 +25,7 @@ struct mtrr_ops {
19890 int (*validate_add_page)(unsigned long base, unsigned long size,
19891 unsigned int type);
19892 int (*have_wrcomb)(void);
19893-};
19894+} __do_const;
19895
19896 extern int generic_get_free_region(unsigned long base, unsigned long size,
19897 int replace_reg);
19898diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
19899index 1025f3c..824f677 100644
19900--- a/arch/x86/kernel/cpu/perf_event.c
19901+++ b/arch/x86/kernel/cpu/perf_event.c
19902@@ -1311,7 +1311,7 @@ static void __init pmu_check_apic(void)
19903 pr_info("no hardware sampling interrupt available.\n");
19904 }
19905
19906-static struct attribute_group x86_pmu_format_group = {
19907+static attribute_group_no_const x86_pmu_format_group = {
19908 .name = "format",
19909 .attrs = NULL,
19910 };
19911@@ -1410,7 +1410,7 @@ static struct attribute *events_attr[] = {
19912 NULL,
19913 };
19914
19915-static struct attribute_group x86_pmu_events_group = {
19916+static attribute_group_no_const x86_pmu_events_group = {
19917 .name = "events",
19918 .attrs = events_attr,
19919 };
19920@@ -1920,7 +1920,7 @@ static unsigned long get_segment_base(unsigned int segment)
19921 if (idx > GDT_ENTRIES)
19922 return 0;
19923
19924- desc = __this_cpu_ptr(&gdt_page.gdt[0]);
19925+ desc = get_cpu_gdt_table(smp_processor_id());
19926 }
19927
19928 return get_desc_base(desc + idx);
19929@@ -2010,7 +2010,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
19930 break;
19931
19932 perf_callchain_store(entry, frame.return_address);
19933- fp = frame.next_frame;
19934+ fp = (const void __force_user *)frame.next_frame;
19935 }
19936 }
19937
19938diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
19939index a9e2207..d70c83a 100644
19940--- a/arch/x86/kernel/cpu/perf_event_intel.c
19941+++ b/arch/x86/kernel/cpu/perf_event_intel.c
19942@@ -2022,10 +2022,10 @@ __init int intel_pmu_init(void)
19943 * v2 and above have a perf capabilities MSR
19944 */
19945 if (version > 1) {
19946- u64 capabilities;
19947+ u64 capabilities = x86_pmu.intel_cap.capabilities;
19948
19949- rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
19950- x86_pmu.intel_cap.capabilities = capabilities;
19951+ if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
19952+ x86_pmu.intel_cap.capabilities = capabilities;
19953 }
19954
19955 intel_ds_init();
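
The perf_event_intel.c hunk converts an unconditional rdmsrl() into rdmsrl_safe(): the prior capabilities value is saved first, the safe variant reports failure instead of faulting on a missing MSR, and the saved value is restored when the read fails. A sketch of that save/try/restore shape, with a made-up read_msr_safe() in place of the kernel helper:

#include <stdio.h>

/* Invented stand-in: succeeds only for one "existing" MSR index
 * and may clobber the output on failure, like a faulted read. */
static int read_msr_safe(unsigned idx, unsigned long long *val)
{
    if (idx != 0x345) {      /* pretend only PERF_CAPABILITIES exists */
        *val = 0;
        return -1;
    }
    *val = 0x33;             /* invented capability bits */
    return 0;
}

int main(void)
{
    unsigned long long caps = 0;        /* previously-initialized value */
    unsigned long long saved = caps;

    if (read_msr_safe(0x345, &caps))    /* succeeds in this sketch */
        caps = saved;
    printf("capabilities: %#llx\n", caps);

    saved = caps;
    if (read_msr_safe(0x999, &caps))    /* fails: restore the old value */
        caps = saved;
    printf("after failed read: %#llx\n", caps);
    return 0;
}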
19956diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
19957index 8aac56b..588fb13 100644
19958--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
19959+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
19960@@ -3093,7 +3093,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
19961 static int __init uncore_type_init(struct intel_uncore_type *type)
19962 {
19963 struct intel_uncore_pmu *pmus;
19964- struct attribute_group *attr_group;
19965+ attribute_group_no_const *attr_group;
19966 struct attribute **attrs;
19967 int i, j;
19968
19969@@ -3518,7 +3518,7 @@ static int
19970 return NOTIFY_OK;
19971 }
19972
19973-static struct notifier_block uncore_cpu_nb __cpuinitdata = {
19974+static struct notifier_block uncore_cpu_nb = {
19975 .notifier_call = uncore_cpu_notifier,
19976 /*
19977 * to migrate uncore events, our notifier should be executed
19978diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
19979index f952891..4722ad4 100644
19980--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
19981+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
19982@@ -488,7 +488,7 @@ struct intel_uncore_box {
19983 struct uncore_event_desc {
19984 struct kobj_attribute attr;
19985 const char *config;
19986-};
19987+} __do_const;
19988
19989 #define INTEL_UNCORE_EVENT_DESC(_name, _config) \
19990 { \
19991diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
19992index 1e4dbcf..b9a34c2 100644
19993--- a/arch/x86/kernel/cpuid.c
19994+++ b/arch/x86/kernel/cpuid.c
19995@@ -171,7 +171,7 @@ static int __cpuinit cpuid_class_cpu_callback(struct notifier_block *nfb,
19996 return notifier_from_errno(err);
19997 }
19998
19999-static struct notifier_block __refdata cpuid_class_cpu_notifier =
20000+static struct notifier_block cpuid_class_cpu_notifier =
20001 {
20002 .notifier_call = cpuid_class_cpu_callback,
20003 };
20004diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
20005index 74467fe..18793d5 100644
20006--- a/arch/x86/kernel/crash.c
20007+++ b/arch/x86/kernel/crash.c
20008@@ -58,10 +58,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
20009 {
20010 #ifdef CONFIG_X86_32
20011 struct pt_regs fixed_regs;
20012-#endif
20013
20014-#ifdef CONFIG_X86_32
20015- if (!user_mode_vm(regs)) {
20016+ if (!user_mode(regs)) {
20017 crash_fixup_ss_esp(&fixed_regs, regs);
20018 regs = &fixed_regs;
20019 }
20020diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
20021index afa64ad..dce67dd 100644
20022--- a/arch/x86/kernel/crash_dump_64.c
20023+++ b/arch/x86/kernel/crash_dump_64.c
20024@@ -36,7 +36,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
20025 return -ENOMEM;
20026
20027 if (userbuf) {
20028- if (copy_to_user(buf, vaddr + offset, csize)) {
20029+ if (copy_to_user((char __force_user *)buf, vaddr + offset, csize)) {
20030 iounmap(vaddr);
20031 return -EFAULT;
20032 }
20033diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
20034index 155a13f..1672b9b 100644
20035--- a/arch/x86/kernel/doublefault_32.c
20036+++ b/arch/x86/kernel/doublefault_32.c
20037@@ -11,7 +11,7 @@
20038
20039 #define DOUBLEFAULT_STACKSIZE (1024)
20040 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
20041-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
20042+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
20043
20044 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
20045
20046@@ -21,7 +21,7 @@ static void doublefault_fn(void)
20047 unsigned long gdt, tss;
20048
20049 native_store_gdt(&gdt_desc);
20050- gdt = gdt_desc.address;
20051+ gdt = (unsigned long)gdt_desc.address;
20052
20053 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
20054
20055@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
20056 /* 0x2 bit is always set */
20057 .flags = X86_EFLAGS_SF | 0x2,
20058 .sp = STACK_START,
20059- .es = __USER_DS,
20060+ .es = __KERNEL_DS,
20061 .cs = __KERNEL_CS,
20062 .ss = __KERNEL_DS,
20063- .ds = __USER_DS,
20064+ .ds = __KERNEL_DS,
20065 .fs = __KERNEL_PERCPU,
20066
20067 .__cr3 = __pa_nodebug(swapper_pg_dir),
20068diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
20069index deb6421..76bbc12 100644
20070--- a/arch/x86/kernel/dumpstack.c
20071+++ b/arch/x86/kernel/dumpstack.c
20072@@ -2,6 +2,9 @@
20073 * Copyright (C) 1991, 1992 Linus Torvalds
20074 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
20075 */
20076+#ifdef CONFIG_GRKERNSEC_HIDESYM
20077+#define __INCLUDED_BY_HIDESYM 1
20078+#endif
20079 #include <linux/kallsyms.h>
20080 #include <linux/kprobes.h>
20081 #include <linux/uaccess.h>
20082@@ -35,16 +38,14 @@ void printk_address(unsigned long address, int reliable)
20083 static void
20084 print_ftrace_graph_addr(unsigned long addr, void *data,
20085 const struct stacktrace_ops *ops,
20086- struct thread_info *tinfo, int *graph)
20087+ struct task_struct *task, int *graph)
20088 {
20089- struct task_struct *task;
20090 unsigned long ret_addr;
20091 int index;
20092
20093 if (addr != (unsigned long)return_to_handler)
20094 return;
20095
20096- task = tinfo->task;
20097 index = task->curr_ret_stack;
20098
20099 if (!task->ret_stack || index < *graph)
20100@@ -61,7 +62,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
20101 static inline void
20102 print_ftrace_graph_addr(unsigned long addr, void *data,
20103 const struct stacktrace_ops *ops,
20104- struct thread_info *tinfo, int *graph)
20105+ struct task_struct *task, int *graph)
20106 { }
20107 #endif
20108
20109@@ -72,10 +73,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
20110 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
20111 */
20112
20113-static inline int valid_stack_ptr(struct thread_info *tinfo,
20114- void *p, unsigned int size, void *end)
20115+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
20116 {
20117- void *t = tinfo;
20118 if (end) {
20119 if (p < end && p >= (end-THREAD_SIZE))
20120 return 1;
20121@@ -86,14 +85,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
20122 }
20123
20124 unsigned long
20125-print_context_stack(struct thread_info *tinfo,
20126+print_context_stack(struct task_struct *task, void *stack_start,
20127 unsigned long *stack, unsigned long bp,
20128 const struct stacktrace_ops *ops, void *data,
20129 unsigned long *end, int *graph)
20130 {
20131 struct stack_frame *frame = (struct stack_frame *)bp;
20132
20133- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
20134+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
20135 unsigned long addr;
20136
20137 addr = *stack;
20138@@ -105,7 +104,7 @@ print_context_stack(struct thread_info *tinfo,
20139 } else {
20140 ops->address(data, addr, 0);
20141 }
20142- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
20143+ print_ftrace_graph_addr(addr, data, ops, task, graph);
20144 }
20145 stack++;
20146 }
20147@@ -114,7 +113,7 @@ print_context_stack(struct thread_info *tinfo,
20148 EXPORT_SYMBOL_GPL(print_context_stack);
20149
20150 unsigned long
20151-print_context_stack_bp(struct thread_info *tinfo,
20152+print_context_stack_bp(struct task_struct *task, void *stack_start,
20153 unsigned long *stack, unsigned long bp,
20154 const struct stacktrace_ops *ops, void *data,
20155 unsigned long *end, int *graph)
20156@@ -122,7 +121,7 @@ print_context_stack_bp(struct thread_info *tinfo,
20157 struct stack_frame *frame = (struct stack_frame *)bp;
20158 unsigned long *ret_addr = &frame->return_address;
20159
20160- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
20161+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
20162 unsigned long addr = *ret_addr;
20163
20164 if (!__kernel_text_address(addr))
20165@@ -131,7 +130,7 @@ print_context_stack_bp(struct thread_info *tinfo,
20166 ops->address(data, addr, 1);
20167 frame = frame->next_frame;
20168 ret_addr = &frame->return_address;
20169- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
20170+ print_ftrace_graph_addr(addr, data, ops, task, graph);
20171 }
20172
20173 return (unsigned long)frame;
20174@@ -150,7 +149,7 @@ static int print_trace_stack(void *data, char *name)
20175 static void print_trace_address(void *data, unsigned long addr, int reliable)
20176 {
20177 touch_nmi_watchdog();
20178- printk(data);
20179+ printk("%s", (char *)data);
20180 printk_address(addr, reliable);
20181 }
20182
20183@@ -219,6 +218,8 @@ unsigned __kprobes long oops_begin(void)
20184 }
20185 EXPORT_SYMBOL_GPL(oops_begin);
20186
20187+extern void gr_handle_kernel_exploit(void);
20188+
20189 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
20190 {
20191 if (regs && kexec_should_crash(current))
20192@@ -240,7 +241,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
20193 panic("Fatal exception in interrupt");
20194 if (panic_on_oops)
20195 panic("Fatal exception");
20196- do_exit(signr);
20197+
20198+ gr_handle_kernel_exploit();
20199+
20200+ do_group_exit(signr);
20201 }
20202
20203 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
20204@@ -268,7 +272,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
20205 print_modules();
20206 show_regs(regs);
20207 #ifdef CONFIG_X86_32
20208- if (user_mode_vm(regs)) {
20209+ if (user_mode(regs)) {
20210 sp = regs->sp;
20211 ss = regs->ss & 0xffff;
20212 } else {
20213@@ -296,7 +300,7 @@ void die(const char *str, struct pt_regs *regs, long err)
20214 unsigned long flags = oops_begin();
20215 int sig = SIGSEGV;
20216
20217- if (!user_mode_vm(regs))
20218+ if (!user_mode(regs))
20219 report_bug(regs->ip, regs);
20220
20221 if (__die(str, regs, err))
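The dumpstack.c rework threads a task_struct plus a raw stack base through the frame walkers in place of a thread_info pointer, so the walker no longer assumes a control block sits at the bottom of the stack; the bounds test becomes plain pointer arithmetic against that base. A stand-alone sketch of the rewritten valid_stack_ptr() (THREAD_SIZE value assumed; the kernel takes it from its headers):

#define THREAD_SIZE 8192UL	/* power-of-two stack size (sketch value) */

static int valid_stack_ptr_sketch(void *stack_start, void *p,
				  unsigned int size, void *end)
{
	if (end) {
		/* walking an irq/exception stack: stay inside the
		 * window that ends at 'end' */
		return p < end && p >= (void *)((char *)end - THREAD_SIZE);
	}
	/* walking the process stack: the object must fit inside
	 * [stack_start, stack_start + THREAD_SIZE) */
	return p > stack_start &&
	       (char *)p < (char *)stack_start + THREAD_SIZE - size;
}

Two independent fixes ride along in the same file: print_trace_address() stops passing a variable string as the printk format (see the e820.c note below), and oops_end() now calls gr_handle_kernel_exploit() -- grsecurity's active response to a kernel oops -- and kills the whole thread group with do_group_exit() rather than only the faulting thread.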
20222diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
20223index f2a1770..540657f 100644
20224--- a/arch/x86/kernel/dumpstack_32.c
20225+++ b/arch/x86/kernel/dumpstack_32.c
20226@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
20227 bp = stack_frame(task, regs);
20228
20229 for (;;) {
20230- struct thread_info *context;
20231+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
20232
20233- context = (struct thread_info *)
20234- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
20235- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
20236+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
20237
20238- stack = (unsigned long *)context->previous_esp;
20239- if (!stack)
20240+ if (stack_start == task_stack_page(task))
20241 break;
20242+ stack = *(unsigned long **)stack_start;
20243 if (ops->stack(data, "IRQ") < 0)
20244 break;
20245 touch_nmi_watchdog();
20246@@ -87,27 +85,28 @@ void show_regs(struct pt_regs *regs)
20247 int i;
20248
20249 show_regs_print_info(KERN_EMERG);
20250- __show_regs(regs, !user_mode_vm(regs));
20251+ __show_regs(regs, !user_mode(regs));
20252
20253 /*
20254 * When in-kernel, we also print out the stack and code at the
20255 * time of the fault..
20256 */
20257- if (!user_mode_vm(regs)) {
20258+ if (!user_mode(regs)) {
20259 unsigned int code_prologue = code_bytes * 43 / 64;
20260 unsigned int code_len = code_bytes;
20261 unsigned char c;
20262 u8 *ip;
20263+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
20264
20265 pr_emerg("Stack:\n");
20266 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
20267
20268 pr_emerg("Code:");
20269
20270- ip = (u8 *)regs->ip - code_prologue;
20271+ ip = (u8 *)regs->ip - code_prologue + cs_base;
20272 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
20273 /* try starting at IP */
20274- ip = (u8 *)regs->ip;
20275+ ip = (u8 *)regs->ip + cs_base;
20276 code_len = code_len - code_prologue + 1;
20277 }
20278 for (i = 0; i < code_len; i++, ip++) {
20279@@ -116,7 +115,7 @@ void show_regs(struct pt_regs *regs)
20280 pr_cont(" Bad EIP value.");
20281 break;
20282 }
20283- if (ip == (u8 *)regs->ip)
20284+ if (ip == (u8 *)regs->ip + cs_base)
20285 pr_cont(" <%02x>", c);
20286 else
20287 pr_cont(" %02x", c);
20288@@ -129,6 +128,7 @@ int is_valid_bugaddr(unsigned long ip)
20289 {
20290 unsigned short ud2;
20291
20292+ ip = ktla_ktva(ip);
20293 if (ip < PAGE_OFFSET)
20294 return 0;
20295 if (probe_kernel_address((unsigned short *)ip, ud2))
20296@@ -136,3 +136,15 @@ int is_valid_bugaddr(unsigned long ip)
20297
20298 return ud2 == 0x0b0f;
20299 }
20300+
20301+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
20302+void pax_check_alloca(unsigned long size)
20303+{
20304+ unsigned long sp = (unsigned long)&sp, stack_left;
20305+
20306+ /* all kernel stacks are of the same size */
20307+ stack_left = sp & (THREAD_SIZE - 1);
20308+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
20309+}
20310+EXPORT_SYMBOL(pax_check_alloca);
20311+#endif
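The new pax_check_alloca() is the STACKLEAK runtime guard: plugin-instrumented code calls it before large or variable-size stack allocations, and it verifies a safety margin remains. Since 32-bit kernel stacks are all THREAD_SIZE-sized and THREAD_SIZE-aligned, masking the current stack pointer with THREAD_SIZE-1 directly yields the bytes left. A userland rendering of the same arithmetic (assumptions: a THREAD_SIZE-aligned stack, true for kernel stacks but not ordinary user stacks; abort() standing in for BUG_ON()):

#include <stdlib.h>

#define THREAD_SIZE 8192UL	/* sketch value */

void pax_check_alloca_sketch(unsigned long size)
{
	/* the address of a local approximates the stack pointer */
	unsigned long sp = (unsigned long)&sp;
	unsigned long stack_left = sp & (THREAD_SIZE - 1);

	/* refuse unless 256 guard bytes would remain below the allocation */
	if (stack_left < 256 || size >= stack_left - 256)
		abort();
}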
20312diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
20313index addb207..99635fa 100644
20314--- a/arch/x86/kernel/dumpstack_64.c
20315+++ b/arch/x86/kernel/dumpstack_64.c
20316@@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
20317 unsigned long *irq_stack_end =
20318 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
20319 unsigned used = 0;
20320- struct thread_info *tinfo;
20321 int graph = 0;
20322 unsigned long dummy;
20323+ void *stack_start;
20324
20325 if (!task)
20326 task = current;
20327@@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
20328 * current stack address. If the stacks consist of nested
20329 * exceptions
20330 */
20331- tinfo = task_thread_info(task);
20332 for (;;) {
20333 char *id;
20334 unsigned long *estack_end;
20335+
20336 estack_end = in_exception_stack(cpu, (unsigned long)stack,
20337 &used, &id);
20338
20339@@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
20340 if (ops->stack(data, id) < 0)
20341 break;
20342
20343- bp = ops->walk_stack(tinfo, stack, bp, ops,
20344+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
20345 data, estack_end, &graph);
20346 ops->stack(data, "<EOE>");
20347 /*
20348@@ -161,6 +161,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
20349 * second-to-last pointer (index -2 to end) in the
20350 * exception stack:
20351 */
20352+ if ((u16)estack_end[-1] != __KERNEL_DS)
20353+ goto out;
20354 stack = (unsigned long *) estack_end[-2];
20355 continue;
20356 }
20357@@ -172,7 +174,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
20358 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
20359 if (ops->stack(data, "IRQ") < 0)
20360 break;
20361- bp = ops->walk_stack(tinfo, stack, bp,
20362+ bp = ops->walk_stack(task, irq_stack, stack, bp,
20363 ops, data, irq_stack_end, &graph);
20364 /*
20365 * We link to the next stack (which would be
20366@@ -191,7 +193,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
20367 /*
20368 * This handles the process stack:
20369 */
20370- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
20371+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
20372+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
20373+out:
20374 put_cpu();
20375 }
20376 EXPORT_SYMBOL(dump_trace);
20377@@ -300,3 +304,50 @@ int is_valid_bugaddr(unsigned long ip)
20378
20379 return ud2 == 0x0b0f;
20380 }
20381+
20382+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
20383+void pax_check_alloca(unsigned long size)
20384+{
20385+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
20386+ unsigned cpu, used;
20387+ char *id;
20388+
20389+ /* check the process stack first */
20390+ stack_start = (unsigned long)task_stack_page(current);
20391+ stack_end = stack_start + THREAD_SIZE;
20392+ if (likely(stack_start <= sp && sp < stack_end)) {
20393+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
20394+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
20395+ return;
20396+ }
20397+
20398+ cpu = get_cpu();
20399+
20400+ /* check the irq stacks */
20401+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
20402+ stack_start = stack_end - IRQ_STACK_SIZE;
20403+ if (stack_start <= sp && sp < stack_end) {
20404+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
20405+ put_cpu();
20406+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
20407+ return;
20408+ }
20409+
20410+ /* check the exception stacks */
20411+ used = 0;
20412+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
20413+ stack_start = stack_end - EXCEPTION_STKSZ;
20414+ if (stack_end && stack_start <= sp && sp < stack_end) {
20415+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
20416+ put_cpu();
20417+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
20418+ return;
20419+ }
20420+
20421+ put_cpu();
20422+
20423+ /* unknown stack */
20424+ BUG();
20425+}
20426+EXPORT_SYMBOL(pax_check_alloca);
20427+#endif
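The 64-bit version has to classify the stack first, because x86-64 kernels run on three kinds: the process stack, a per-CPU IRQ stack, and the per-CPU IST exception stacks, each with its own size mask. The other addition in this file is the unwinder guard a few hunks up: at the top of an IST stack the hardware interrupt frame stores, from the top down, SS, RSP, RFLAGS, CS, RIP, so estack_end[-1] is the interrupted SS and estack_end[-2] the interrupted RSP, and the patch only follows that saved RSP when the saved SS is the kernel data selector. A sketch of the hardened link (selector value assumed to be 0x18, GDT entry 3 on this kernel):

#define KERNEL_DS_SEL 0x18	/* assumed __KERNEL_DS value */

static unsigned long *follow_exception_stack(unsigned long *estack_end)
{
	if ((unsigned short)estack_end[-1] != KERNEL_DS_SEL)
		return 0;	/* unexpected frame: stop walking,
				 * as the 'goto out' above does */
	return (unsigned long *)estack_end[-2];
}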
20428diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
20429index d32abea..74daf4f 100644
20430--- a/arch/x86/kernel/e820.c
20431+++ b/arch/x86/kernel/e820.c
20432@@ -800,8 +800,8 @@ unsigned long __init e820_end_of_low_ram_pfn(void)
20433
20434 static void early_panic(char *msg)
20435 {
20436- early_printk(msg);
20437- panic(msg);
20438+ early_printk("%s", msg);
20439+ panic("%s", msg);
20440 }
20441
20442 static int userdef __initdata;
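early_panic() used to pass a caller-supplied string as the format argument; should the message ever contain a %, the printk family would walk nonexistent varargs. Routing it through a constant "%s" is the standard format-string fix, applied here and to print_trace_address() in dumpstack.c above. A minimal userland illustration (printf standing in for early_printk/panic):

#include <stdio.h>

int main(void)
{
	const char *msg = "e820: 100% of the map is bogus";

	/* printf(msg) would parse the embedded "% o" as a conversion
	 * and read a garbage vararg -- undefined behavior */
	printf("%s\n", msg);	/* safe: msg is data, not a format */
	return 0;
}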
20443diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
20444index d15f575..d692043 100644
20445--- a/arch/x86/kernel/early_printk.c
20446+++ b/arch/x86/kernel/early_printk.c
20447@@ -7,6 +7,7 @@
20448 #include <linux/pci_regs.h>
20449 #include <linux/pci_ids.h>
20450 #include <linux/errno.h>
20451+#include <linux/sched.h>
20452 #include <asm/io.h>
20453 #include <asm/processor.h>
20454 #include <asm/fcntl.h>
20455diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
20456index 8f3e2de..6b71e39 100644
20457--- a/arch/x86/kernel/entry_32.S
20458+++ b/arch/x86/kernel/entry_32.S
20459@@ -177,13 +177,153 @@
20460 /*CFI_REL_OFFSET gs, PT_GS*/
20461 .endm
20462 .macro SET_KERNEL_GS reg
20463+
20464+#ifdef CONFIG_CC_STACKPROTECTOR
20465 movl $(__KERNEL_STACK_CANARY), \reg
20466+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
20467+ movl $(__USER_DS), \reg
20468+#else
20469+ xorl \reg, \reg
20470+#endif
20471+
20472 movl \reg, %gs
20473 .endm
20474
20475 #endif /* CONFIG_X86_32_LAZY_GS */
20476
20477-.macro SAVE_ALL
20478+.macro pax_enter_kernel
20479+#ifdef CONFIG_PAX_KERNEXEC
20480+ call pax_enter_kernel
20481+#endif
20482+.endm
20483+
20484+.macro pax_exit_kernel
20485+#ifdef CONFIG_PAX_KERNEXEC
20486+ call pax_exit_kernel
20487+#endif
20488+.endm
20489+
20490+#ifdef CONFIG_PAX_KERNEXEC
20491+ENTRY(pax_enter_kernel)
20492+#ifdef CONFIG_PARAVIRT
20493+ pushl %eax
20494+ pushl %ecx
20495+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
20496+ mov %eax, %esi
20497+#else
20498+ mov %cr0, %esi
20499+#endif
20500+ bts $16, %esi
20501+ jnc 1f
20502+ mov %cs, %esi
20503+ cmp $__KERNEL_CS, %esi
20504+ jz 3f
20505+ ljmp $__KERNEL_CS, $3f
20506+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
20507+2:
20508+#ifdef CONFIG_PARAVIRT
20509+ mov %esi, %eax
20510+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
20511+#else
20512+ mov %esi, %cr0
20513+#endif
20514+3:
20515+#ifdef CONFIG_PARAVIRT
20516+ popl %ecx
20517+ popl %eax
20518+#endif
20519+ ret
20520+ENDPROC(pax_enter_kernel)
20521+
20522+ENTRY(pax_exit_kernel)
20523+#ifdef CONFIG_PARAVIRT
20524+ pushl %eax
20525+ pushl %ecx
20526+#endif
20527+ mov %cs, %esi
20528+ cmp $__KERNEXEC_KERNEL_CS, %esi
20529+ jnz 2f
20530+#ifdef CONFIG_PARAVIRT
20531+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
20532+ mov %eax, %esi
20533+#else
20534+ mov %cr0, %esi
20535+#endif
20536+ btr $16, %esi
20537+ ljmp $__KERNEL_CS, $1f
20538+1:
20539+#ifdef CONFIG_PARAVIRT
20540+ mov %esi, %eax
20541+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
20542+#else
20543+ mov %esi, %cr0
20544+#endif
20545+2:
20546+#ifdef CONFIG_PARAVIRT
20547+ popl %ecx
20548+ popl %eax
20549+#endif
20550+ ret
20551+ENDPROC(pax_exit_kernel)
20552+#endif
20553+
20554+ .macro pax_erase_kstack
20555+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
20556+ call pax_erase_kstack
20557+#endif
20558+ .endm
20559+
20560+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
20561+/*
20562+ * ebp: thread_info
20563+ */
20564+ENTRY(pax_erase_kstack)
20565+ pushl %edi
20566+ pushl %ecx
20567+ pushl %eax
20568+
20569+ mov TI_lowest_stack(%ebp), %edi
20570+ mov $-0xBEEF, %eax
20571+ std
20572+
20573+1: mov %edi, %ecx
20574+ and $THREAD_SIZE_asm - 1, %ecx
20575+ shr $2, %ecx
20576+ repne scasl
20577+ jecxz 2f
20578+
20579+ cmp $2*16, %ecx
20580+ jc 2f
20581+
20582+ mov $2*16, %ecx
20583+ repe scasl
20584+ jecxz 2f
20585+ jne 1b
20586+
20587+2: cld
20588+ mov %esp, %ecx
20589+ sub %edi, %ecx
20590+
20591+ cmp $THREAD_SIZE_asm, %ecx
20592+ jb 3f
20593+ ud2
20594+3:
20595+
20596+ shr $2, %ecx
20597+ rep stosl
20598+
20599+ mov TI_task_thread_sp0(%ebp), %edi
20600+ sub $128, %edi
20601+ mov %edi, TI_lowest_stack(%ebp)
20602+
20603+ popl %eax
20604+ popl %ecx
20605+ popl %edi
20606+ ret
20607+ENDPROC(pax_erase_kstack)
20608+#endif
20609+
20610+.macro __SAVE_ALL _DS
20611 cld
20612 PUSH_GS
20613 pushl_cfi %fs
20614@@ -206,7 +346,7 @@
20615 CFI_REL_OFFSET ecx, 0
20616 pushl_cfi %ebx
20617 CFI_REL_OFFSET ebx, 0
20618- movl $(__USER_DS), %edx
20619+ movl $\_DS, %edx
20620 movl %edx, %ds
20621 movl %edx, %es
20622 movl $(__KERNEL_PERCPU), %edx
20623@@ -214,6 +354,15 @@
20624 SET_KERNEL_GS %edx
20625 .endm
20626
20627+.macro SAVE_ALL
20628+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
20629+ __SAVE_ALL __KERNEL_DS
20630+ pax_enter_kernel
20631+#else
20632+ __SAVE_ALL __USER_DS
20633+#endif
20634+.endm
20635+
20636 .macro RESTORE_INT_REGS
20637 popl_cfi %ebx
20638 CFI_RESTORE ebx
20639@@ -297,7 +446,7 @@ ENTRY(ret_from_fork)
20640 popfl_cfi
20641 jmp syscall_exit
20642 CFI_ENDPROC
20643-END(ret_from_fork)
20644+ENDPROC(ret_from_fork)
20645
20646 ENTRY(ret_from_kernel_thread)
20647 CFI_STARTPROC
20648@@ -344,7 +493,15 @@ ret_from_intr:
20649 andl $SEGMENT_RPL_MASK, %eax
20650 #endif
20651 cmpl $USER_RPL, %eax
20652+
20653+#ifdef CONFIG_PAX_KERNEXEC
20654+ jae resume_userspace
20655+
20656+ pax_exit_kernel
20657+ jmp resume_kernel
20658+#else
20659 jb resume_kernel # not returning to v8086 or userspace
20660+#endif
20661
20662 ENTRY(resume_userspace)
20663 LOCKDEP_SYS_EXIT
20664@@ -356,8 +513,8 @@ ENTRY(resume_userspace)
20665 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
20666 # int/exception return?
20667 jne work_pending
20668- jmp restore_all
20669-END(ret_from_exception)
20670+ jmp restore_all_pax
20671+ENDPROC(ret_from_exception)
20672
20673 #ifdef CONFIG_PREEMPT
20674 ENTRY(resume_kernel)
20675@@ -372,7 +529,7 @@ need_resched:
20676 jz restore_all
20677 call preempt_schedule_irq
20678 jmp need_resched
20679-END(resume_kernel)
20680+ENDPROC(resume_kernel)
20681 #endif
20682 CFI_ENDPROC
20683 /*
20684@@ -406,30 +563,45 @@ sysenter_past_esp:
20685 /*CFI_REL_OFFSET cs, 0*/
20686 /*
20687 * Push current_thread_info()->sysenter_return to the stack.
20688- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
20689- * pushed above; +8 corresponds to copy_thread's esp0 setting.
20690 */
20691- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
20692+ pushl_cfi $0
20693 CFI_REL_OFFSET eip, 0
20694
20695 pushl_cfi %eax
20696 SAVE_ALL
20697+ GET_THREAD_INFO(%ebp)
20698+ movl TI_sysenter_return(%ebp),%ebp
20699+ movl %ebp,PT_EIP(%esp)
20700 ENABLE_INTERRUPTS(CLBR_NONE)
20701
20702 /*
20703 * Load the potential sixth argument from user stack.
20704 * Careful about security.
20705 */
20706+ movl PT_OLDESP(%esp),%ebp
20707+
20708+#ifdef CONFIG_PAX_MEMORY_UDEREF
20709+ mov PT_OLDSS(%esp),%ds
20710+1: movl %ds:(%ebp),%ebp
20711+ push %ss
20712+ pop %ds
20713+#else
20714 cmpl $__PAGE_OFFSET-3,%ebp
20715 jae syscall_fault
20716 ASM_STAC
20717 1: movl (%ebp),%ebp
20718 ASM_CLAC
20719+#endif
20720+
20721 movl %ebp,PT_EBP(%esp)
20722 _ASM_EXTABLE(1b,syscall_fault)
20723
20724 GET_THREAD_INFO(%ebp)
20725
20726+#ifdef CONFIG_PAX_RANDKSTACK
20727+ pax_erase_kstack
20728+#endif
20729+
20730 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
20731 jnz sysenter_audit
20732 sysenter_do_call:
20733@@ -444,12 +616,24 @@ sysenter_do_call:
20734 testl $_TIF_ALLWORK_MASK, %ecx
20735 jne sysexit_audit
20736 sysenter_exit:
20737+
20738+#ifdef CONFIG_PAX_RANDKSTACK
20739+ pushl_cfi %eax
20740+ movl %esp, %eax
20741+ call pax_randomize_kstack
20742+ popl_cfi %eax
20743+#endif
20744+
20745+ pax_erase_kstack
20746+
20747 /* if something modifies registers it must also disable sysexit */
20748 movl PT_EIP(%esp), %edx
20749 movl PT_OLDESP(%esp), %ecx
20750 xorl %ebp,%ebp
20751 TRACE_IRQS_ON
20752 1: mov PT_FS(%esp), %fs
20753+2: mov PT_DS(%esp), %ds
20754+3: mov PT_ES(%esp), %es
20755 PTGS_TO_GS
20756 ENABLE_INTERRUPTS_SYSEXIT
20757
20758@@ -466,6 +650,9 @@ sysenter_audit:
20759 movl %eax,%edx /* 2nd arg: syscall number */
20760 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
20761 call __audit_syscall_entry
20762+
20763+ pax_erase_kstack
20764+
20765 pushl_cfi %ebx
20766 movl PT_EAX(%esp),%eax /* reload syscall number */
20767 jmp sysenter_do_call
20768@@ -491,10 +678,16 @@ sysexit_audit:
20769
20770 CFI_ENDPROC
20771 .pushsection .fixup,"ax"
20772-2: movl $0,PT_FS(%esp)
20773+4: movl $0,PT_FS(%esp)
20774+ jmp 1b
20775+5: movl $0,PT_DS(%esp)
20776+ jmp 1b
20777+6: movl $0,PT_ES(%esp)
20778 jmp 1b
20779 .popsection
20780- _ASM_EXTABLE(1b,2b)
20781+ _ASM_EXTABLE(1b,4b)
20782+ _ASM_EXTABLE(2b,5b)
20783+ _ASM_EXTABLE(3b,6b)
20784 PTGS_TO_GS_EX
20785 ENDPROC(ia32_sysenter_target)
20786
20787@@ -509,6 +702,11 @@ ENTRY(system_call)
20788 pushl_cfi %eax # save orig_eax
20789 SAVE_ALL
20790 GET_THREAD_INFO(%ebp)
20791+
20792+#ifdef CONFIG_PAX_RANDKSTACK
20793+ pax_erase_kstack
20794+#endif
20795+
20796 # system call tracing in operation / emulation
20797 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
20798 jnz syscall_trace_entry
20799@@ -527,6 +725,15 @@ syscall_exit:
20800 testl $_TIF_ALLWORK_MASK, %ecx # current->work
20801 jne syscall_exit_work
20802
20803+restore_all_pax:
20804+
20805+#ifdef CONFIG_PAX_RANDKSTACK
20806+ movl %esp, %eax
20807+ call pax_randomize_kstack
20808+#endif
20809+
20810+ pax_erase_kstack
20811+
20812 restore_all:
20813 TRACE_IRQS_IRET
20814 restore_all_notrace:
20815@@ -583,14 +790,34 @@ ldt_ss:
20816 * compensating for the offset by changing to the ESPFIX segment with
20817 * a base address that matches for the difference.
20818 */
20819-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
20820+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
20821 mov %esp, %edx /* load kernel esp */
20822 mov PT_OLDESP(%esp), %eax /* load userspace esp */
20823 mov %dx, %ax /* eax: new kernel esp */
20824 sub %eax, %edx /* offset (low word is 0) */
20825+#ifdef CONFIG_SMP
20826+ movl PER_CPU_VAR(cpu_number), %ebx
20827+ shll $PAGE_SHIFT_asm, %ebx
20828+ addl $cpu_gdt_table, %ebx
20829+#else
20830+ movl $cpu_gdt_table, %ebx
20831+#endif
20832 shr $16, %edx
20833- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
20834- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
20835+
20836+#ifdef CONFIG_PAX_KERNEXEC
20837+ mov %cr0, %esi
20838+ btr $16, %esi
20839+ mov %esi, %cr0
20840+#endif
20841+
20842+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
20843+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
20844+
20845+#ifdef CONFIG_PAX_KERNEXEC
20846+ bts $16, %esi
20847+ mov %esi, %cr0
20848+#endif
20849+
20850 pushl_cfi $__ESPFIX_SS
20851 pushl_cfi %eax /* new kernel esp */
20852 /* Disable interrupts, but do not irqtrace this section: we
20853@@ -619,20 +846,18 @@ work_resched:
20854 movl TI_flags(%ebp), %ecx
20855 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
20856 # than syscall tracing?
20857- jz restore_all
20858+ jz restore_all_pax
20859 testb $_TIF_NEED_RESCHED, %cl
20860 jnz work_resched
20861
20862 work_notifysig: # deal with pending signals and
20863 # notify-resume requests
20864+ movl %esp, %eax
20865 #ifdef CONFIG_VM86
20866 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
20867- movl %esp, %eax
20868 jne work_notifysig_v86 # returning to kernel-space or
20869 # vm86-space
20870 1:
20871-#else
20872- movl %esp, %eax
20873 #endif
20874 TRACE_IRQS_ON
20875 ENABLE_INTERRUPTS(CLBR_NONE)
20876@@ -653,7 +878,7 @@ work_notifysig_v86:
20877 movl %eax, %esp
20878 jmp 1b
20879 #endif
20880-END(work_pending)
20881+ENDPROC(work_pending)
20882
20883 # perform syscall exit tracing
20884 ALIGN
20885@@ -661,11 +886,14 @@ syscall_trace_entry:
20886 movl $-ENOSYS,PT_EAX(%esp)
20887 movl %esp, %eax
20888 call syscall_trace_enter
20889+
20890+ pax_erase_kstack
20891+
20892 /* What it returned is what we'll actually use. */
20893 cmpl $(NR_syscalls), %eax
20894 jnae syscall_call
20895 jmp syscall_exit
20896-END(syscall_trace_entry)
20897+ENDPROC(syscall_trace_entry)
20898
20899 # perform syscall exit tracing
20900 ALIGN
20901@@ -678,21 +906,25 @@ syscall_exit_work:
20902 movl %esp, %eax
20903 call syscall_trace_leave
20904 jmp resume_userspace
20905-END(syscall_exit_work)
20906+ENDPROC(syscall_exit_work)
20907 CFI_ENDPROC
20908
20909 RING0_INT_FRAME # can't unwind into user space anyway
20910 syscall_fault:
20911+#ifdef CONFIG_PAX_MEMORY_UDEREF
20912+ push %ss
20913+ pop %ds
20914+#endif
20915 ASM_CLAC
20916 GET_THREAD_INFO(%ebp)
20917 movl $-EFAULT,PT_EAX(%esp)
20918 jmp resume_userspace
20919-END(syscall_fault)
20920+ENDPROC(syscall_fault)
20921
20922 syscall_badsys:
20923 movl $-ENOSYS,PT_EAX(%esp)
20924 jmp resume_userspace
20925-END(syscall_badsys)
20926+ENDPROC(syscall_badsys)
20927 CFI_ENDPROC
20928 /*
20929 * End of kprobes section
20930@@ -708,8 +940,15 @@ END(syscall_badsys)
20931 * normal stack and adjusts ESP with the matching offset.
20932 */
20933 /* fixup the stack */
20934- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
20935- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
20936+#ifdef CONFIG_SMP
20937+ movl PER_CPU_VAR(cpu_number), %ebx
20938+ shll $PAGE_SHIFT_asm, %ebx
20939+ addl $cpu_gdt_table, %ebx
20940+#else
20941+ movl $cpu_gdt_table, %ebx
20942+#endif
20943+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
20944+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
20945 shl $16, %eax
20946 addl %esp, %eax /* the adjusted stack pointer */
20947 pushl_cfi $__KERNEL_DS
20948@@ -762,7 +1001,7 @@ vector=vector+1
20949 .endr
20950 2: jmp common_interrupt
20951 .endr
20952-END(irq_entries_start)
20953+ENDPROC(irq_entries_start)
20954
20955 .previous
20956 END(interrupt)
20957@@ -813,7 +1052,7 @@ ENTRY(coprocessor_error)
20958 pushl_cfi $do_coprocessor_error
20959 jmp error_code
20960 CFI_ENDPROC
20961-END(coprocessor_error)
20962+ENDPROC(coprocessor_error)
20963
20964 ENTRY(simd_coprocessor_error)
20965 RING0_INT_FRAME
20966@@ -826,7 +1065,7 @@ ENTRY(simd_coprocessor_error)
20967 .section .altinstructions,"a"
20968 altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f
20969 .previous
20970-.section .altinstr_replacement,"ax"
20971+.section .altinstr_replacement,"a"
20972 663: pushl $do_simd_coprocessor_error
20973 664:
20974 .previous
20975@@ -835,7 +1074,7 @@ ENTRY(simd_coprocessor_error)
20976 #endif
20977 jmp error_code
20978 CFI_ENDPROC
20979-END(simd_coprocessor_error)
20980+ENDPROC(simd_coprocessor_error)
20981
20982 ENTRY(device_not_available)
20983 RING0_INT_FRAME
20984@@ -844,18 +1083,18 @@ ENTRY(device_not_available)
20985 pushl_cfi $do_device_not_available
20986 jmp error_code
20987 CFI_ENDPROC
20988-END(device_not_available)
20989+ENDPROC(device_not_available)
20990
20991 #ifdef CONFIG_PARAVIRT
20992 ENTRY(native_iret)
20993 iret
20994 _ASM_EXTABLE(native_iret, iret_exc)
20995-END(native_iret)
20996+ENDPROC(native_iret)
20997
20998 ENTRY(native_irq_enable_sysexit)
20999 sti
21000 sysexit
21001-END(native_irq_enable_sysexit)
21002+ENDPROC(native_irq_enable_sysexit)
21003 #endif
21004
21005 ENTRY(overflow)
21006@@ -865,7 +1104,7 @@ ENTRY(overflow)
21007 pushl_cfi $do_overflow
21008 jmp error_code
21009 CFI_ENDPROC
21010-END(overflow)
21011+ENDPROC(overflow)
21012
21013 ENTRY(bounds)
21014 RING0_INT_FRAME
21015@@ -874,7 +1113,7 @@ ENTRY(bounds)
21016 pushl_cfi $do_bounds
21017 jmp error_code
21018 CFI_ENDPROC
21019-END(bounds)
21020+ENDPROC(bounds)
21021
21022 ENTRY(invalid_op)
21023 RING0_INT_FRAME
21024@@ -883,7 +1122,7 @@ ENTRY(invalid_op)
21025 pushl_cfi $do_invalid_op
21026 jmp error_code
21027 CFI_ENDPROC
21028-END(invalid_op)
21029+ENDPROC(invalid_op)
21030
21031 ENTRY(coprocessor_segment_overrun)
21032 RING0_INT_FRAME
21033@@ -892,7 +1131,7 @@ ENTRY(coprocessor_segment_overrun)
21034 pushl_cfi $do_coprocessor_segment_overrun
21035 jmp error_code
21036 CFI_ENDPROC
21037-END(coprocessor_segment_overrun)
21038+ENDPROC(coprocessor_segment_overrun)
21039
21040 ENTRY(invalid_TSS)
21041 RING0_EC_FRAME
21042@@ -900,7 +1139,7 @@ ENTRY(invalid_TSS)
21043 pushl_cfi $do_invalid_TSS
21044 jmp error_code
21045 CFI_ENDPROC
21046-END(invalid_TSS)
21047+ENDPROC(invalid_TSS)
21048
21049 ENTRY(segment_not_present)
21050 RING0_EC_FRAME
21051@@ -908,7 +1147,7 @@ ENTRY(segment_not_present)
21052 pushl_cfi $do_segment_not_present
21053 jmp error_code
21054 CFI_ENDPROC
21055-END(segment_not_present)
21056+ENDPROC(segment_not_present)
21057
21058 ENTRY(stack_segment)
21059 RING0_EC_FRAME
21060@@ -916,7 +1155,7 @@ ENTRY(stack_segment)
21061 pushl_cfi $do_stack_segment
21062 jmp error_code
21063 CFI_ENDPROC
21064-END(stack_segment)
21065+ENDPROC(stack_segment)
21066
21067 ENTRY(alignment_check)
21068 RING0_EC_FRAME
21069@@ -924,7 +1163,7 @@ ENTRY(alignment_check)
21070 pushl_cfi $do_alignment_check
21071 jmp error_code
21072 CFI_ENDPROC
21073-END(alignment_check)
21074+ENDPROC(alignment_check)
21075
21076 ENTRY(divide_error)
21077 RING0_INT_FRAME
21078@@ -933,7 +1172,7 @@ ENTRY(divide_error)
21079 pushl_cfi $do_divide_error
21080 jmp error_code
21081 CFI_ENDPROC
21082-END(divide_error)
21083+ENDPROC(divide_error)
21084
21085 #ifdef CONFIG_X86_MCE
21086 ENTRY(machine_check)
21087@@ -943,7 +1182,7 @@ ENTRY(machine_check)
21088 pushl_cfi machine_check_vector
21089 jmp error_code
21090 CFI_ENDPROC
21091-END(machine_check)
21092+ENDPROC(machine_check)
21093 #endif
21094
21095 ENTRY(spurious_interrupt_bug)
21096@@ -953,7 +1192,7 @@ ENTRY(spurious_interrupt_bug)
21097 pushl_cfi $do_spurious_interrupt_bug
21098 jmp error_code
21099 CFI_ENDPROC
21100-END(spurious_interrupt_bug)
21101+ENDPROC(spurious_interrupt_bug)
21102 /*
21103 * End of kprobes section
21104 */
21105@@ -1063,7 +1302,7 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
21106
21107 ENTRY(mcount)
21108 ret
21109-END(mcount)
21110+ENDPROC(mcount)
21111
21112 ENTRY(ftrace_caller)
21113 cmpl $0, function_trace_stop
21114@@ -1096,7 +1335,7 @@ ftrace_graph_call:
21115 .globl ftrace_stub
21116 ftrace_stub:
21117 ret
21118-END(ftrace_caller)
21119+ENDPROC(ftrace_caller)
21120
21121 ENTRY(ftrace_regs_caller)
21122 pushf /* push flags before compare (in cs location) */
21123@@ -1197,7 +1436,7 @@ trace:
21124 popl %ecx
21125 popl %eax
21126 jmp ftrace_stub
21127-END(mcount)
21128+ENDPROC(mcount)
21129 #endif /* CONFIG_DYNAMIC_FTRACE */
21130 #endif /* CONFIG_FUNCTION_TRACER */
21131
21132@@ -1215,7 +1454,7 @@ ENTRY(ftrace_graph_caller)
21133 popl %ecx
21134 popl %eax
21135 ret
21136-END(ftrace_graph_caller)
21137+ENDPROC(ftrace_graph_caller)
21138
21139 .globl return_to_handler
21140 return_to_handler:
21141@@ -1271,15 +1510,18 @@ error_code:
21142 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
21143 REG_TO_PTGS %ecx
21144 SET_KERNEL_GS %ecx
21145- movl $(__USER_DS), %ecx
21146+ movl $(__KERNEL_DS), %ecx
21147 movl %ecx, %ds
21148 movl %ecx, %es
21149+
21150+ pax_enter_kernel
21151+
21152 TRACE_IRQS_OFF
21153 movl %esp,%eax # pt_regs pointer
21154 call *%edi
21155 jmp ret_from_exception
21156 CFI_ENDPROC
21157-END(page_fault)
21158+ENDPROC(page_fault)
21159
21160 /*
21161 * Debug traps and NMI can happen at the one SYSENTER instruction
21162@@ -1322,7 +1564,7 @@ debug_stack_correct:
21163 call do_debug
21164 jmp ret_from_exception
21165 CFI_ENDPROC
21166-END(debug)
21167+ENDPROC(debug)
21168
21169 /*
21170 * NMI is doubly nasty. It can happen _while_ we're handling
21171@@ -1360,6 +1602,9 @@ nmi_stack_correct:
21172 xorl %edx,%edx # zero error code
21173 movl %esp,%eax # pt_regs pointer
21174 call do_nmi
21175+
21176+ pax_exit_kernel
21177+
21178 jmp restore_all_notrace
21179 CFI_ENDPROC
21180
21181@@ -1396,12 +1641,15 @@ nmi_espfix_stack:
21182 FIXUP_ESPFIX_STACK # %eax == %esp
21183 xorl %edx,%edx # zero error code
21184 call do_nmi
21185+
21186+ pax_exit_kernel
21187+
21188 RESTORE_REGS
21189 lss 12+4(%esp), %esp # back to espfix stack
21190 CFI_ADJUST_CFA_OFFSET -24
21191 jmp irq_return
21192 CFI_ENDPROC
21193-END(nmi)
21194+ENDPROC(nmi)
21195
21196 ENTRY(int3)
21197 RING0_INT_FRAME
21198@@ -1414,14 +1662,14 @@ ENTRY(int3)
21199 call do_int3
21200 jmp ret_from_exception
21201 CFI_ENDPROC
21202-END(int3)
21203+ENDPROC(int3)
21204
21205 ENTRY(general_protection)
21206 RING0_EC_FRAME
21207 pushl_cfi $do_general_protection
21208 jmp error_code
21209 CFI_ENDPROC
21210-END(general_protection)
21211+ENDPROC(general_protection)
21212
21213 #ifdef CONFIG_KVM_GUEST
21214 ENTRY(async_page_fault)
21215@@ -1430,7 +1678,7 @@ ENTRY(async_page_fault)
21216 pushl_cfi $do_async_page_fault
21217 jmp error_code
21218 CFI_ENDPROC
21219-END(async_page_fault)
21220+ENDPROC(async_page_fault)
21221 #endif
21222
21223 /*
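Most of the entry_32.S additions are plumbing for the pax_enter_kernel/pax_exit_kernel CR0 dance (toggling write-protect around the KERNEXEC code-segment switch), but the self-contained piece is pax_erase_kstack, the STACKLEAK eraser run on the syscall paths. It scans downward from the recorded lowest_stack watermark until it finds a run of poison words marking the already-clean region (the assembly checks for 2*16 in a row), then refills everything from that boundary up to the live stack pointer with the poison value, so data left behind by one syscall cannot leak into the next. A simplified C rendering of the scan-and-fill (assumptions: word-aligned bounds, fixed run length; the assembly also re-seeds lowest_stack from thread.sp0 afterwards):

#include <stddef.h>

#define POISON ((unsigned long)-0xBEEF)	/* the $-0xBEEF fill value */

static void erase_kstack_sketch(unsigned long *stack_bottom,
				unsigned long *lowest, unsigned long *sp)
{
	unsigned long *p = lowest;
	size_t run = 0;

	/* walk down from the deepest point reached, looking for 16
	 * consecutive poison words: everything below is still clean */
	while (p > stack_bottom && run < 16)
		run = (*--p == POISON) ? run + 1 : 0;

	/* repoison the stale region between that boundary and the
	 * current stack pointer */
	while (p < sp)
		*p++ = POISON;
}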
21224diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
21225index 7272089..0b74104 100644
21226--- a/arch/x86/kernel/entry_64.S
21227+++ b/arch/x86/kernel/entry_64.S
21228@@ -59,6 +59,8 @@
21229 #include <asm/context_tracking.h>
21230 #include <asm/smap.h>
21231 #include <linux/err.h>
21232+#include <asm/pgtable.h>
21233+#include <asm/alternative-asm.h>
21234
21235 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
21236 #include <linux/elf-em.h>
21237@@ -80,8 +82,9 @@
21238 #ifdef CONFIG_DYNAMIC_FTRACE
21239
21240 ENTRY(function_hook)
21241+ pax_force_retaddr
21242 retq
21243-END(function_hook)
21244+ENDPROC(function_hook)
21245
21246 /* skip is set if stack has been adjusted */
21247 .macro ftrace_caller_setup skip=0
21248@@ -122,8 +125,9 @@ GLOBAL(ftrace_graph_call)
21249 #endif
21250
21251 GLOBAL(ftrace_stub)
21252+ pax_force_retaddr
21253 retq
21254-END(ftrace_caller)
21255+ENDPROC(ftrace_caller)
21256
21257 ENTRY(ftrace_regs_caller)
21258 /* Save the current flags before compare (in SS location)*/
21259@@ -191,7 +195,7 @@ ftrace_restore_flags:
21260 popfq
21261 jmp ftrace_stub
21262
21263-END(ftrace_regs_caller)
21264+ENDPROC(ftrace_regs_caller)
21265
21266
21267 #else /* ! CONFIG_DYNAMIC_FTRACE */
21268@@ -212,6 +216,7 @@ ENTRY(function_hook)
21269 #endif
21270
21271 GLOBAL(ftrace_stub)
21272+ pax_force_retaddr
21273 retq
21274
21275 trace:
21276@@ -225,12 +230,13 @@ trace:
21277 #endif
21278 subq $MCOUNT_INSN_SIZE, %rdi
21279
21280+ pax_force_fptr ftrace_trace_function
21281 call *ftrace_trace_function
21282
21283 MCOUNT_RESTORE_FRAME
21284
21285 jmp ftrace_stub
21286-END(function_hook)
21287+ENDPROC(function_hook)
21288 #endif /* CONFIG_DYNAMIC_FTRACE */
21289 #endif /* CONFIG_FUNCTION_TRACER */
21290
21291@@ -252,8 +258,9 @@ ENTRY(ftrace_graph_caller)
21292
21293 MCOUNT_RESTORE_FRAME
21294
21295+ pax_force_retaddr
21296 retq
21297-END(ftrace_graph_caller)
21298+ENDPROC(ftrace_graph_caller)
21299
21300 GLOBAL(return_to_handler)
21301 subq $24, %rsp
21302@@ -269,7 +276,9 @@ GLOBAL(return_to_handler)
21303 movq 8(%rsp), %rdx
21304 movq (%rsp), %rax
21305 addq $24, %rsp
21306+ pax_force_fptr %rdi
21307 jmp *%rdi
21308+ENDPROC(return_to_handler)
21309 #endif
21310
21311
21312@@ -284,6 +293,430 @@ ENTRY(native_usergs_sysret64)
21313 ENDPROC(native_usergs_sysret64)
21314 #endif /* CONFIG_PARAVIRT */
21315
21316+ .macro ljmpq sel, off
21317+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
21318+ .byte 0x48; ljmp *1234f(%rip)
21319+ .pushsection .rodata
21320+ .align 16
21321+ 1234: .quad \off; .word \sel
21322+ .popsection
21323+#else
21324+ pushq $\sel
21325+ pushq $\off
21326+ lretq
21327+#endif
21328+ .endm
21329+
21330+ .macro pax_enter_kernel
21331+ pax_set_fptr_mask
21332+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
21333+ call pax_enter_kernel
21334+#endif
21335+ .endm
21336+
21337+ .macro pax_exit_kernel
21338+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
21339+ call pax_exit_kernel
21340+#endif
21341+
21342+ .endm
21343+
21344+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
21345+ENTRY(pax_enter_kernel)
21346+ pushq %rdi
21347+
21348+#ifdef CONFIG_PARAVIRT
21349+ PV_SAVE_REGS(CLBR_RDI)
21350+#endif
21351+
21352+#ifdef CONFIG_PAX_KERNEXEC
21353+ GET_CR0_INTO_RDI
21354+ bts $16,%rdi
21355+ jnc 3f
21356+ mov %cs,%edi
21357+ cmp $__KERNEL_CS,%edi
21358+ jnz 2f
21359+1:
21360+#endif
21361+
21362+#ifdef CONFIG_PAX_MEMORY_UDEREF
21363+ 661: jmp 111f
21364+ .pushsection .altinstr_replacement, "a"
21365+ 662: ASM_NOP2
21366+ .popsection
21367+ .pushsection .altinstructions, "a"
21368+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
21369+ .popsection
21370+ GET_CR3_INTO_RDI
21371+ cmp $0,%dil
21372+ jnz 112f
21373+ mov $__KERNEL_DS,%edi
21374+ mov %edi,%ss
21375+ jmp 111f
21376+112: cmp $1,%dil
21377+ jz 113f
21378+ ud2
21379+113: sub $4097,%rdi
21380+ bts $63,%rdi
21381+ SET_RDI_INTO_CR3
21382+ mov $__UDEREF_KERNEL_DS,%edi
21383+ mov %edi,%ss
21384+111:
21385+#endif
21386+
21387+#ifdef CONFIG_PARAVIRT
21388+ PV_RESTORE_REGS(CLBR_RDI)
21389+#endif
21390+
21391+ popq %rdi
21392+ pax_force_retaddr
21393+ retq
21394+
21395+#ifdef CONFIG_PAX_KERNEXEC
21396+2: ljmpq __KERNEL_CS,1b
21397+3: ljmpq __KERNEXEC_KERNEL_CS,4f
21398+4: SET_RDI_INTO_CR0
21399+ jmp 1b
21400+#endif
21401+ENDPROC(pax_enter_kernel)
21402+
21403+ENTRY(pax_exit_kernel)
21404+ pushq %rdi
21405+
21406+#ifdef CONFIG_PARAVIRT
21407+ PV_SAVE_REGS(CLBR_RDI)
21408+#endif
21409+
21410+#ifdef CONFIG_PAX_KERNEXEC
21411+ mov %cs,%rdi
21412+ cmp $__KERNEXEC_KERNEL_CS,%edi
21413+ jz 2f
21414+ GET_CR0_INTO_RDI
21415+ bts $16,%rdi
21416+ jnc 4f
21417+1:
21418+#endif
21419+
21420+#ifdef CONFIG_PAX_MEMORY_UDEREF
21421+ 661: jmp 111f
21422+ .pushsection .altinstr_replacement, "a"
21423+ 662: ASM_NOP2
21424+ .popsection
21425+ .pushsection .altinstructions, "a"
21426+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
21427+ .popsection
21428+ mov %ss,%edi
21429+ cmp $__UDEREF_KERNEL_DS,%edi
21430+ jnz 111f
21431+ GET_CR3_INTO_RDI
21432+ cmp $0,%dil
21433+ jz 112f
21434+ ud2
21435+112: add $4097,%rdi
21436+ bts $63,%rdi
21437+ SET_RDI_INTO_CR3
21438+ mov $__KERNEL_DS,%edi
21439+ mov %edi,%ss
21440+111:
21441+#endif
21442+
21443+#ifdef CONFIG_PARAVIRT
21444+ PV_RESTORE_REGS(CLBR_RDI);
21445+#endif
21446+
21447+ popq %rdi
21448+ pax_force_retaddr
21449+ retq
21450+
21451+#ifdef CONFIG_PAX_KERNEXEC
21452+2: GET_CR0_INTO_RDI
21453+ btr $16,%rdi
21454+ jnc 4f
21455+ ljmpq __KERNEL_CS,3f
21456+3: SET_RDI_INTO_CR0
21457+ jmp 1b
21458+4: ud2
21459+ jmp 4b
21460+#endif
21461+ENDPROC(pax_exit_kernel)
21462+#endif
21463+
21464+ .macro pax_enter_kernel_user
21465+ pax_set_fptr_mask
21466+#ifdef CONFIG_PAX_MEMORY_UDEREF
21467+ call pax_enter_kernel_user
21468+#endif
21469+ .endm
21470+
21471+ .macro pax_exit_kernel_user
21472+#ifdef CONFIG_PAX_MEMORY_UDEREF
21473+ call pax_exit_kernel_user
21474+#endif
21475+#ifdef CONFIG_PAX_RANDKSTACK
21476+ pushq %rax
21477+ pushq %r11
21478+ call pax_randomize_kstack
21479+ popq %r11
21480+ popq %rax
21481+#endif
21482+ .endm
21483+
21484+#ifdef CONFIG_PAX_MEMORY_UDEREF
21485+ENTRY(pax_enter_kernel_user)
21486+ pushq %rdi
21487+ pushq %rbx
21488+
21489+#ifdef CONFIG_PARAVIRT
21490+ PV_SAVE_REGS(CLBR_RDI)
21491+#endif
21492+
21493+ 661: jmp 111f
21494+ .pushsection .altinstr_replacement, "a"
21495+ 662: ASM_NOP2
21496+ .popsection
21497+ .pushsection .altinstructions, "a"
21498+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
21499+ .popsection
21500+ GET_CR3_INTO_RDI
21501+ cmp $1,%dil
21502+ jnz 4f
21503+ sub $4097,%rdi
21504+ bts $63,%rdi
21505+ SET_RDI_INTO_CR3
21506+ jmp 3f
21507+111:
21508+
21509+ GET_CR3_INTO_RDI
21510+ mov %rdi,%rbx
21511+ add $__START_KERNEL_map,%rbx
21512+ sub phys_base(%rip),%rbx
21513+
21514+#ifdef CONFIG_PARAVIRT
21515+ cmpl $0, pv_info+PARAVIRT_enabled
21516+ jz 1f
21517+ pushq %rdi
21518+ i = 0
21519+ .rept USER_PGD_PTRS
21520+ mov i*8(%rbx),%rsi
21521+ mov $0,%sil
21522+ lea i*8(%rbx),%rdi
21523+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
21524+ i = i + 1
21525+ .endr
21526+ popq %rdi
21527+ jmp 2f
21528+1:
21529+#endif
21530+
21531+ i = 0
21532+ .rept USER_PGD_PTRS
21533+ movb $0,i*8(%rbx)
21534+ i = i + 1
21535+ .endr
21536+
21537+2: SET_RDI_INTO_CR3
21538+
21539+#ifdef CONFIG_PAX_KERNEXEC
21540+ GET_CR0_INTO_RDI
21541+ bts $16,%rdi
21542+ SET_RDI_INTO_CR0
21543+#endif
21544+
21545+3:
21546+
21547+#ifdef CONFIG_PARAVIRT
21548+ PV_RESTORE_REGS(CLBR_RDI)
21549+#endif
21550+
21551+ popq %rbx
21552+ popq %rdi
21553+ pax_force_retaddr
21554+ retq
21555+4: ud2
21556+ENDPROC(pax_enter_kernel_user)
21557+
21558+ENTRY(pax_exit_kernel_user)
21559+ pushq %rdi
21560+ pushq %rbx
21561+
21562+#ifdef CONFIG_PARAVIRT
21563+ PV_SAVE_REGS(CLBR_RDI)
21564+#endif
21565+
21566+ GET_CR3_INTO_RDI
21567+ 661: jmp 1f
21568+ .pushsection .altinstr_replacement, "a"
21569+ 662: ASM_NOP2
21570+ .popsection
21571+ .pushsection .altinstructions, "a"
21572+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
21573+ .popsection
21574+ cmp $0,%dil
21575+ jnz 3f
21576+ add $4097,%rdi
21577+ bts $63,%rdi
21578+ SET_RDI_INTO_CR3
21579+ jmp 2f
21580+1:
21581+
21582+ mov %rdi,%rbx
21583+
21584+#ifdef CONFIG_PAX_KERNEXEC
21585+ GET_CR0_INTO_RDI
21586+ btr $16,%rdi
21587+ jnc 3f
21588+ SET_RDI_INTO_CR0
21589+#endif
21590+
21591+ add $__START_KERNEL_map,%rbx
21592+ sub phys_base(%rip),%rbx
21593+
21594+#ifdef CONFIG_PARAVIRT
21595+ cmpl $0, pv_info+PARAVIRT_enabled
21596+ jz 1f
21597+ i = 0
21598+ .rept USER_PGD_PTRS
21599+ mov i*8(%rbx),%rsi
21600+ mov $0x67,%sil
21601+ lea i*8(%rbx),%rdi
21602+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
21603+ i = i + 1
21604+ .endr
21605+ jmp 2f
21606+1:
21607+#endif
21608+
21609+ i = 0
21610+ .rept USER_PGD_PTRS
21611+ movb $0x67,i*8(%rbx)
21612+ i = i + 1
21613+ .endr
21614+2:
21615+
21616+#ifdef CONFIG_PARAVIRT
21617+ PV_RESTORE_REGS(CLBR_RDI)
21618+#endif
21619+
21620+ popq %rbx
21621+ popq %rdi
21622+ pax_force_retaddr
21623+ retq
21624+3: ud2
21625+ENDPROC(pax_exit_kernel_user)
21626+#endif
21627+
21628+ .macro pax_enter_kernel_nmi
21629+ pax_set_fptr_mask
21630+
21631+#ifdef CONFIG_PAX_KERNEXEC
21632+ GET_CR0_INTO_RDI
21633+ bts $16,%rdi
21634+ jc 110f
21635+ SET_RDI_INTO_CR0
21636+ or $2,%ebx
21637+110:
21638+#endif
21639+
21640+#ifdef CONFIG_PAX_MEMORY_UDEREF
21641+ 661: jmp 111f
21642+ .pushsection .altinstr_replacement, "a"
21643+ 662: ASM_NOP2
21644+ .popsection
21645+ .pushsection .altinstructions, "a"
21646+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
21647+ .popsection
21648+ GET_CR3_INTO_RDI
21649+ cmp $0,%dil
21650+ jz 111f
21651+ sub $4097,%rdi
21652+ or $4,%ebx
21653+ bts $63,%rdi
21654+ SET_RDI_INTO_CR3
21655+ mov $__UDEREF_KERNEL_DS,%edi
21656+ mov %edi,%ss
21657+111:
21658+#endif
21659+ .endm
21660+
21661+ .macro pax_exit_kernel_nmi
21662+#ifdef CONFIG_PAX_KERNEXEC
21663+ btr $1,%ebx
21664+ jnc 110f
21665+ GET_CR0_INTO_RDI
21666+ btr $16,%rdi
21667+ SET_RDI_INTO_CR0
21668+110:
21669+#endif
21670+
21671+#ifdef CONFIG_PAX_MEMORY_UDEREF
21672+ btr $2,%ebx
21673+ jnc 111f
21674+ GET_CR3_INTO_RDI
21675+ add $4097,%rdi
21676+ bts $63,%rdi
21677+ SET_RDI_INTO_CR3
21678+ mov $__KERNEL_DS,%edi
21679+ mov %edi,%ss
21680+111:
21681+#endif
21682+ .endm
21683+
21684+ .macro pax_erase_kstack
21685+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
21686+ call pax_erase_kstack
21687+#endif
21688+ .endm
21689+
21690+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
21691+ENTRY(pax_erase_kstack)
21692+ pushq %rdi
21693+ pushq %rcx
21694+ pushq %rax
21695+ pushq %r11
21696+
21697+ GET_THREAD_INFO(%r11)
21698+ mov TI_lowest_stack(%r11), %rdi
21699+ mov $-0xBEEF, %rax
21700+ std
21701+
21702+1: mov %edi, %ecx
21703+ and $THREAD_SIZE_asm - 1, %ecx
21704+ shr $3, %ecx
21705+ repne scasq
21706+ jecxz 2f
21707+
21708+ cmp $2*8, %ecx
21709+ jc 2f
21710+
21711+ mov $2*8, %ecx
21712+ repe scasq
21713+ jecxz 2f
21714+ jne 1b
21715+
21716+2: cld
21717+ mov %esp, %ecx
21718+ sub %edi, %ecx
21719+
21720+ cmp $THREAD_SIZE_asm, %rcx
21721+ jb 3f
21722+ ud2
21723+3:
21724+
21725+ shr $3, %ecx
21726+ rep stosq
21727+
21728+ mov TI_task_thread_sp0(%r11), %rdi
21729+ sub $256, %rdi
21730+ mov %rdi, TI_lowest_stack(%r11)
21731+
21732+ popq %r11
21733+ popq %rax
21734+ popq %rcx
21735+ popq %rdi
21736+ pax_force_retaddr
21737+ ret
21738+ENDPROC(pax_erase_kstack)
21739+#endif
21740
21741 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
21742 #ifdef CONFIG_TRACE_IRQFLAGS
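Reading the first entry_64.S hunk: pax_enter_kernel_user/pax_exit_kernel_user implement UDEREF's kernel/user address-space split, an ancestor of the KPTI idea. On PCID-capable CPUs the sub $4097/add $4097 pair appears to hop between two page-table roots laid out 4096+1 apart, the +1 flipping the PCID in CR3's low bits and bts $63 setting the no-flush bit (a best-effort reading of the code above). Without PCID, the fallback rewrites the user half of the shared PGD in place: on kernel entry the low flag byte of each user entry is cleared (non-present), and on exit it is restored to 0x67, i.e. PRESENT|RW|USER|ACCESSED|DIRTY, so kernel code dereferencing a user pointer faults instead of quietly reading user memory. A sketch of that fallback with a byte array standing in for the live PGD (hypothetical helpers mirroring the .rept USER_PGD_PTRS loops):

#define USER_PGD_PTRS  256	/* user half of a 512-entry x86-64 PGD */
#define PGD_ENTRY_SIZE 8

/* kernel entry: make every user-space PGD entry non-present */
static void uderef_enter_sketch(unsigned char *pgd)
{
	int i;
	for (i = 0; i < USER_PGD_PTRS; i++)
		pgd[i * PGD_ENTRY_SIZE] = 0x00;	/* movb $0, i*8(%rbx) */
}

/* kernel exit: restore PRESENT|RW|USER|ACCESSED|DIRTY */
static void uderef_exit_sketch(unsigned char *pgd)
{
	int i;
	for (i = 0; i < USER_PGD_PTRS; i++)
		pgd[i * PGD_ENTRY_SIZE] = 0x67;	/* movb $0x67, i*8(%rbx) */
}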
21743@@ -375,8 +808,8 @@ ENDPROC(native_usergs_sysret64)
21744 .endm
21745
21746 .macro UNFAKE_STACK_FRAME
21747- addq $8*6, %rsp
21748- CFI_ADJUST_CFA_OFFSET -(6*8)
21749+ addq $8*6 + ARG_SKIP, %rsp
21750+ CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
21751 .endm
21752
21753 /*
21754@@ -463,7 +896,7 @@ ENDPROC(native_usergs_sysret64)
21755 movq %rsp, %rsi
21756
21757 leaq -RBP(%rsp),%rdi /* arg1 for handler */
21758- testl $3, CS-RBP(%rsi)
21759+ testb $3, CS-RBP(%rsi)
21760 je 1f
21761 SWAPGS
21762 /*
21763@@ -498,9 +931,10 @@ ENTRY(save_rest)
21764 movq_cfi r15, R15+16
21765 movq %r11, 8(%rsp) /* return address */
21766 FIXUP_TOP_OF_STACK %r11, 16
21767+ pax_force_retaddr
21768 ret
21769 CFI_ENDPROC
21770-END(save_rest)
21771+ENDPROC(save_rest)
21772
21773 /* save complete stack frame */
21774 .pushsection .kprobes.text, "ax"
21775@@ -529,9 +963,10 @@ ENTRY(save_paranoid)
21776 js 1f /* negative -> in kernel */
21777 SWAPGS
21778 xorl %ebx,%ebx
21779-1: ret
21780+1: pax_force_retaddr_bts
21781+ ret
21782 CFI_ENDPROC
21783-END(save_paranoid)
21784+ENDPROC(save_paranoid)
21785 .popsection
21786
21787 /*
21788@@ -553,7 +988,7 @@ ENTRY(ret_from_fork)
21789
21790 RESTORE_REST
21791
21792- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
21793+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
21794 jz 1f
21795
21796 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
21797@@ -571,7 +1006,7 @@ ENTRY(ret_from_fork)
21798 RESTORE_REST
21799 jmp int_ret_from_sys_call
21800 CFI_ENDPROC
21801-END(ret_from_fork)
21802+ENDPROC(ret_from_fork)
21803
21804 /*
21805 * System call entry. Up to 6 arguments in registers are supported.
21806@@ -608,7 +1043,7 @@ END(ret_from_fork)
21807 ENTRY(system_call)
21808 CFI_STARTPROC simple
21809 CFI_SIGNAL_FRAME
21810- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
21811+ CFI_DEF_CFA rsp,0
21812 CFI_REGISTER rip,rcx
21813 /*CFI_REGISTER rflags,r11*/
21814 SWAPGS_UNSAFE_STACK
21815@@ -621,16 +1056,23 @@ GLOBAL(system_call_after_swapgs)
21816
21817 movq %rsp,PER_CPU_VAR(old_rsp)
21818 movq PER_CPU_VAR(kernel_stack),%rsp
21819+ SAVE_ARGS 8*6,0
21820+ pax_enter_kernel_user
21821+
21822+#ifdef CONFIG_PAX_RANDKSTACK
21823+ pax_erase_kstack
21824+#endif
21825+
21826 /*
21827 * No need to follow this irqs off/on section - it's straight
21828 * and short:
21829 */
21830 ENABLE_INTERRUPTS(CLBR_NONE)
21831- SAVE_ARGS 8,0
21832 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
21833 movq %rcx,RIP-ARGOFFSET(%rsp)
21834 CFI_REL_OFFSET rip,RIP-ARGOFFSET
21835- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
21836+ GET_THREAD_INFO(%rcx)
21837+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
21838 jnz tracesys
21839 system_call_fastpath:
21840 #if __SYSCALL_MASK == ~0
21841@@ -640,7 +1082,7 @@ system_call_fastpath:
21842 cmpl $__NR_syscall_max,%eax
21843 #endif
21844 ja badsys
21845- movq %r10,%rcx
21846+ movq R10-ARGOFFSET(%rsp),%rcx
21847 call *sys_call_table(,%rax,8) # XXX: rip relative
21848 movq %rax,RAX-ARGOFFSET(%rsp)
21849 /*
21850@@ -654,10 +1096,13 @@ sysret_check:
21851 LOCKDEP_SYS_EXIT
21852 DISABLE_INTERRUPTS(CLBR_NONE)
21853 TRACE_IRQS_OFF
21854- movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
21855+ GET_THREAD_INFO(%rcx)
21856+ movl TI_flags(%rcx),%edx
21857 andl %edi,%edx
21858 jnz sysret_careful
21859 CFI_REMEMBER_STATE
21860+ pax_exit_kernel_user
21861+ pax_erase_kstack
21862 /*
21863 * sysretq will re-enable interrupts:
21864 */
21865@@ -709,14 +1154,18 @@ badsys:
21866 * jump back to the normal fast path.
21867 */
21868 auditsys:
21869- movq %r10,%r9 /* 6th arg: 4th syscall arg */
21870+ movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
21871 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
21872 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
21873 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
21874 movq %rax,%rsi /* 2nd arg: syscall number */
21875 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
21876 call __audit_syscall_entry
21877+
21878+ pax_erase_kstack
21879+
21880 LOAD_ARGS 0 /* reload call-clobbered registers */
21881+ pax_set_fptr_mask
21882 jmp system_call_fastpath
21883
21884 /*
21885@@ -737,7 +1186,7 @@ sysret_audit:
21886 /* Do syscall tracing */
21887 tracesys:
21888 #ifdef CONFIG_AUDITSYSCALL
21889- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
21890+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
21891 jz auditsys
21892 #endif
21893 SAVE_REST
21894@@ -745,12 +1194,16 @@ tracesys:
21895 FIXUP_TOP_OF_STACK %rdi
21896 movq %rsp,%rdi
21897 call syscall_trace_enter
21898+
21899+ pax_erase_kstack
21900+
21901 /*
21902 * Reload arg registers from stack in case ptrace changed them.
21903 * We don't reload %rax because syscall_trace_enter() returned
21904 * the value it wants us to use in the table lookup.
21905 */
21906 LOAD_ARGS ARGOFFSET, 1
21907+ pax_set_fptr_mask
21908 RESTORE_REST
21909 #if __SYSCALL_MASK == ~0
21910 cmpq $__NR_syscall_max,%rax
21911@@ -759,7 +1212,7 @@ tracesys:
21912 cmpl $__NR_syscall_max,%eax
21913 #endif
21914 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
21915- movq %r10,%rcx /* fixup for C */
21916+ movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
21917 call *sys_call_table(,%rax,8)
21918 movq %rax,RAX-ARGOFFSET(%rsp)
21919 /* Use IRET because user could have changed frame */
21920@@ -780,7 +1233,9 @@ GLOBAL(int_with_check)
21921 andl %edi,%edx
21922 jnz int_careful
21923 andl $~TS_COMPAT,TI_status(%rcx)
21924- jmp retint_swapgs
21925+ pax_exit_kernel_user
21926+ pax_erase_kstack
21927+ jmp retint_swapgs_pax
21928
21929 /* Either reschedule or signal or syscall exit tracking needed. */
21930 /* First do a reschedule test. */
21931@@ -826,7 +1281,7 @@ int_restore_rest:
21932 TRACE_IRQS_OFF
21933 jmp int_with_check
21934 CFI_ENDPROC
21935-END(system_call)
21936+ENDPROC(system_call)
21937
21938 .macro FORK_LIKE func
21939 ENTRY(stub_\func)
21940@@ -839,9 +1294,10 @@ ENTRY(stub_\func)
21941 DEFAULT_FRAME 0 8 /* offset 8: return address */
21942 call sys_\func
21943 RESTORE_TOP_OF_STACK %r11, 8
21944+ pax_force_retaddr
21945 ret $REST_SKIP /* pop extended registers */
21946 CFI_ENDPROC
21947-END(stub_\func)
21948+ENDPROC(stub_\func)
21949 .endm
21950
21951 .macro FIXED_FRAME label,func
21952@@ -851,9 +1307,10 @@ ENTRY(\label)
21953 FIXUP_TOP_OF_STACK %r11, 8-ARGOFFSET
21954 call \func
21955 RESTORE_TOP_OF_STACK %r11, 8-ARGOFFSET
21956+ pax_force_retaddr
21957 ret
21958 CFI_ENDPROC
21959-END(\label)
21960+ENDPROC(\label)
21961 .endm
21962
21963 FORK_LIKE clone
21964@@ -870,9 +1327,10 @@ ENTRY(ptregscall_common)
21965 movq_cfi_restore R12+8, r12
21966 movq_cfi_restore RBP+8, rbp
21967 movq_cfi_restore RBX+8, rbx
21968+ pax_force_retaddr
21969 ret $REST_SKIP /* pop extended registers */
21970 CFI_ENDPROC
21971-END(ptregscall_common)
21972+ENDPROC(ptregscall_common)
21973
21974 ENTRY(stub_execve)
21975 CFI_STARTPROC
21976@@ -885,7 +1343,7 @@ ENTRY(stub_execve)
21977 RESTORE_REST
21978 jmp int_ret_from_sys_call
21979 CFI_ENDPROC
21980-END(stub_execve)
21981+ENDPROC(stub_execve)
21982
21983 /*
21984 * sigreturn is special because it needs to restore all registers on return.
21985@@ -902,7 +1360,7 @@ ENTRY(stub_rt_sigreturn)
21986 RESTORE_REST
21987 jmp int_ret_from_sys_call
21988 CFI_ENDPROC
21989-END(stub_rt_sigreturn)
21990+ENDPROC(stub_rt_sigreturn)
21991
21992 #ifdef CONFIG_X86_X32_ABI
21993 ENTRY(stub_x32_rt_sigreturn)
21994@@ -916,7 +1374,7 @@ ENTRY(stub_x32_rt_sigreturn)
21995 RESTORE_REST
21996 jmp int_ret_from_sys_call
21997 CFI_ENDPROC
21998-END(stub_x32_rt_sigreturn)
21999+ENDPROC(stub_x32_rt_sigreturn)
22000
22001 ENTRY(stub_x32_execve)
22002 CFI_STARTPROC
22003@@ -930,7 +1388,7 @@ ENTRY(stub_x32_execve)
22004 RESTORE_REST
22005 jmp int_ret_from_sys_call
22006 CFI_ENDPROC
22007-END(stub_x32_execve)
22008+ENDPROC(stub_x32_execve)
22009
22010 #endif
22011
22012@@ -967,7 +1425,7 @@ vector=vector+1
22013 2: jmp common_interrupt
22014 .endr
22015 CFI_ENDPROC
22016-END(irq_entries_start)
22017+ENDPROC(irq_entries_start)
22018
22019 .previous
22020 END(interrupt)
22021@@ -987,6 +1445,16 @@ END(interrupt)
22022 subq $ORIG_RAX-RBP, %rsp
22023 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
22024 SAVE_ARGS_IRQ
22025+#ifdef CONFIG_PAX_MEMORY_UDEREF
22026+ testb $3, CS(%rdi)
22027+ jnz 1f
22028+ pax_enter_kernel
22029+ jmp 2f
22030+1: pax_enter_kernel_user
22031+2:
22032+#else
22033+ pax_enter_kernel
22034+#endif
22035 call \func
22036 .endm
22037
22038@@ -1019,7 +1487,7 @@ ret_from_intr:
22039
22040 exit_intr:
22041 GET_THREAD_INFO(%rcx)
22042- testl $3,CS-ARGOFFSET(%rsp)
22043+ testb $3,CS-ARGOFFSET(%rsp)
22044 je retint_kernel
22045
22046 /* Interrupt came from user space */
22047@@ -1041,12 +1509,16 @@ retint_swapgs: /* return to user-space */
22048 * The iretq could re-enable interrupts:
22049 */
22050 DISABLE_INTERRUPTS(CLBR_ANY)
22051+ pax_exit_kernel_user
22052+retint_swapgs_pax:
22053 TRACE_IRQS_IRETQ
22054 SWAPGS
22055 jmp restore_args
22056
22057 retint_restore_args: /* return to kernel space */
22058 DISABLE_INTERRUPTS(CLBR_ANY)
22059+ pax_exit_kernel
22060+ pax_force_retaddr (RIP-ARGOFFSET)
22061 /*
22062 * The iretq could re-enable interrupts:
22063 */
22064@@ -1129,7 +1601,7 @@ ENTRY(retint_kernel)
22065 #endif
22066
22067 CFI_ENDPROC
22068-END(common_interrupt)
22069+ENDPROC(common_interrupt)
22070 /*
22071 * End of kprobes section
22072 */
22073@@ -1147,7 +1619,7 @@ ENTRY(\sym)
22074 interrupt \do_sym
22075 jmp ret_from_intr
22076 CFI_ENDPROC
22077-END(\sym)
22078+ENDPROC(\sym)
22079 .endm
22080
22081 #ifdef CONFIG_SMP
22082@@ -1208,12 +1680,22 @@ ENTRY(\sym)
22083 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
22084 call error_entry
22085 DEFAULT_FRAME 0
22086+#ifdef CONFIG_PAX_MEMORY_UDEREF
22087+ testb $3, CS(%rsp)
22088+ jnz 1f
22089+ pax_enter_kernel
22090+ jmp 2f
22091+1: pax_enter_kernel_user
22092+2:
22093+#else
22094+ pax_enter_kernel
22095+#endif
22096 movq %rsp,%rdi /* pt_regs pointer */
22097 xorl %esi,%esi /* no error code */
22098 call \do_sym
22099 jmp error_exit /* %ebx: no swapgs flag */
22100 CFI_ENDPROC
22101-END(\sym)
22102+ENDPROC(\sym)
22103 .endm
22104
22105 .macro paranoidzeroentry sym do_sym
22106@@ -1226,15 +1708,25 @@ ENTRY(\sym)
22107 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
22108 call save_paranoid
22109 TRACE_IRQS_OFF
22110+#ifdef CONFIG_PAX_MEMORY_UDEREF
22111+ testb $3, CS(%rsp)
22112+ jnz 1f
22113+ pax_enter_kernel
22114+ jmp 2f
22115+1: pax_enter_kernel_user
22116+2:
22117+#else
22118+ pax_enter_kernel
22119+#endif
22120 movq %rsp,%rdi /* pt_regs pointer */
22121 xorl %esi,%esi /* no error code */
22122 call \do_sym
22123 jmp paranoid_exit /* %ebx: no swapgs flag */
22124 CFI_ENDPROC
22125-END(\sym)
22126+ENDPROC(\sym)
22127 .endm
22128
22129-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
22130+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
22131 .macro paranoidzeroentry_ist sym do_sym ist
22132 ENTRY(\sym)
22133 INTR_FRAME
22134@@ -1245,14 +1737,30 @@ ENTRY(\sym)
22135 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
22136 call save_paranoid
22137 TRACE_IRQS_OFF_DEBUG
22138+#ifdef CONFIG_PAX_MEMORY_UDEREF
22139+ testb $3, CS(%rsp)
22140+ jnz 1f
22141+ pax_enter_kernel
22142+ jmp 2f
22143+1: pax_enter_kernel_user
22144+2:
22145+#else
22146+ pax_enter_kernel
22147+#endif
22148 movq %rsp,%rdi /* pt_regs pointer */
22149 xorl %esi,%esi /* no error code */
22150+#ifdef CONFIG_SMP
22151+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
22152+ lea init_tss(%r12), %r12
22153+#else
22154+ lea init_tss(%rip), %r12
22155+#endif
22156 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
22157 call \do_sym
22158 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
22159 jmp paranoid_exit /* %ebx: no swapgs flag */
22160 CFI_ENDPROC
22161-END(\sym)
22162+ENDPROC(\sym)
22163 .endm
22164
22165 .macro errorentry sym do_sym
22166@@ -1264,13 +1772,23 @@ ENTRY(\sym)
22167 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
22168 call error_entry
22169 DEFAULT_FRAME 0
22170+#ifdef CONFIG_PAX_MEMORY_UDEREF
22171+ testb $3, CS(%rsp)
22172+ jnz 1f
22173+ pax_enter_kernel
22174+ jmp 2f
22175+1: pax_enter_kernel_user
22176+2:
22177+#else
22178+ pax_enter_kernel
22179+#endif
22180 movq %rsp,%rdi /* pt_regs pointer */
22181 movq ORIG_RAX(%rsp),%rsi /* get error code */
22182 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
22183 call \do_sym
22184 jmp error_exit /* %ebx: no swapgs flag */
22185 CFI_ENDPROC
22186-END(\sym)
22187+ENDPROC(\sym)
22188 .endm
22189
22190 /* error code is on the stack already */
22191@@ -1284,13 +1802,23 @@ ENTRY(\sym)
22192 call save_paranoid
22193 DEFAULT_FRAME 0
22194 TRACE_IRQS_OFF
22195+#ifdef CONFIG_PAX_MEMORY_UDEREF
22196+ testb $3, CS(%rsp)
22197+ jnz 1f
22198+ pax_enter_kernel
22199+ jmp 2f
22200+1: pax_enter_kernel_user
22201+2:
22202+#else
22203+ pax_enter_kernel
22204+#endif
22205 movq %rsp,%rdi /* pt_regs pointer */
22206 movq ORIG_RAX(%rsp),%rsi /* get error code */
22207 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
22208 call \do_sym
22209 jmp paranoid_exit /* %ebx: no swapgs flag */
22210 CFI_ENDPROC
22211-END(\sym)
22212+ENDPROC(\sym)
22213 .endm
22214
22215 zeroentry divide_error do_divide_error
22216@@ -1320,9 +1848,10 @@ gs_change:
22217 2: mfence /* workaround */
22218 SWAPGS
22219 popfq_cfi
22220+ pax_force_retaddr
22221 ret
22222 CFI_ENDPROC
22223-END(native_load_gs_index)
22224+ENDPROC(native_load_gs_index)
22225
22226 _ASM_EXTABLE(gs_change,bad_gs)
22227 .section .fixup,"ax"
22228@@ -1350,9 +1879,10 @@ ENTRY(call_softirq)
22229 CFI_DEF_CFA_REGISTER rsp
22230 CFI_ADJUST_CFA_OFFSET -8
22231 decl PER_CPU_VAR(irq_count)
22232+ pax_force_retaddr
22233 ret
22234 CFI_ENDPROC
22235-END(call_softirq)
22236+ENDPROC(call_softirq)
22237
22238 #ifdef CONFIG_XEN
22239 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
22240@@ -1390,7 +1920,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
22241 decl PER_CPU_VAR(irq_count)
22242 jmp error_exit
22243 CFI_ENDPROC
22244-END(xen_do_hypervisor_callback)
22245+ENDPROC(xen_do_hypervisor_callback)
22246
22247 /*
22248 * Hypervisor uses this for application faults while it executes.
22249@@ -1449,7 +1979,7 @@ ENTRY(xen_failsafe_callback)
22250 SAVE_ALL
22251 jmp error_exit
22252 CFI_ENDPROC
22253-END(xen_failsafe_callback)
22254+ENDPROC(xen_failsafe_callback)
22255
22256 apicinterrupt HYPERVISOR_CALLBACK_VECTOR \
22257 xen_hvm_callback_vector xen_evtchn_do_upcall
22258@@ -1501,18 +2031,33 @@ ENTRY(paranoid_exit)
22259 DEFAULT_FRAME
22260 DISABLE_INTERRUPTS(CLBR_NONE)
22261 TRACE_IRQS_OFF_DEBUG
22262- testl %ebx,%ebx /* swapgs needed? */
22263+ testl $1,%ebx /* swapgs needed? */
22264 jnz paranoid_restore
22265- testl $3,CS(%rsp)
22266+ testb $3,CS(%rsp)
22267 jnz paranoid_userspace
22268+#ifdef CONFIG_PAX_MEMORY_UDEREF
22269+ pax_exit_kernel
22270+ TRACE_IRQS_IRETQ 0
22271+ SWAPGS_UNSAFE_STACK
22272+ RESTORE_ALL 8
22273+ pax_force_retaddr_bts
22274+ jmp irq_return
22275+#endif
22276 paranoid_swapgs:
22277+#ifdef CONFIG_PAX_MEMORY_UDEREF
22278+ pax_exit_kernel_user
22279+#else
22280+ pax_exit_kernel
22281+#endif
22282 TRACE_IRQS_IRETQ 0
22283 SWAPGS_UNSAFE_STACK
22284 RESTORE_ALL 8
22285 jmp irq_return
22286 paranoid_restore:
22287+ pax_exit_kernel
22288 TRACE_IRQS_IRETQ_DEBUG 0
22289 RESTORE_ALL 8
22290+ pax_force_retaddr_bts
22291 jmp irq_return
22292 paranoid_userspace:
22293 GET_THREAD_INFO(%rcx)
22294@@ -1541,7 +2086,7 @@ paranoid_schedule:
22295 TRACE_IRQS_OFF
22296 jmp paranoid_userspace
22297 CFI_ENDPROC
22298-END(paranoid_exit)
22299+ENDPROC(paranoid_exit)
22300
22301 /*
22302 * Exception entry point. This expects an error code/orig_rax on the stack.
22303@@ -1568,12 +2113,13 @@ ENTRY(error_entry)
22304 movq_cfi r14, R14+8
22305 movq_cfi r15, R15+8
22306 xorl %ebx,%ebx
22307- testl $3,CS+8(%rsp)
22308+ testb $3,CS+8(%rsp)
22309 je error_kernelspace
22310 error_swapgs:
22311 SWAPGS
22312 error_sti:
22313 TRACE_IRQS_OFF
22314+ pax_force_retaddr_bts
22315 ret
22316
22317 /*
22318@@ -1600,7 +2146,7 @@ bstep_iret:
22319 movq %rcx,RIP+8(%rsp)
22320 jmp error_swapgs
22321 CFI_ENDPROC
22322-END(error_entry)
22323+ENDPROC(error_entry)
22324
22325
22326 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
22327@@ -1611,7 +2157,7 @@ ENTRY(error_exit)
22328 DISABLE_INTERRUPTS(CLBR_NONE)
22329 TRACE_IRQS_OFF
22330 GET_THREAD_INFO(%rcx)
22331- testl %eax,%eax
22332+ testl $1,%eax
22333 jne retint_kernel
22334 LOCKDEP_SYS_EXIT_IRQ
22335 movl TI_flags(%rcx),%edx
22336@@ -1620,7 +2166,7 @@ ENTRY(error_exit)
22337 jnz retint_careful
22338 jmp retint_swapgs
22339 CFI_ENDPROC
22340-END(error_exit)
22341+ENDPROC(error_exit)
22342
22343 /*
22344 * Test if a given stack is an NMI stack or not.
22345@@ -1678,9 +2224,11 @@ ENTRY(nmi)
22346 * If %cs was not the kernel segment, then the NMI triggered in user
22347 * space, which means it is definitely not nested.
22348 */
22349+ cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
22350+ je 1f
22351 cmpl $__KERNEL_CS, 16(%rsp)
22352 jne first_nmi
22353-
22354+1:
22355 /*
22356 * Check the special variable on the stack to see if NMIs are
22357 * executing.
22358@@ -1714,8 +2262,7 @@ nested_nmi:
22359
22360 1:
22361 /* Set up the interrupted NMIs stack to jump to repeat_nmi */
22362- leaq -1*8(%rsp), %rdx
22363- movq %rdx, %rsp
22364+ subq $8, %rsp
22365 CFI_ADJUST_CFA_OFFSET 1*8
22366 leaq -10*8(%rsp), %rdx
22367 pushq_cfi $__KERNEL_DS
22368@@ -1733,6 +2280,7 @@ nested_nmi_out:
22369 CFI_RESTORE rdx
22370
22371 /* No need to check faults here */
22372+# pax_force_retaddr_bts
22373 INTERRUPT_RETURN
22374
22375 CFI_RESTORE_STATE
22376@@ -1849,6 +2397,8 @@ end_repeat_nmi:
22377 */
22378 movq %cr2, %r12
22379
22380+ pax_enter_kernel_nmi
22381+
22382 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
22383 movq %rsp,%rdi
22384 movq $-1,%rsi
22385@@ -1861,26 +2411,31 @@ end_repeat_nmi:
22386 movq %r12, %cr2
22387 1:
22388
22389- testl %ebx,%ebx /* swapgs needed? */
22390+ testl $1,%ebx /* swapgs needed? */
22391 jnz nmi_restore
22392 nmi_swapgs:
22393 SWAPGS_UNSAFE_STACK
22394 nmi_restore:
22395+ pax_exit_kernel_nmi
22396 /* Pop the extra iret frame at once */
22397 RESTORE_ALL 6*8
22398+ testb $3, 8(%rsp)
22399+ jnz 1f
22400+ pax_force_retaddr_bts
22401+1:
22402
22403 /* Clear the NMI executing stack variable */
22404 movq $0, 5*8(%rsp)
22405 jmp irq_return
22406 CFI_ENDPROC
22407-END(nmi)
22408+ENDPROC(nmi)
22409
22410 ENTRY(ignore_sysret)
22411 CFI_STARTPROC
22412 mov $-ENOSYS,%eax
22413 sysret
22414 CFI_ENDPROC
22415-END(ignore_sysret)
22416+ENDPROC(ignore_sysret)
22417
22418 /*
22419 * End of kprobes section
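
A brief aside on the `testb $3, CS(%rsp)` idiom the entry_64.S hunks above keep adding: the low two bits of the saved CS selector hold the privilege level of the interrupted context, so the test is zero for kernel frames and non-zero for user frames, which is how UDEREF picks between pax_enter_kernel and pax_enter_kernel_user. A minimal userspace model of that dispatch — names hypothetical, not code from the patch:

#include <stdint.h>
#include <stdio.h>

/* Bits 0-1 of a segment selector are its privilege level:
 * 0 for a kernel CS, 3 for a user CS. */
static const char *pax_entry_path(uint16_t saved_cs)
{
	return (saved_cs & 3) ? "pax_enter_kernel_user" : "pax_enter_kernel";
}

int main(void)
{
	printf("%s\n", pax_entry_path(0x10));	/* kernel-style selector */
	printf("%s\n", pax_entry_path(0x33));	/* user selector, RPL 3 */
	return 0;
}

The same observation explains the recurring `testl $3,CS...` to `testb $3,CS...` changes: only the low byte matters, and the byte form has a shorter encoding.
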
22420diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
22421index 42a392a..fbbd930 100644
22422--- a/arch/x86/kernel/ftrace.c
22423+++ b/arch/x86/kernel/ftrace.c
22424@@ -105,6 +105,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
22425 {
22426 unsigned char replaced[MCOUNT_INSN_SIZE];
22427
22428+ ip = ktla_ktva(ip);
22429+
22430 /*
22431 * Note: Due to modules and __init, code can
22432 * disappear and change, we need to protect against faulting
22433@@ -227,7 +229,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
22434 unsigned char old[MCOUNT_INSN_SIZE], *new;
22435 int ret;
22436
22437- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
22438+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
22439 new = ftrace_call_replace(ip, (unsigned long)func);
22440
22441 /* See comment above by declaration of modifying_ftrace_code */
22442@@ -238,7 +240,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
22443 /* Also update the regs callback function */
22444 if (!ret) {
22445 ip = (unsigned long)(&ftrace_regs_call);
22446- memcpy(old, &ftrace_regs_call, MCOUNT_INSN_SIZE);
22447+ memcpy(old, ktla_ktva((void *)&ftrace_regs_call), MCOUNT_INSN_SIZE);
22448 new = ftrace_call_replace(ip, (unsigned long)func);
22449 ret = ftrace_modify_code(ip, old, new);
22450 }
22451@@ -279,7 +281,7 @@ static int ftrace_write(unsigned long ip, const char *val, int size)
22452 * kernel identity mapping to modify code.
22453 */
22454 if (within(ip, (unsigned long)_text, (unsigned long)_etext))
22455- ip = (unsigned long)__va(__pa_symbol(ip));
22456+ ip = (unsigned long)__va(__pa_symbol(ktla_ktva(ip)));
22457
22458 return probe_kernel_write((void *)ip, val, size);
22459 }
22460@@ -289,7 +291,7 @@ static int add_break(unsigned long ip, const char *old)
22461 unsigned char replaced[MCOUNT_INSN_SIZE];
22462 unsigned char brk = BREAKPOINT_INSTRUCTION;
22463
22464- if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
22465+ if (probe_kernel_read(replaced, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE))
22466 return -EFAULT;
22467
22468 /* Make sure it is what we expect it to be */
22469@@ -637,7 +639,7 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
22470 return ret;
22471
22472 fail_update:
22473- probe_kernel_write((void *)ip, &old_code[0], 1);
22474+ probe_kernel_write((void *)ktla_ktva(ip), &old_code[0], 1);
22475 goto out;
22476 }
22477
22478@@ -670,6 +672,8 @@ static int ftrace_mod_jmp(unsigned long ip,
22479 {
22480 unsigned char code[MCOUNT_INSN_SIZE];
22481
22482+ ip = ktla_ktva(ip);
22483+
22484 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
22485 return -EFAULT;
22486
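The ftrace.c changes all funnel through ktla_ktva()/ktva_ktla(). Under CONFIG_PAX_KERNEXEC the kernel text is kept read-only at its linear address and (on i386) is also reachable through an alias at a constant offset, so code patching reads and writes through the alias while breakpoints and exception-table lookups still use the linear address. A hypothetical sketch of the pair with a made-up offset — on amd64 both macros are expected to be identity:

#include <stdint.h>

#define KERNEL_TEXT_OFFSET 0x01000000UL	/* illustrative value only */

static inline uintptr_t ktla_ktva(uintptr_t addr)	/* linear -> alias */
{
	return addr + KERNEL_TEXT_OFFSET;
}

static inline uintptr_t ktva_ktla(uintptr_t addr)	/* alias -> linear */
{
	return addr - KERNEL_TEXT_OFFSET;
}
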
22487diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
22488index 55b6761..a6456fc 100644
22489--- a/arch/x86/kernel/head64.c
22490+++ b/arch/x86/kernel/head64.c
22491@@ -67,12 +67,12 @@ again:
22492 pgd = *pgd_p;
22493
22494 /*
22495- * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
22496- * critical -- __PAGE_OFFSET would point us back into the dynamic
22497+ * The use of __early_va rather than __va here is critical:
22498+ * __va would point us back into the dynamic
22499 * range and we might end up looping forever...
22500 */
22501 if (pgd)
22502- pud_p = (pudval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
22503+ pud_p = (pudval_t *)(__early_va(pgd & PTE_PFN_MASK));
22504 else {
22505 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
22506 reset_early_page_tables();
22507@@ -82,13 +82,13 @@ again:
22508 pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
22509 for (i = 0; i < PTRS_PER_PUD; i++)
22510 pud_p[i] = 0;
22511- *pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
22512+ *pgd_p = (pgdval_t)__pa(pud_p) + _KERNPG_TABLE;
22513 }
22514 pud_p += pud_index(address);
22515 pud = *pud_p;
22516
22517 if (pud)
22518- pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
22519+ pmd_p = (pmdval_t *)(__early_va(pud & PTE_PFN_MASK));
22520 else {
22521 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
22522 reset_early_page_tables();
22523@@ -98,7 +98,7 @@ again:
22524 pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
22525 for (i = 0; i < PTRS_PER_PMD; i++)
22526 pmd_p[i] = 0;
22527- *pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
22528+ *pud_p = (pudval_t)__pa(pmd_p) + _KERNPG_TABLE;
22529 }
22530 pmd = (physaddr & PMD_MASK) + early_pmd_flags;
22531 pmd_p[pmd_index(address)] = pmd;
22532@@ -175,7 +175,6 @@ void __init x86_64_start_kernel(char * real_mode_data)
22533 if (console_loglevel == 10)
22534 early_printk("Kernel alive\n");
22535
22536- clear_page(init_level4_pgt);
22537 /* set init_level4_pgt kernel high mapping*/
22538 init_level4_pgt[511] = early_level4_pgt[511];
22539
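For reference on the arithmetic being replaced in head64.c above: an upper-level page-table entry is simply the physical address of the next-level table OR'ed with permission bits, and `virt - __START_KERNEL_map + phys_base` is the early-boot way of computing that physical address before the usual __va()/__pa() offsets are usable. A minimal model of the entry construction — the 0x63 value matches x86's PRESENT|RW|ACCESSED|DIRTY _KERNPG_TABLE bits, but treat the sketch as illustrative:

#include <stdint.h>

#define _KERNPG_TABLE 0x63ULL	/* PRESENT | RW | ACCESSED | DIRTY */

/* Build a pgd/pud entry pointing at a page-aligned next-level table. */
static inline uint64_t make_table_entry(uint64_t next_table_phys)
{
	return next_table_phys | _KERNPG_TABLE;
}
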
22540diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
22541index 73afd11..0ef46f2 100644
22542--- a/arch/x86/kernel/head_32.S
22543+++ b/arch/x86/kernel/head_32.S
22544@@ -26,6 +26,12 @@
22545 /* Physical address */
22546 #define pa(X) ((X) - __PAGE_OFFSET)
22547
22548+#ifdef CONFIG_PAX_KERNEXEC
22549+#define ta(X) (X)
22550+#else
22551+#define ta(X) ((X) - __PAGE_OFFSET)
22552+#endif
22553+
22554 /*
22555 * References to members of the new_cpu_data structure.
22556 */
22557@@ -55,11 +61,7 @@
22558 * and small than max_low_pfn, otherwise will waste some page table entries
22559 */
22560
22561-#if PTRS_PER_PMD > 1
22562-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
22563-#else
22564-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
22565-#endif
22566+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
22567
22568 /* Number of possible pages in the lowmem region */
22569 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
22570@@ -78,6 +80,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
22571 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
22572
22573 /*
22574+ * Real beginning of normal "text" segment
22575+ */
22576+ENTRY(stext)
22577+ENTRY(_stext)
22578+
22579+/*
22580 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
22581 * %esi points to the real-mode code as a 32-bit pointer.
22582 * CS and DS must be 4 GB flat segments, but we don't depend on
22583@@ -85,6 +93,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
22584 * can.
22585 */
22586 __HEAD
22587+
22588+#ifdef CONFIG_PAX_KERNEXEC
22589+ jmp startup_32
22590+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
22591+.fill PAGE_SIZE-5,1,0xcc
22592+#endif
22593+
22594 ENTRY(startup_32)
22595 movl pa(stack_start),%ecx
22596
22597@@ -106,6 +121,59 @@ ENTRY(startup_32)
22598 2:
22599 leal -__PAGE_OFFSET(%ecx),%esp
22600
22601+#ifdef CONFIG_SMP
22602+ movl $pa(cpu_gdt_table),%edi
22603+ movl $__per_cpu_load,%eax
22604+ movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi)
22605+ rorl $16,%eax
22606+ movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi)
22607+ movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
22608+ movl $__per_cpu_end - 1,%eax
22609+ subl $__per_cpu_start,%eax
22610+ movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
22611+#endif
22612+
22613+#ifdef CONFIG_PAX_MEMORY_UDEREF
22614+ movl $NR_CPUS,%ecx
22615+ movl $pa(cpu_gdt_table),%edi
22616+1:
22617+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
22618+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
22619+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
22620+ addl $PAGE_SIZE_asm,%edi
22621+ loop 1b
22622+#endif
22623+
22624+#ifdef CONFIG_PAX_KERNEXEC
22625+ movl $pa(boot_gdt),%edi
22626+ movl $__LOAD_PHYSICAL_ADDR,%eax
22627+ movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi)
22628+ rorl $16,%eax
22629+ movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi)
22630+ movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi)
22631+ rorl $16,%eax
22632+
22633+ ljmp $(__BOOT_CS),$1f
22634+1:
22635+
22636+ movl $NR_CPUS,%ecx
22637+ movl $pa(cpu_gdt_table),%edi
22638+ addl $__PAGE_OFFSET,%eax
22639+1:
22640+ movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi)
22641+ movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi)
22642+ movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi)
22643+ movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi)
22644+ rorl $16,%eax
22645+ movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi)
22646+ movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi)
22647+ movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi)
22648+ movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi)
22649+ rorl $16,%eax
22650+ addl $PAGE_SIZE_asm,%edi
22651+ loop 1b
22652+#endif
22653+
22654 /*
22655 * Clear BSS first so that there are no surprises...
22656 */
22657@@ -201,8 +269,11 @@ ENTRY(startup_32)
22658 movl %eax, pa(max_pfn_mapped)
22659
22660 /* Do early initialization of the fixmap area */
22661- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
22662- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
22663+#ifdef CONFIG_COMPAT_VDSO
22664+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
22665+#else
22666+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
22667+#endif
22668 #else /* Not PAE */
22669
22670 page_pde_offset = (__PAGE_OFFSET >> 20);
22671@@ -232,8 +303,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
22672 movl %eax, pa(max_pfn_mapped)
22673
22674 /* Do early initialization of the fixmap area */
22675- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
22676- movl %eax,pa(initial_page_table+0xffc)
22677+#ifdef CONFIG_COMPAT_VDSO
22678+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
22679+#else
22680+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
22681+#endif
22682 #endif
22683
22684 #ifdef CONFIG_PARAVIRT
22685@@ -247,9 +321,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
22686 cmpl $num_subarch_entries, %eax
22687 jae bad_subarch
22688
22689- movl pa(subarch_entries)(,%eax,4), %eax
22690- subl $__PAGE_OFFSET, %eax
22691- jmp *%eax
22692+ jmp *pa(subarch_entries)(,%eax,4)
22693
22694 bad_subarch:
22695 WEAK(lguest_entry)
22696@@ -261,10 +333,10 @@ WEAK(xen_entry)
22697 __INITDATA
22698
22699 subarch_entries:
22700- .long default_entry /* normal x86/PC */
22701- .long lguest_entry /* lguest hypervisor */
22702- .long xen_entry /* Xen hypervisor */
22703- .long default_entry /* Moorestown MID */
22704+ .long ta(default_entry) /* normal x86/PC */
22705+ .long ta(lguest_entry) /* lguest hypervisor */
22706+ .long ta(xen_entry) /* Xen hypervisor */
22707+ .long ta(default_entry) /* Moorestown MID */
22708 num_subarch_entries = (. - subarch_entries) / 4
22709 .previous
22710 #else
22711@@ -355,6 +427,7 @@ default_entry:
22712 movl pa(mmu_cr4_features),%eax
22713 movl %eax,%cr4
22714
22715+#ifdef CONFIG_X86_PAE
22716 testb $X86_CR4_PAE, %al # check if PAE is enabled
22717 jz enable_paging
22718
22719@@ -383,6 +456,9 @@ default_entry:
22720 /* Make changes effective */
22721 wrmsr
22722
22723+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
22724+#endif
22725+
22726 enable_paging:
22727
22728 /*
22729@@ -451,14 +527,20 @@ is486:
22730 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
22731 movl %eax,%ss # after changing gdt.
22732
22733- movl $(__USER_DS),%eax # DS/ES contains default USER segment
22734+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
22735 movl %eax,%ds
22736 movl %eax,%es
22737
22738 movl $(__KERNEL_PERCPU), %eax
22739 movl %eax,%fs # set this cpu's percpu
22740
22741+#ifdef CONFIG_CC_STACKPROTECTOR
22742 movl $(__KERNEL_STACK_CANARY),%eax
22743+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
22744+ movl $(__USER_DS),%eax
22745+#else
22746+ xorl %eax,%eax
22747+#endif
22748 movl %eax,%gs
22749
22750 xorl %eax,%eax # Clear LDT
22751@@ -534,8 +616,11 @@ setup_once:
22752 * relocation. Manually set base address in stack canary
22753 * segment descriptor.
22754 */
22755- movl $gdt_page,%eax
22756+ movl $cpu_gdt_table,%eax
22757 movl $stack_canary,%ecx
22758+#ifdef CONFIG_SMP
22759+ addl $__per_cpu_load,%ecx
22760+#endif
22761 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
22762 shrl $16, %ecx
22763 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
22764@@ -566,7 +651,7 @@ ENDPROC(early_idt_handlers)
22765 /* This is global to keep gas from relaxing the jumps */
22766 ENTRY(early_idt_handler)
22767 cld
22768- cmpl $2,%ss:early_recursion_flag
22769+ cmpl $1,%ss:early_recursion_flag
22770 je hlt_loop
22771 incl %ss:early_recursion_flag
22772
22773@@ -604,8 +689,8 @@ ENTRY(early_idt_handler)
22774 pushl (20+6*4)(%esp) /* trapno */
22775 pushl $fault_msg
22776 call printk
22777-#endif
22778 call dump_stack
22779+#endif
22780 hlt_loop:
22781 hlt
22782 jmp hlt_loop
22783@@ -624,8 +709,11 @@ ENDPROC(early_idt_handler)
22784 /* This is the default interrupt "handler" :-) */
22785 ALIGN
22786 ignore_int:
22787- cld
22788 #ifdef CONFIG_PRINTK
22789+ cmpl $2,%ss:early_recursion_flag
22790+ je hlt_loop
22791+ incl %ss:early_recursion_flag
22792+ cld
22793 pushl %eax
22794 pushl %ecx
22795 pushl %edx
22796@@ -634,9 +722,6 @@ ignore_int:
22797 movl $(__KERNEL_DS),%eax
22798 movl %eax,%ds
22799 movl %eax,%es
22800- cmpl $2,early_recursion_flag
22801- je hlt_loop
22802- incl early_recursion_flag
22803 pushl 16(%esp)
22804 pushl 24(%esp)
22805 pushl 32(%esp)
22806@@ -670,29 +755,43 @@ ENTRY(setup_once_ref)
22807 /*
22808 * BSS section
22809 */
22810-__PAGE_ALIGNED_BSS
22811- .align PAGE_SIZE
22812 #ifdef CONFIG_X86_PAE
22813+.section .initial_pg_pmd,"a",@progbits
22814 initial_pg_pmd:
22815 .fill 1024*KPMDS,4,0
22816 #else
22817+.section .initial_page_table,"a",@progbits
22818 ENTRY(initial_page_table)
22819 .fill 1024,4,0
22820 #endif
22821+.section .initial_pg_fixmap,"a",@progbits
22822 initial_pg_fixmap:
22823 .fill 1024,4,0
22824+.section .empty_zero_page,"a",@progbits
22825 ENTRY(empty_zero_page)
22826 .fill 4096,1,0
22827+.section .swapper_pg_dir,"a",@progbits
22828 ENTRY(swapper_pg_dir)
22829+#ifdef CONFIG_X86_PAE
22830+ .fill 4,8,0
22831+#else
22832 .fill 1024,4,0
22833+#endif
22834+
22835+/*
22836+ * The IDT has to be page-aligned to simplify the Pentium
22837+ * F0 0F bug workaround.. We have a special link segment
22838+ * for this.
22839+ */
22840+.section .idt,"a",@progbits
22841+ENTRY(idt_table)
22842+ .fill 256,8,0
22843
22844 /*
22845 * This starts the data section.
22846 */
22847 #ifdef CONFIG_X86_PAE
22848-__PAGE_ALIGNED_DATA
22849- /* Page-aligned for the benefit of paravirt? */
22850- .align PAGE_SIZE
22851+.section .initial_page_table,"a",@progbits
22852 ENTRY(initial_page_table)
22853 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
22854 # if KPMDS == 3
22855@@ -711,12 +810,20 @@ ENTRY(initial_page_table)
22856 # error "Kernel PMDs should be 1, 2 or 3"
22857 # endif
22858 .align PAGE_SIZE /* needs to be page-sized too */
22859+
22860+#ifdef CONFIG_PAX_PER_CPU_PGD
22861+ENTRY(cpu_pgd)
22862+ .rept 2*NR_CPUS
22863+ .fill 4,8,0
22864+ .endr
22865+#endif
22866+
22867 #endif
22868
22869 .data
22870 .balign 4
22871 ENTRY(stack_start)
22872- .long init_thread_union+THREAD_SIZE
22873+ .long init_thread_union+THREAD_SIZE-8
22874
22875 __INITRODATA
22876 int_msg:
22877@@ -744,7 +851,7 @@ fault_msg:
22878 * segment size, and 32-bit linear address value:
22879 */
22880
22881- .data
22882+.section .rodata,"a",@progbits
22883 .globl boot_gdt_descr
22884 .globl idt_descr
22885
22886@@ -753,7 +860,7 @@ fault_msg:
22887 .word 0 # 32 bit align gdt_desc.address
22888 boot_gdt_descr:
22889 .word __BOOT_DS+7
22890- .long boot_gdt - __PAGE_OFFSET
22891+ .long pa(boot_gdt)
22892
22893 .word 0 # 32-bit align idt_desc.address
22894 idt_descr:
22895@@ -764,7 +871,7 @@ idt_descr:
22896 .word 0 # 32 bit align gdt_desc.address
22897 ENTRY(early_gdt_descr)
22898 .word GDT_ENTRIES*8-1
22899- .long gdt_page /* Overwritten for secondary CPUs */
22900+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
22901
22902 /*
22903 * The boot_gdt must mirror the equivalent in setup.S and is
22904@@ -773,5 +880,65 @@ ENTRY(early_gdt_descr)
22905 .align L1_CACHE_BYTES
22906 ENTRY(boot_gdt)
22907 .fill GDT_ENTRY_BOOT_CS,8,0
22908- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
22909- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
22910+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
22911+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
22912+
22913+ .align PAGE_SIZE_asm
22914+ENTRY(cpu_gdt_table)
22915+ .rept NR_CPUS
22916+ .quad 0x0000000000000000 /* NULL descriptor */
22917+ .quad 0x0000000000000000 /* 0x0b reserved */
22918+ .quad 0x0000000000000000 /* 0x13 reserved */
22919+ .quad 0x0000000000000000 /* 0x1b reserved */
22920+
22921+#ifdef CONFIG_PAX_KERNEXEC
22922+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
22923+#else
22924+ .quad 0x0000000000000000 /* 0x20 unused */
22925+#endif
22926+
22927+ .quad 0x0000000000000000 /* 0x28 unused */
22928+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
22929+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
22930+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
22931+ .quad 0x0000000000000000 /* 0x4b reserved */
22932+ .quad 0x0000000000000000 /* 0x53 reserved */
22933+ .quad 0x0000000000000000 /* 0x5b reserved */
22934+
22935+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
22936+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
22937+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
22938+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
22939+
22940+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
22941+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
22942+
22943+ /*
22944+ * Segments used for calling PnP BIOS have byte granularity.
22945+ * The code segments and data segments have fixed 64k limits,
22946+ * the transfer segment sizes are set at run time.
22947+ */
22948+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
22949+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
22950+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
22951+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
22952+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
22953+
22954+ /*
22955+ * The APM segments have byte granularity and their bases
22956+ * are set at run time. All have 64k limits.
22957+ */
22958+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
22959+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
22960+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
22961+
22962+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
22963+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
22964+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
22965+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
22966+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
22967+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
22968+
22969+ /* Be sure this is zeroed to avoid false validations in Xen */
22970+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
22971+ .endr
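
The cpu_gdt_table block above hand-encodes x86 segment descriptors as raw quadwords. For decoding them, this sketch reproduces the architectural layout (base scattered across bits 16-39 and 56-63, limit across bits 0-15 and 48-51, access byte at 40-47, flags at 52-55); it is an illustration, not kernel code:

#include <stdint.h>

static uint64_t gdt_entry(uint32_t base, uint32_t limit,
			  uint8_t access, uint8_t flags)
{
	uint64_t d = 0;

	d |= (uint64_t)(limit & 0xffff);		/* limit 15:0  */
	d |= (uint64_t)(base & 0xffffff) << 16;		/* base 23:0   */
	d |= (uint64_t)access << 40;			/* type/DPL/P  */
	d |= (uint64_t)((limit >> 16) & 0xf) << 48;	/* limit 19:16 */
	d |= (uint64_t)(flags & 0xf) << 52;		/* G/D/L/AVL   */
	d |= (uint64_t)(base >> 24) << 56;		/* base 31:24  */
	return d;
}

/* gdt_entry(0, 0xfffff, 0x9b, 0xc) == 0x00cf9b000000ffff, the
 * "kernel 4GB code at 0x00000000" descriptor used above. Note the
 * patch also flips 0x9a to 0x9b in boot_gdt: that just presets the
 * accessed bit in the access byte. */
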
22972diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
22973index a836860..1b5c665 100644
22974--- a/arch/x86/kernel/head_64.S
22975+++ b/arch/x86/kernel/head_64.S
22976@@ -20,6 +20,8 @@
22977 #include <asm/processor-flags.h>
22978 #include <asm/percpu.h>
22979 #include <asm/nops.h>
22980+#include <asm/cpufeature.h>
22981+#include <asm/alternative-asm.h>
22982
22983 #ifdef CONFIG_PARAVIRT
22984 #include <asm/asm-offsets.h>
22985@@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
22986 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
22987 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
22988 L3_START_KERNEL = pud_index(__START_KERNEL_map)
22989+L4_VMALLOC_START = pgd_index(VMALLOC_START)
22990+L3_VMALLOC_START = pud_index(VMALLOC_START)
22991+L4_VMALLOC_END = pgd_index(VMALLOC_END)
22992+L3_VMALLOC_END = pud_index(VMALLOC_END)
22993+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
22994+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
22995
22996 .text
22997 __HEAD
22998@@ -89,11 +97,23 @@ startup_64:
22999 * Fixup the physical addresses in the page table
23000 */
23001 addq %rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)
23002+ addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
23003+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
23004+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
23005+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
23006+ addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
23007
23008- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
23009- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
23010+ addq %rbp, level3_ident_pgt + (0*8)(%rip)
23011+#ifndef CONFIG_XEN
23012+ addq %rbp, level3_ident_pgt + (1*8)(%rip)
23013+#endif
23014
23015- addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
23016+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
23017+
23018+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
23019+ addq %rbp, level3_kernel_pgt + ((L3_START_KERNEL+1)*8)(%rip)
23020+
23021+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
23022
23023 /*
23024 * Set up the identity mapping for the switchover. These
23025@@ -177,8 +197,8 @@ ENTRY(secondary_startup_64)
23026 movq $(init_level4_pgt - __START_KERNEL_map), %rax
23027 1:
23028
23029- /* Enable PAE mode and PGE */
23030- movl $(X86_CR4_PAE | X86_CR4_PGE), %ecx
23031+ /* Enable PAE mode and PSE/PGE */
23032+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %ecx
23033 movq %rcx, %cr4
23034
23035 /* Setup early boot stage 4 level pagetables. */
23036@@ -199,10 +219,18 @@ ENTRY(secondary_startup_64)
23037 movl $MSR_EFER, %ecx
23038 rdmsr
23039 btsl $_EFER_SCE, %eax /* Enable System Call */
23040- btl $20,%edi /* No Execute supported? */
23041+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
23042 jnc 1f
23043 btsl $_EFER_NX, %eax
23044 btsq $_PAGE_BIT_NX,early_pmd_flags(%rip)
23045+ leaq init_level4_pgt(%rip), %rdi
23046+#ifndef CONFIG_EFI
23047+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
23048+#endif
23049+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
23050+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
23051+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
23052+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
23053 1: wrmsr /* Make changes effective */
23054
23055 /* Setup cr0 */
23056@@ -282,6 +310,7 @@ ENTRY(secondary_startup_64)
23057 * REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
23058 * address given in m16:64.
23059 */
23060+ pax_set_fptr_mask
23061 movq initial_code(%rip),%rax
23062 pushq $0 # fake return address to stop unwinder
23063 pushq $__KERNEL_CS # set correct cs
23064@@ -388,7 +417,7 @@ ENTRY(early_idt_handler)
23065 call dump_stack
23066 #ifdef CONFIG_KALLSYMS
23067 leaq early_idt_ripmsg(%rip),%rdi
23068- movq 40(%rsp),%rsi # %rip again
23069+ movq 88(%rsp),%rsi # %rip again
23070 call __print_symbol
23071 #endif
23072 #endif /* EARLY_PRINTK */
23073@@ -416,6 +445,7 @@ ENDPROC(early_idt_handler)
23074 early_recursion_flag:
23075 .long 0
23076
23077+ .section .rodata,"a",@progbits
23078 #ifdef CONFIG_EARLY_PRINTK
23079 early_idt_msg:
23080 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
23081@@ -443,29 +473,52 @@ NEXT_PAGE(early_level4_pgt)
23082 NEXT_PAGE(early_dynamic_pgts)
23083 .fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0
23084
23085- .data
23086+ .section .rodata,"a",@progbits
23087
23088-#ifndef CONFIG_XEN
23089 NEXT_PAGE(init_level4_pgt)
23090- .fill 512,8,0
23091-#else
23092-NEXT_PAGE(init_level4_pgt)
23093- .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
23094 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
23095 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
23096+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
23097+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
23098+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
23099+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
23100+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
23101+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
23102 .org init_level4_pgt + L4_START_KERNEL*8, 0
23103 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
23104 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
23105
23106+#ifdef CONFIG_PAX_PER_CPU_PGD
23107+NEXT_PAGE(cpu_pgd)
23108+ .rept 2*NR_CPUS
23109+ .fill 512,8,0
23110+ .endr
23111+#endif
23112+
23113 NEXT_PAGE(level3_ident_pgt)
23114 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
23115+#ifdef CONFIG_XEN
23116 .fill 511, 8, 0
23117+#else
23118+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
23119+ .fill 510,8,0
23120+#endif
23121+
23122+NEXT_PAGE(level3_vmalloc_start_pgt)
23123+ .fill 512,8,0
23124+
23125+NEXT_PAGE(level3_vmalloc_end_pgt)
23126+ .fill 512,8,0
23127+
23128+NEXT_PAGE(level3_vmemmap_pgt)
23129+ .fill L3_VMEMMAP_START,8,0
23130+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
23131+
23132 NEXT_PAGE(level2_ident_pgt)
23133- /* Since I easily can, map the first 1G.
23134+ /* Since I easily can, map the first 2G.
23135 * Don't set NX because code runs from these pages.
23136 */
23137- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
23138-#endif
23139+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
23140
23141 NEXT_PAGE(level3_kernel_pgt)
23142 .fill L3_START_KERNEL,8,0
23143@@ -473,6 +526,9 @@ NEXT_PAGE(level3_kernel_pgt)
23144 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
23145 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
23146
23147+NEXT_PAGE(level2_vmemmap_pgt)
23148+ .fill 512,8,0
23149+
23150 NEXT_PAGE(level2_kernel_pgt)
23151 /*
23152 * 512 MB kernel mapping. We spend a full page on this pagetable
23153@@ -488,39 +544,70 @@ NEXT_PAGE(level2_kernel_pgt)
23154 KERNEL_IMAGE_SIZE/PMD_SIZE)
23155
23156 NEXT_PAGE(level2_fixmap_pgt)
23157- .fill 506,8,0
23158- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
23159- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
23160- .fill 5,8,0
23161+ .fill 507,8,0
23162+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
23163+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
23164+ .fill 4,8,0
23165
23166-NEXT_PAGE(level1_fixmap_pgt)
23167+NEXT_PAGE(level1_vsyscall_pgt)
23168 .fill 512,8,0
23169
23170 #undef PMDS
23171
23172- .data
23173+ .align PAGE_SIZE
23174+ENTRY(cpu_gdt_table)
23175+ .rept NR_CPUS
23176+ .quad 0x0000000000000000 /* NULL descriptor */
23177+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
23178+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
23179+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
23180+ .quad 0x00cffb000000ffff /* __USER32_CS */
23181+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
23182+ .quad 0x00affb000000ffff /* __USER_CS */
23183+
23184+#ifdef CONFIG_PAX_KERNEXEC
23185+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
23186+#else
23187+ .quad 0x0 /* unused */
23188+#endif
23189+
23190+ .quad 0,0 /* TSS */
23191+ .quad 0,0 /* LDT */
23192+ .quad 0,0,0 /* three TLS descriptors */
23193+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
23194+ /* asm/segment.h:GDT_ENTRIES must match this */
23195+
23196+#ifdef CONFIG_PAX_MEMORY_UDEREF
23197+ .quad 0x00cf93000000ffff /* __UDEREF_KERNEL_DS */
23198+#else
23199+ .quad 0x0 /* unused */
23200+#endif
23201+
23202+ /* zero the remaining page */
23203+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
23204+ .endr
23205+
23206 .align 16
23207 .globl early_gdt_descr
23208 early_gdt_descr:
23209 .word GDT_ENTRIES*8-1
23210 early_gdt_descr_base:
23211- .quad INIT_PER_CPU_VAR(gdt_page)
23212+ .quad cpu_gdt_table
23213
23214 ENTRY(phys_base)
23215 /* This must match the first entry in level2_kernel_pgt */
23216 .quad 0x0000000000000000
23217
23218 #include "../../x86/xen/xen-head.S"
23219-
23220- .section .bss, "aw", @nobits
23221+
23222+ .section .rodata,"a",@progbits
23223+NEXT_PAGE(empty_zero_page)
23224+ .skip PAGE_SIZE
23225+
23226 .align PAGE_SIZE
23227 ENTRY(idt_table)
23228- .skip IDT_ENTRIES * 16
23229+ .fill 512,8,0
23230
23231 .align L1_CACHE_BYTES
23232 ENTRY(nmi_idt_table)
23233- .skip IDT_ENTRIES * 16
23234-
23235- __PAGE_ALIGNED_BSS
23236-NEXT_PAGE(empty_zero_page)
23237- .skip PAGE_SIZE
23238+ .fill 512,8,0
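
On the NX wiring added to secondary_startup_64 above: once CPUID reports NX support (`btl $(X86_FEATURE_NX & 31),%edi` tests bit 20 of the extended feature word, replacing the bare magic `$20`), the code sets EFER.NX and then sets bit 63 in the top-level entries covering the page-offset, vmalloc and vmemmap regions, so nothing mapped there can be executed. A one-line model of the page-table half:

#include <stdint.h>

#define _PAGE_BIT_NX 63

/* Mark a top-level page-table entry (and thus everything it maps)
 * non-executable. */
static inline uint64_t pgd_mknx(uint64_t pgd_entry)
{
	return pgd_entry | (1ULL << _PAGE_BIT_NX);
}
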
23239diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
23240index 0fa6912..b37438b 100644
23241--- a/arch/x86/kernel/i386_ksyms_32.c
23242+++ b/arch/x86/kernel/i386_ksyms_32.c
23243@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
23244 EXPORT_SYMBOL(cmpxchg8b_emu);
23245 #endif
23246
23247+EXPORT_SYMBOL_GPL(cpu_gdt_table);
23248+
23249 /* Networking helper routines. */
23250 EXPORT_SYMBOL(csum_partial_copy_generic);
23251+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
23252+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
23253
23254 EXPORT_SYMBOL(__get_user_1);
23255 EXPORT_SYMBOL(__get_user_2);
23256@@ -37,3 +41,11 @@ EXPORT_SYMBOL(strstr);
23257
23258 EXPORT_SYMBOL(csum_partial);
23259 EXPORT_SYMBOL(empty_zero_page);
23260+
23261+#ifdef CONFIG_PAX_KERNEXEC
23262+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
23263+#endif
23264+
23265+#ifdef CONFIG_PAX_PER_CPU_PGD
23266+EXPORT_SYMBOL(cpu_pgd);
23267+#endif
23268diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
23269index f7ea30d..6318acc 100644
23270--- a/arch/x86/kernel/i387.c
23271+++ b/arch/x86/kernel/i387.c
23272@@ -51,7 +51,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
23273 static inline bool interrupted_user_mode(void)
23274 {
23275 struct pt_regs *regs = get_irq_regs();
23276- return regs && user_mode_vm(regs);
23277+ return regs && user_mode(regs);
23278 }
23279
23280 /*
23281diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
23282index 9a5c460..84868423 100644
23283--- a/arch/x86/kernel/i8259.c
23284+++ b/arch/x86/kernel/i8259.c
23285@@ -110,7 +110,7 @@ static int i8259A_irq_pending(unsigned int irq)
23286 static void make_8259A_irq(unsigned int irq)
23287 {
23288 disable_irq_nosync(irq);
23289- io_apic_irqs &= ~(1<<irq);
23290+ io_apic_irqs &= ~(1UL<<irq);
23291 irq_set_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq,
23292 i8259A_chip.name);
23293 enable_irq(irq);
23294@@ -209,7 +209,7 @@ spurious_8259A_irq:
23295 "spurious 8259A interrupt: IRQ%d.\n", irq);
23296 spurious_irq_mask |= irqmask;
23297 }
23298- atomic_inc(&irq_err_count);
23299+ atomic_inc_unchecked(&irq_err_count);
23300 /*
23301 * Theoretically we do not have to handle this IRQ,
23302 * but in Linux this does not cause problems and is
23303@@ -333,14 +333,16 @@ static void init_8259A(int auto_eoi)
23304 /* (slave's support for AEOI in flat mode is to be investigated) */
23305 outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
23306
23307+ pax_open_kernel();
23308 if (auto_eoi)
23309 /*
23310 * In AEOI mode we just have to mask the interrupt
23311 * when acking.
23312 */
23313- i8259A_chip.irq_mask_ack = disable_8259A_irq;
23314+ *(void **)&i8259A_chip.irq_mask_ack = disable_8259A_irq;
23315 else
23316- i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
23317+ *(void **)&i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
23318+ pax_close_kernel();
23319
23320 udelay(100); /* wait for 8259A to initialize */
23321
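The i8259.c hunk shows the standard KERNEXEC pattern for writing to deliberately read-only data: irq_chip lives in a protected section, so the one-time assignment of `irq_mask_ack` is bracketed by pax_open_kernel()/pax_close_kernel() and performed through a `*(void **)&` cast to get past const. A rough userspace stand-in for the bracket, assuming the pair works by briefly lifting write protection (the kernel version presumably toggles CR0.WP rather than calling mprotect):

#include <stdint.h>
#include <sys/mman.h>

#define PAGE_MASK_4K (~(uintptr_t)4095)	/* assumes 4 KiB pages */

static void open_kernel_model(void *p)	/* make the page writable */
{
	(void)mprotect((void *)((uintptr_t)p & PAGE_MASK_4K), 4096,
		       PROT_READ | PROT_WRITE);
}

static void close_kernel_model(void *p)	/* restore read-only */
{
	(void)mprotect((void *)((uintptr_t)p & PAGE_MASK_4K), 4096,
		       PROT_READ);
}
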
23322diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
23323index a979b5b..1d6db75 100644
23324--- a/arch/x86/kernel/io_delay.c
23325+++ b/arch/x86/kernel/io_delay.c
23326@@ -58,7 +58,7 @@ static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id)
23327 * Quirk table for systems that misbehave (lock up, etc.) if port
23328 * 0x80 is used:
23329 */
23330-static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
23331+static const struct dmi_system_id __initconst io_delay_0xed_port_dmi_table[] = {
23332 {
23333 .callback = dmi_io_delay_0xed_port,
23334 .ident = "Compaq Presario V6000",
23335diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
23336index 4ddaf66..6292f4e 100644
23337--- a/arch/x86/kernel/ioport.c
23338+++ b/arch/x86/kernel/ioport.c
23339@@ -6,6 +6,7 @@
23340 #include <linux/sched.h>
23341 #include <linux/kernel.h>
23342 #include <linux/capability.h>
23343+#include <linux/security.h>
23344 #include <linux/errno.h>
23345 #include <linux/types.h>
23346 #include <linux/ioport.h>
23347@@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
23348
23349 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
23350 return -EINVAL;
23351+#ifdef CONFIG_GRKERNSEC_IO
23352+ if (turn_on && grsec_disable_privio) {
23353+ gr_handle_ioperm();
23354+ return -EPERM;
23355+ }
23356+#endif
23357 if (turn_on && !capable(CAP_SYS_RAWIO))
23358 return -EPERM;
23359
23360@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
23361 * because the ->io_bitmap_max value must match the bitmap
23362 * contents:
23363 */
23364- tss = &per_cpu(init_tss, get_cpu());
23365+ tss = init_tss + get_cpu();
23366
23367 if (turn_on)
23368 bitmap_clear(t->io_bitmap_ptr, from, num);
23369@@ -103,6 +110,12 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
23370 return -EINVAL;
23371 /* Trying to gain more privileges? */
23372 if (level > old) {
23373+#ifdef CONFIG_GRKERNSEC_IO
23374+ if (grsec_disable_privio) {
23375+ gr_handle_iopl();
23376+ return -EPERM;
23377+ }
23378+#endif
23379 if (!capable(CAP_SYS_RAWIO))
23380 return -EPERM;
23381 }
23382diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
23383index ac0631d..ff7cb62 100644
23384--- a/arch/x86/kernel/irq.c
23385+++ b/arch/x86/kernel/irq.c
23386@@ -18,7 +18,7 @@
23387 #include <asm/mce.h>
23388 #include <asm/hw_irq.h>
23389
23390-atomic_t irq_err_count;
23391+atomic_unchecked_t irq_err_count;
23392
23393 /* Function pointer for generic interrupt vector handling */
23394 void (*x86_platform_ipi_callback)(void) = NULL;
23395@@ -122,9 +122,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
23396 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
23397 seq_printf(p, " Machine check polls\n");
23398 #endif
23399- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
23400+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
23401 #if defined(CONFIG_X86_IO_APIC)
23402- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
23403+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
23404 #endif
23405 return 0;
23406 }
23407@@ -164,7 +164,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
23408
23409 u64 arch_irq_stat(void)
23410 {
23411- u64 sum = atomic_read(&irq_err_count);
23412+ u64 sum = atomic_read_unchecked(&irq_err_count);
23413 return sum;
23414 }
23415
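irq_err_count switching to atomic_unchecked_t is part of the PaX REFCOUNT scheme: ordinary atomic_t operations gain overflow trapping, and counters where wraparound is harmless, such as error tallies, opt out through the _unchecked variants. A hypothetical minimal definition of the opt-out type, using GCC/Clang atomic builtins:

/* Same layout as atomic_t, but its helpers never trap on overflow. */
typedef struct {
	int counter;
} atomic_unchecked_t;

#define atomic_read_unchecked(v) \
	__atomic_load_n(&(v)->counter, __ATOMIC_RELAXED)
#define atomic_inc_unchecked(v) \
	((void)__atomic_fetch_add(&(v)->counter, 1, __ATOMIC_RELAXED))
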
23416diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
23417index 344faf8..355f60d 100644
23418--- a/arch/x86/kernel/irq_32.c
23419+++ b/arch/x86/kernel/irq_32.c
23420@@ -39,7 +39,7 @@ static int check_stack_overflow(void)
23421 __asm__ __volatile__("andl %%esp,%0" :
23422 "=r" (sp) : "0" (THREAD_SIZE - 1));
23423
23424- return sp < (sizeof(struct thread_info) + STACK_WARN);
23425+ return sp < STACK_WARN;
23426 }
23427
23428 static void print_stack_overflow(void)
23429@@ -59,8 +59,8 @@ static inline void print_stack_overflow(void) { }
23430 * per-CPU IRQ handling contexts (thread information and stack)
23431 */
23432 union irq_ctx {
23433- struct thread_info tinfo;
23434- u32 stack[THREAD_SIZE/sizeof(u32)];
23435+ unsigned long previous_esp;
23436+ u32 stack[THREAD_SIZE/sizeof(u32)];
23437 } __attribute__((aligned(THREAD_SIZE)));
23438
23439 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
23440@@ -80,10 +80,9 @@ static void call_on_stack(void *func, void *stack)
23441 static inline int
23442 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
23443 {
23444- union irq_ctx *curctx, *irqctx;
23445+ union irq_ctx *irqctx;
23446 u32 *isp, arg1, arg2;
23447
23448- curctx = (union irq_ctx *) current_thread_info();
23449 irqctx = __this_cpu_read(hardirq_ctx);
23450
23451 /*
23452@@ -92,16 +91,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
23453 * handler) we can't do that and just have to keep using the
23454 * current stack (which is the irq stack already after all)
23455 */
23456- if (unlikely(curctx == irqctx))
23457+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
23458 return 0;
23459
23460 /* build the stack frame on the IRQ stack */
23461- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
23462- irqctx->tinfo.task = curctx->tinfo.task;
23463- irqctx->tinfo.previous_esp = current_stack_pointer;
23464+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
23465+ irqctx->previous_esp = current_stack_pointer;
23466
23467- /* Copy the preempt_count so that the [soft]irq checks work. */
23468- irqctx->tinfo.preempt_count = curctx->tinfo.preempt_count;
23469+#ifdef CONFIG_PAX_MEMORY_UDEREF
23470+ __set_fs(MAKE_MM_SEG(0));
23471+#endif
23472
23473 if (unlikely(overflow))
23474 call_on_stack(print_stack_overflow, isp);
23475@@ -113,6 +112,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
23476 : "0" (irq), "1" (desc), "2" (isp),
23477 "D" (desc->handle_irq)
23478 : "memory", "cc", "ecx");
23479+
23480+#ifdef CONFIG_PAX_MEMORY_UDEREF
23481+ __set_fs(current_thread_info()->addr_limit);
23482+#endif
23483+
23484 return 1;
23485 }
23486
23487@@ -121,29 +125,14 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
23488 */
23489 void __cpuinit irq_ctx_init(int cpu)
23490 {
23491- union irq_ctx *irqctx;
23492-
23493 if (per_cpu(hardirq_ctx, cpu))
23494 return;
23495
23496- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
23497- THREADINFO_GFP,
23498- THREAD_SIZE_ORDER));
23499- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
23500- irqctx->tinfo.cpu = cpu;
23501- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
23502- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
23503-
23504- per_cpu(hardirq_ctx, cpu) = irqctx;
23505-
23506- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
23507- THREADINFO_GFP,
23508- THREAD_SIZE_ORDER));
23509- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
23510- irqctx->tinfo.cpu = cpu;
23511- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
23512-
23513- per_cpu(softirq_ctx, cpu) = irqctx;
23514+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
23515+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
23516+
23517+ printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
23518+ cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
23519
23520 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
23521 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
23522@@ -152,7 +141,6 @@ void __cpuinit irq_ctx_init(int cpu)
23523 asmlinkage void do_softirq(void)
23524 {
23525 unsigned long flags;
23526- struct thread_info *curctx;
23527 union irq_ctx *irqctx;
23528 u32 *isp;
23529
23530@@ -162,15 +150,22 @@ asmlinkage void do_softirq(void)
23531 local_irq_save(flags);
23532
23533 if (local_softirq_pending()) {
23534- curctx = current_thread_info();
23535 irqctx = __this_cpu_read(softirq_ctx);
23536- irqctx->tinfo.task = curctx->task;
23537- irqctx->tinfo.previous_esp = current_stack_pointer;
23538+ irqctx->previous_esp = current_stack_pointer;
23539
23540 /* build the stack frame on the softirq stack */
23541- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
23542+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
23543+
23544+#ifdef CONFIG_PAX_MEMORY_UDEREF
23545+ __set_fs(MAKE_MM_SEG(0));
23546+#endif
23547
23548 call_on_stack(__do_softirq, isp);
23549+
23550+#ifdef CONFIG_PAX_MEMORY_UDEREF
23551+ __set_fs(current_thread_info()->addr_limit);
23552+#endif
23553+
23554 /*
23555 * Shouldn't happen, we returned above if in_interrupt():
23556 */
23557@@ -191,7 +186,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
23558 if (unlikely(!desc))
23559 return false;
23560
23561- if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
23562+ if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
23563 if (unlikely(overflow))
23564 print_stack_overflow();
23565 desc->handle_irq(irq, desc);
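
A note on the irq_32.c rewrite above: with thread_info no longer parked at the base of each IRQ stack, "are we already on the IRQ stack?" can no longer be answered by comparing thread_info pointers, so it becomes plain pointer arithmetic against the THREAD_SIZE-aligned irqctx block. A compact model of that test:

#include <stdbool.h>
#include <stdint.h>

#define THREAD_SIZE 8192UL	/* typical i386 value; illustrative */

/* True when sp already lies inside the irqctx block; the unsigned
 * subtraction makes "sp below the block" wrap to a huge value and
 * fail the comparison. */
static bool on_irq_stack(uintptr_t sp, uintptr_t irqctx_base)
{
	return sp - irqctx_base < THREAD_SIZE;
}
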
23566diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
23567index d04d3ec..ea4b374 100644
23568--- a/arch/x86/kernel/irq_64.c
23569+++ b/arch/x86/kernel/irq_64.c
23570@@ -44,7 +44,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
23571 u64 estack_top, estack_bottom;
23572 u64 curbase = (u64)task_stack_page(current);
23573
23574- if (user_mode_vm(regs))
23575+ if (user_mode(regs))
23576 return;
23577
23578 if (regs->sp >= curbase + sizeof(struct thread_info) +
23579diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
23580index dc1404b..bbc43e7 100644
23581--- a/arch/x86/kernel/kdebugfs.c
23582+++ b/arch/x86/kernel/kdebugfs.c
23583@@ -27,7 +27,7 @@ struct setup_data_node {
23584 u32 len;
23585 };
23586
23587-static ssize_t setup_data_read(struct file *file, char __user *user_buf,
23588+static ssize_t __size_overflow(3) setup_data_read(struct file *file, char __user *user_buf,
23589 size_t count, loff_t *ppos)
23590 {
23591 struct setup_data_node *node = file->private_data;
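
The __size_overflow(3) marker above belongs to the size_overflow GCC plugin shipped with this patch set: it instructs the plugin to instrument the arithmetic feeding the function's third parameter (`count`) so that an overflow traps instead of silently wrapping. Without the plugin the annotation must expand to nothing; a plausible fallback definition, named here only for illustration:

#ifdef SIZE_OVERFLOW_PLUGIN
#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
#else
#define __size_overflow(...)
#endif
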
23592diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
23593index 836f832..a8bda67 100644
23594--- a/arch/x86/kernel/kgdb.c
23595+++ b/arch/x86/kernel/kgdb.c
23596@@ -127,11 +127,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
23597 #ifdef CONFIG_X86_32
23598 switch (regno) {
23599 case GDB_SS:
23600- if (!user_mode_vm(regs))
23601+ if (!user_mode(regs))
23602 *(unsigned long *)mem = __KERNEL_DS;
23603 break;
23604 case GDB_SP:
23605- if (!user_mode_vm(regs))
23606+ if (!user_mode(regs))
23607 *(unsigned long *)mem = kernel_stack_pointer(regs);
23608 break;
23609 case GDB_GS:
23610@@ -229,7 +229,10 @@ static void kgdb_correct_hw_break(void)
23611 bp->attr.bp_addr = breakinfo[breakno].addr;
23612 bp->attr.bp_len = breakinfo[breakno].len;
23613 bp->attr.bp_type = breakinfo[breakno].type;
23614- info->address = breakinfo[breakno].addr;
23615+ if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE)
23616+ info->address = ktla_ktva(breakinfo[breakno].addr);
23617+ else
23618+ info->address = breakinfo[breakno].addr;
23619 info->len = breakinfo[breakno].len;
23620 info->type = breakinfo[breakno].type;
23621 val = arch_install_hw_breakpoint(bp);
23622@@ -476,12 +479,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
23623 case 'k':
23624 /* clear the trace bit */
23625 linux_regs->flags &= ~X86_EFLAGS_TF;
23626- atomic_set(&kgdb_cpu_doing_single_step, -1);
23627+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
23628
23629 /* set the trace bit if we're stepping */
23630 if (remcomInBuffer[0] == 's') {
23631 linux_regs->flags |= X86_EFLAGS_TF;
23632- atomic_set(&kgdb_cpu_doing_single_step,
23633+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
23634 raw_smp_processor_id());
23635 }
23636
23637@@ -546,7 +549,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
23638
23639 switch (cmd) {
23640 case DIE_DEBUG:
23641- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
23642+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
23643 if (user_mode(regs))
23644 return single_step_cont(regs, args);
23645 break;
23646@@ -751,11 +754,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
23647 #endif /* CONFIG_DEBUG_RODATA */
23648
23649 bpt->type = BP_BREAKPOINT;
23650- err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
23651+ err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr),
23652 BREAK_INSTR_SIZE);
23653 if (err)
23654 return err;
23655- err = probe_kernel_write((char *)bpt->bpt_addr,
23656+ err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
23657 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
23658 #ifdef CONFIG_DEBUG_RODATA
23659 if (!err)
23660@@ -768,7 +771,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
23661 return -EBUSY;
23662 text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
23663 BREAK_INSTR_SIZE);
23664- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
23665+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
23666 if (err)
23667 return err;
23668 if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
23669@@ -793,13 +796,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
23670 if (mutex_is_locked(&text_mutex))
23671 goto knl_write;
23672 text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
23673- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
23674+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
23675 if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
23676 goto knl_write;
23677 return err;
23678 knl_write:
23679 #endif /* CONFIG_DEBUG_RODATA */
23680- return probe_kernel_write((char *)bpt->bpt_addr,
23681+ return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
23682 (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
23683 }
23684
23685diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
23686index 211bce4..6e2580a 100644
23687--- a/arch/x86/kernel/kprobes/core.c
23688+++ b/arch/x86/kernel/kprobes/core.c
23689@@ -119,9 +119,12 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
23690 s32 raddr;
23691 } __packed *insn;
23692
23693- insn = (struct __arch_relative_insn *)from;
23694+ insn = (struct __arch_relative_insn *)ktla_ktva(from);
23695+
23696+ pax_open_kernel();
23697 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
23698 insn->op = op;
23699+ pax_close_kernel();
23700 }
23701
23702 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
23703@@ -164,7 +167,7 @@ int __kprobes can_boost(kprobe_opcode_t *opcodes)
23704 kprobe_opcode_t opcode;
23705 kprobe_opcode_t *orig_opcodes = opcodes;
23706
23707- if (search_exception_tables((unsigned long)opcodes))
23708+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
23709 return 0; /* Page fault may occur on this address. */
23710
23711 retry:
23712@@ -238,9 +241,9 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
23713 * for the first byte, we can recover the original instruction
23714 * from it and kp->opcode.
23715 */
23716- memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
23717+ memcpy(buf, ktla_ktva(kp->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
23718 buf[0] = kp->opcode;
23719- return (unsigned long)buf;
23720+ return ktva_ktla((unsigned long)buf);
23721 }
23722
23723 /*
23724@@ -332,7 +335,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
23725 /* Another subsystem puts a breakpoint, failed to recover */
23726 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
23727 return 0;
23728+ pax_open_kernel();
23729 memcpy(dest, insn.kaddr, insn.length);
23730+ pax_close_kernel();
23731
23732 #ifdef CONFIG_X86_64
23733 if (insn_rip_relative(&insn)) {
23734@@ -359,7 +364,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
23735 return 0;
23736 }
23737 disp = (u8 *) dest + insn_offset_displacement(&insn);
23738+ pax_open_kernel();
23739 *(s32 *) disp = (s32) newdisp;
23740+ pax_close_kernel();
23741 }
23742 #endif
23743 return insn.length;
23744@@ -498,7 +505,7 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
23745 * nor set current_kprobe, because it doesn't use single
23746 * stepping.
23747 */
23748- regs->ip = (unsigned long)p->ainsn.insn;
23749+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
23750 preempt_enable_no_resched();
23751 return;
23752 }
23753@@ -515,9 +522,9 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
23754 regs->flags &= ~X86_EFLAGS_IF;
23755 /* single step inline if the instruction is an int3 */
23756 if (p->opcode == BREAKPOINT_INSTRUCTION)
23757- regs->ip = (unsigned long)p->addr;
23758+ regs->ip = ktla_ktva((unsigned long)p->addr);
23759 else
23760- regs->ip = (unsigned long)p->ainsn.insn;
23761+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
23762 }
23763
23764 /*
23765@@ -596,7 +603,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
23766 setup_singlestep(p, regs, kcb, 0);
23767 return 1;
23768 }
23769- } else if (*addr != BREAKPOINT_INSTRUCTION) {
23770+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
23771 /*
23772 * The breakpoint instruction was removed right
23773 * after we hit it. Another cpu has removed
23774@@ -642,6 +649,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
23775 " movq %rax, 152(%rsp)\n"
23776 RESTORE_REGS_STRING
23777 " popfq\n"
23778+#ifdef KERNEXEC_PLUGIN
23779+ " btsq $63,(%rsp)\n"
23780+#endif
23781 #else
23782 " pushf\n"
23783 SAVE_REGS_STRING
23784@@ -779,7 +789,7 @@ static void __kprobes
23785 resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
23786 {
23787 unsigned long *tos = stack_addr(regs);
23788- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
23789+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
23790 unsigned long orig_ip = (unsigned long)p->addr;
23791 kprobe_opcode_t *insn = p->ainsn.insn;
23792
23793@@ -961,7 +971,7 @@ kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *d
23794 struct die_args *args = data;
23795 int ret = NOTIFY_DONE;
23796
23797- if (args->regs && user_mode_vm(args->regs))
23798+ if (args->regs && user_mode(args->regs))
23799 return ret;
23800
23801 switch (val) {
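
The ktla_ktva()/ktva_ktla() helpers that these kprobes hunks lean on are defined elsewhere in the patch; on i386 with KERNEXEC they translate between the kernel text's linear address and its virtual alias, and degrade to identity maps otherwise. A minimal userspace sketch of the assumed definitions, with illustrative constants standing in for the real config values:

#include <stdio.h>

/* Illustrative values; the real ones come from the kernel configuration. */
#define PAGE_OFFSET        0xc0000000UL
#define LOAD_PHYSICAL_ADDR 0x1000000UL

/* Assumed i386 KERNEXEC form; without KERNEXEC both are identity macros. */
#define ktla_ktva(addr) ((addr) + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
#define ktva_ktla(addr) ((addr) - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)

int main(void)
{
	unsigned long text_la = 0x01000000UL;	/* a kernel text linear address */
	unsigned long alias   = ktla_ktva(text_la);

	/* the two macros are inverses of each other */
	printf("la=%#lx va=%#lx back=%#lx\n",
	       text_la, alias, ktva_ktla(alias));
	return 0;
}
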
23802diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
23803index 76dc6f0..66bdfc3 100644
23804--- a/arch/x86/kernel/kprobes/opt.c
23805+++ b/arch/x86/kernel/kprobes/opt.c
23806@@ -79,6 +79,7 @@ found:
23807 /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
23808 static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
23809 {
23810+ pax_open_kernel();
23811 #ifdef CONFIG_X86_64
23812 *addr++ = 0x48;
23813 *addr++ = 0xbf;
23814@@ -86,6 +87,7 @@ static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long v
23815 *addr++ = 0xb8;
23816 #endif
23817 *(unsigned long *)addr = val;
23818+ pax_close_kernel();
23819 }
23820
23821 static void __used __kprobes kprobes_optinsn_template_holder(void)
23822@@ -338,7 +340,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
23823 * Verify if the address gap is in 2GB range, because this uses
23824 * a relative jump.
23825 */
23826- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
23827+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
23828 if (abs(rel) > 0x7fffffff)
23829 return -ERANGE;
23830
23831@@ -353,16 +355,18 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
23832 op->optinsn.size = ret;
23833
23834 /* Copy arch-dep-instance from template */
23835- memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
23836+ pax_open_kernel();
23837+ memcpy(buf, ktla_ktva(&optprobe_template_entry), TMPL_END_IDX);
23838+ pax_close_kernel();
23839
23840 /* Set probe information */
23841 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
23842
23843 /* Set probe function call */
23844- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
23845+ synthesize_relcall(ktva_ktla(buf) + TMPL_CALL_IDX, optimized_callback);
23846
23847 /* Set returning jmp instruction at the tail of out-of-line buffer */
23848- synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
23849+ synthesize_reljump(ktva_ktla(buf) + TMPL_END_IDX + op->optinsn.size,
23850 (u8 *)op->kp.addr + op->optinsn.size);
23851
23852 flush_icache_range((unsigned long) buf,
23853@@ -385,7 +389,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
23854 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
23855
23856 /* Backup instructions which will be replaced by jump address */
23857- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
23858+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
23859 RELATIVE_ADDR_SIZE);
23860
23861 insn_buf[0] = RELATIVEJUMP_OPCODE;
23862@@ -483,7 +487,7 @@ setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
23863 /* This kprobe is really able to run optimized path. */
23864 op = container_of(p, struct optimized_kprobe, kp);
23865 /* Detour through copied instructions */
23866- regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
23867+ regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX;
23868 if (!reenter)
23869 reset_current_kprobe();
23870 preempt_enable_no_resched();
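
The pax_open_kernel()/pax_close_kernel() pair that brackets the instruction-slot writes above makes normally read-only kernel memory briefly writable. On native x86 this amounts to toggling CR0.WP; a kernel-context sketch of the assumed implementation (it only builds inside the kernel):

static inline unsigned long native_pax_open_kernel(void)
{
	unsigned long cr0;

	preempt_disable();
	barrier();
	cr0 = read_cr0() ^ X86_CR0_WP;	/* clear WP ... */
	BUG_ON(cr0 & X86_CR0_WP);	/* ... and insist it was set before */
	write_cr0(cr0);
	return cr0 ^ X86_CR0_WP;
}

static inline unsigned long native_pax_close_kernel(void)
{
	unsigned long cr0;

	cr0 = read_cr0() ^ X86_CR0_WP;	/* set WP again */
	BUG_ON(!(cr0 & X86_CR0_WP));
	write_cr0(cr0);
	barrier();
	preempt_enable_no_resched();
	return cr0 ^ X86_CR0_WP;
}
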
23871diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
23872index cd6d9a5..16245a4 100644
23873--- a/arch/x86/kernel/kvm.c
23874+++ b/arch/x86/kernel/kvm.c
23875@@ -455,7 +455,7 @@ static int __cpuinit kvm_cpu_notify(struct notifier_block *self,
23876 return NOTIFY_OK;
23877 }
23878
23879-static struct notifier_block __cpuinitdata kvm_cpu_notifier = {
23880+static struct notifier_block kvm_cpu_notifier = {
23881 .notifier_call = kvm_cpu_notify,
23882 };
23883 #endif
23884diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
23885index ebc9873..1b9724b 100644
23886--- a/arch/x86/kernel/ldt.c
23887+++ b/arch/x86/kernel/ldt.c
23888@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
23889 if (reload) {
23890 #ifdef CONFIG_SMP
23891 preempt_disable();
23892- load_LDT(pc);
23893+ load_LDT_nolock(pc);
23894 if (!cpumask_equal(mm_cpumask(current->mm),
23895 cpumask_of(smp_processor_id())))
23896 smp_call_function(flush_ldt, current->mm, 1);
23897 preempt_enable();
23898 #else
23899- load_LDT(pc);
23900+ load_LDT_nolock(pc);
23901 #endif
23902 }
23903 if (oldsize) {
23904@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
23905 return err;
23906
23907 for (i = 0; i < old->size; i++)
23908- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
23909+ write_ldt_entry(new->ldt, i, old->ldt + i);
23910 return 0;
23911 }
23912
23913@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
23914 retval = copy_ldt(&mm->context, &old_mm->context);
23915 mutex_unlock(&old_mm->context.lock);
23916 }
23917+
23918+ if (tsk == current) {
23919+ mm->context.vdso = 0;
23920+
23921+#ifdef CONFIG_X86_32
23922+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23923+ mm->context.user_cs_base = 0UL;
23924+ mm->context.user_cs_limit = ~0UL;
23925+
23926+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
23927+ cpus_clear(mm->context.cpu_user_cs_mask);
23928+#endif
23929+
23930+#endif
23931+#endif
23932+
23933+ }
23934+
23935 return retval;
23936 }
23937
23938@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
23939 }
23940 }
23941
23942+#ifdef CONFIG_PAX_SEGMEXEC
23943+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
23944+ error = -EINVAL;
23945+ goto out_unlock;
23946+ }
23947+#endif
23948+
23949 fill_ldt(&ldt, &ldt_info);
23950 if (oldmode)
23951 ldt.avl = 0;
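
The copy_ldt() hunk above drops the explicit `* LDT_ENTRY_SIZE` scaling, presumably because the patch retypes context.ldt from void * to struct desc_struct * elsewhere; typed pointer arithmetic then scales by the element size on its own, and scaling twice would overshoot. A runnable illustration:

#include <stdio.h>

struct desc_struct { unsigned int a, b; };	/* 8 bytes, like an LDT entry */
#define LDT_ENTRY_SIZE sizeof(struct desc_struct)

int main(void)
{
	struct desc_struct ldt[32];
	struct desc_struct *base = ldt;
	int i = 2;

	/* typed pointer arithmetic already multiplies by sizeof(*base) */
	printf("base + i            -> %ld bytes in\n",
	       (long)((char *)(base + i) - (char *)base));
	/* multiplying again by the entry size lands eight times too far */
	printf("base + i*ENTRY_SIZE -> %ld bytes in\n",
	       (long)((char *)(base + i * LDT_ENTRY_SIZE) - (char *)base));
	return 0;
}
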
23952diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
23953index 5b19e4d..6476a76 100644
23954--- a/arch/x86/kernel/machine_kexec_32.c
23955+++ b/arch/x86/kernel/machine_kexec_32.c
23956@@ -26,7 +26,7 @@
23957 #include <asm/cacheflush.h>
23958 #include <asm/debugreg.h>
23959
23960-static void set_idt(void *newidt, __u16 limit)
23961+static void set_idt(struct desc_struct *newidt, __u16 limit)
23962 {
23963 struct desc_ptr curidt;
23964
23965@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
23966 }
23967
23968
23969-static void set_gdt(void *newgdt, __u16 limit)
23970+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
23971 {
23972 struct desc_ptr curgdt;
23973
23974@@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
23975 }
23976
23977 control_page = page_address(image->control_code_page);
23978- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
23979+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
23980
23981 relocate_kernel_ptr = control_page;
23982 page_list[PA_CONTROL_PAGE] = __pa(control_page);
23983diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
23984index 22db92b..d546bec 100644
23985--- a/arch/x86/kernel/microcode_core.c
23986+++ b/arch/x86/kernel/microcode_core.c
23987@@ -513,7 +513,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
23988 return NOTIFY_OK;
23989 }
23990
23991-static struct notifier_block __refdata mc_cpu_notifier = {
23992+static struct notifier_block mc_cpu_notifier = {
23993 .notifier_call = mc_cpu_callback,
23994 };
23995
23996diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
23997index 5fb2ceb..3ae90bb 100644
23998--- a/arch/x86/kernel/microcode_intel.c
23999+++ b/arch/x86/kernel/microcode_intel.c
24000@@ -293,13 +293,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
24001
24002 static int get_ucode_user(void *to, const void *from, size_t n)
24003 {
24004- return copy_from_user(to, from, n);
24005+ return copy_from_user(to, (const void __force_user *)from, n);
24006 }
24007
24008 static enum ucode_state
24009 request_microcode_user(int cpu, const void __user *buf, size_t size)
24010 {
24011- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
24012+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
24013 }
24014
24015 static void microcode_fini_cpu(int cpu)
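
The __force_user/__force_kernel annotations above are sparse address-space casts: they mark a deliberate crossing between kernel and user pointer spaces rather than an accident the checker should flag. A sketch of the assumed definitions, layered on the stock __user/__kernel markers:

#ifdef __CHECKER__
# define __user   __attribute__((noderef, address_space(1)))
# define __kernel __attribute__((address_space(0)))
# define __force  __attribute__((force))
#else
# define __user
# define __kernel
# define __force
#endif

/* assumed grsecurity additions: an explicit, greppable cross-space cast */
#define __force_user   __force __user
#define __force_kernel __force __kernel
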
24016diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
24017index 216a4d7..228255a 100644
24018--- a/arch/x86/kernel/module.c
24019+++ b/arch/x86/kernel/module.c
24020@@ -43,15 +43,60 @@ do { \
24021 } while (0)
24022 #endif
24023
24024-void *module_alloc(unsigned long size)
24025+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
24026 {
24027- if (PAGE_ALIGN(size) > MODULES_LEN)
24028+ if (!size || PAGE_ALIGN(size) > MODULES_LEN)
24029 return NULL;
24030 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
24031- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
24032+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
24033 -1, __builtin_return_address(0));
24034 }
24035
24036+void *module_alloc(unsigned long size)
24037+{
24038+
24039+#ifdef CONFIG_PAX_KERNEXEC
24040+ return __module_alloc(size, PAGE_KERNEL);
24041+#else
24042+ return __module_alloc(size, PAGE_KERNEL_EXEC);
24043+#endif
24044+
24045+}
24046+
24047+#ifdef CONFIG_PAX_KERNEXEC
24048+#ifdef CONFIG_X86_32
24049+void *module_alloc_exec(unsigned long size)
24050+{
24051+ struct vm_struct *area;
24052+
24053+ if (size == 0)
24054+ return NULL;
24055+
24056+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
24057+ return area ? area->addr : NULL;
24058+}
24059+EXPORT_SYMBOL(module_alloc_exec);
24060+
24061+void module_free_exec(struct module *mod, void *module_region)
24062+{
24063+ vunmap(module_region);
24064+}
24065+EXPORT_SYMBOL(module_free_exec);
24066+#else
24067+void module_free_exec(struct module *mod, void *module_region)
24068+{
24069+ module_free(mod, module_region);
24070+}
24071+EXPORT_SYMBOL(module_free_exec);
24072+
24073+void *module_alloc_exec(unsigned long size)
24074+{
24075+ return __module_alloc(size, PAGE_KERNEL_RX);
24076+}
24077+EXPORT_SYMBOL(module_alloc_exec);
24078+#endif
24079+#endif
24080+
24081 #ifdef CONFIG_X86_32
24082 int apply_relocate(Elf32_Shdr *sechdrs,
24083 const char *strtab,
24084@@ -62,14 +107,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
24085 unsigned int i;
24086 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
24087 Elf32_Sym *sym;
24088- uint32_t *location;
24089+ uint32_t *plocation, location;
24090
24091 DEBUGP("Applying relocate section %u to %u\n",
24092 relsec, sechdrs[relsec].sh_info);
24093 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
24094 /* This is where to make the change */
24095- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
24096- + rel[i].r_offset;
24097+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
24098+ location = (uint32_t)plocation;
24099+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
24100+ plocation = ktla_ktva((void *)plocation);
24101 /* This is the symbol it is referring to. Note that all
24102 undefined symbols have been resolved. */
24103 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
24104@@ -78,11 +125,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
24105 switch (ELF32_R_TYPE(rel[i].r_info)) {
24106 case R_386_32:
24107 /* We add the value into the location given */
24108- *location += sym->st_value;
24109+ pax_open_kernel();
24110+ *plocation += sym->st_value;
24111+ pax_close_kernel();
24112 break;
24113 case R_386_PC32:
24114 /* Add the value, subtract its position */
24115- *location += sym->st_value - (uint32_t)location;
24116+ pax_open_kernel();
24117+ *plocation += sym->st_value - location;
24118+ pax_close_kernel();
24119 break;
24120 default:
24121 pr_err("%s: Unknown relocation: %u\n",
24122@@ -127,21 +178,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
24123 case R_X86_64_NONE:
24124 break;
24125 case R_X86_64_64:
24126+ pax_open_kernel();
24127 *(u64 *)loc = val;
24128+ pax_close_kernel();
24129 break;
24130 case R_X86_64_32:
24131+ pax_open_kernel();
24132 *(u32 *)loc = val;
24133+ pax_close_kernel();
24134 if (val != *(u32 *)loc)
24135 goto overflow;
24136 break;
24137 case R_X86_64_32S:
24138+ pax_open_kernel();
24139 *(s32 *)loc = val;
24140+ pax_close_kernel();
24141 if ((s64)val != *(s32 *)loc)
24142 goto overflow;
24143 break;
24144 case R_X86_64_PC32:
24145 val -= (u64)loc;
24146+ pax_open_kernel();
24147 *(u32 *)loc = val;
24148+ pax_close_kernel();
24149+
24150 #if 0
24151 if ((s64)val != *(s32 *)loc)
24152 goto overflow;
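
Under CONFIG_PAX_KERNEXEC the split above enforces W^X for module memory: plain module_alloc() now hands out writable but non-executable pages, and executable text must come from module_alloc_exec(). A kernel-context sketch of the resulting policy (names as in the hunk, not standalone-runnable):

/* writable module memory (data, bss): RW but never executable */
void *mod_data = module_alloc(size);		/* PAGE_KERNEL */

/* module text: executable but read-only */
void *mod_text = module_alloc_exec(size);	/* PAGE_KERNEL_RX on x86_64 */

/* later writes into mod_text, such as applying relocations, must be
 * bracketed with pax_open_kernel()/pax_close_kernel(), exactly as the
 * apply_relocate*() hunks above do */
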
24153diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
24154index ce13049..e2e9c3c 100644
24155--- a/arch/x86/kernel/msr.c
24156+++ b/arch/x86/kernel/msr.c
24157@@ -233,7 +233,7 @@ static int __cpuinit msr_class_cpu_callback(struct notifier_block *nfb,
24158 return notifier_from_errno(err);
24159 }
24160
24161-static struct notifier_block __refdata msr_class_cpu_notifier = {
24162+static struct notifier_block msr_class_cpu_notifier = {
24163 .notifier_call = msr_class_cpu_callback,
24164 };
24165
24166diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
24167index 6030805..2d33f21 100644
24168--- a/arch/x86/kernel/nmi.c
24169+++ b/arch/x86/kernel/nmi.c
24170@@ -105,7 +105,7 @@ static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2
24171 return handled;
24172 }
24173
24174-int __register_nmi_handler(unsigned int type, struct nmiaction *action)
24175+int __register_nmi_handler(unsigned int type, const struct nmiaction *action)
24176 {
24177 struct nmi_desc *desc = nmi_to_desc(type);
24178 unsigned long flags;
24179@@ -129,9 +129,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
24180 * event confuses some handlers (kdump uses this flag)
24181 */
24182 if (action->flags & NMI_FLAG_FIRST)
24183- list_add_rcu(&action->list, &desc->head);
24184+ pax_list_add_rcu((struct list_head *)&action->list, &desc->head);
24185 else
24186- list_add_tail_rcu(&action->list, &desc->head);
24187+ pax_list_add_tail_rcu((struct list_head *)&action->list, &desc->head);
24188
24189 spin_unlock_irqrestore(&desc->lock, flags);
24190 return 0;
24191@@ -154,7 +154,7 @@ void unregister_nmi_handler(unsigned int type, const char *name)
24192 if (!strcmp(n->name, name)) {
24193 WARN(in_nmi(),
24194 "Trying to free NMI (%s) from NMI context!\n", n->name);
24195- list_del_rcu(&n->list);
24196+ pax_list_del_rcu((struct list_head *)&n->list);
24197 break;
24198 }
24199 }
24200@@ -479,6 +479,17 @@ static inline void nmi_nesting_postprocess(void)
24201 dotraplinkage notrace __kprobes void
24202 do_nmi(struct pt_regs *regs, long error_code)
24203 {
24204+
24205+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
24206+ if (!user_mode(regs)) {
24207+ unsigned long cs = regs->cs & 0xFFFF;
24208+ unsigned long ip = ktva_ktla(regs->ip);
24209+
24210+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
24211+ regs->ip = ip;
24212+ }
24213+#endif
24214+
24215 nmi_nesting_preprocess(regs);
24216
24217 nmi_enter();
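
Taking a const nmiaction in __register_nmi_handler() lets callers keep their handler descriptors in read-only data, which is why the linkage above casts away const and goes through the pax_list_* helpers. A sketch, assuming these are the stock RCU list operations wrapped in the kernel-open bracket:

void pax_list_add_rcu(struct list_head *new, struct list_head *head)
{
	pax_open_kernel();
	list_add_rcu(new, head);
	pax_close_kernel();
}

void pax_list_del_rcu(struct list_head *entry)
{
	pax_open_kernel();
	list_del_rcu(entry);
	pax_close_kernel();
}
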
24218diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
24219index 6d9582e..f746287 100644
24220--- a/arch/x86/kernel/nmi_selftest.c
24221+++ b/arch/x86/kernel/nmi_selftest.c
24222@@ -43,7 +43,7 @@ static void __init init_nmi_testsuite(void)
24223 {
24224 /* trap all the unknown NMIs we may generate */
24225 register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk",
24226- __initdata);
24227+ __initconst);
24228 }
24229
24230 static void __init cleanup_nmi_testsuite(void)
24231@@ -66,7 +66,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
24232 unsigned long timeout;
24233
24234 if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
24235- NMI_FLAG_FIRST, "nmi_selftest", __initdata)) {
24236+ NMI_FLAG_FIRST, "nmi_selftest", __initconst)) {
24237 nmi_fail = FAILURE;
24238 return;
24239 }
24240diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
24241index 676b8c7..870ba04 100644
24242--- a/arch/x86/kernel/paravirt-spinlocks.c
24243+++ b/arch/x86/kernel/paravirt-spinlocks.c
24244@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
24245 arch_spin_lock(lock);
24246 }
24247
24248-struct pv_lock_ops pv_lock_ops = {
24249+struct pv_lock_ops pv_lock_ops __read_only = {
24250 #ifdef CONFIG_SMP
24251 .spin_is_locked = __ticket_spin_is_locked,
24252 .spin_is_contended = __ticket_spin_is_contended,
24253diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
24254index cd6de64..27c6af0 100644
24255--- a/arch/x86/kernel/paravirt.c
24256+++ b/arch/x86/kernel/paravirt.c
24257@@ -55,6 +55,9 @@ u64 _paravirt_ident_64(u64 x)
24258 {
24259 return x;
24260 }
24261+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
24262+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
24263+#endif
24264
24265 void __init default_banner(void)
24266 {
24267@@ -147,15 +150,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
24268 if (opfunc == NULL)
24269 /* If there's no function, patch it with a ud2a (BUG) */
24270 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
24271- else if (opfunc == _paravirt_nop)
24272+ else if (opfunc == (void *)_paravirt_nop)
24273 /* If the operation is a nop, then nop the callsite */
24274 ret = paravirt_patch_nop();
24275
24276 /* identity functions just return their single argument */
24277- else if (opfunc == _paravirt_ident_32)
24278+ else if (opfunc == (void *)_paravirt_ident_32)
24279 ret = paravirt_patch_ident_32(insnbuf, len);
24280- else if (opfunc == _paravirt_ident_64)
24281+ else if (opfunc == (void *)_paravirt_ident_64)
24282 ret = paravirt_patch_ident_64(insnbuf, len);
24283+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
24284+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
24285+ ret = paravirt_patch_ident_64(insnbuf, len);
24286+#endif
24287
24288 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
24289 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
24290@@ -180,7 +187,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
24291 if (insn_len > len || start == NULL)
24292 insn_len = len;
24293 else
24294- memcpy(insnbuf, start, insn_len);
24295+ memcpy(insnbuf, ktla_ktva(start), insn_len);
24296
24297 return insn_len;
24298 }
24299@@ -304,7 +311,7 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
24300 return this_cpu_read(paravirt_lazy_mode);
24301 }
24302
24303-struct pv_info pv_info = {
24304+struct pv_info pv_info __read_only = {
24305 .name = "bare hardware",
24306 .paravirt_enabled = 0,
24307 .kernel_rpl = 0,
24308@@ -315,16 +322,16 @@ struct pv_info pv_info = {
24309 #endif
24310 };
24311
24312-struct pv_init_ops pv_init_ops = {
24313+struct pv_init_ops pv_init_ops __read_only = {
24314 .patch = native_patch,
24315 };
24316
24317-struct pv_time_ops pv_time_ops = {
24318+struct pv_time_ops pv_time_ops __read_only = {
24319 .sched_clock = native_sched_clock,
24320 .steal_clock = native_steal_clock,
24321 };
24322
24323-struct pv_irq_ops pv_irq_ops = {
24324+struct pv_irq_ops pv_irq_ops __read_only = {
24325 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
24326 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
24327 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
24328@@ -336,7 +343,7 @@ struct pv_irq_ops pv_irq_ops = {
24329 #endif
24330 };
24331
24332-struct pv_cpu_ops pv_cpu_ops = {
24333+struct pv_cpu_ops pv_cpu_ops __read_only = {
24334 .cpuid = native_cpuid,
24335 .get_debugreg = native_get_debugreg,
24336 .set_debugreg = native_set_debugreg,
24337@@ -394,21 +401,26 @@ struct pv_cpu_ops pv_cpu_ops = {
24338 .end_context_switch = paravirt_nop,
24339 };
24340
24341-struct pv_apic_ops pv_apic_ops = {
24342+struct pv_apic_ops pv_apic_ops __read_only = {
24343 #ifdef CONFIG_X86_LOCAL_APIC
24344 .startup_ipi_hook = paravirt_nop,
24345 #endif
24346 };
24347
24348-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
24349+#ifdef CONFIG_X86_32
24350+#ifdef CONFIG_X86_PAE
24351+/* 64-bit pagetable entries */
24352+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
24353+#else
24354 /* 32-bit pagetable entries */
24355 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
24356+#endif
24357 #else
24358 /* 64-bit pagetable entries */
24359 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
24360 #endif
24361
24362-struct pv_mmu_ops pv_mmu_ops = {
24363+struct pv_mmu_ops pv_mmu_ops __read_only = {
24364
24365 .read_cr2 = native_read_cr2,
24366 .write_cr2 = native_write_cr2,
24367@@ -458,6 +470,7 @@ struct pv_mmu_ops pv_mmu_ops = {
24368 .make_pud = PTE_IDENT,
24369
24370 .set_pgd = native_set_pgd,
24371+ .set_pgd_batched = native_set_pgd_batched,
24372 #endif
24373 #endif /* PAGETABLE_LEVELS >= 3 */
24374
24375@@ -478,6 +491,12 @@ struct pv_mmu_ops pv_mmu_ops = {
24376 },
24377
24378 .set_fixmap = native_set_fixmap,
24379+
24380+#ifdef CONFIG_PAX_KERNEXEC
24381+ .pax_open_kernel = native_pax_open_kernel,
24382+ .pax_close_kernel = native_pax_close_kernel,
24383+#endif
24384+
24385 };
24386
24387 EXPORT_SYMBOL_GPL(pv_time_ops);
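
The __read_only attribute stamped on the pv_*_ops tables above moves them into a section that gets write-protected once boot is done, so any later modification has to go through the pax_open_kernel() bracket. A sketch of the assumed definition and its use:

/* assumed PaX definition: the named section is mapped read-only after init */
#define __read_only __attribute__((__section__(".data..read_only")))

struct pv_lock_ops pv_lock_ops __read_only = { /* ... */ };
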
24388diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
24389index 299d493..2ccb0ee 100644
24390--- a/arch/x86/kernel/pci-calgary_64.c
24391+++ b/arch/x86/kernel/pci-calgary_64.c
24392@@ -1339,7 +1339,7 @@ static void __init get_tce_space_from_tar(void)
24393 tce_space = be64_to_cpu(readq(target));
24394 tce_space = tce_space & TAR_SW_BITS;
24395
24396- tce_space = tce_space & (~specified_table_size);
24397+ tce_space = tce_space & (~(unsigned long)specified_table_size);
24398 info->tce_space = (u64 *)__va(tce_space);
24399 }
24400 }
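
The pci-calgary cast matters because of C's integer promotions: `~` on a 32-bit operand is computed in 32 bits and only then widened with zero fill, so the high half of the 64-bit mask ends up cleared. A runnable demonstration (assumes 64-bit unsigned long):

#include <stdio.h>

int main(void)
{
	unsigned int  mask  = 0x00000fffu;	/* plays specified_table_size */
	unsigned long space = 0x123456789abcUL;	/* plays the 64-bit tce_space */

	/* ~mask is a 32-bit result, zero-extended on widening: the top 32
	 * bits of the AND mask are 0 and the high half of space is lost */
	printf("buggy: %#lx\n", space & ~mask);

	/* widening before ~ keeps the mask's high bits set */
	printf("fixed: %#lx\n", space & ~(unsigned long)mask);
	return 0;
}
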
24401diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
24402index 35ccf75..7a15747 100644
24403--- a/arch/x86/kernel/pci-iommu_table.c
24404+++ b/arch/x86/kernel/pci-iommu_table.c
24405@@ -2,7 +2,7 @@
24406 #include <asm/iommu_table.h>
24407 #include <linux/string.h>
24408 #include <linux/kallsyms.h>
24409-
24410+#include <linux/sched.h>
24411
24412 #define DEBUG 1
24413
24414diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
24415index 6c483ba..d10ce2f 100644
24416--- a/arch/x86/kernel/pci-swiotlb.c
24417+++ b/arch/x86/kernel/pci-swiotlb.c
24418@@ -32,7 +32,7 @@ static void x86_swiotlb_free_coherent(struct device *dev, size_t size,
24419 void *vaddr, dma_addr_t dma_addr,
24420 struct dma_attrs *attrs)
24421 {
24422- swiotlb_free_coherent(dev, size, vaddr, dma_addr);
24423+ swiotlb_free_coherent(dev, size, vaddr, dma_addr, attrs);
24424 }
24425
24426 static struct dma_map_ops swiotlb_dma_ops = {
24427diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
24428index 81a5f5e..20f8b58 100644
24429--- a/arch/x86/kernel/process.c
24430+++ b/arch/x86/kernel/process.c
24431@@ -36,7 +36,8 @@
24432 * section. Since TSS's are completely CPU-local, we want them
24433 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
24434 */
24435-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
24436+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
24437+EXPORT_SYMBOL(init_tss);
24438
24439 #ifdef CONFIG_X86_64
24440 static DEFINE_PER_CPU(unsigned char, is_idle);
24441@@ -92,7 +93,7 @@ void arch_task_cache_init(void)
24442 task_xstate_cachep =
24443 kmem_cache_create("task_xstate", xstate_size,
24444 __alignof__(union thread_xstate),
24445- SLAB_PANIC | SLAB_NOTRACK, NULL);
24446+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
24447 }
24448
24449 /*
24450@@ -105,7 +106,7 @@ void exit_thread(void)
24451 unsigned long *bp = t->io_bitmap_ptr;
24452
24453 if (bp) {
24454- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
24455+ struct tss_struct *tss = init_tss + get_cpu();
24456
24457 t->io_bitmap_ptr = NULL;
24458 clear_thread_flag(TIF_IO_BITMAP);
24459@@ -125,6 +126,9 @@ void flush_thread(void)
24460 {
24461 struct task_struct *tsk = current;
24462
24463+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
24464+ loadsegment(gs, 0);
24465+#endif
24466 flush_ptrace_hw_breakpoint(tsk);
24467 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
24468 drop_init_fpu(tsk);
24469@@ -271,7 +275,7 @@ static void __exit_idle(void)
24470 void exit_idle(void)
24471 {
24472 /* idle loop has pid 0 */
24473- if (current->pid)
24474+ if (task_pid_nr(current))
24475 return;
24476 __exit_idle();
24477 }
24478@@ -327,7 +331,7 @@ bool xen_set_default_idle(void)
24479 return ret;
24480 }
24481 #endif
24482-void stop_this_cpu(void *dummy)
24483+__noreturn void stop_this_cpu(void *dummy)
24484 {
24485 local_irq_disable();
24486 /*
24487@@ -456,16 +460,37 @@ static int __init idle_setup(char *str)
24488 }
24489 early_param("idle", idle_setup);
24490
24491-unsigned long arch_align_stack(unsigned long sp)
24492+#ifdef CONFIG_PAX_RANDKSTACK
24493+void pax_randomize_kstack(struct pt_regs *regs)
24494 {
24495- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
24496- sp -= get_random_int() % 8192;
24497- return sp & ~0xf;
24498-}
24499+ struct thread_struct *thread = &current->thread;
24500+ unsigned long time;
24501
24502-unsigned long arch_randomize_brk(struct mm_struct *mm)
24503-{
24504- unsigned long range_end = mm->brk + 0x02000000;
24505- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
24506-}
24507+ if (!randomize_va_space)
24508+ return;
24509+
24510+ if (v8086_mode(regs))
24511+ return;
24512
24513+ rdtscl(time);
24514+
24515+ /* P4 seems to return a 0 LSB, ignore it */
24516+#ifdef CONFIG_MPENTIUM4
24517+ time &= 0x3EUL;
24518+ time <<= 2;
24519+#elif defined(CONFIG_X86_64)
24520+ time &= 0xFUL;
24521+ time <<= 4;
24522+#else
24523+ time &= 0x1FUL;
24524+ time <<= 3;
24525+#endif
24526+
24527+ thread->sp0 ^= time;
24528+ load_sp0(init_tss + smp_processor_id(), thread);
24529+
24530+#ifdef CONFIG_X86_64
24531+ this_cpu_write(kernel_stack, thread->sp0);
24532+#endif
24533+}
24534+#endif
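
pax_randomize_kstack() above perturbs thread.sp0 with a few TSC-derived bits while keeping the 16-byte alignment the ABI expects: the mask picks a handful of bits and the shift moves them clear of the low nibble. A runnable sketch of the x86_64 arithmetic (the TSC value is a stand-in; assumes 64-bit unsigned long):

#include <stdio.h>

int main(void)
{
	unsigned long sp0 = 0xffff880000010000UL;	/* illustrative thread.sp0 */
	unsigned long tsc = 0x1234567UL;		/* stand-in for rdtscl() */

	/* keep 4 TSC bits, shifted past the low 4 bits, so that XORing
	 * them into sp0 leaves it 16-byte aligned */
	unsigned long jitter = (tsc & 0xFUL) << 4;

	printf("sp0 %#lx -> %#lx (moved by %#lx)\n",
	       sp0, sp0 ^ jitter, jitter);
	return 0;
}
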
24535diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
24536index 7305f7d..22f73d6 100644
24537--- a/arch/x86/kernel/process_32.c
24538+++ b/arch/x86/kernel/process_32.c
24539@@ -65,6 +65,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
24540 unsigned long thread_saved_pc(struct task_struct *tsk)
24541 {
24542 return ((unsigned long *)tsk->thread.sp)[3];
24543+//XXX return tsk->thread.eip;
24544 }
24545
24546 void __show_regs(struct pt_regs *regs, int all)
24547@@ -74,19 +75,18 @@ void __show_regs(struct pt_regs *regs, int all)
24548 unsigned long sp;
24549 unsigned short ss, gs;
24550
24551- if (user_mode_vm(regs)) {
24552+ if (user_mode(regs)) {
24553 sp = regs->sp;
24554 ss = regs->ss & 0xffff;
24555- gs = get_user_gs(regs);
24556 } else {
24557 sp = kernel_stack_pointer(regs);
24558 savesegment(ss, ss);
24559- savesegment(gs, gs);
24560 }
24561+ gs = get_user_gs(regs);
24562
24563 printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
24564 (u16)regs->cs, regs->ip, regs->flags,
24565- smp_processor_id());
24566+ raw_smp_processor_id());
24567 print_symbol("EIP is at %s\n", regs->ip);
24568
24569 printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
24570@@ -128,20 +128,21 @@ void release_thread(struct task_struct *dead_task)
24571 int copy_thread(unsigned long clone_flags, unsigned long sp,
24572 unsigned long arg, struct task_struct *p)
24573 {
24574- struct pt_regs *childregs = task_pt_regs(p);
24575+ struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
24576 struct task_struct *tsk;
24577 int err;
24578
24579 p->thread.sp = (unsigned long) childregs;
24580 p->thread.sp0 = (unsigned long) (childregs+1);
24581+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
24582
24583 if (unlikely(p->flags & PF_KTHREAD)) {
24584 /* kernel thread */
24585 memset(childregs, 0, sizeof(struct pt_regs));
24586 p->thread.ip = (unsigned long) ret_from_kernel_thread;
24587- task_user_gs(p) = __KERNEL_STACK_CANARY;
24588- childregs->ds = __USER_DS;
24589- childregs->es = __USER_DS;
24590+ savesegment(gs, childregs->gs);
24591+ childregs->ds = __KERNEL_DS;
24592+ childregs->es = __KERNEL_DS;
24593 childregs->fs = __KERNEL_PERCPU;
24594 childregs->bx = sp; /* function */
24595 childregs->bp = arg;
24596@@ -248,7 +249,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
24597 struct thread_struct *prev = &prev_p->thread,
24598 *next = &next_p->thread;
24599 int cpu = smp_processor_id();
24600- struct tss_struct *tss = &per_cpu(init_tss, cpu);
24601+ struct tss_struct *tss = init_tss + cpu;
24602 fpu_switch_t fpu;
24603
24604 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
24605@@ -272,6 +273,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
24606 */
24607 lazy_save_gs(prev->gs);
24608
24609+#ifdef CONFIG_PAX_MEMORY_UDEREF
24610+ __set_fs(task_thread_info(next_p)->addr_limit);
24611+#endif
24612+
24613 /*
24614 * Load the per-thread Thread-Local Storage descriptor.
24615 */
24616@@ -302,6 +307,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
24617 */
24618 arch_end_context_switch(next_p);
24619
24620+ this_cpu_write(current_task, next_p);
24621+ this_cpu_write(current_tinfo, &next_p->tinfo);
24622+
24623 /*
24624 * Restore %gs if needed (which is common)
24625 */
24626@@ -310,8 +318,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
24627
24628 switch_fpu_finish(next_p, fpu);
24629
24630- this_cpu_write(current_task, next_p);
24631-
24632 return prev_p;
24633 }
24634
24635@@ -341,4 +347,3 @@ unsigned long get_wchan(struct task_struct *p)
24636 } while (count++ < 16);
24637 return 0;
24638 }
24639-
24640diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
24641index 355ae06..560fbbe 100644
24642--- a/arch/x86/kernel/process_64.c
24643+++ b/arch/x86/kernel/process_64.c
24644@@ -151,10 +151,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
24645 struct pt_regs *childregs;
24646 struct task_struct *me = current;
24647
24648- p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
24649+ p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16;
24650 childregs = task_pt_regs(p);
24651 p->thread.sp = (unsigned long) childregs;
24652 p->thread.usersp = me->thread.usersp;
24653+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
24654 set_tsk_thread_flag(p, TIF_FORK);
24655 p->fpu_counter = 0;
24656 p->thread.io_bitmap_ptr = NULL;
24657@@ -165,6 +166,8 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
24658 p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
24659 savesegment(es, p->thread.es);
24660 savesegment(ds, p->thread.ds);
24661+ savesegment(ss, p->thread.ss);
24662+ BUG_ON(p->thread.ss == __UDEREF_KERNEL_DS);
24663 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
24664
24665 if (unlikely(p->flags & PF_KTHREAD)) {
24666@@ -273,7 +276,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
24667 struct thread_struct *prev = &prev_p->thread;
24668 struct thread_struct *next = &next_p->thread;
24669 int cpu = smp_processor_id();
24670- struct tss_struct *tss = &per_cpu(init_tss, cpu);
24671+ struct tss_struct *tss = init_tss + cpu;
24672 unsigned fsindex, gsindex;
24673 fpu_switch_t fpu;
24674
24675@@ -296,6 +299,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
24676 if (unlikely(next->ds | prev->ds))
24677 loadsegment(ds, next->ds);
24678
24679+ savesegment(ss, prev->ss);
24680+ if (unlikely(next->ss != prev->ss))
24681+ loadsegment(ss, next->ss);
24682
24683 /* We must save %fs and %gs before load_TLS() because
24684 * %fs and %gs may be cleared by load_TLS().
24685@@ -355,10 +361,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
24686 prev->usersp = this_cpu_read(old_rsp);
24687 this_cpu_write(old_rsp, next->usersp);
24688 this_cpu_write(current_task, next_p);
24689+ this_cpu_write(current_tinfo, &next_p->tinfo);
24690
24691- this_cpu_write(kernel_stack,
24692- (unsigned long)task_stack_page(next_p) +
24693- THREAD_SIZE - KERNEL_STACK_OFFSET);
24694+ this_cpu_write(kernel_stack, next->sp0);
24695
24696 /*
24697 * Now maybe reload the debug registers and handle I/O bitmaps
24698@@ -427,12 +432,11 @@ unsigned long get_wchan(struct task_struct *p)
24699 if (!p || p == current || p->state == TASK_RUNNING)
24700 return 0;
24701 stack = (unsigned long)task_stack_page(p);
24702- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
24703+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
24704 return 0;
24705 fp = *(u64 *)(p->thread.sp);
24706 do {
24707- if (fp < (unsigned long)stack ||
24708- fp >= (unsigned long)stack+THREAD_SIZE)
24709+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
24710 return 0;
24711 ip = *(u64 *)(fp+8);
24712 if (!in_sched_functions(ip))
24713diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
24714index 29a8120..a50b5ee 100644
24715--- a/arch/x86/kernel/ptrace.c
24716+++ b/arch/x86/kernel/ptrace.c
24717@@ -184,14 +184,13 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
24718 {
24719 unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
24720 unsigned long sp = (unsigned long)&regs->sp;
24721- struct thread_info *tinfo;
24722
24723- if (context == (sp & ~(THREAD_SIZE - 1)))
24724+ if (context == ((sp + 8) & ~(THREAD_SIZE - 1)))
24725 return sp;
24726
24727- tinfo = (struct thread_info *)context;
24728- if (tinfo->previous_esp)
24729- return tinfo->previous_esp;
24730+ sp = *(unsigned long *)context;
24731+ if (sp)
24732+ return sp;
24733
24734 return (unsigned long)regs;
24735 }
24736@@ -588,7 +587,7 @@ static void ptrace_triggered(struct perf_event *bp,
24737 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
24738 {
24739 int i;
24740- int dr7 = 0;
24741+ unsigned long dr7 = 0;
24742 struct arch_hw_breakpoint *info;
24743
24744 for (i = 0; i < HBP_NUM; i++) {
24745@@ -856,7 +855,7 @@ long arch_ptrace(struct task_struct *child, long request,
24746 unsigned long addr, unsigned long data)
24747 {
24748 int ret;
24749- unsigned long __user *datap = (unsigned long __user *)data;
24750+ unsigned long __user *datap = (__force unsigned long __user *)data;
24751
24752 switch (request) {
24753 /* read the word at location addr in the USER area. */
24754@@ -941,14 +940,14 @@ long arch_ptrace(struct task_struct *child, long request,
24755 if ((int) addr < 0)
24756 return -EIO;
24757 ret = do_get_thread_area(child, addr,
24758- (struct user_desc __user *)data);
24759+ (__force struct user_desc __user *) data);
24760 break;
24761
24762 case PTRACE_SET_THREAD_AREA:
24763 if ((int) addr < 0)
24764 return -EIO;
24765 ret = do_set_thread_area(child, addr,
24766- (struct user_desc __user *)data, 0);
24767+ (__force struct user_desc __user *) data, 0);
24768 break;
24769 #endif
24770
24771@@ -1326,7 +1325,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
24772
24773 #ifdef CONFIG_X86_64
24774
24775-static struct user_regset x86_64_regsets[] __read_mostly = {
24776+static user_regset_no_const x86_64_regsets[] __read_only = {
24777 [REGSET_GENERAL] = {
24778 .core_note_type = NT_PRSTATUS,
24779 .n = sizeof(struct user_regs_struct) / sizeof(long),
24780@@ -1367,7 +1366,7 @@ static const struct user_regset_view user_x86_64_view = {
24781 #endif /* CONFIG_X86_64 */
24782
24783 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
24784-static struct user_regset x86_32_regsets[] __read_mostly = {
24785+static user_regset_no_const x86_32_regsets[] __read_only = {
24786 [REGSET_GENERAL] = {
24787 .core_note_type = NT_PRSTATUS,
24788 .n = sizeof(struct user_regs_struct32) / sizeof(u32),
24789@@ -1420,7 +1419,7 @@ static const struct user_regset_view user_x86_32_view = {
24790 */
24791 u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
24792
24793-void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
24794+void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
24795 {
24796 #ifdef CONFIG_X86_64
24797 x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
24798@@ -1455,7 +1454,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
24799 memset(info, 0, sizeof(*info));
24800 info->si_signo = SIGTRAP;
24801 info->si_code = si_code;
24802- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
24803+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
24804 }
24805
24806 void user_single_step_siginfo(struct task_struct *tsk,
24807@@ -1484,6 +1483,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
24808 # define IS_IA32 0
24809 #endif
24810
24811+#ifdef CONFIG_GRKERNSEC_SETXID
24812+extern void gr_delayed_cred_worker(void);
24813+#endif
24814+
24815 /*
24816 * We must return the syscall number to actually look up in the table.
24817 * This can be -1L to skip running any syscall at all.
24818@@ -1494,6 +1497,11 @@ long syscall_trace_enter(struct pt_regs *regs)
24819
24820 user_exit();
24821
24822+#ifdef CONFIG_GRKERNSEC_SETXID
24823+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
24824+ gr_delayed_cred_worker();
24825+#endif
24826+
24827 /*
24828 * If we stepped into a sysenter/syscall insn, it trapped in
24829 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
24830@@ -1549,6 +1557,11 @@ void syscall_trace_leave(struct pt_regs *regs)
24831 */
24832 user_exit();
24833
24834+#ifdef CONFIG_GRKERNSEC_SETXID
24835+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
24836+ gr_delayed_cred_worker();
24837+#endif
24838+
24839 audit_syscall_exit(regs);
24840
24841 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
24842diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
24843index 2cb9470..ff1fd80 100644
24844--- a/arch/x86/kernel/pvclock.c
24845+++ b/arch/x86/kernel/pvclock.c
24846@@ -43,11 +43,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
24847 return pv_tsc_khz;
24848 }
24849
24850-static atomic64_t last_value = ATOMIC64_INIT(0);
24851+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
24852
24853 void pvclock_resume(void)
24854 {
24855- atomic64_set(&last_value, 0);
24856+ atomic64_set_unchecked(&last_value, 0);
24857 }
24858
24859 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
24860@@ -92,11 +92,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
24861 * updating at the same time, and one of them could be slightly behind,
24862 * making the assumption that last_value always go forward fail to hold.
24863 */
24864- last = atomic64_read(&last_value);
24865+ last = atomic64_read_unchecked(&last_value);
24866 do {
24867 if (ret < last)
24868 return last;
24869- last = atomic64_cmpxchg(&last_value, last, ret);
24870+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
24871 } while (unlikely(last != ret));
24872
24873 return ret;
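
The _unchecked atomics used here are the PAX_REFCOUNT escape hatch: with that feature enabled, the ordinary atomic64_t operations trap on signed overflow to stop reference-count wraps, and values that may legitimately wrap, like this monotonic clock watermark, opt out. A sketch of the assumed type (its operations mirror the stock ones minus the overflow check):

typedef struct {
	long counter;
} atomic64_unchecked_t;

static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
{
	return *(volatile const long *)&v->counter;
}
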
24874diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
24875index 76fa1e9..abf09ea 100644
24876--- a/arch/x86/kernel/reboot.c
24877+++ b/arch/x86/kernel/reboot.c
24878@@ -36,7 +36,7 @@ void (*pm_power_off)(void);
24879 EXPORT_SYMBOL(pm_power_off);
24880
24881 static const struct desc_ptr no_idt = {};
24882-static int reboot_mode;
24883+static unsigned short reboot_mode;
24884 enum reboot_type reboot_type = BOOT_ACPI;
24885 int reboot_force;
24886
24887@@ -157,6 +157,11 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
24888
24889 void __noreturn machine_real_restart(unsigned int type)
24890 {
24891+
24892+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
24893+ struct desc_struct *gdt;
24894+#endif
24895+
24896 local_irq_disable();
24897
24898 /*
24899@@ -184,7 +189,29 @@ void __noreturn machine_real_restart(unsigned int type)
24900
24901 /* Jump to the identity-mapped low memory code */
24902 #ifdef CONFIG_X86_32
24903- asm volatile("jmpl *%0" : :
24904+
24905+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
24906+ gdt = get_cpu_gdt_table(smp_processor_id());
24907+ pax_open_kernel();
24908+#ifdef CONFIG_PAX_MEMORY_UDEREF
24909+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
24910+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
24911+ loadsegment(ds, __KERNEL_DS);
24912+ loadsegment(es, __KERNEL_DS);
24913+ loadsegment(ss, __KERNEL_DS);
24914+#endif
24915+#ifdef CONFIG_PAX_KERNEXEC
24916+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
24917+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
24918+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
24919+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
24920+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
24921+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
24922+#endif
24923+ pax_close_kernel();
24924+#endif
24925+
24926+ asm volatile("ljmpl *%0" : :
24927 "rm" (real_mode_header->machine_real_restart_asm),
24928 "a" (type));
24929 #else
24930@@ -531,7 +558,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
24931 * try to force a triple fault and then cycle between hitting the keyboard
24932 * controller and doing that
24933 */
24934-static void native_machine_emergency_restart(void)
24935+static void __noreturn native_machine_emergency_restart(void)
24936 {
24937 int i;
24938 int attempt = 0;
24939@@ -654,13 +681,13 @@ void native_machine_shutdown(void)
24940 #endif
24941 }
24942
24943-static void __machine_emergency_restart(int emergency)
24944+static void __noreturn __machine_emergency_restart(int emergency)
24945 {
24946 reboot_emergency = emergency;
24947 machine_ops.emergency_restart();
24948 }
24949
24950-static void native_machine_restart(char *__unused)
24951+static void __noreturn native_machine_restart(char *__unused)
24952 {
24953 pr_notice("machine restart\n");
24954
24955@@ -669,7 +696,7 @@ static void native_machine_restart(char *__unused)
24956 __machine_emergency_restart(0);
24957 }
24958
24959-static void native_machine_halt(void)
24960+static void __noreturn native_machine_halt(void)
24961 {
24962 /* Stop other cpus and apics */
24963 machine_shutdown();
24964@@ -679,7 +706,7 @@ static void native_machine_halt(void)
24965 stop_this_cpu(NULL);
24966 }
24967
24968-static void native_machine_power_off(void)
24969+static void __noreturn native_machine_power_off(void)
24970 {
24971 if (pm_power_off) {
24972 if (!reboot_force)
24973@@ -688,9 +715,10 @@ static void native_machine_power_off(void)
24974 }
24975 /* A fallback in case there is no PM info available */
24976 tboot_shutdown(TB_SHUTDOWN_HALT);
24977+ unreachable();
24978 }
24979
24980-struct machine_ops machine_ops = {
24981+struct machine_ops machine_ops __read_only = {
24982 .power_off = native_machine_power_off,
24983 .shutdown = native_machine_shutdown,
24984 .emergency_restart = native_machine_emergency_restart,
24985diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c
24986index c8e41e9..64049ef 100644
24987--- a/arch/x86/kernel/reboot_fixups_32.c
24988+++ b/arch/x86/kernel/reboot_fixups_32.c
24989@@ -57,7 +57,7 @@ struct device_fixup {
24990 unsigned int vendor;
24991 unsigned int device;
24992 void (*reboot_fixup)(struct pci_dev *);
24993-};
24994+} __do_const;
24995
24996 /*
24997 * PCI ids solely used for fixups_table go here
24998diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
24999index f2bb9c9..bed145d7 100644
25000--- a/arch/x86/kernel/relocate_kernel_64.S
25001+++ b/arch/x86/kernel/relocate_kernel_64.S
25002@@ -11,6 +11,7 @@
25003 #include <asm/kexec.h>
25004 #include <asm/processor-flags.h>
25005 #include <asm/pgtable_types.h>
25006+#include <asm/alternative-asm.h>
25007
25008 /*
25009 * Must be relocatable PIC code callable as a C function
25010@@ -167,6 +168,7 @@ identity_mapped:
25011 xorq %r14, %r14
25012 xorq %r15, %r15
25013
25014+ pax_force_retaddr 0, 1
25015 ret
25016
25017 1:
25018diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
25019index 56f7fcf..2cfe4f1 100644
25020--- a/arch/x86/kernel/setup.c
25021+++ b/arch/x86/kernel/setup.c
25022@@ -110,6 +110,7 @@
25023 #include <asm/mce.h>
25024 #include <asm/alternative.h>
25025 #include <asm/prom.h>
25026+#include <asm/boot.h>
25027
25028 /*
25029 * max_low_pfn_mapped: highest direct mapped pfn under 4GB
25030@@ -205,12 +206,50 @@ EXPORT_SYMBOL(boot_cpu_data);
25031 #endif
25032
25033
25034-#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
25035-unsigned long mmu_cr4_features;
25036+#ifdef CONFIG_X86_64
25037+unsigned long mmu_cr4_features __read_only = X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE;
25038+#elif defined(CONFIG_X86_PAE)
25039+unsigned long mmu_cr4_features __read_only = X86_CR4_PAE;
25040 #else
25041-unsigned long mmu_cr4_features = X86_CR4_PAE;
25042+unsigned long mmu_cr4_features __read_only;
25043 #endif
25044
25045+void set_in_cr4(unsigned long mask)
25046+{
25047+ unsigned long cr4 = read_cr4();
25048+
25049+ if ((cr4 & mask) == mask && cr4 == mmu_cr4_features)
25050+ return;
25051+
25052+ pax_open_kernel();
25053+ mmu_cr4_features |= mask;
25054+ pax_close_kernel();
25055+
25056+ if (trampoline_cr4_features)
25057+ *trampoline_cr4_features = mmu_cr4_features;
25058+ cr4 |= mask;
25059+ write_cr4(cr4);
25060+}
25061+EXPORT_SYMBOL(set_in_cr4);
25062+
25063+void clear_in_cr4(unsigned long mask)
25064+{
25065+ unsigned long cr4 = read_cr4();
25066+
25067+ if (!(cr4 & mask) && cr4 == mmu_cr4_features)
25068+ return;
25069+
25070+ pax_open_kernel();
25071+ mmu_cr4_features &= ~mask;
25072+ pax_close_kernel();
25073+
25074+ if (trampoline_cr4_features)
25075+ *trampoline_cr4_features = mmu_cr4_features;
25076+ cr4 &= ~mask;
25077+ write_cr4(cr4);
25078+}
25079+EXPORT_SYMBOL(clear_in_cr4);
25080+
25081 /* Boot loader ID and version as integers, for the benefit of proc_dointvec */
25082 int bootloader_type, bootloader_version;
25083
25084@@ -444,7 +483,7 @@ static void __init parse_setup_data(void)
25085
25086 switch (data->type) {
25087 case SETUP_E820_EXT:
25088- parse_e820_ext(data);
25089+ parse_e820_ext((struct setup_data __force_kernel *)data);
25090 break;
25091 case SETUP_DTB:
25092 add_dtb(pa_data);
25093@@ -771,7 +810,7 @@ static void __init trim_bios_range(void)
25094 * area (640->1Mb) as ram even though it is not.
25095 * take them out.
25096 */
25097- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
25098+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
25099
25100 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
25101 }
25102@@ -779,7 +818,7 @@ static void __init trim_bios_range(void)
25103 /* called before trim_bios_range() to spare extra sanitize */
25104 static void __init e820_add_kernel_range(void)
25105 {
25106- u64 start = __pa_symbol(_text);
25107+ u64 start = __pa_symbol(ktla_ktva(_text));
25108 u64 size = __pa_symbol(_end) - start;
25109
25110 /*
25111@@ -841,8 +880,12 @@ static void __init trim_low_memory_range(void)
25112
25113 void __init setup_arch(char **cmdline_p)
25114 {
25115+#ifdef CONFIG_X86_32
25116+ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(__bss_stop) - LOAD_PHYSICAL_ADDR);
25117+#else
25118 memblock_reserve(__pa_symbol(_text),
25119 (unsigned long)__bss_stop - (unsigned long)_text);
25120+#endif
25121
25122 early_reserve_initrd();
25123
25124@@ -934,14 +977,14 @@ void __init setup_arch(char **cmdline_p)
25125
25126 if (!boot_params.hdr.root_flags)
25127 root_mountflags &= ~MS_RDONLY;
25128- init_mm.start_code = (unsigned long) _text;
25129- init_mm.end_code = (unsigned long) _etext;
25130+ init_mm.start_code = ktla_ktva((unsigned long) _text);
25131+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
25132 init_mm.end_data = (unsigned long) _edata;
25133 init_mm.brk = _brk_end;
25134
25135- code_resource.start = __pa_symbol(_text);
25136- code_resource.end = __pa_symbol(_etext)-1;
25137- data_resource.start = __pa_symbol(_etext);
25138+ code_resource.start = __pa_symbol(ktla_ktva(_text));
25139+ code_resource.end = __pa_symbol(ktla_ktva(_etext))-1;
25140+ data_resource.start = __pa_symbol(_sdata);
25141 data_resource.end = __pa_symbol(_edata)-1;
25142 bss_resource.start = __pa_symbol(__bss_start);
25143 bss_resource.end = __pa_symbol(__bss_stop)-1;
25144diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
25145index 5cdff03..80fa283 100644
25146--- a/arch/x86/kernel/setup_percpu.c
25147+++ b/arch/x86/kernel/setup_percpu.c
25148@@ -21,19 +21,17 @@
25149 #include <asm/cpu.h>
25150 #include <asm/stackprotector.h>
25151
25152-DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
25153+#ifdef CONFIG_SMP
25154+DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
25155 EXPORT_PER_CPU_SYMBOL(cpu_number);
25156+#endif
25157
25158-#ifdef CONFIG_X86_64
25159 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
25160-#else
25161-#define BOOT_PERCPU_OFFSET 0
25162-#endif
25163
25164 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
25165 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
25166
25167-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
25168+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
25169 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
25170 };
25171 EXPORT_SYMBOL(__per_cpu_offset);
25172@@ -66,7 +64,7 @@ static bool __init pcpu_need_numa(void)
25173 {
25174 #ifdef CONFIG_NEED_MULTIPLE_NODES
25175 pg_data_t *last = NULL;
25176- unsigned int cpu;
25177+ int cpu;
25178
25179 for_each_possible_cpu(cpu) {
25180 int node = early_cpu_to_node(cpu);
25181@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
25182 {
25183 #ifdef CONFIG_X86_32
25184 struct desc_struct gdt;
25185+ unsigned long base = per_cpu_offset(cpu);
25186
25187- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
25188- 0x2 | DESCTYPE_S, 0x8);
25189- gdt.s = 1;
25190+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
25191+ 0x83 | DESCTYPE_S, 0xC);
25192 write_gdt_entry(get_cpu_gdt_table(cpu),
25193 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
25194 #endif
25195@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
25196 /* alrighty, percpu areas up and running */
25197 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
25198 for_each_possible_cpu(cpu) {
25199+#ifdef CONFIG_CC_STACKPROTECTOR
25200+#ifdef CONFIG_X86_32
25201+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
25202+#endif
25203+#endif
25204 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
25205 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
25206 per_cpu(cpu_number, cpu) = cpu;
25207@@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
25208 */
25209 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
25210 #endif
25211+#ifdef CONFIG_CC_STACKPROTECTOR
25212+#ifdef CONFIG_X86_32
25213+ if (!cpu)
25214+ per_cpu(stack_canary.canary, cpu) = canary;
25215+#endif
25216+#endif
25217 /*
25218 * Up to this point, the boot CPU has been using .init.data
25219 * area. Reload any changed state for the boot CPU.
25220diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
25221index 6956299..18126ec4 100644
25222--- a/arch/x86/kernel/signal.c
25223+++ b/arch/x86/kernel/signal.c
25224@@ -196,7 +196,7 @@ static unsigned long align_sigframe(unsigned long sp)
25225 * Align the stack pointer according to the i386 ABI,
25226 * i.e. so that on function entry ((sp + 4) & 15) == 0.
25227 */
25228- sp = ((sp + 4) & -16ul) - 4;
25229+ sp = ((sp - 12) & -16ul) - 4;
25230 #else /* !CONFIG_X86_32 */
25231 sp = round_down(sp, 16) - 8;
25232 #endif
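
Both forms of the i386 rounding keep the ABI invariant ((sp + 4) & 15) == 0 on handler entry; the rewritten form always lands exactly one 16-byte slot lower, so the signal frame is placed strictly below the incoming sp instead of possibly coinciding with it. A runnable check:

#include <stdio.h>

int main(void)
{
	unsigned long sp;

	/* old: ((sp + 4) & -16ul) - 4   can return sp itself
	 * new: ((sp - 12) & -16ul) - 4  is always old - 16 */
	for (sp = 96; sp <= 111; sp++)
		printf("sp=%3lu old=%3lu new=%3lu\n", sp,
		       ((sp + 4) & -16ul) - 4,
		       ((sp - 12) & -16ul) - 4);
	return 0;
}
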
25233@@ -304,9 +304,9 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
25234 }
25235
25236 if (current->mm->context.vdso)
25237- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
25238+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
25239 else
25240- restorer = &frame->retcode;
25241+ restorer = (void __user *)&frame->retcode;
25242 if (ksig->ka.sa.sa_flags & SA_RESTORER)
25243 restorer = ksig->ka.sa.sa_restorer;
25244
25245@@ -320,7 +320,7 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
25246 * reasons and because gdb uses it as a signature to notice
25247 * signal handler stack frames.
25248 */
25249- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
25250+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
25251
25252 if (err)
25253 return -EFAULT;
25254@@ -364,10 +364,13 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
25255 else
25256 put_user_ex(0, &frame->uc.uc_flags);
25257 put_user_ex(0, &frame->uc.uc_link);
25258- err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
25259+ __save_altstack_ex(&frame->uc.uc_stack, regs->sp);
25260
25261 /* Set up to return from userspace. */
25262- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
25263+ if (current->mm->context.vdso)
25264+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
25265+ else
25266+ restorer = (void __user *)&frame->retcode;
25267 if (ksig->ka.sa.sa_flags & SA_RESTORER)
25268 restorer = ksig->ka.sa.sa_restorer;
25269 put_user_ex(restorer, &frame->pretcode);
25270@@ -379,7 +382,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
25271 * reasons and because gdb uses it as a signature to notice
25272 * signal handler stack frames.
25273 */
25274- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
25275+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
25276 } put_user_catch(err);
25277
25278 err |= copy_siginfo_to_user(&frame->info, &ksig->info);
25279@@ -429,7 +432,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
25280 else
25281 put_user_ex(0, &frame->uc.uc_flags);
25282 put_user_ex(0, &frame->uc.uc_link);
25283- err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
25284+ __save_altstack_ex(&frame->uc.uc_stack, regs->sp);
25285
25286 /* Set up to return from userspace. If provided, use a stub
25287 already in userspace. */
25288@@ -615,7 +618,12 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
25289 {
25290 int usig = signr_convert(ksig->sig);
25291 sigset_t *set = sigmask_to_save();
25292- compat_sigset_t *cset = (compat_sigset_t *) set;
25293+ sigset_t sigcopy;
25294+ compat_sigset_t *cset;
25295+
25296+ sigcopy = *set;
25297+
25298+ cset = (compat_sigset_t *) &sigcopy;
25299
25300 /* Set up the stack frame */
25301 if (is_ia32_frame()) {
25302@@ -626,7 +634,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
25303 } else if (is_x32_frame()) {
25304 return x32_setup_rt_frame(ksig, cset, regs);
25305 } else {
25306- return __setup_rt_frame(ksig->sig, ksig, set, regs);
25307+ return __setup_rt_frame(ksig->sig, ksig, &sigcopy, regs);
25308 }
25309 }
25310
25311diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
25312index 48d2b7d..90d328a 100644
25313--- a/arch/x86/kernel/smp.c
25314+++ b/arch/x86/kernel/smp.c
25315@@ -285,7 +285,7 @@ static int __init nonmi_ipi_setup(char *str)
25316
25317 __setup("nonmi_ipi", nonmi_ipi_setup);
25318
25319-struct smp_ops smp_ops = {
25320+struct smp_ops smp_ops __read_only = {
25321 .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
25322 .smp_prepare_cpus = native_smp_prepare_cpus,
25323 .smp_cpus_done = native_smp_cpus_done,
25324diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
25325index bfd348e..914f323 100644
25326--- a/arch/x86/kernel/smpboot.c
25327+++ b/arch/x86/kernel/smpboot.c
25328@@ -251,14 +251,18 @@ notrace static void __cpuinit start_secondary(void *unused)
25329
25330 enable_start_cpu0 = 0;
25331
25332-#ifdef CONFIG_X86_32
25333- /* switch away from the initial page table */
25334- load_cr3(swapper_pg_dir);
25335- __flush_tlb_all();
25336-#endif
25337-
25338 /* otherwise gcc will move up smp_processor_id before the cpu_init */
25339 barrier();
25340+
25341+ /* switch away from the initial page table */
25342+#ifdef CONFIG_PAX_PER_CPU_PGD
25343+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
25344+ __flush_tlb_all();
25345+#elif defined(CONFIG_X86_32)
25346+ load_cr3(swapper_pg_dir);
25347+ __flush_tlb_all();
25348+#endif
25349+
25350 /*
25351 * Check TSC synchronization with the BP:
25352 */
25353@@ -748,6 +752,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
25354 idle->thread.sp = (unsigned long) (((struct pt_regs *)
25355 (THREAD_SIZE + task_stack_page(idle))) - 1);
25356 per_cpu(current_task, cpu) = idle;
25357+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
25358
25359 #ifdef CONFIG_X86_32
25360 /* Stack for startup_32 can be just as for start_secondary onwards */
25361@@ -755,11 +760,13 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
25362 #else
25363 clear_tsk_thread_flag(idle, TIF_FORK);
25364 initial_gs = per_cpu_offset(cpu);
25365- per_cpu(kernel_stack, cpu) =
25366- (unsigned long)task_stack_page(idle) -
25367- KERNEL_STACK_OFFSET + THREAD_SIZE;
25368+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
25369 #endif
25370+
25371+ pax_open_kernel();
25372 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
25373+ pax_close_kernel();
25374+
25375 initial_code = (unsigned long)start_secondary;
25376 stack_start = idle->thread.sp;
25377
25378@@ -908,6 +915,15 @@ int __cpuinit native_cpu_up(unsigned int cpu, struct task_struct *tidle)
25379 /* the FPU context is blank, nobody can own it */
25380 __cpu_disable_lazy_restore(cpu);
25381
25382+#ifdef CONFIG_PAX_PER_CPU_PGD
25383+ clone_pgd_range(get_cpu_pgd(cpu, kernel) + KERNEL_PGD_BOUNDARY,
25384+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
25385+ KERNEL_PGD_PTRS);
25386+ clone_pgd_range(get_cpu_pgd(cpu, user) + KERNEL_PGD_BOUNDARY,
25387+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
25388+ KERNEL_PGD_PTRS);
25389+#endif
25390+
25391 err = do_boot_cpu(apicid, cpu, tidle);
25392 if (err) {
25393 pr_debug("do_boot_cpu failed %d\n", err);
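
The smp.c and smpboot.c hunks above show a pattern this patch applies throughout: dispatch tables such as smp_ops are tagged __read_only so they end up in memory that is write-protected after boot, and the few remaining legitimate writes (here, early_gdt_descr.address) are bracketed by pax_open_kernel()/pax_close_kernel(). A minimal sketch of that pattern, assuming the PaX helpers defined elsewhere in this patch:

	struct ops_table {
		void (*handler)(void);
	};

	static void default_handler(void) { }

	/* placed in a section the kernel maps read-only once boot completes */
	static struct ops_table my_ops __read_only = {
		.handler = default_handler,
	};

	static void set_handler(void (*fn)(void))
	{
		pax_open_kernel();		/* temporarily lift write protection */
		*(void **)&my_ops.handler = fn;	/* same cast style as the vmx.c hunks below */
		pax_close_kernel();		/* re-protect the page */
	}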
25394diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
25395index 9b4d51d..5d28b58 100644
25396--- a/arch/x86/kernel/step.c
25397+++ b/arch/x86/kernel/step.c
25398@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
25399 struct desc_struct *desc;
25400 unsigned long base;
25401
25402- seg &= ~7UL;
25403+ seg >>= 3;
25404
25405 mutex_lock(&child->mm->context.lock);
25406- if (unlikely((seg >> 3) >= child->mm->context.size))
25407+ if (unlikely(seg >= child->mm->context.size))
25408 addr = -1L; /* bogus selector, access would fault */
25409 else {
25410 desc = child->mm->context.ldt + seg;
25411@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
25412 addr += base;
25413 }
25414 mutex_unlock(&child->mm->context.lock);
25415- }
25416+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
25417+ addr = ktla_ktva(addr);
25418
25419 return addr;
25420 }
25421@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
25422 unsigned char opcode[15];
25423 unsigned long addr = convert_ip_to_linear(child, regs);
25424
25425+ if (addr == -EINVAL)
25426+ return 0;
25427+
25428 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
25429 for (i = 0; i < copied; i++) {
25430 switch (opcode[i]) {
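
For reference on the step.c hunk above: an x86 segment selector keeps the descriptor-table index in bits 3-15, so `seg >>= 3` turns the selector directly into an LDT index and the bounds check against mm->context.size no longer needs its own shift. The architectural layout:

	/*
	 * x86 segment selector:
	 *   bits 0-1   RPL (requested privilege level)
	 *   bit  2     TI  (0 = GDT, 1 = LDT)
	 *   bits 3-15  index into the descriptor table
	 */
	static inline unsigned int selector_to_index(unsigned short sel)
	{
		return sel >> 3;	/* drop RPL and TI, keep the index */
	}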
25431diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
25432new file mode 100644
25433index 0000000..5877189
25434--- /dev/null
25435+++ b/arch/x86/kernel/sys_i386_32.c
25436@@ -0,0 +1,189 @@
25437+/*
25438+ * This file contains various random system calls that
25439+ * have a non-standard calling sequence on the Linux/i386
25440+ * platform.
25441+ */
25442+
25443+#include <linux/errno.h>
25444+#include <linux/sched.h>
25445+#include <linux/mm.h>
25446+#include <linux/fs.h>
25447+#include <linux/smp.h>
25448+#include <linux/sem.h>
25449+#include <linux/msg.h>
25450+#include <linux/shm.h>
25451+#include <linux/stat.h>
25452+#include <linux/syscalls.h>
25453+#include <linux/mman.h>
25454+#include <linux/file.h>
25455+#include <linux/utsname.h>
25456+#include <linux/ipc.h>
25457+#include <linux/elf.h>
25458+
25459+#include <linux/uaccess.h>
25460+#include <linux/unistd.h>
25461+
25462+#include <asm/syscalls.h>
25463+
25464+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
25465+{
25466+ unsigned long pax_task_size = TASK_SIZE;
25467+
25468+#ifdef CONFIG_PAX_SEGMEXEC
25469+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
25470+ pax_task_size = SEGMEXEC_TASK_SIZE;
25471+#endif
25472+
25473+ if (flags & MAP_FIXED)
25474+ if (len > pax_task_size || addr > pax_task_size - len)
25475+ return -EINVAL;
25476+
25477+ return 0;
25478+}
25479+
25480+/*
25481+ * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
25482+ */
25483+static unsigned long get_align_mask(void)
25484+{
25485+ if (va_align.flags < 0 || !(va_align.flags & ALIGN_VA_32))
25486+ return 0;
25487+
25488+ if (!(current->flags & PF_RANDOMIZE))
25489+ return 0;
25490+
25491+ return va_align.mask;
25492+}
25493+
25494+unsigned long
25495+arch_get_unmapped_area(struct file *filp, unsigned long addr,
25496+ unsigned long len, unsigned long pgoff, unsigned long flags)
25497+{
25498+ struct mm_struct *mm = current->mm;
25499+ struct vm_area_struct *vma;
25500+ unsigned long pax_task_size = TASK_SIZE;
25501+ struct vm_unmapped_area_info info;
25502+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
25503+
25504+#ifdef CONFIG_PAX_SEGMEXEC
25505+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
25506+ pax_task_size = SEGMEXEC_TASK_SIZE;
25507+#endif
25508+
25509+ pax_task_size -= PAGE_SIZE;
25510+
25511+ if (len > pax_task_size)
25512+ return -ENOMEM;
25513+
25514+ if (flags & MAP_FIXED)
25515+ return addr;
25516+
25517+#ifdef CONFIG_PAX_RANDMMAP
25518+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
25519+#endif
25520+
25521+ if (addr) {
25522+ addr = PAGE_ALIGN(addr);
25523+ if (pax_task_size - len >= addr) {
25524+ vma = find_vma(mm, addr);
25525+ if (check_heap_stack_gap(vma, addr, len, offset))
25526+ return addr;
25527+ }
25528+ }
25529+
25530+ info.flags = 0;
25531+ info.length = len;
25532+ info.align_mask = filp ? get_align_mask() : 0;
25533+ info.align_offset = pgoff << PAGE_SHIFT;
25534+ info.threadstack_offset = offset;
25535+
25536+#ifdef CONFIG_PAX_PAGEEXEC
25537+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE)) {
25538+ info.low_limit = 0x00110000UL;
25539+ info.high_limit = mm->start_code;
25540+
25541+#ifdef CONFIG_PAX_RANDMMAP
25542+ if (mm->pax_flags & MF_PAX_RANDMMAP)
25543+ info.low_limit += mm->delta_mmap & 0x03FFF000UL;
25544+#endif
25545+
25546+ if (info.low_limit < info.high_limit) {
25547+ addr = vm_unmapped_area(&info);
25548+ if (!IS_ERR_VALUE(addr))
25549+ return addr;
25550+ }
25551+ } else
25552+#endif
25553+
25554+ info.low_limit = mm->mmap_base;
25555+ info.high_limit = pax_task_size;
25556+
25557+ return vm_unmapped_area(&info);
25558+}
25559+
25560+unsigned long
25561+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
25562+ const unsigned long len, const unsigned long pgoff,
25563+ const unsigned long flags)
25564+{
25565+ struct vm_area_struct *vma;
25566+ struct mm_struct *mm = current->mm;
25567+ unsigned long addr = addr0, pax_task_size = TASK_SIZE;
25568+ struct vm_unmapped_area_info info;
25569+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
25570+
25571+#ifdef CONFIG_PAX_SEGMEXEC
25572+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
25573+ pax_task_size = SEGMEXEC_TASK_SIZE;
25574+#endif
25575+
25576+ pax_task_size -= PAGE_SIZE;
25577+
25578+ /* requested length too big for entire address space */
25579+ if (len > pax_task_size)
25580+ return -ENOMEM;
25581+
25582+ if (flags & MAP_FIXED)
25583+ return addr;
25584+
25585+#ifdef CONFIG_PAX_PAGEEXEC
25586+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
25587+ goto bottomup;
25588+#endif
25589+
25590+#ifdef CONFIG_PAX_RANDMMAP
25591+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
25592+#endif
25593+
25594+ /* requesting a specific address */
25595+ if (addr) {
25596+ addr = PAGE_ALIGN(addr);
25597+ if (pax_task_size - len >= addr) {
25598+ vma = find_vma(mm, addr);
25599+ if (check_heap_stack_gap(vma, addr, len, offset))
25600+ return addr;
25601+ }
25602+ }
25603+
25604+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
25605+ info.length = len;
25606+ info.low_limit = PAGE_SIZE;
25607+ info.high_limit = mm->mmap_base;
25608+ info.align_mask = filp ? get_align_mask() : 0;
25609+ info.align_offset = pgoff << PAGE_SHIFT;
25610+ info.threadstack_offset = offset;
25611+
25612+ addr = vm_unmapped_area(&info);
25613+ if (!(addr & ~PAGE_MASK))
25614+ return addr;
25615+ VM_BUG_ON(addr != -ENOMEM);
25616+
25617+bottomup:
25618+ /*
25619+ * A failed mmap() very likely causes application failure,
25620+ * so fall back to the bottom-up function here. This scenario
25621+ * can happen with large stack limits and large mmap()
25622+ * allocations.
25623+ */
25624+ return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
25625+}
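
The new sys_i386_32.c above leans on check_heap_stack_gap(), added elsewhere in this patch, when validating a caller-supplied address hint. A simplified sketch of the idea it is assumed to implement - accept a candidate range only if it leaves room before the next VMA:

	/* illustrative only; the real check_heap_stack_gap() lives elsewhere
	 * in this patch and also folds in the randomized thread-stack offset */
	static bool fits_below_vma(struct vm_area_struct *vma,
				   unsigned long addr, unsigned long len,
				   unsigned long gap)
	{
		if (!vma)
			return true;		/* nothing mapped above the hint */
		return addr + len + gap <= vma->vm_start;
	}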
25626diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
25627index 48f8375..ace2781 100644
25628--- a/arch/x86/kernel/sys_x86_64.c
25629+++ b/arch/x86/kernel/sys_x86_64.c
25630@@ -81,8 +81,8 @@ out:
25631 return error;
25632 }
25633
25634-static void find_start_end(unsigned long flags, unsigned long *begin,
25635- unsigned long *end)
25636+static void find_start_end(struct mm_struct *mm, unsigned long flags,
25637+ unsigned long *begin, unsigned long *end)
25638 {
25639 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
25640 unsigned long new_begin;
25641@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
25642 *begin = new_begin;
25643 }
25644 } else {
25645- *begin = mmap_legacy_base();
25646+ *begin = mm->mmap_base;
25647 *end = TASK_SIZE;
25648 }
25649 }
25650@@ -114,20 +114,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
25651 struct vm_area_struct *vma;
25652 struct vm_unmapped_area_info info;
25653 unsigned long begin, end;
25654+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
25655
25656 if (flags & MAP_FIXED)
25657 return addr;
25658
25659- find_start_end(flags, &begin, &end);
25660+ find_start_end(mm, flags, &begin, &end);
25661
25662 if (len > end)
25663 return -ENOMEM;
25664
25665+#ifdef CONFIG_PAX_RANDMMAP
25666+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
25667+#endif
25668+
25669 if (addr) {
25670 addr = PAGE_ALIGN(addr);
25671 vma = find_vma(mm, addr);
25672- if (end - len >= addr &&
25673- (!vma || addr + len <= vma->vm_start))
25674+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
25675 return addr;
25676 }
25677
25678@@ -137,6 +141,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
25679 info.high_limit = end;
25680 info.align_mask = filp ? get_align_mask() : 0;
25681 info.align_offset = pgoff << PAGE_SHIFT;
25682+ info.threadstack_offset = offset;
25683 return vm_unmapped_area(&info);
25684 }
25685
25686@@ -149,6 +154,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
25687 struct mm_struct *mm = current->mm;
25688 unsigned long addr = addr0;
25689 struct vm_unmapped_area_info info;
25690+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
25691
25692 /* requested length too big for entire address space */
25693 if (len > TASK_SIZE)
25694@@ -161,12 +167,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
25695 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
25696 goto bottomup;
25697
25698+#ifdef CONFIG_PAX_RANDMMAP
25699+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
25700+#endif
25701+
25702 /* requesting a specific address */
25703 if (addr) {
25704 addr = PAGE_ALIGN(addr);
25705 vma = find_vma(mm, addr);
25706- if (TASK_SIZE - len >= addr &&
25707- (!vma || addr + len <= vma->vm_start))
25708+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
25709 return addr;
25710 }
25711
25712@@ -176,6 +185,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
25713 info.high_limit = mm->mmap_base;
25714 info.align_mask = filp ? get_align_mask() : 0;
25715 info.align_offset = pgoff << PAGE_SHIFT;
25716+ info.threadstack_offset = offset;
25717 addr = vm_unmapped_area(&info);
25718 if (!(addr & ~PAGE_MASK))
25719 return addr;
25720diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
25721index f84fe00..f41d9f1 100644
25722--- a/arch/x86/kernel/tboot.c
25723+++ b/arch/x86/kernel/tboot.c
25724@@ -220,7 +220,7 @@ static int tboot_setup_sleep(void)
25725
25726 void tboot_shutdown(u32 shutdown_type)
25727 {
25728- void (*shutdown)(void);
25729+ void (* __noreturn shutdown)(void);
25730
25731 if (!tboot_enabled())
25732 return;
25733@@ -242,7 +242,7 @@ void tboot_shutdown(u32 shutdown_type)
25734
25735 switch_to_tboot_pt();
25736
25737- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
25738+ shutdown = (void *)tboot->shutdown_entry;
25739 shutdown();
25740
25741 /* should not reach here */
25742@@ -300,7 +300,7 @@ static int tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
25743 return 0;
25744 }
25745
25746-static atomic_t ap_wfs_count;
25747+static atomic_unchecked_t ap_wfs_count;
25748
25749 static int tboot_wait_for_aps(int num_aps)
25750 {
25751@@ -324,16 +324,16 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
25752 {
25753 switch (action) {
25754 case CPU_DYING:
25755- atomic_inc(&ap_wfs_count);
25756+ atomic_inc_unchecked(&ap_wfs_count);
25757 if (num_online_cpus() == 1)
25758- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
25759+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
25760 return NOTIFY_BAD;
25761 break;
25762 }
25763 return NOTIFY_OK;
25764 }
25765
25766-static struct notifier_block tboot_cpu_notifier __cpuinitdata =
25767+static struct notifier_block tboot_cpu_notifier =
25768 {
25769 .notifier_call = tboot_cpu_callback,
25770 };
25771@@ -345,7 +345,7 @@ static __init int tboot_late_init(void)
25772
25773 tboot_create_trampoline();
25774
25775- atomic_set(&ap_wfs_count, 0);
25776+ atomic_set_unchecked(&ap_wfs_count, 0);
25777 register_hotcpu_notifier(&tboot_cpu_notifier);
25778
25779 acpi_os_set_prepare_sleep(&tboot_sleep);
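
The atomic_unchecked_t conversions above are the flip side of PAX_REFCOUNT (visible in the atomic64 assembly later in this section): the checked atomics trap on signed overflow, so counters that are not object reference counts, such as ap_wfs_count, move to *_unchecked variants that are plain locked arithmetic. Conceptually, a sketch mirroring the usual x86 atomic_inc():

	typedef struct {
		int counter;
	} atomic_unchecked_t;

	static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
	{
		/* same locked increment as atomic_inc(), without the overflow trap */
		asm volatile("lock; incl %0" : "+m" (v->counter));
	}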
25780diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
25781index 24d3c91..d06b473 100644
25782--- a/arch/x86/kernel/time.c
25783+++ b/arch/x86/kernel/time.c
25784@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
25785 {
25786 unsigned long pc = instruction_pointer(regs);
25787
25788- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
25789+ if (!user_mode(regs) && in_lock_functions(pc)) {
25790 #ifdef CONFIG_FRAME_POINTER
25791- return *(unsigned long *)(regs->bp + sizeof(long));
25792+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
25793 #else
25794 unsigned long *sp =
25795 (unsigned long *)kernel_stack_pointer(regs);
25796@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
25797 * or above a saved flags. Eflags has bits 22-31 zero,
25798 * kernel addresses don't.
25799 */
25800+
25801+#ifdef CONFIG_PAX_KERNEXEC
25802+ return ktla_ktva(sp[0]);
25803+#else
25804 if (sp[0] >> 22)
25805 return sp[0];
25806 if (sp[1] >> 22)
25807 return sp[1];
25808 #endif
25809+
25810+#endif
25811 }
25812 return pc;
25813 }
25814diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
25815index f7fec09..9991981 100644
25816--- a/arch/x86/kernel/tls.c
25817+++ b/arch/x86/kernel/tls.c
25818@@ -84,6 +84,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
25819 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
25820 return -EINVAL;
25821
25822+#ifdef CONFIG_PAX_SEGMEXEC
25823+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
25824+ return -EINVAL;
25825+#endif
25826+
25827 set_tls_desc(p, idx, &info, 1);
25828
25829 return 0;
25830@@ -200,7 +205,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
25831
25832 if (kbuf)
25833 info = kbuf;
25834- else if (__copy_from_user(infobuf, ubuf, count))
25835+ else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
25836 return -EFAULT;
25837 else
25838 info = infobuf;
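
The tls.c change above bounds a user-controlled length against the on-stack destination before the raw copy, closing off a kernel stack overwrite when count exceeds the buffer. The pattern in isolation (a sketch; the names are illustrative):

	static int copy_bounded_from_user(void *dst, size_t dst_size,
					  const void __user *src, size_t count)
	{
		if (count > dst_size)
			return -EFAULT;	/* refuse lengths larger than the buffer */
		if (__copy_from_user(dst, src, count))
			return -EFAULT;
		return 0;
	}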
25839diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
25840index 772e2a8..bad5bf6 100644
25841--- a/arch/x86/kernel/traps.c
25842+++ b/arch/x86/kernel/traps.c
25843@@ -68,12 +68,6 @@
25844 #include <asm/setup.h>
25845
25846 asmlinkage int system_call(void);
25847-
25848-/*
25849- * The IDT has to be page-aligned to simplify the Pentium
25850- * F0 0F bug workaround.
25851- */
25852-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
25853 #endif
25854
25855 DECLARE_BITMAP(used_vectors, NR_VECTORS);
25856@@ -106,11 +100,11 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
25857 }
25858
25859 static int __kprobes
25860-do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
25861+do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
25862 struct pt_regs *regs, long error_code)
25863 {
25864 #ifdef CONFIG_X86_32
25865- if (regs->flags & X86_VM_MASK) {
25866+ if (v8086_mode(regs)) {
25867 /*
25868 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
25869 * On nmi (interrupt 2), do_trap should not be called.
25870@@ -123,12 +117,24 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
25871 return -1;
25872 }
25873 #endif
25874- if (!user_mode(regs)) {
25875+ if (!user_mode_novm(regs)) {
25876 if (!fixup_exception(regs)) {
25877 tsk->thread.error_code = error_code;
25878 tsk->thread.trap_nr = trapnr;
25879+
25880+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
25881+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
25882+ str = "PAX: suspicious stack segment fault";
25883+#endif
25884+
25885 die(str, regs, error_code);
25886 }
25887+
25888+#ifdef CONFIG_PAX_REFCOUNT
25889+ if (trapnr == 4)
25890+ pax_report_refcount_overflow(regs);
25891+#endif
25892+
25893 return 0;
25894 }
25895
25896@@ -136,7 +142,7 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
25897 }
25898
25899 static void __kprobes
25900-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
25901+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
25902 long error_code, siginfo_t *info)
25903 {
25904 struct task_struct *tsk = current;
25905@@ -160,7 +166,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
25906 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
25907 printk_ratelimit()) {
25908 pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
25909- tsk->comm, tsk->pid, str,
25910+ tsk->comm, task_pid_nr(tsk), str,
25911 regs->ip, regs->sp, error_code);
25912 print_vma_addr(" in ", regs->ip);
25913 pr_cont("\n");
25914@@ -273,7 +279,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
25915 conditional_sti(regs);
25916
25917 #ifdef CONFIG_X86_32
25918- if (regs->flags & X86_VM_MASK) {
25919+ if (v8086_mode(regs)) {
25920 local_irq_enable();
25921 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
25922 goto exit;
25923@@ -281,18 +287,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
25924 #endif
25925
25926 tsk = current;
25927- if (!user_mode(regs)) {
25928+ if (!user_mode_novm(regs)) {
25929 if (fixup_exception(regs))
25930 goto exit;
25931
25932 tsk->thread.error_code = error_code;
25933 tsk->thread.trap_nr = X86_TRAP_GP;
25934 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
25935- X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
25936+ X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) {
25937+
25938+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
25939+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
25940+ die("PAX: suspicious general protection fault", regs, error_code);
25941+ else
25942+#endif
25943+
25944 die("general protection fault", regs, error_code);
25945+ }
25946 goto exit;
25947 }
25948
25949+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
25950+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
25951+ struct mm_struct *mm = tsk->mm;
25952+ unsigned long limit;
25953+
25954+ down_write(&mm->mmap_sem);
25955+ limit = mm->context.user_cs_limit;
25956+ if (limit < TASK_SIZE) {
25957+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
25958+ up_write(&mm->mmap_sem);
25959+ return;
25960+ }
25961+ up_write(&mm->mmap_sem);
25962+ }
25963+#endif
25964+
25965 tsk->thread.error_code = error_code;
25966 tsk->thread.trap_nr = X86_TRAP_GP;
25967
25968@@ -450,7 +480,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
25969 /* It's safe to allow irq's after DR6 has been saved */
25970 preempt_conditional_sti(regs);
25971
25972- if (regs->flags & X86_VM_MASK) {
25973+ if (v8086_mode(regs)) {
25974 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
25975 X86_TRAP_DB);
25976 preempt_conditional_cli(regs);
25977@@ -465,7 +495,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
25978 * We already checked v86 mode above, so we can check for kernel mode
25979 * by just checking the CPL of CS.
25980 */
25981- if ((dr6 & DR_STEP) && !user_mode(regs)) {
25982+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
25983 tsk->thread.debugreg6 &= ~DR_STEP;
25984 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
25985 regs->flags &= ~X86_EFLAGS_TF;
25986@@ -497,7 +527,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
25987 return;
25988 conditional_sti(regs);
25989
25990- if (!user_mode_vm(regs))
25991+ if (!user_mode(regs))
25992 {
25993 if (!fixup_exception(regs)) {
25994 task->thread.error_code = error_code;
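
On the user_mode()/user_mode_novm() and v8086_mode() swaps above: the patch renames the mode helpers (in its ptrace.h hunks) so that the short name is the conservative check that also counts vm86 as user mode, while user_mode_novm() keeps the old CS-privilege-only test. Assuming the stock 3.10 32-bit definitions, the semantics amount to:

	/* EFLAGS.VM set: the CPU is executing in virtual-8086 mode */
	static inline int v8086_mode_sk(struct pt_regs *regs)
	{
		return regs->flags & X86_VM_MASK;
	}

	/* ring-3 code segment; vm86 deliberately not counted */
	static inline int user_mode_novm_sk(struct pt_regs *regs)
	{
		return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
	}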
25995diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
25996index 2ed8459..7cf329f 100644
25997--- a/arch/x86/kernel/uprobes.c
25998+++ b/arch/x86/kernel/uprobes.c
25999@@ -629,7 +629,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
26000 int ret = NOTIFY_DONE;
26001
26002 /* We are only interested in userspace traps */
26003- if (regs && !user_mode_vm(regs))
26004+ if (regs && !user_mode(regs))
26005 return NOTIFY_DONE;
26006
26007 switch (val) {
26008@@ -719,7 +719,7 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs
26009
26010 if (ncopied != rasize) {
26011 pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, "
26012- "%%ip=%#lx\n", current->pid, regs->sp, regs->ip);
26013+ "%%ip=%#lx\n", task_pid_nr(current), regs->sp, regs->ip);
26014
26015 force_sig_info(SIGSEGV, SEND_SIG_FORCED, current);
26016 }
26017diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
26018index b9242ba..50c5edd 100644
26019--- a/arch/x86/kernel/verify_cpu.S
26020+++ b/arch/x86/kernel/verify_cpu.S
26021@@ -20,6 +20,7 @@
26022 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
26023 * arch/x86/kernel/trampoline_64.S: secondary processor verification
26024 * arch/x86/kernel/head_32.S: processor startup
26025+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
26026 *
26027 * verify_cpu, returns the status of longmode and SSE in register %eax.
26028 * 0: Success 1: Failure
26029diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
26030index e8edcf5..27f9344 100644
26031--- a/arch/x86/kernel/vm86_32.c
26032+++ b/arch/x86/kernel/vm86_32.c
26033@@ -44,6 +44,7 @@
26034 #include <linux/ptrace.h>
26035 #include <linux/audit.h>
26036 #include <linux/stddef.h>
26037+#include <linux/grsecurity.h>
26038
26039 #include <asm/uaccess.h>
26040 #include <asm/io.h>
26041@@ -150,7 +151,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
26042 do_exit(SIGSEGV);
26043 }
26044
26045- tss = &per_cpu(init_tss, get_cpu());
26046+ tss = init_tss + get_cpu();
26047 current->thread.sp0 = current->thread.saved_sp0;
26048 current->thread.sysenter_cs = __KERNEL_CS;
26049 load_sp0(tss, &current->thread);
26050@@ -214,6 +215,14 @@ SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
26051
26052 if (tsk->thread.saved_sp0)
26053 return -EPERM;
26054+
26055+#ifdef CONFIG_GRKERNSEC_VM86
26056+ if (!capable(CAP_SYS_RAWIO)) {
26057+ gr_handle_vm86();
26058+ return -EPERM;
26059+ }
26060+#endif
26061+
26062 tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
26063 offsetof(struct kernel_vm86_struct, vm86plus) -
26064 sizeof(info.regs));
26065@@ -238,6 +247,13 @@ SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
26066 int tmp;
26067 struct vm86plus_struct __user *v86;
26068
26069+#ifdef CONFIG_GRKERNSEC_VM86
26070+ if (!capable(CAP_SYS_RAWIO)) {
26071+ gr_handle_vm86();
26072+ return -EPERM;
26073+ }
26074+#endif
26075+
26076 tsk = current;
26077 switch (cmd) {
26078 case VM86_REQUEST_IRQ:
26079@@ -318,7 +334,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
26080 tsk->thread.saved_fs = info->regs32->fs;
26081 tsk->thread.saved_gs = get_user_gs(info->regs32);
26082
26083- tss = &per_cpu(init_tss, get_cpu());
26084+ tss = init_tss + get_cpu();
26085 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
26086 if (cpu_has_sep)
26087 tsk->thread.sysenter_cs = 0;
26088@@ -525,7 +541,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
26089 goto cannot_handle;
26090 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
26091 goto cannot_handle;
26092- intr_ptr = (unsigned long __user *) (i << 2);
26093+ intr_ptr = (__force unsigned long __user *) (i << 2);
26094 if (get_user(segoffs, intr_ptr))
26095 goto cannot_handle;
26096 if ((segoffs >> 16) == BIOSSEG)
26097diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
26098index 10c4f30..57377c2 100644
26099--- a/arch/x86/kernel/vmlinux.lds.S
26100+++ b/arch/x86/kernel/vmlinux.lds.S
26101@@ -26,6 +26,13 @@
26102 #include <asm/page_types.h>
26103 #include <asm/cache.h>
26104 #include <asm/boot.h>
26105+#include <asm/segment.h>
26106+
26107+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26108+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
26109+#else
26110+#define __KERNEL_TEXT_OFFSET 0
26111+#endif
26112
26113 #undef i386 /* in case the preprocessor is a 32bit one */
26114
26115@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
26116
26117 PHDRS {
26118 text PT_LOAD FLAGS(5); /* R_E */
26119+#ifdef CONFIG_X86_32
26120+ module PT_LOAD FLAGS(5); /* R_E */
26121+#endif
26122+#ifdef CONFIG_XEN
26123+ rodata PT_LOAD FLAGS(5); /* R_E */
26124+#else
26125+ rodata PT_LOAD FLAGS(4); /* R__ */
26126+#endif
26127 data PT_LOAD FLAGS(6); /* RW_ */
26128-#ifdef CONFIG_X86_64
26129+ init.begin PT_LOAD FLAGS(6); /* RW_ */
26130 #ifdef CONFIG_SMP
26131 percpu PT_LOAD FLAGS(6); /* RW_ */
26132 #endif
26133+ text.init PT_LOAD FLAGS(5); /* R_E */
26134+ text.exit PT_LOAD FLAGS(5); /* R_E */
26135 init PT_LOAD FLAGS(7); /* RWE */
26136-#endif
26137 note PT_NOTE FLAGS(0); /* ___ */
26138 }
26139
26140 SECTIONS
26141 {
26142 #ifdef CONFIG_X86_32
26143- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
26144- phys_startup_32 = startup_32 - LOAD_OFFSET;
26145+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
26146 #else
26147- . = __START_KERNEL;
26148- phys_startup_64 = startup_64 - LOAD_OFFSET;
26149+ . = __START_KERNEL;
26150 #endif
26151
26152 /* Text and read-only data */
26153- .text : AT(ADDR(.text) - LOAD_OFFSET) {
26154- _text = .;
26155+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
26156 /* bootstrapping code */
26157+#ifdef CONFIG_X86_32
26158+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
26159+#else
26160+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
26161+#endif
26162+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
26163+ _text = .;
26164 HEAD_TEXT
26165 . = ALIGN(8);
26166 _stext = .;
26167@@ -104,13 +124,48 @@ SECTIONS
26168 IRQENTRY_TEXT
26169 *(.fixup)
26170 *(.gnu.warning)
26171- /* End of text section */
26172- _etext = .;
26173 } :text = 0x9090
26174
26175- NOTES :text :note
26176+ . += __KERNEL_TEXT_OFFSET;
26177
26178- EXCEPTION_TABLE(16) :text = 0x9090
26179+#ifdef CONFIG_X86_32
26180+ . = ALIGN(PAGE_SIZE);
26181+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
26182+
26183+#ifdef CONFIG_PAX_KERNEXEC
26184+ MODULES_EXEC_VADDR = .;
26185+ BYTE(0)
26186+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
26187+ . = ALIGN(HPAGE_SIZE) - 1;
26188+ MODULES_EXEC_END = .;
26189+#endif
26190+
26191+ } :module
26192+#endif
26193+
26194+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
26195+ /* End of text section */
26196+ BYTE(0)
26197+ _etext = . - __KERNEL_TEXT_OFFSET;
26198+ }
26199+
26200+#ifdef CONFIG_X86_32
26201+ . = ALIGN(PAGE_SIZE);
26202+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
26203+ *(.idt)
26204+ . = ALIGN(PAGE_SIZE);
26205+ *(.empty_zero_page)
26206+ *(.initial_pg_fixmap)
26207+ *(.initial_pg_pmd)
26208+ *(.initial_page_table)
26209+ *(.swapper_pg_dir)
26210+ } :rodata
26211+#endif
26212+
26213+ . = ALIGN(PAGE_SIZE);
26214+ NOTES :rodata :note
26215+
26216+ EXCEPTION_TABLE(16) :rodata
26217
26218 #if defined(CONFIG_DEBUG_RODATA)
26219 /* .text should occupy whole number of pages */
26220@@ -122,16 +177,20 @@ SECTIONS
26221
26222 /* Data */
26223 .data : AT(ADDR(.data) - LOAD_OFFSET) {
26224+
26225+#ifdef CONFIG_PAX_KERNEXEC
26226+ . = ALIGN(HPAGE_SIZE);
26227+#else
26228+ . = ALIGN(PAGE_SIZE);
26229+#endif
26230+
26231 /* Start of data section */
26232 _sdata = .;
26233
26234 /* init_task */
26235 INIT_TASK_DATA(THREAD_SIZE)
26236
26237-#ifdef CONFIG_X86_32
26238- /* 32 bit has nosave before _edata */
26239 NOSAVE_DATA
26240-#endif
26241
26242 PAGE_ALIGNED_DATA(PAGE_SIZE)
26243
26244@@ -172,12 +231,19 @@ SECTIONS
26245 #endif /* CONFIG_X86_64 */
26246
26247 /* Init code and data - will be freed after init */
26248- . = ALIGN(PAGE_SIZE);
26249 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
26250+ BYTE(0)
26251+
26252+#ifdef CONFIG_PAX_KERNEXEC
26253+ . = ALIGN(HPAGE_SIZE);
26254+#else
26255+ . = ALIGN(PAGE_SIZE);
26256+#endif
26257+
26258 __init_begin = .; /* paired with __init_end */
26259- }
26260+ } :init.begin
26261
26262-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
26263+#ifdef CONFIG_SMP
26264 /*
26265 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
26266 * output PHDR, so the next output section - .init.text - should
26267@@ -186,12 +252,27 @@ SECTIONS
26268 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
26269 #endif
26270
26271- INIT_TEXT_SECTION(PAGE_SIZE)
26272-#ifdef CONFIG_X86_64
26273- :init
26274-#endif
26275+ . = ALIGN(PAGE_SIZE);
26276+ init_begin = .;
26277+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
26278+ VMLINUX_SYMBOL(_sinittext) = .;
26279+ INIT_TEXT
26280+ VMLINUX_SYMBOL(_einittext) = .;
26281+ . = ALIGN(PAGE_SIZE);
26282+ } :text.init
26283
26284- INIT_DATA_SECTION(16)
26285+ /*
26286+ * .exit.text is discard at runtime, not link time, to deal with
26287+ * references from .altinstructions and .eh_frame
26288+ */
26289+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
26290+ EXIT_TEXT
26291+ . = ALIGN(16);
26292+ } :text.exit
26293+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
26294+
26295+ . = ALIGN(PAGE_SIZE);
26296+ INIT_DATA_SECTION(16) :init
26297
26298 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
26299 __x86_cpu_dev_start = .;
26300@@ -253,19 +334,12 @@ SECTIONS
26301 }
26302
26303 . = ALIGN(8);
26304- /*
26305- * .exit.text is discard at runtime, not link time, to deal with
26306- * references from .altinstructions and .eh_frame
26307- */
26308- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
26309- EXIT_TEXT
26310- }
26311
26312 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
26313 EXIT_DATA
26314 }
26315
26316-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
26317+#ifndef CONFIG_SMP
26318 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
26319 #endif
26320
26321@@ -284,16 +358,10 @@ SECTIONS
26322 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
26323 __smp_locks = .;
26324 *(.smp_locks)
26325- . = ALIGN(PAGE_SIZE);
26326 __smp_locks_end = .;
26327+ . = ALIGN(PAGE_SIZE);
26328 }
26329
26330-#ifdef CONFIG_X86_64
26331- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
26332- NOSAVE_DATA
26333- }
26334-#endif
26335-
26336 /* BSS */
26337 . = ALIGN(PAGE_SIZE);
26338 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
26339@@ -309,6 +377,7 @@ SECTIONS
26340 __brk_base = .;
26341 . += 64 * 1024; /* 64k alignment slop space */
26342 *(.brk_reservation) /* areas brk users have reserved */
26343+ . = ALIGN(HPAGE_SIZE);
26344 __brk_limit = .;
26345 }
26346
26347@@ -335,13 +404,12 @@ SECTIONS
26348 * for the boot processor.
26349 */
26350 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
26351-INIT_PER_CPU(gdt_page);
26352 INIT_PER_CPU(irq_stack_union);
26353
26354 /*
26355 * Build-time check on the image size:
26356 */
26357-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
26358+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
26359 "kernel image bigger than KERNEL_IMAGE_SIZE");
26360
26361 #ifdef CONFIG_SMP
26362diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
26363index 9a907a6..f83f921 100644
26364--- a/arch/x86/kernel/vsyscall_64.c
26365+++ b/arch/x86/kernel/vsyscall_64.c
26366@@ -56,15 +56,13 @@
26367 DEFINE_VVAR(int, vgetcpu_mode);
26368 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
26369
26370-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
26371+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
26372
26373 static int __init vsyscall_setup(char *str)
26374 {
26375 if (str) {
26376 if (!strcmp("emulate", str))
26377 vsyscall_mode = EMULATE;
26378- else if (!strcmp("native", str))
26379- vsyscall_mode = NATIVE;
26380 else if (!strcmp("none", str))
26381 vsyscall_mode = NONE;
26382 else
26383@@ -323,8 +321,7 @@ do_ret:
26384 return true;
26385
26386 sigsegv:
26387- force_sig(SIGSEGV, current);
26388- return true;
26389+ do_group_exit(SIGKILL);
26390 }
26391
26392 /*
26393@@ -377,10 +374,7 @@ void __init map_vsyscall(void)
26394 extern char __vvar_page;
26395 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
26396
26397- __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
26398- vsyscall_mode == NATIVE
26399- ? PAGE_KERNEL_VSYSCALL
26400- : PAGE_KERNEL_VVAR);
26401+ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
26402 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
26403 (unsigned long)VSYSCALL_START);
26404
26405diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
26406index b014d94..e775258 100644
26407--- a/arch/x86/kernel/x8664_ksyms_64.c
26408+++ b/arch/x86/kernel/x8664_ksyms_64.c
26409@@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
26410 EXPORT_SYMBOL(copy_user_generic_unrolled);
26411 EXPORT_SYMBOL(copy_user_enhanced_fast_string);
26412 EXPORT_SYMBOL(__copy_user_nocache);
26413-EXPORT_SYMBOL(_copy_from_user);
26414-EXPORT_SYMBOL(_copy_to_user);
26415
26416 EXPORT_SYMBOL(copy_page);
26417 EXPORT_SYMBOL(clear_page);
26418@@ -66,3 +64,7 @@ EXPORT_SYMBOL(empty_zero_page);
26419 #ifndef CONFIG_PARAVIRT
26420 EXPORT_SYMBOL(native_load_gs_index);
26421 #endif
26422+
26423+#ifdef CONFIG_PAX_PER_CPU_PGD
26424+EXPORT_SYMBOL(cpu_pgd);
26425+#endif
26426diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
26427index 45a14db..075bb9b 100644
26428--- a/arch/x86/kernel/x86_init.c
26429+++ b/arch/x86/kernel/x86_init.c
26430@@ -85,7 +85,7 @@ struct x86_init_ops x86_init __initdata = {
26431 },
26432 };
26433
26434-struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
26435+struct x86_cpuinit_ops x86_cpuinit __cpuinitconst = {
26436 .early_percpu_clock_init = x86_init_noop,
26437 .setup_percpu_clockev = setup_secondary_APIC_clock,
26438 };
26439@@ -93,7 +93,7 @@ struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
26440 static void default_nmi_init(void) { };
26441 static int default_i8042_detect(void) { return 1; };
26442
26443-struct x86_platform_ops x86_platform = {
26444+struct x86_platform_ops x86_platform __read_only = {
26445 .calibrate_tsc = native_calibrate_tsc,
26446 .get_wallclock = mach_get_cmos_time,
26447 .set_wallclock = mach_set_rtc_mmss,
26448@@ -107,7 +107,7 @@ struct x86_platform_ops x86_platform = {
26449 };
26450
26451 EXPORT_SYMBOL_GPL(x86_platform);
26452-struct x86_msi_ops x86_msi = {
26453+struct x86_msi_ops x86_msi __read_only = {
26454 .setup_msi_irqs = native_setup_msi_irqs,
26455 .compose_msi_msg = native_compose_msi_msg,
26456 .teardown_msi_irq = native_teardown_msi_irq,
26457@@ -116,7 +116,7 @@ struct x86_msi_ops x86_msi = {
26458 .setup_hpet_msi = default_setup_hpet_msi,
26459 };
26460
26461-struct x86_io_apic_ops x86_io_apic_ops = {
26462+struct x86_io_apic_ops x86_io_apic_ops __read_only = {
26463 .init = native_io_apic_init_mappings,
26464 .read = native_io_apic_read,
26465 .write = native_io_apic_write,
26466diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
26467index ada87a3..afea76d 100644
26468--- a/arch/x86/kernel/xsave.c
26469+++ b/arch/x86/kernel/xsave.c
26470@@ -199,6 +199,7 @@ static inline int save_user_xstate(struct xsave_struct __user *buf)
26471 {
26472 int err;
26473
26474+ buf = (struct xsave_struct __user *)____m(buf);
26475 if (use_xsave())
26476 err = xsave_user(buf);
26477 else if (use_fxsr())
26478@@ -311,6 +312,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
26479 */
26480 static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
26481 {
26482+ buf = (void __user *)____m(buf);
26483 if (use_xsave()) {
26484 if ((unsigned long)buf % 64 || fx_only) {
26485 u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
26486diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
26487index a20ecb5..d0e2194 100644
26488--- a/arch/x86/kvm/cpuid.c
26489+++ b/arch/x86/kvm/cpuid.c
26490@@ -124,15 +124,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
26491 struct kvm_cpuid2 *cpuid,
26492 struct kvm_cpuid_entry2 __user *entries)
26493 {
26494- int r;
26495+ int r, i;
26496
26497 r = -E2BIG;
26498 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
26499 goto out;
26500 r = -EFAULT;
26501- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
26502- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
26503+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
26504 goto out;
26505+ for (i = 0; i < cpuid->nent; ++i) {
26506+ struct kvm_cpuid_entry2 cpuid_entry;
26507+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
26508+ goto out;
26509+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
26510+ }
26511 vcpu->arch.cpuid_nent = cpuid->nent;
26512 kvm_apic_set_version(vcpu);
26513 kvm_x86_ops->cpuid_update(vcpu);
26514@@ -147,15 +152,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
26515 struct kvm_cpuid2 *cpuid,
26516 struct kvm_cpuid_entry2 __user *entries)
26517 {
26518- int r;
26519+ int r, i;
26520
26521 r = -E2BIG;
26522 if (cpuid->nent < vcpu->arch.cpuid_nent)
26523 goto out;
26524 r = -EFAULT;
26525- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
26526- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
26527+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
26528 goto out;
26529+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
26530+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
26531+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
26532+ goto out;
26533+ }
26534 return 0;
26535
26536 out:
26537diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
26538index 5953dce..f11a7d2 100644
26539--- a/arch/x86/kvm/emulate.c
26540+++ b/arch/x86/kvm/emulate.c
26541@@ -329,6 +329,7 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
26542
26543 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
26544 do { \
26545+ unsigned long _tmp; \
26546 __asm__ __volatile__ ( \
26547 _PRE_EFLAGS("0", "4", "2") \
26548 _op _suffix " %"_x"3,%1; " \
26549@@ -343,8 +344,6 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
26550 /* Raw emulation: instruction has two explicit operands. */
26551 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
26552 do { \
26553- unsigned long _tmp; \
26554- \
26555 switch ((ctxt)->dst.bytes) { \
26556 case 2: \
26557 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
26558@@ -360,7 +359,6 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
26559
26560 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
26561 do { \
26562- unsigned long _tmp; \
26563 switch ((ctxt)->dst.bytes) { \
26564 case 1: \
26565 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
26566diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
26567index 0eee2c8..94a32c3 100644
26568--- a/arch/x86/kvm/lapic.c
26569+++ b/arch/x86/kvm/lapic.c
26570@@ -55,7 +55,7 @@
26571 #define APIC_BUS_CYCLE_NS 1
26572
26573 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
26574-#define apic_debug(fmt, arg...)
26575+#define apic_debug(fmt, arg...) do {} while (0)
26576
26577 #define APIC_LVT_NUM 6
26578 /* 14 is the version for Xeon and Pentium 8.4.8*/
26579diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
26580index da20860..d19fdf5 100644
26581--- a/arch/x86/kvm/paging_tmpl.h
26582+++ b/arch/x86/kvm/paging_tmpl.h
26583@@ -208,7 +208,7 @@ retry_walk:
26584 if (unlikely(kvm_is_error_hva(host_addr)))
26585 goto error;
26586
26587- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
26588+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
26589 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
26590 goto error;
26591 walker->ptep_user[walker->level - 1] = ptep_user;
26592diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
26593index a14a6ea..dc86cf0 100644
26594--- a/arch/x86/kvm/svm.c
26595+++ b/arch/x86/kvm/svm.c
26596@@ -3493,7 +3493,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
26597 int cpu = raw_smp_processor_id();
26598
26599 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
26600+
26601+ pax_open_kernel();
26602 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
26603+ pax_close_kernel();
26604+
26605 load_TR_desc();
26606 }
26607
26608@@ -3894,6 +3898,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
26609 #endif
26610 #endif
26611
26612+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
26613+ __set_fs(current_thread_info()->addr_limit);
26614+#endif
26615+
26616 reload_tss(vcpu);
26617
26618 local_irq_disable();
26619diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
26620index 5402c94..c3bdeee 100644
26621--- a/arch/x86/kvm/vmx.c
26622+++ b/arch/x86/kvm/vmx.c
26623@@ -1311,12 +1311,12 @@ static void vmcs_write64(unsigned long field, u64 value)
26624 #endif
26625 }
26626
26627-static void vmcs_clear_bits(unsigned long field, u32 mask)
26628+static void vmcs_clear_bits(unsigned long field, unsigned long mask)
26629 {
26630 vmcs_writel(field, vmcs_readl(field) & ~mask);
26631 }
26632
26633-static void vmcs_set_bits(unsigned long field, u32 mask)
26634+static void vmcs_set_bits(unsigned long field, unsigned long mask)
26635 {
26636 vmcs_writel(field, vmcs_readl(field) | mask);
26637 }
26638@@ -1517,7 +1517,11 @@ static void reload_tss(void)
26639 struct desc_struct *descs;
26640
26641 descs = (void *)gdt->address;
26642+
26643+ pax_open_kernel();
26644 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
26645+ pax_close_kernel();
26646+
26647 load_TR_desc();
26648 }
26649
26650@@ -1741,6 +1745,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
26651 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
26652 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
26653
26654+#ifdef CONFIG_PAX_PER_CPU_PGD
26655+ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
26656+#endif
26657+
26658 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
26659 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
26660 vmx->loaded_vmcs->cpu = cpu;
26661@@ -2935,8 +2943,11 @@ static __init int hardware_setup(void)
26662 if (!cpu_has_vmx_flexpriority())
26663 flexpriority_enabled = 0;
26664
26665- if (!cpu_has_vmx_tpr_shadow())
26666- kvm_x86_ops->update_cr8_intercept = NULL;
26667+ if (!cpu_has_vmx_tpr_shadow()) {
26668+ pax_open_kernel();
26669+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
26670+ pax_close_kernel();
26671+ }
26672
26673 if (enable_ept && !cpu_has_vmx_ept_2m_page())
26674 kvm_disable_largepages();
26675@@ -2947,13 +2958,15 @@ static __init int hardware_setup(void)
26676 if (!cpu_has_vmx_apicv())
26677 enable_apicv = 0;
26678
26679+ pax_open_kernel();
26680 if (enable_apicv)
26681- kvm_x86_ops->update_cr8_intercept = NULL;
26682+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
26683 else {
26684- kvm_x86_ops->hwapic_irr_update = NULL;
26685- kvm_x86_ops->deliver_posted_interrupt = NULL;
26686- kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
26687+ *(void **)&kvm_x86_ops->hwapic_irr_update = NULL;
26688+ *(void **)&kvm_x86_ops->deliver_posted_interrupt = NULL;
26689+ *(void **)&kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
26690 }
26691+ pax_close_kernel();
26692
26693 if (nested)
26694 nested_vmx_setup_ctls_msrs();
26695@@ -4076,7 +4089,10 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
26696
26697 vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
26698 vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
26699+
26700+#ifndef CONFIG_PAX_PER_CPU_PGD
26701 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
26702+#endif
26703
26704 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
26705 #ifdef CONFIG_X86_64
26706@@ -4098,7 +4114,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
26707 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
26708 vmx->host_idt_base = dt.address;
26709
26710- vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
26711+ vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */
26712
26713 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
26714 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
26715@@ -7030,6 +7046,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
26716 "jmp 2f \n\t"
26717 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
26718 "2: "
26719+
26720+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26721+ "ljmp %[cs],$3f\n\t"
26722+ "3: "
26723+#endif
26724+
26725 /* Save guest registers, load host registers, keep flags */
26726 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
26727 "pop %0 \n\t"
26728@@ -7082,6 +7104,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
26729 #endif
26730 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
26731 [wordsize]"i"(sizeof(ulong))
26732+
26733+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26734+ ,[cs]"i"(__KERNEL_CS)
26735+#endif
26736+
26737 : "cc", "memory"
26738 #ifdef CONFIG_X86_64
26739 , "rax", "rbx", "rdi", "rsi"
26740@@ -7095,7 +7122,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
26741 if (debugctlmsr)
26742 update_debugctlmsr(debugctlmsr);
26743
26744-#ifndef CONFIG_X86_64
26745+#ifdef CONFIG_X86_32
26746 /*
26747 * The sysexit path does not restore ds/es, so we must set them to
26748 * a reasonable value ourselves.
26749@@ -7104,8 +7131,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
26750 * may be executed in interrupt context, which saves and restore segments
26751 * around it, nullifying its effect.
26752 */
26753- loadsegment(ds, __USER_DS);
26754- loadsegment(es, __USER_DS);
26755+ loadsegment(ds, __KERNEL_DS);
26756+ loadsegment(es, __KERNEL_DS);
26757+ loadsegment(ss, __KERNEL_DS);
26758+
26759+#ifdef CONFIG_PAX_KERNEXEC
26760+ loadsegment(fs, __KERNEL_PERCPU);
26761+#endif
26762+
26763+#ifdef CONFIG_PAX_MEMORY_UDEREF
26764+ __set_fs(current_thread_info()->addr_limit);
26765+#endif
26766+
26767 #endif
26768
26769 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
26770diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
26771index e8ba99c..ee9d7d9 100644
26772--- a/arch/x86/kvm/x86.c
26773+++ b/arch/x86/kvm/x86.c
26774@@ -1725,8 +1725,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
26775 {
26776 struct kvm *kvm = vcpu->kvm;
26777 int lm = is_long_mode(vcpu);
26778- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
26779- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
26780+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
26781+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
26782 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
26783 : kvm->arch.xen_hvm_config.blob_size_32;
26784 u32 page_num = data & ~PAGE_MASK;
26785@@ -2609,6 +2609,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
26786 if (n < msr_list.nmsrs)
26787 goto out;
26788 r = -EFAULT;
26789+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
26790+ goto out;
26791 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
26792 num_msrs_to_save * sizeof(u32)))
26793 goto out;
26794@@ -5297,7 +5299,7 @@ static struct notifier_block pvclock_gtod_notifier = {
26795 };
26796 #endif
26797
26798-int kvm_arch_init(void *opaque)
26799+int kvm_arch_init(const void *opaque)
26800 {
26801 int r;
26802 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
26803diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
26804index 7114c63..a1018fc 100644
26805--- a/arch/x86/lguest/boot.c
26806+++ b/arch/x86/lguest/boot.c
26807@@ -1201,9 +1201,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
26808 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
26809 * Launcher to reboot us.
26810 */
26811-static void lguest_restart(char *reason)
26812+static __noreturn void lguest_restart(char *reason)
26813 {
26814 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
26815+ BUG();
26816 }
26817
26818 /*G:050
26819diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
26820index 00933d5..3a64af9 100644
26821--- a/arch/x86/lib/atomic64_386_32.S
26822+++ b/arch/x86/lib/atomic64_386_32.S
26823@@ -48,6 +48,10 @@ BEGIN(read)
26824 movl (v), %eax
26825 movl 4(v), %edx
26826 RET_ENDP
26827+BEGIN(read_unchecked)
26828+ movl (v), %eax
26829+ movl 4(v), %edx
26830+RET_ENDP
26831 #undef v
26832
26833 #define v %esi
26834@@ -55,6 +59,10 @@ BEGIN(set)
26835 movl %ebx, (v)
26836 movl %ecx, 4(v)
26837 RET_ENDP
26838+BEGIN(set_unchecked)
26839+ movl %ebx, (v)
26840+ movl %ecx, 4(v)
26841+RET_ENDP
26842 #undef v
26843
26844 #define v %esi
26845@@ -70,6 +78,20 @@ RET_ENDP
26846 BEGIN(add)
26847 addl %eax, (v)
26848 adcl %edx, 4(v)
26849+
26850+#ifdef CONFIG_PAX_REFCOUNT
26851+ jno 0f
26852+ subl %eax, (v)
26853+ sbbl %edx, 4(v)
26854+ int $4
26855+0:
26856+ _ASM_EXTABLE(0b, 0b)
26857+#endif
26858+
26859+RET_ENDP
26860+BEGIN(add_unchecked)
26861+ addl %eax, (v)
26862+ adcl %edx, 4(v)
26863 RET_ENDP
26864 #undef v
26865
26866@@ -77,6 +99,24 @@ RET_ENDP
26867 BEGIN(add_return)
26868 addl (v), %eax
26869 adcl 4(v), %edx
26870+
26871+#ifdef CONFIG_PAX_REFCOUNT
26872+ into
26873+1234:
26874+ _ASM_EXTABLE(1234b, 2f)
26875+#endif
26876+
26877+ movl %eax, (v)
26878+ movl %edx, 4(v)
26879+
26880+#ifdef CONFIG_PAX_REFCOUNT
26881+2:
26882+#endif
26883+
26884+RET_ENDP
26885+BEGIN(add_return_unchecked)
26886+ addl (v), %eax
26887+ adcl 4(v), %edx
26888 movl %eax, (v)
26889 movl %edx, 4(v)
26890 RET_ENDP
26891@@ -86,6 +126,20 @@ RET_ENDP
26892 BEGIN(sub)
26893 subl %eax, (v)
26894 sbbl %edx, 4(v)
26895+
26896+#ifdef CONFIG_PAX_REFCOUNT
26897+ jno 0f
26898+ addl %eax, (v)
26899+ adcl %edx, 4(v)
26900+ int $4
26901+0:
26902+ _ASM_EXTABLE(0b, 0b)
26903+#endif
26904+
26905+RET_ENDP
26906+BEGIN(sub_unchecked)
26907+ subl %eax, (v)
26908+ sbbl %edx, 4(v)
26909 RET_ENDP
26910 #undef v
26911
26912@@ -96,6 +150,27 @@ BEGIN(sub_return)
26913 sbbl $0, %edx
26914 addl (v), %eax
26915 adcl 4(v), %edx
26916+
26917+#ifdef CONFIG_PAX_REFCOUNT
26918+ into
26919+1234:
26920+ _ASM_EXTABLE(1234b, 2f)
26921+#endif
26922+
26923+ movl %eax, (v)
26924+ movl %edx, 4(v)
26925+
26926+#ifdef CONFIG_PAX_REFCOUNT
26927+2:
26928+#endif
26929+
26930+RET_ENDP
26931+BEGIN(sub_return_unchecked)
26932+ negl %edx
26933+ negl %eax
26934+ sbbl $0, %edx
26935+ addl (v), %eax
26936+ adcl 4(v), %edx
26937 movl %eax, (v)
26938 movl %edx, 4(v)
26939 RET_ENDP
26940@@ -105,6 +180,20 @@ RET_ENDP
26941 BEGIN(inc)
26942 addl $1, (v)
26943 adcl $0, 4(v)
26944+
26945+#ifdef CONFIG_PAX_REFCOUNT
26946+ jno 0f
26947+ subl $1, (v)
26948+ sbbl $0, 4(v)
26949+ int $4
26950+0:
26951+ _ASM_EXTABLE(0b, 0b)
26952+#endif
26953+
26954+RET_ENDP
26955+BEGIN(inc_unchecked)
26956+ addl $1, (v)
26957+ adcl $0, 4(v)
26958 RET_ENDP
26959 #undef v
26960
26961@@ -114,6 +203,26 @@ BEGIN(inc_return)
26962 movl 4(v), %edx
26963 addl $1, %eax
26964 adcl $0, %edx
26965+
26966+#ifdef CONFIG_PAX_REFCOUNT
26967+ into
26968+1234:
26969+ _ASM_EXTABLE(1234b, 2f)
26970+#endif
26971+
26972+ movl %eax, (v)
26973+ movl %edx, 4(v)
26974+
26975+#ifdef CONFIG_PAX_REFCOUNT
26976+2:
26977+#endif
26978+
26979+RET_ENDP
26980+BEGIN(inc_return_unchecked)
26981+ movl (v), %eax
26982+ movl 4(v), %edx
26983+ addl $1, %eax
26984+ adcl $0, %edx
26985 movl %eax, (v)
26986 movl %edx, 4(v)
26987 RET_ENDP
26988@@ -123,6 +232,20 @@ RET_ENDP
26989 BEGIN(dec)
26990 subl $1, (v)
26991 sbbl $0, 4(v)
26992+
26993+#ifdef CONFIG_PAX_REFCOUNT
26994+ jno 0f
26995+ addl $1, (v)
26996+ adcl $0, 4(v)
26997+ int $4
26998+0:
26999+ _ASM_EXTABLE(0b, 0b)
27000+#endif
27001+
27002+RET_ENDP
27003+BEGIN(dec_unchecked)
27004+ subl $1, (v)
27005+ sbbl $0, 4(v)
27006 RET_ENDP
27007 #undef v
27008
27009@@ -132,6 +255,26 @@ BEGIN(dec_return)
27010 movl 4(v), %edx
27011 subl $1, %eax
27012 sbbl $0, %edx
27013+
27014+#ifdef CONFIG_PAX_REFCOUNT
27015+ into
27016+1234:
27017+ _ASM_EXTABLE(1234b, 2f)
27018+#endif
27019+
27020+ movl %eax, (v)
27021+ movl %edx, 4(v)
27022+
27023+#ifdef CONFIG_PAX_REFCOUNT
27024+2:
27025+#endif
27026+
27027+RET_ENDP
27028+BEGIN(dec_return_unchecked)
27029+ movl (v), %eax
27030+ movl 4(v), %edx
27031+ subl $1, %eax
27032+ sbbl $0, %edx
27033 movl %eax, (v)
27034 movl %edx, 4(v)
27035 RET_ENDP
27036@@ -143,6 +286,13 @@ BEGIN(add_unless)
27037 adcl %edx, %edi
27038 addl (v), %eax
27039 adcl 4(v), %edx
27040+
27041+#ifdef CONFIG_PAX_REFCOUNT
27042+ into
27043+1234:
27044+ _ASM_EXTABLE(1234b, 2f)
27045+#endif
27046+
27047 cmpl %eax, %ecx
27048 je 3f
27049 1:
27050@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
27051 1:
27052 addl $1, %eax
27053 adcl $0, %edx
27054+
27055+#ifdef CONFIG_PAX_REFCOUNT
27056+ into
27057+1234:
27058+ _ASM_EXTABLE(1234b, 2f)
27059+#endif
27060+
27061 movl %eax, (v)
27062 movl %edx, 4(v)
27063 movl $1, %eax
27064@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
27065 movl 4(v), %edx
27066 subl $1, %eax
27067 sbbl $0, %edx
27068+
27069+#ifdef CONFIG_PAX_REFCOUNT
27070+ into
27071+1234:
27072+ _ASM_EXTABLE(1234b, 1f)
27073+#endif
27074+
27075 js 1f
27076 movl %eax, (v)
27077 movl %edx, 4(v)
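
On the PAX_REFCOUNT plumbing above: `into` raises the overflow exception (trap 4) when EFLAGS.OF is set, and `int $4` raises it unconditionally once a jno has been used to undo the wrapped arithmetic; the _ASM_EXTABLE entries give the fault handler a resume point, and trap 4 is then reported through pax_report_refcount_overflow(), as seen in the traps.c hunk earlier. The same shape for the single-word case, as an inline-asm sketch:

	static inline void refcount_inc_trap(int *v)
	{
		asm volatile("lock; incl %0\n\t"
			     "jno 1f\n\t"	/* no signed overflow: done */
			     "lock; decl %0\n\t" /* undo the wrapped increment */
			     "int $4\n"	/* raise #OF; the real code adds an extable entry */
			     "1:"
			     : "+m" (*v) : : "cc", "memory");
	}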
27078diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
27079index f5cc9eb..51fa319 100644
27080--- a/arch/x86/lib/atomic64_cx8_32.S
27081+++ b/arch/x86/lib/atomic64_cx8_32.S
27082@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
27083 CFI_STARTPROC
27084
27085 read64 %ecx
27086+ pax_force_retaddr
27087 ret
27088 CFI_ENDPROC
27089 ENDPROC(atomic64_read_cx8)
27090
27091+ENTRY(atomic64_read_unchecked_cx8)
27092+ CFI_STARTPROC
27093+
27094+ read64 %ecx
27095+ pax_force_retaddr
27096+ ret
27097+ CFI_ENDPROC
27098+ENDPROC(atomic64_read_unchecked_cx8)
27099+
27100 ENTRY(atomic64_set_cx8)
27101 CFI_STARTPROC
27102
27103@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
27104 cmpxchg8b (%esi)
27105 jne 1b
27106
27107+ pax_force_retaddr
27108 ret
27109 CFI_ENDPROC
27110 ENDPROC(atomic64_set_cx8)
27111
27112+ENTRY(atomic64_set_unchecked_cx8)
27113+ CFI_STARTPROC
27114+
27115+1:
27116+/* we don't need LOCK_PREFIX since aligned 64-bit writes
27117+ * are atomic on 586 and newer */
27118+ cmpxchg8b (%esi)
27119+ jne 1b
27120+
27121+ pax_force_retaddr
27122+ ret
27123+ CFI_ENDPROC
27124+ENDPROC(atomic64_set_unchecked_cx8)
27125+
27126 ENTRY(atomic64_xchg_cx8)
27127 CFI_STARTPROC
27128
27129@@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
27130 cmpxchg8b (%esi)
27131 jne 1b
27132
27133+ pax_force_retaddr
27134 ret
27135 CFI_ENDPROC
27136 ENDPROC(atomic64_xchg_cx8)
27137
27138-.macro addsub_return func ins insc
27139-ENTRY(atomic64_\func\()_return_cx8)
27140+.macro addsub_return func ins insc unchecked=""
27141+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
27142 CFI_STARTPROC
27143 SAVE ebp
27144 SAVE ebx
27145@@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
27146 movl %edx, %ecx
27147 \ins\()l %esi, %ebx
27148 \insc\()l %edi, %ecx
27149+
27150+.ifb \unchecked
27151+#ifdef CONFIG_PAX_REFCOUNT
27152+ into
27153+2:
27154+ _ASM_EXTABLE(2b, 3f)
27155+#endif
27156+.endif
27157+
27158 LOCK_PREFIX
27159 cmpxchg8b (%ebp)
27160 jne 1b
27161-
27162-10:
27163 movl %ebx, %eax
27164 movl %ecx, %edx
27165+
27166+.ifb \unchecked
27167+#ifdef CONFIG_PAX_REFCOUNT
27168+3:
27169+#endif
27170+.endif
27171+
27172 RESTORE edi
27173 RESTORE esi
27174 RESTORE ebx
27175 RESTORE ebp
27176+ pax_force_retaddr
27177 ret
27178 CFI_ENDPROC
27179-ENDPROC(atomic64_\func\()_return_cx8)
27180+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
27181 .endm
27182
27183 addsub_return add add adc
27184 addsub_return sub sub sbb
27185+addsub_return add add adc _unchecked
27186+addsub_return sub sub sbb _unchecked
27187
27188-.macro incdec_return func ins insc
27189-ENTRY(atomic64_\func\()_return_cx8)
27190+.macro incdec_return func ins insc unchecked=""
27191+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
27192 CFI_STARTPROC
27193 SAVE ebx
27194
27195@@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
27196 movl %edx, %ecx
27197 \ins\()l $1, %ebx
27198 \insc\()l $0, %ecx
27199+
27200+.ifb \unchecked
27201+#ifdef CONFIG_PAX_REFCOUNT
27202+ into
27203+2:
27204+ _ASM_EXTABLE(2b, 3f)
27205+#endif
27206+.endif
27207+
27208 LOCK_PREFIX
27209 cmpxchg8b (%esi)
27210 jne 1b
27211
27212-10:
27213 movl %ebx, %eax
27214 movl %ecx, %edx
27215+
27216+.ifb \unchecked
27217+#ifdef CONFIG_PAX_REFCOUNT
27218+3:
27219+#endif
27220+.endif
27221+
27222 RESTORE ebx
27223+ pax_force_retaddr
27224 ret
27225 CFI_ENDPROC
27226-ENDPROC(atomic64_\func\()_return_cx8)
27227+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
27228 .endm
27229
27230 incdec_return inc add adc
27231 incdec_return dec sub sbb
27232+incdec_return inc add adc _unchecked
27233+incdec_return dec sub sbb _unchecked
27234
27235 ENTRY(atomic64_dec_if_positive_cx8)
27236 CFI_STARTPROC
27237@@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
27238 movl %edx, %ecx
27239 subl $1, %ebx
27240 sbb $0, %ecx
27241+
27242+#ifdef CONFIG_PAX_REFCOUNT
27243+ into
27244+1234:
27245+ _ASM_EXTABLE(1234b, 2f)
27246+#endif
27247+
27248 js 2f
27249 LOCK_PREFIX
27250 cmpxchg8b (%esi)
27251@@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
27252 movl %ebx, %eax
27253 movl %ecx, %edx
27254 RESTORE ebx
27255+ pax_force_retaddr
27256 ret
27257 CFI_ENDPROC
27258 ENDPROC(atomic64_dec_if_positive_cx8)
27259@@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
27260 movl %edx, %ecx
27261 addl %ebp, %ebx
27262 adcl %edi, %ecx
27263+
27264+#ifdef CONFIG_PAX_REFCOUNT
27265+ into
27266+1234:
27267+ _ASM_EXTABLE(1234b, 3f)
27268+#endif
27269+
27270 LOCK_PREFIX
27271 cmpxchg8b (%esi)
27272 jne 1b
27273@@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
27274 CFI_ADJUST_CFA_OFFSET -8
27275 RESTORE ebx
27276 RESTORE ebp
27277+ pax_force_retaddr
27278 ret
27279 4:
27280 cmpl %edx, 4(%esp)
27281@@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
27282 xorl %ecx, %ecx
27283 addl $1, %ebx
27284 adcl %edx, %ecx
27285+
27286+#ifdef CONFIG_PAX_REFCOUNT
27287+ into
27288+1234:
27289+ _ASM_EXTABLE(1234b, 3f)
27290+#endif
27291+
27292 LOCK_PREFIX
27293 cmpxchg8b (%esi)
27294 jne 1b
27295@@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
27296 movl $1, %eax
27297 3:
27298 RESTORE ebx
27299+ pax_force_retaddr
27300 ret
27301 CFI_ENDPROC
27302 ENDPROC(atomic64_inc_not_zero_cx8)
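
Every cx8 routine above shares the same shape: load the old 64-bit value, build the new one in %ebx:%ecx, and retry "lock cmpxchg8b" until the compare succeeds; the macros are then instantiated a second time with the _unchecked suffix, where the .ifb test compiles the overflow check out. A plain C stand-in for the retry loop (a sketch, using GCC's builtin in place of the hand-rolled cmpxchg8b):

	/* Sketch of the cmpxchg8b retry loop the macros above expand to. */
	static inline long long atomic64_add_return_sketch(long long i,
							   long long *v)
	{
		long long old, new;

		do {
			old = *v;	/* read64 in the real code        */
			new = old + i;	/* the addl/adcl pair in the macro */
		} while (!__sync_bool_compare_and_swap(v, old, new));
		return new;
	}
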
27303diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
27304index e78b8ee..7e173a8 100644
27305--- a/arch/x86/lib/checksum_32.S
27306+++ b/arch/x86/lib/checksum_32.S
27307@@ -29,7 +29,8 @@
27308 #include <asm/dwarf2.h>
27309 #include <asm/errno.h>
27310 #include <asm/asm.h>
27311-
27312+#include <asm/segment.h>
27313+
27314 /*
27315 * computes a partial checksum, e.g. for TCP/UDP fragments
27316 */
27317@@ -293,9 +294,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
27318
27319 #define ARGBASE 16
27320 #define FP 12
27321-
27322-ENTRY(csum_partial_copy_generic)
27323+
27324+ENTRY(csum_partial_copy_generic_to_user)
27325 CFI_STARTPROC
27326+
27327+#ifdef CONFIG_PAX_MEMORY_UDEREF
27328+ pushl_cfi %gs
27329+ popl_cfi %es
27330+ jmp csum_partial_copy_generic
27331+#endif
27332+
27333+ENTRY(csum_partial_copy_generic_from_user)
27334+
27335+#ifdef CONFIG_PAX_MEMORY_UDEREF
27336+ pushl_cfi %gs
27337+ popl_cfi %ds
27338+#endif
27339+
27340+ENTRY(csum_partial_copy_generic)
27341 subl $4,%esp
27342 CFI_ADJUST_CFA_OFFSET 4
27343 pushl_cfi %edi
27344@@ -317,7 +333,7 @@ ENTRY(csum_partial_copy_generic)
27345 jmp 4f
27346 SRC(1: movw (%esi), %bx )
27347 addl $2, %esi
27348-DST( movw %bx, (%edi) )
27349+DST( movw %bx, %es:(%edi) )
27350 addl $2, %edi
27351 addw %bx, %ax
27352 adcl $0, %eax
27353@@ -329,30 +345,30 @@ DST( movw %bx, (%edi) )
27354 SRC(1: movl (%esi), %ebx )
27355 SRC( movl 4(%esi), %edx )
27356 adcl %ebx, %eax
27357-DST( movl %ebx, (%edi) )
27358+DST( movl %ebx, %es:(%edi) )
27359 adcl %edx, %eax
27360-DST( movl %edx, 4(%edi) )
27361+DST( movl %edx, %es:4(%edi) )
27362
27363 SRC( movl 8(%esi), %ebx )
27364 SRC( movl 12(%esi), %edx )
27365 adcl %ebx, %eax
27366-DST( movl %ebx, 8(%edi) )
27367+DST( movl %ebx, %es:8(%edi) )
27368 adcl %edx, %eax
27369-DST( movl %edx, 12(%edi) )
27370+DST( movl %edx, %es:12(%edi) )
27371
27372 SRC( movl 16(%esi), %ebx )
27373 SRC( movl 20(%esi), %edx )
27374 adcl %ebx, %eax
27375-DST( movl %ebx, 16(%edi) )
27376+DST( movl %ebx, %es:16(%edi) )
27377 adcl %edx, %eax
27378-DST( movl %edx, 20(%edi) )
27379+DST( movl %edx, %es:20(%edi) )
27380
27381 SRC( movl 24(%esi), %ebx )
27382 SRC( movl 28(%esi), %edx )
27383 adcl %ebx, %eax
27384-DST( movl %ebx, 24(%edi) )
27385+DST( movl %ebx, %es:24(%edi) )
27386 adcl %edx, %eax
27387-DST( movl %edx, 28(%edi) )
27388+DST( movl %edx, %es:28(%edi) )
27389
27390 lea 32(%esi), %esi
27391 lea 32(%edi), %edi
27392@@ -366,7 +382,7 @@ DST( movl %edx, 28(%edi) )
27393 shrl $2, %edx # This clears CF
27394 SRC(3: movl (%esi), %ebx )
27395 adcl %ebx, %eax
27396-DST( movl %ebx, (%edi) )
27397+DST( movl %ebx, %es:(%edi) )
27398 lea 4(%esi), %esi
27399 lea 4(%edi), %edi
27400 dec %edx
27401@@ -378,12 +394,12 @@ DST( movl %ebx, (%edi) )
27402 jb 5f
27403 SRC( movw (%esi), %cx )
27404 leal 2(%esi), %esi
27405-DST( movw %cx, (%edi) )
27406+DST( movw %cx, %es:(%edi) )
27407 leal 2(%edi), %edi
27408 je 6f
27409 shll $16,%ecx
27410 SRC(5: movb (%esi), %cl )
27411-DST( movb %cl, (%edi) )
27412+DST( movb %cl, %es:(%edi) )
27413 6: addl %ecx, %eax
27414 adcl $0, %eax
27415 7:
27416@@ -394,7 +410,7 @@ DST( movb %cl, (%edi) )
27417
27418 6001:
27419 movl ARGBASE+20(%esp), %ebx # src_err_ptr
27420- movl $-EFAULT, (%ebx)
27421+ movl $-EFAULT, %ss:(%ebx)
27422
27423 # zero the complete destination - computing the rest
27424 # is too much work
27425@@ -407,11 +423,15 @@ DST( movb %cl, (%edi) )
27426
27427 6002:
27428 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
27429- movl $-EFAULT,(%ebx)
27430+ movl $-EFAULT,%ss:(%ebx)
27431 jmp 5000b
27432
27433 .previous
27434
27435+ pushl_cfi %ss
27436+ popl_cfi %ds
27437+ pushl_cfi %ss
27438+ popl_cfi %es
27439 popl_cfi %ebx
27440 CFI_RESTORE ebx
27441 popl_cfi %esi
27442@@ -421,26 +441,43 @@ DST( movb %cl, (%edi) )
27443 popl_cfi %ecx # equivalent to addl $4,%esp
27444 ret
27445 CFI_ENDPROC
27446-ENDPROC(csum_partial_copy_generic)
27447+ENDPROC(csum_partial_copy_generic_to_user)
27448
27449 #else
27450
27451 /* Version for PentiumII/PPro */
27452
27453 #define ROUND1(x) \
27454+ nop; nop; nop; \
27455 SRC(movl x(%esi), %ebx ) ; \
27456 addl %ebx, %eax ; \
27457- DST(movl %ebx, x(%edi) ) ;
27458+ DST(movl %ebx, %es:x(%edi)) ;
27459
27460 #define ROUND(x) \
27461+ nop; nop; nop; \
27462 SRC(movl x(%esi), %ebx ) ; \
27463 adcl %ebx, %eax ; \
27464- DST(movl %ebx, x(%edi) ) ;
27465+ DST(movl %ebx, %es:x(%edi)) ;
27466
27467 #define ARGBASE 12
27468-
27469-ENTRY(csum_partial_copy_generic)
27470+
27471+ENTRY(csum_partial_copy_generic_to_user)
27472 CFI_STARTPROC
27473+
27474+#ifdef CONFIG_PAX_MEMORY_UDEREF
27475+ pushl_cfi %gs
27476+ popl_cfi %es
27477+ jmp csum_partial_copy_generic
27478+#endif
27479+
27480+ENTRY(csum_partial_copy_generic_from_user)
27481+
27482+#ifdef CONFIG_PAX_MEMORY_UDEREF
27483+ pushl_cfi %gs
27484+ popl_cfi %ds
27485+#endif
27486+
27487+ENTRY(csum_partial_copy_generic)
27488 pushl_cfi %ebx
27489 CFI_REL_OFFSET ebx, 0
27490 pushl_cfi %edi
27491@@ -461,7 +498,7 @@ ENTRY(csum_partial_copy_generic)
27492 subl %ebx, %edi
27493 lea -1(%esi),%edx
27494 andl $-32,%edx
27495- lea 3f(%ebx,%ebx), %ebx
27496+ lea 3f(%ebx,%ebx,2), %ebx
27497 testl %esi, %esi
27498 jmp *%ebx
27499 1: addl $64,%esi
27500@@ -482,19 +519,19 @@ ENTRY(csum_partial_copy_generic)
27501 jb 5f
27502 SRC( movw (%esi), %dx )
27503 leal 2(%esi), %esi
27504-DST( movw %dx, (%edi) )
27505+DST( movw %dx, %es:(%edi) )
27506 leal 2(%edi), %edi
27507 je 6f
27508 shll $16,%edx
27509 5:
27510 SRC( movb (%esi), %dl )
27511-DST( movb %dl, (%edi) )
27512+DST( movb %dl, %es:(%edi) )
27513 6: addl %edx, %eax
27514 adcl $0, %eax
27515 7:
27516 .section .fixup, "ax"
27517 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
27518- movl $-EFAULT, (%ebx)
27519+ movl $-EFAULT, %ss:(%ebx)
27520 # zero the complete destination (computing the rest is too much work)
27521 movl ARGBASE+8(%esp),%edi # dst
27522 movl ARGBASE+12(%esp),%ecx # len
27523@@ -502,10 +539,17 @@ DST( movb %dl, (%edi) )
27524 rep; stosb
27525 jmp 7b
27526 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
27527- movl $-EFAULT, (%ebx)
27528+ movl $-EFAULT, %ss:(%ebx)
27529 jmp 7b
27530 .previous
27531
27532+#ifdef CONFIG_PAX_MEMORY_UDEREF
27533+ pushl_cfi %ss
27534+ popl_cfi %ds
27535+ pushl_cfi %ss
27536+ popl_cfi %es
27537+#endif
27538+
27539 popl_cfi %esi
27540 CFI_RESTORE esi
27541 popl_cfi %edi
27542@@ -514,7 +558,7 @@ DST( movb %dl, (%edi) )
27543 CFI_RESTORE ebx
27544 ret
27545 CFI_ENDPROC
27546-ENDPROC(csum_partial_copy_generic)
27547+ENDPROC(csum_partial_copy_generic_to_user)
27548
27549 #undef ROUND
27550 #undef ROUND1
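
Under segment-based UDEREF the new _to_user/_from_user entry points only move %gs into %es or %ds and fall through, so a single copy loop (its stores now carrying explicit %es: overrides) serves both directions. The value being accumulated along the way is the ordinary Internet one's-complement sum; as a plain C reference of that sum (RFC 1071 style, not the kernel routine):

	#include <stdint.h>
	#include <stddef.h>

	/* Reference implementation of the 16-bit one's-complement checksum
	 * that csum_partial_copy_generic accumulates while copying. */
	static uint16_t csum_fold_sketch(const uint8_t *buf, size_t len,
					 uint32_t sum)
	{
		while (len > 1) {
			sum += ((uint32_t)buf[0] << 8) | buf[1];
			buf += 2;
			len -= 2;
		}
		if (len)			/* trailing odd byte */
			sum += (uint32_t)buf[0] << 8;
		while (sum >> 16)		/* fold carries back in */
			sum = (sum & 0xffff) + (sum >> 16);
		return (uint16_t)~sum;
	}
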
27551diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
27552index f2145cf..cea889d 100644
27553--- a/arch/x86/lib/clear_page_64.S
27554+++ b/arch/x86/lib/clear_page_64.S
27555@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
27556 movl $4096/8,%ecx
27557 xorl %eax,%eax
27558 rep stosq
27559+ pax_force_retaddr
27560 ret
27561 CFI_ENDPROC
27562 ENDPROC(clear_page_c)
27563@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
27564 movl $4096,%ecx
27565 xorl %eax,%eax
27566 rep stosb
27567+ pax_force_retaddr
27568 ret
27569 CFI_ENDPROC
27570 ENDPROC(clear_page_c_e)
27571@@ -43,6 +45,7 @@ ENTRY(clear_page)
27572 leaq 64(%rdi),%rdi
27573 jnz .Lloop
27574 nop
27575+ pax_force_retaddr
27576 ret
27577 CFI_ENDPROC
27578 .Lclear_page_end:
27579@@ -58,7 +61,7 @@ ENDPROC(clear_page)
27580
27581 #include <asm/cpufeature.h>
27582
27583- .section .altinstr_replacement,"ax"
27584+ .section .altinstr_replacement,"a"
27585 1: .byte 0xeb /* jmp <disp8> */
27586 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
27587 2: .byte 0xeb /* jmp <disp8> */
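
The "ax" -> "a" section-flag change recurs below (copy_page, memcpy, memmove, memset): replacement instructions are never executed inside .altinstr_replacement, only copied over the original code at patch time, so under KERNEXEC the section can stay non-executable. A rough sketch of that copy step (field layout simplified from the 3.10 struct alt_instr; cpu_has_feature() is a hypothetical stand-in):

	#include <stdint.h>
	#include <string.h>

	extern int cpu_has_feature(uint16_t bit);	/* hypothetical */

	/* Simplified alt_instr record: both offsets are relative to their
	 * own field; layout here is illustrative only. */
	struct alt_instr_sketch {
		int32_t  instr_offset;		/* -> original instruction  */
		int32_t  repl_offset;		/* -> replacement bytes     */
		uint16_t cpuid;			/* feature bit selecting it */
		uint8_t  instrlen;
		uint8_t  replacementlen;
	};

	static void apply_one_alternative(struct alt_instr_sketch *a)
	{
		uint8_t *instr = (uint8_t *)&a->instr_offset + a->instr_offset;
		uint8_t *repl  = (uint8_t *)&a->repl_offset  + a->repl_offset;

		if (!cpu_has_feature(a->cpuid))
			return;
		/* the replacement is read as data and copied; it never runs
		 * in place, which is why "x" can be dropped from the flags */
		memcpy(instr, repl, a->replacementlen);
	}
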
27588diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
27589index 1e572c5..2a162cd 100644
27590--- a/arch/x86/lib/cmpxchg16b_emu.S
27591+++ b/arch/x86/lib/cmpxchg16b_emu.S
27592@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
27593
27594 popf
27595 mov $1, %al
27596+ pax_force_retaddr
27597 ret
27598
27599 not_same:
27600 popf
27601 xor %al,%al
27602+ pax_force_retaddr
27603 ret
27604
27605 CFI_ENDPROC
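
pax_force_retaddr, added before nearly every ret from here on, is the KERNEXEC return-address guard: it forces the saved return address into the kernel half of the address space so that even a corrupted stack slot cannot send a ret into userland. As an assumption about its simplest ("bts") variant, the whole macro is one instruction:

	/* Assumed "bts" variant of pax_force_retaddr: set bit 63 of the
	 * saved return address in place, making it canonical-kernel no
	 * matter what was written there. Illustrative only; see
	 * asm/alternative-asm.h in this patch for the real macro. */
	#define pax_force_retaddr_sketch() \
		asm volatile("btsq $63, (%%rsp)" ::: "memory", "cc")
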
27606diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
27607index 176cca6..1166c50 100644
27608--- a/arch/x86/lib/copy_page_64.S
27609+++ b/arch/x86/lib/copy_page_64.S
27610@@ -9,6 +9,7 @@ copy_page_rep:
27611 CFI_STARTPROC
27612 movl $4096/8, %ecx
27613 rep movsq
27614+ pax_force_retaddr
27615 ret
27616 CFI_ENDPROC
27617 ENDPROC(copy_page_rep)
27618@@ -20,12 +21,14 @@ ENDPROC(copy_page_rep)
27619
27620 ENTRY(copy_page)
27621 CFI_STARTPROC
27622- subq $2*8, %rsp
27623- CFI_ADJUST_CFA_OFFSET 2*8
27624+ subq $3*8, %rsp
27625+ CFI_ADJUST_CFA_OFFSET 3*8
27626 movq %rbx, (%rsp)
27627 CFI_REL_OFFSET rbx, 0
27628 movq %r12, 1*8(%rsp)
27629 CFI_REL_OFFSET r12, 1*8
27630+ movq %r13, 2*8(%rsp)
27631+ CFI_REL_OFFSET r13, 2*8
27632
27633 movl $(4096/64)-5, %ecx
27634 .p2align 4
27635@@ -36,7 +39,7 @@ ENTRY(copy_page)
27636 movq 0x8*2(%rsi), %rdx
27637 movq 0x8*3(%rsi), %r8
27638 movq 0x8*4(%rsi), %r9
27639- movq 0x8*5(%rsi), %r10
27640+ movq 0x8*5(%rsi), %r13
27641 movq 0x8*6(%rsi), %r11
27642 movq 0x8*7(%rsi), %r12
27643
27644@@ -47,7 +50,7 @@ ENTRY(copy_page)
27645 movq %rdx, 0x8*2(%rdi)
27646 movq %r8, 0x8*3(%rdi)
27647 movq %r9, 0x8*4(%rdi)
27648- movq %r10, 0x8*5(%rdi)
27649+ movq %r13, 0x8*5(%rdi)
27650 movq %r11, 0x8*6(%rdi)
27651 movq %r12, 0x8*7(%rdi)
27652
27653@@ -66,7 +69,7 @@ ENTRY(copy_page)
27654 movq 0x8*2(%rsi), %rdx
27655 movq 0x8*3(%rsi), %r8
27656 movq 0x8*4(%rsi), %r9
27657- movq 0x8*5(%rsi), %r10
27658+ movq 0x8*5(%rsi), %r13
27659 movq 0x8*6(%rsi), %r11
27660 movq 0x8*7(%rsi), %r12
27661
27662@@ -75,7 +78,7 @@ ENTRY(copy_page)
27663 movq %rdx, 0x8*2(%rdi)
27664 movq %r8, 0x8*3(%rdi)
27665 movq %r9, 0x8*4(%rdi)
27666- movq %r10, 0x8*5(%rdi)
27667+ movq %r13, 0x8*5(%rdi)
27668 movq %r11, 0x8*6(%rdi)
27669 movq %r12, 0x8*7(%rdi)
27670
27671@@ -87,8 +90,11 @@ ENTRY(copy_page)
27672 CFI_RESTORE rbx
27673 movq 1*8(%rsp), %r12
27674 CFI_RESTORE r12
27675- addq $2*8, %rsp
27676- CFI_ADJUST_CFA_OFFSET -2*8
27677+ movq 2*8(%rsp), %r13
27678+ CFI_RESTORE r13
27679+ addq $3*8, %rsp
27680+ CFI_ADJUST_CFA_OFFSET -3*8
27681+ pax_force_retaddr
27682 ret
27683 .Lcopy_page_end:
27684 CFI_ENDPROC
27685@@ -99,7 +105,7 @@ ENDPROC(copy_page)
27686
27687 #include <asm/cpufeature.h>
27688
27689- .section .altinstr_replacement,"ax"
27690+ .section .altinstr_replacement,"a"
27691 1: .byte 0xeb /* jmp <disp8> */
27692 .byte (copy_page_rep - copy_page) - (2f - 1b) /* offset */
27693 2:
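
This hunk also shows why %r10 keeps disappearing in the 64-bit files below (copy_user, memcpy, memmove, memset, msr-reg): the KERNEXEC "or" method reserves %r10 for the return-address mask that pax_force_retaddr or's into the stack slot, so these routines move their scratch values to %rcx, %r9, or a freshly saved %r13. Assumed shape of that convention (not a quote of the real macros):

	/* Assumption: %r10 permanently holds the sign-bit mask; code that
	 * had to clobber it reloads it afterwards, cf. the
	 * "pax_force_retaddr 0, 1" form in csum-copy_64.S below. */
	#define pax_set_fptr_mask_sketch() \
		asm volatile("movabs $0x8000000000000000, %%r10" ::: "r10")
	#define pax_force_retaddr_or_sketch() \
		asm volatile("orq %%r10, (%%rsp)" ::: "memory", "cc")
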
27694diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
27695index a30ca15..6b3f4e1 100644
27696--- a/arch/x86/lib/copy_user_64.S
27697+++ b/arch/x86/lib/copy_user_64.S
27698@@ -18,31 +18,7 @@
27699 #include <asm/alternative-asm.h>
27700 #include <asm/asm.h>
27701 #include <asm/smap.h>
27702-
27703-/*
27704- * By placing feature2 after feature1 in altinstructions section, we logically
27705- * implement:
27706- * If CPU has feature2, jmp to alt2 is used
27707- * else if CPU has feature1, jmp to alt1 is used
27708- * else jmp to orig is used.
27709- */
27710- .macro ALTERNATIVE_JUMP feature1,feature2,orig,alt1,alt2
27711-0:
27712- .byte 0xe9 /* 32bit jump */
27713- .long \orig-1f /* by default jump to orig */
27714-1:
27715- .section .altinstr_replacement,"ax"
27716-2: .byte 0xe9 /* near jump with 32bit immediate */
27717- .long \alt1-1b /* offset */ /* or alternatively to alt1 */
27718-3: .byte 0xe9 /* near jump with 32bit immediate */
27719- .long \alt2-1b /* offset */ /* or alternatively to alt2 */
27720- .previous
27721-
27722- .section .altinstructions,"a"
27723- altinstruction_entry 0b,2b,\feature1,5,5
27724- altinstruction_entry 0b,3b,\feature2,5,5
27725- .previous
27726- .endm
27727+#include <asm/pgtable.h>
27728
27729 .macro ALIGN_DESTINATION
27730 #ifdef FIX_ALIGNMENT
27731@@ -70,52 +46,6 @@
27732 #endif
27733 .endm
27734
27735-/* Standard copy_to_user with segment limit checking */
27736-ENTRY(_copy_to_user)
27737- CFI_STARTPROC
27738- GET_THREAD_INFO(%rax)
27739- movq %rdi,%rcx
27740- addq %rdx,%rcx
27741- jc bad_to_user
27742- cmpq TI_addr_limit(%rax),%rcx
27743- ja bad_to_user
27744- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
27745- copy_user_generic_unrolled,copy_user_generic_string, \
27746- copy_user_enhanced_fast_string
27747- CFI_ENDPROC
27748-ENDPROC(_copy_to_user)
27749-
27750-/* Standard copy_from_user with segment limit checking */
27751-ENTRY(_copy_from_user)
27752- CFI_STARTPROC
27753- GET_THREAD_INFO(%rax)
27754- movq %rsi,%rcx
27755- addq %rdx,%rcx
27756- jc bad_from_user
27757- cmpq TI_addr_limit(%rax),%rcx
27758- ja bad_from_user
27759- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
27760- copy_user_generic_unrolled,copy_user_generic_string, \
27761- copy_user_enhanced_fast_string
27762- CFI_ENDPROC
27763-ENDPROC(_copy_from_user)
27764-
27765- .section .fixup,"ax"
27766- /* must zero dest */
27767-ENTRY(bad_from_user)
27768-bad_from_user:
27769- CFI_STARTPROC
27770- movl %edx,%ecx
27771- xorl %eax,%eax
27772- rep
27773- stosb
27774-bad_to_user:
27775- movl %edx,%eax
27776- ret
27777- CFI_ENDPROC
27778-ENDPROC(bad_from_user)
27779- .previous
27780-
27781 /*
27782 * copy_user_generic_unrolled - memory copy with exception handling.
27783 * This version is for CPUs like P4 that don't have efficient micro
27784@@ -131,6 +61,7 @@ ENDPROC(bad_from_user)
27785 */
27786 ENTRY(copy_user_generic_unrolled)
27787 CFI_STARTPROC
27788+ ASM_PAX_OPEN_USERLAND
27789 ASM_STAC
27790 cmpl $8,%edx
27791 jb 20f /* less than 8 bytes, go to byte copy loop */
27792@@ -141,19 +72,19 @@ ENTRY(copy_user_generic_unrolled)
27793 jz 17f
27794 1: movq (%rsi),%r8
27795 2: movq 1*8(%rsi),%r9
27796-3: movq 2*8(%rsi),%r10
27797+3: movq 2*8(%rsi),%rax
27798 4: movq 3*8(%rsi),%r11
27799 5: movq %r8,(%rdi)
27800 6: movq %r9,1*8(%rdi)
27801-7: movq %r10,2*8(%rdi)
27802+7: movq %rax,2*8(%rdi)
27803 8: movq %r11,3*8(%rdi)
27804 9: movq 4*8(%rsi),%r8
27805 10: movq 5*8(%rsi),%r9
27806-11: movq 6*8(%rsi),%r10
27807+11: movq 6*8(%rsi),%rax
27808 12: movq 7*8(%rsi),%r11
27809 13: movq %r8,4*8(%rdi)
27810 14: movq %r9,5*8(%rdi)
27811-15: movq %r10,6*8(%rdi)
27812+15: movq %rax,6*8(%rdi)
27813 16: movq %r11,7*8(%rdi)
27814 leaq 64(%rsi),%rsi
27815 leaq 64(%rdi),%rdi
27816@@ -180,6 +111,8 @@ ENTRY(copy_user_generic_unrolled)
27817 jnz 21b
27818 23: xor %eax,%eax
27819 ASM_CLAC
27820+ ASM_PAX_CLOSE_USERLAND
27821+ pax_force_retaddr
27822 ret
27823
27824 .section .fixup,"ax"
27825@@ -235,6 +168,7 @@ ENDPROC(copy_user_generic_unrolled)
27826 */
27827 ENTRY(copy_user_generic_string)
27828 CFI_STARTPROC
27829+ ASM_PAX_OPEN_USERLAND
27830 ASM_STAC
27831 andl %edx,%edx
27832 jz 4f
27833@@ -251,6 +185,8 @@ ENTRY(copy_user_generic_string)
27834 movsb
27835 4: xorl %eax,%eax
27836 ASM_CLAC
27837+ ASM_PAX_CLOSE_USERLAND
27838+ pax_force_retaddr
27839 ret
27840
27841 .section .fixup,"ax"
27842@@ -278,6 +214,7 @@ ENDPROC(copy_user_generic_string)
27843 */
27844 ENTRY(copy_user_enhanced_fast_string)
27845 CFI_STARTPROC
27846+ ASM_PAX_OPEN_USERLAND
27847 ASM_STAC
27848 andl %edx,%edx
27849 jz 2f
27850@@ -286,6 +223,8 @@ ENTRY(copy_user_enhanced_fast_string)
27851 movsb
27852 2: xorl %eax,%eax
27853 ASM_CLAC
27854+ ASM_PAX_CLOSE_USERLAND
27855+ pax_force_retaddr
27856 ret
27857
27858 .section .fixup,"ax"
27859diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
27860index 6a4f43c..55d26f2 100644
27861--- a/arch/x86/lib/copy_user_nocache_64.S
27862+++ b/arch/x86/lib/copy_user_nocache_64.S
27863@@ -8,6 +8,7 @@
27864
27865 #include <linux/linkage.h>
27866 #include <asm/dwarf2.h>
27867+#include <asm/alternative-asm.h>
27868
27869 #define FIX_ALIGNMENT 1
27870
27871@@ -16,6 +17,7 @@
27872 #include <asm/thread_info.h>
27873 #include <asm/asm.h>
27874 #include <asm/smap.h>
27875+#include <asm/pgtable.h>
27876
27877 .macro ALIGN_DESTINATION
27878 #ifdef FIX_ALIGNMENT
27879@@ -49,6 +51,16 @@
27880 */
27881 ENTRY(__copy_user_nocache)
27882 CFI_STARTPROC
27883+
27884+#ifdef CONFIG_PAX_MEMORY_UDEREF
27885+ mov pax_user_shadow_base,%rcx
27886+ cmp %rcx,%rsi
27887+ jae 1f
27888+ add %rcx,%rsi
27889+1:
27890+#endif
27891+
27892+ ASM_PAX_OPEN_USERLAND
27893 ASM_STAC
27894 cmpl $8,%edx
27895 jb 20f /* less than 8 bytes, go to byte copy loop */
27896@@ -59,19 +71,19 @@ ENTRY(__copy_user_nocache)
27897 jz 17f
27898 1: movq (%rsi),%r8
27899 2: movq 1*8(%rsi),%r9
27900-3: movq 2*8(%rsi),%r10
27901+3: movq 2*8(%rsi),%rax
27902 4: movq 3*8(%rsi),%r11
27903 5: movnti %r8,(%rdi)
27904 6: movnti %r9,1*8(%rdi)
27905-7: movnti %r10,2*8(%rdi)
27906+7: movnti %rax,2*8(%rdi)
27907 8: movnti %r11,3*8(%rdi)
27908 9: movq 4*8(%rsi),%r8
27909 10: movq 5*8(%rsi),%r9
27910-11: movq 6*8(%rsi),%r10
27911+11: movq 6*8(%rsi),%rax
27912 12: movq 7*8(%rsi),%r11
27913 13: movnti %r8,4*8(%rdi)
27914 14: movnti %r9,5*8(%rdi)
27915-15: movnti %r10,6*8(%rdi)
27916+15: movnti %rax,6*8(%rdi)
27917 16: movnti %r11,7*8(%rdi)
27918 leaq 64(%rsi),%rsi
27919 leaq 64(%rdi),%rdi
27920@@ -98,7 +110,9 @@ ENTRY(__copy_user_nocache)
27921 jnz 21b
27922 23: xorl %eax,%eax
27923 ASM_CLAC
27924+ ASM_PAX_CLOSE_USERLAND
27925 sfence
27926+ pax_force_retaddr
27927 ret
27928
27929 .section .fixup,"ax"
27930diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
27931index 2419d5f..953ee51 100644
27932--- a/arch/x86/lib/csum-copy_64.S
27933+++ b/arch/x86/lib/csum-copy_64.S
27934@@ -9,6 +9,7 @@
27935 #include <asm/dwarf2.h>
27936 #include <asm/errno.h>
27937 #include <asm/asm.h>
27938+#include <asm/alternative-asm.h>
27939
27940 /*
27941 * Checksum copy with exception handling.
27942@@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
27943 CFI_RESTORE rbp
27944 addq $7*8, %rsp
27945 CFI_ADJUST_CFA_OFFSET -7*8
27946+ pax_force_retaddr 0, 1
27947 ret
27948 CFI_RESTORE_STATE
27949
27950diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
27951index 25b7ae8..c40113e 100644
27952--- a/arch/x86/lib/csum-wrappers_64.c
27953+++ b/arch/x86/lib/csum-wrappers_64.c
27954@@ -52,8 +52,12 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
27955 len -= 2;
27956 }
27957 }
27958- isum = csum_partial_copy_generic((__force const void *)src,
27959+ pax_open_userland();
27960+ stac();
27961+ isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
27962 dst, len, isum, errp, NULL);
27963+ clac();
27964+ pax_close_userland();
27965 if (unlikely(*errp))
27966 goto out_err;
27967
27968@@ -105,8 +109,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
27969 }
27970
27971 *errp = 0;
27972- return csum_partial_copy_generic(src, (void __force *)dst,
27973+ pax_open_userland();
27974+ stac();
27975+ isum = csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
27976 len, isum, NULL, errp);
27977+ clac();
27978+ pax_close_userland();
27979+ return isum;
27980 }
27981 EXPORT_SYMBOL(csum_partial_copy_to_user);
27982
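
These C wrappers spell out the full access bracket that the assembly entry points express with ASM_PAX_OPEN_USERLAND/ASM_STAC: the UDEREF bracket is opened first and closed last, with the SMAP stac/clac pair nested inside it. Schematically (the raw_copy parameter is a hypothetical stand-in for csum_partial_copy_generic):

	/* Sketch of the bracket used by csum_partial_copy_{from,to}_user
	 * above; the helpers are the kernel's own, declared here only to
	 * keep the sketch self-contained. */
	extern void pax_open_userland(void);
	extern void pax_close_userland(void);
	extern void stac(void);
	extern void clac(void);

	static inline unsigned long
	bracketed_user_copy(void *dst, const void *src, unsigned long len,
			    unsigned long (*raw_copy)(void *, const void *,
						      unsigned long))
	{
		unsigned long ret;

		pax_open_userland();	/* UDEREF: make userland reachable  */
		stac();			/* SMAP: permit supervisor accesses */
		ret = raw_copy(dst, src, len);
		clac();			/* close in reverse order           */
		pax_close_userland();
		return ret;
	}
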
27983diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
27984index a451235..1daa956 100644
27985--- a/arch/x86/lib/getuser.S
27986+++ b/arch/x86/lib/getuser.S
27987@@ -33,17 +33,40 @@
27988 #include <asm/thread_info.h>
27989 #include <asm/asm.h>
27990 #include <asm/smap.h>
27991+#include <asm/segment.h>
27992+#include <asm/pgtable.h>
27993+#include <asm/alternative-asm.h>
27994+
27995+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
27996+#define __copyuser_seg gs;
27997+#else
27998+#define __copyuser_seg
27999+#endif
28000
28001 .text
28002 ENTRY(__get_user_1)
28003 CFI_STARTPROC
28004+
28005+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
28006 GET_THREAD_INFO(%_ASM_DX)
28007 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
28008 jae bad_get_user
28009 ASM_STAC
28010-1: movzbl (%_ASM_AX),%edx
28011+
28012+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
28013+ mov pax_user_shadow_base,%_ASM_DX
28014+ cmp %_ASM_DX,%_ASM_AX
28015+ jae 1234f
28016+ add %_ASM_DX,%_ASM_AX
28017+1234:
28018+#endif
28019+
28020+#endif
28021+
28022+1: __copyuser_seg movzbl (%_ASM_AX),%edx
28023 xor %eax,%eax
28024 ASM_CLAC
28025+ pax_force_retaddr
28026 ret
28027 CFI_ENDPROC
28028 ENDPROC(__get_user_1)
28029@@ -51,14 +74,28 @@ ENDPROC(__get_user_1)
28030 ENTRY(__get_user_2)
28031 CFI_STARTPROC
28032 add $1,%_ASM_AX
28033+
28034+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
28035 jc bad_get_user
28036 GET_THREAD_INFO(%_ASM_DX)
28037 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
28038 jae bad_get_user
28039 ASM_STAC
28040-2: movzwl -1(%_ASM_AX),%edx
28041+
28042+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
28043+ mov pax_user_shadow_base,%_ASM_DX
28044+ cmp %_ASM_DX,%_ASM_AX
28045+ jae 1234f
28046+ add %_ASM_DX,%_ASM_AX
28047+1234:
28048+#endif
28049+
28050+#endif
28051+
28052+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
28053 xor %eax,%eax
28054 ASM_CLAC
28055+ pax_force_retaddr
28056 ret
28057 CFI_ENDPROC
28058 ENDPROC(__get_user_2)
28059@@ -66,14 +103,28 @@ ENDPROC(__get_user_2)
28060 ENTRY(__get_user_4)
28061 CFI_STARTPROC
28062 add $3,%_ASM_AX
28063+
28064+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
28065 jc bad_get_user
28066 GET_THREAD_INFO(%_ASM_DX)
28067 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
28068 jae bad_get_user
28069 ASM_STAC
28070-3: movl -3(%_ASM_AX),%edx
28071+
28072+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
28073+ mov pax_user_shadow_base,%_ASM_DX
28074+ cmp %_ASM_DX,%_ASM_AX
28075+ jae 1234f
28076+ add %_ASM_DX,%_ASM_AX
28077+1234:
28078+#endif
28079+
28080+#endif
28081+
28082+3: __copyuser_seg movl -3(%_ASM_AX),%edx
28083 xor %eax,%eax
28084 ASM_CLAC
28085+ pax_force_retaddr
28086 ret
28087 CFI_ENDPROC
28088 ENDPROC(__get_user_4)
28089@@ -86,10 +137,20 @@ ENTRY(__get_user_8)
28090 GET_THREAD_INFO(%_ASM_DX)
28091 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
28092 jae bad_get_user
28093+
28094+#ifdef CONFIG_PAX_MEMORY_UDEREF
28095+ mov pax_user_shadow_base,%_ASM_DX
28096+ cmp %_ASM_DX,%_ASM_AX
28097+ jae 1234f
28098+ add %_ASM_DX,%_ASM_AX
28099+1234:
28100+#endif
28101+
28102 ASM_STAC
28103 4: movq -7(%_ASM_AX),%rdx
28104 xor %eax,%eax
28105 ASM_CLAC
28106+ pax_force_retaddr
28107 ret
28108 #else
28109 add $7,%_ASM_AX
28110@@ -98,10 +159,11 @@ ENTRY(__get_user_8)
28111 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
28112 jae bad_get_user_8
28113 ASM_STAC
28114-4: movl -7(%_ASM_AX),%edx
28115-5: movl -3(%_ASM_AX),%ecx
28116+4: __copyuser_seg movl -7(%_ASM_AX),%edx
28117+5: __copyuser_seg movl -3(%_ASM_AX),%ecx
28118 xor %eax,%eax
28119 ASM_CLAC
28120+ pax_force_retaddr
28121 ret
28122 #endif
28123 CFI_ENDPROC
28124@@ -113,6 +175,7 @@ bad_get_user:
28125 xor %edx,%edx
28126 mov $(-EFAULT),%_ASM_AX
28127 ASM_CLAC
28128+ pax_force_retaddr
28129 ret
28130 CFI_ENDPROC
28131 END(bad_get_user)
28132@@ -124,6 +187,7 @@ bad_get_user_8:
28133 xor %ecx,%ecx
28134 mov $(-EFAULT),%_ASM_AX
28135 ASM_CLAC
28136+ pax_force_retaddr
28137 ret
28138 CFI_ENDPROC
28139 END(bad_get_user_8)
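
The block repeated in each __get_user_N above is the amd64 UDEREF address fix-up: userland is aliased above pax_user_shadow_base, so a pointer still below the base is rebased before the load, while addresses already above it pass through untouched. In C terms:

	extern unsigned long pax_user_shadow_base;	/* from this patch */

	/* C rendering of the cmp/jae/add sequence added to __get_user_N. */
	static inline const void *uderef_rebase_sketch(const void *uaddr)
	{
		if ((unsigned long)uaddr < pax_user_shadow_base)
			return (const void *)((unsigned long)uaddr +
					      pax_user_shadow_base);
		return uaddr;	/* already above the base: leave as-is */
	}
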
28140diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
28141index 54fcffe..7be149e 100644
28142--- a/arch/x86/lib/insn.c
28143+++ b/arch/x86/lib/insn.c
28144@@ -20,8 +20,10 @@
28145
28146 #ifdef __KERNEL__
28147 #include <linux/string.h>
28148+#include <asm/pgtable_types.h>
28149 #else
28150 #include <string.h>
28151+#define ktla_ktva(addr) addr
28152 #endif
28153 #include <asm/inat.h>
28154 #include <asm/insn.h>
28155@@ -53,8 +55,8 @@
28156 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
28157 {
28158 memset(insn, 0, sizeof(*insn));
28159- insn->kaddr = kaddr;
28160- insn->next_byte = kaddr;
28161+ insn->kaddr = ktla_ktva(kaddr);
28162+ insn->next_byte = ktla_ktva(kaddr);
28163 insn->x86_64 = x86_64 ? 1 : 0;
28164 insn->opnd_bytes = 4;
28165 if (x86_64)
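
ktla_ktva() maps a kernel text linear address to the virtual alias the instruction decoder may actually read through; the stub added for the userspace build makes it the identity. Its in-kernel definition is presumably the usual 32-bit KERNEXEC one (offset name taken from those headers; treat this as an assumption):

	/* Assumed definition (identity when KERNEXEC is off, as in the
	 * userspace build above): */
	#ifdef CONFIG_PAX_KERNEXEC
	#define ktla_ktva(addr)	((addr) + __KERNEL_TEXT_OFFSET)
	#else
	#define ktla_ktva(addr)	(addr)
	#endif
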
28166diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
28167index 05a95e7..326f2fa 100644
28168--- a/arch/x86/lib/iomap_copy_64.S
28169+++ b/arch/x86/lib/iomap_copy_64.S
28170@@ -17,6 +17,7 @@
28171
28172 #include <linux/linkage.h>
28173 #include <asm/dwarf2.h>
28174+#include <asm/alternative-asm.h>
28175
28176 /*
28177 * override generic version in lib/iomap_copy.c
28178@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
28179 CFI_STARTPROC
28180 movl %edx,%ecx
28181 rep movsd
28182+ pax_force_retaddr
28183 ret
28184 CFI_ENDPROC
28185 ENDPROC(__iowrite32_copy)
28186diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
28187index 56313a3..9b59269 100644
28188--- a/arch/x86/lib/memcpy_64.S
28189+++ b/arch/x86/lib/memcpy_64.S
28190@@ -24,7 +24,7 @@
28191 * This gets patched over the unrolled variant (below) via the
28192 * alternative instructions framework:
28193 */
28194- .section .altinstr_replacement, "ax", @progbits
28195+ .section .altinstr_replacement, "a", @progbits
28196 .Lmemcpy_c:
28197 movq %rdi, %rax
28198 movq %rdx, %rcx
28199@@ -33,6 +33,7 @@
28200 rep movsq
28201 movl %edx, %ecx
28202 rep movsb
28203+ pax_force_retaddr
28204 ret
28205 .Lmemcpy_e:
28206 .previous
28207@@ -44,11 +45,12 @@
28208 * This gets patched over the unrolled variant (below) via the
28209 * alternative instructions framework:
28210 */
28211- .section .altinstr_replacement, "ax", @progbits
28212+ .section .altinstr_replacement, "a", @progbits
28213 .Lmemcpy_c_e:
28214 movq %rdi, %rax
28215 movq %rdx, %rcx
28216 rep movsb
28217+ pax_force_retaddr
28218 ret
28219 .Lmemcpy_e_e:
28220 .previous
28221@@ -76,13 +78,13 @@ ENTRY(memcpy)
28222 */
28223 movq 0*8(%rsi), %r8
28224 movq 1*8(%rsi), %r9
28225- movq 2*8(%rsi), %r10
28226+ movq 2*8(%rsi), %rcx
28227 movq 3*8(%rsi), %r11
28228 leaq 4*8(%rsi), %rsi
28229
28230 movq %r8, 0*8(%rdi)
28231 movq %r9, 1*8(%rdi)
28232- movq %r10, 2*8(%rdi)
28233+ movq %rcx, 2*8(%rdi)
28234 movq %r11, 3*8(%rdi)
28235 leaq 4*8(%rdi), %rdi
28236 jae .Lcopy_forward_loop
28237@@ -105,12 +107,12 @@ ENTRY(memcpy)
28238 subq $0x20, %rdx
28239 movq -1*8(%rsi), %r8
28240 movq -2*8(%rsi), %r9
28241- movq -3*8(%rsi), %r10
28242+ movq -3*8(%rsi), %rcx
28243 movq -4*8(%rsi), %r11
28244 leaq -4*8(%rsi), %rsi
28245 movq %r8, -1*8(%rdi)
28246 movq %r9, -2*8(%rdi)
28247- movq %r10, -3*8(%rdi)
28248+ movq %rcx, -3*8(%rdi)
28249 movq %r11, -4*8(%rdi)
28250 leaq -4*8(%rdi), %rdi
28251 jae .Lcopy_backward_loop
28252@@ -130,12 +132,13 @@ ENTRY(memcpy)
28253 */
28254 movq 0*8(%rsi), %r8
28255 movq 1*8(%rsi), %r9
28256- movq -2*8(%rsi, %rdx), %r10
28257+ movq -2*8(%rsi, %rdx), %rcx
28258 movq -1*8(%rsi, %rdx), %r11
28259 movq %r8, 0*8(%rdi)
28260 movq %r9, 1*8(%rdi)
28261- movq %r10, -2*8(%rdi, %rdx)
28262+ movq %rcx, -2*8(%rdi, %rdx)
28263 movq %r11, -1*8(%rdi, %rdx)
28264+ pax_force_retaddr
28265 retq
28266 .p2align 4
28267 .Lless_16bytes:
28268@@ -148,6 +151,7 @@ ENTRY(memcpy)
28269 movq -1*8(%rsi, %rdx), %r9
28270 movq %r8, 0*8(%rdi)
28271 movq %r9, -1*8(%rdi, %rdx)
28272+ pax_force_retaddr
28273 retq
28274 .p2align 4
28275 .Lless_8bytes:
28276@@ -161,6 +165,7 @@ ENTRY(memcpy)
28277 movl -4(%rsi, %rdx), %r8d
28278 movl %ecx, (%rdi)
28279 movl %r8d, -4(%rdi, %rdx)
28280+ pax_force_retaddr
28281 retq
28282 .p2align 4
28283 .Lless_3bytes:
28284@@ -179,6 +184,7 @@ ENTRY(memcpy)
28285 movb %cl, (%rdi)
28286
28287 .Lend:
28288+ pax_force_retaddr
28289 retq
28290 CFI_ENDPROC
28291 ENDPROC(memcpy)
28292diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
28293index 65268a6..5aa7815 100644
28294--- a/arch/x86/lib/memmove_64.S
28295+++ b/arch/x86/lib/memmove_64.S
28296@@ -61,13 +61,13 @@ ENTRY(memmove)
28297 5:
28298 sub $0x20, %rdx
28299 movq 0*8(%rsi), %r11
28300- movq 1*8(%rsi), %r10
28301+ movq 1*8(%rsi), %rcx
28302 movq 2*8(%rsi), %r9
28303 movq 3*8(%rsi), %r8
28304 leaq 4*8(%rsi), %rsi
28305
28306 movq %r11, 0*8(%rdi)
28307- movq %r10, 1*8(%rdi)
28308+ movq %rcx, 1*8(%rdi)
28309 movq %r9, 2*8(%rdi)
28310 movq %r8, 3*8(%rdi)
28311 leaq 4*8(%rdi), %rdi
28312@@ -81,10 +81,10 @@ ENTRY(memmove)
28313 4:
28314 movq %rdx, %rcx
28315 movq -8(%rsi, %rdx), %r11
28316- lea -8(%rdi, %rdx), %r10
28317+ lea -8(%rdi, %rdx), %r9
28318 shrq $3, %rcx
28319 rep movsq
28320- movq %r11, (%r10)
28321+ movq %r11, (%r9)
28322 jmp 13f
28323 .Lmemmove_end_forward:
28324
28325@@ -95,14 +95,14 @@ ENTRY(memmove)
28326 7:
28327 movq %rdx, %rcx
28328 movq (%rsi), %r11
28329- movq %rdi, %r10
28330+ movq %rdi, %r9
28331 leaq -8(%rsi, %rdx), %rsi
28332 leaq -8(%rdi, %rdx), %rdi
28333 shrq $3, %rcx
28334 std
28335 rep movsq
28336 cld
28337- movq %r11, (%r10)
28338+ movq %r11, (%r9)
28339 jmp 13f
28340
28341 /*
28342@@ -127,13 +127,13 @@ ENTRY(memmove)
28343 8:
28344 subq $0x20, %rdx
28345 movq -1*8(%rsi), %r11
28346- movq -2*8(%rsi), %r10
28347+ movq -2*8(%rsi), %rcx
28348 movq -3*8(%rsi), %r9
28349 movq -4*8(%rsi), %r8
28350 leaq -4*8(%rsi), %rsi
28351
28352 movq %r11, -1*8(%rdi)
28353- movq %r10, -2*8(%rdi)
28354+ movq %rcx, -2*8(%rdi)
28355 movq %r9, -3*8(%rdi)
28356 movq %r8, -4*8(%rdi)
28357 leaq -4*8(%rdi), %rdi
28358@@ -151,11 +151,11 @@ ENTRY(memmove)
28359 * Move data from 16 bytes to 31 bytes.
28360 */
28361 movq 0*8(%rsi), %r11
28362- movq 1*8(%rsi), %r10
28363+ movq 1*8(%rsi), %rcx
28364 movq -2*8(%rsi, %rdx), %r9
28365 movq -1*8(%rsi, %rdx), %r8
28366 movq %r11, 0*8(%rdi)
28367- movq %r10, 1*8(%rdi)
28368+ movq %rcx, 1*8(%rdi)
28369 movq %r9, -2*8(%rdi, %rdx)
28370 movq %r8, -1*8(%rdi, %rdx)
28371 jmp 13f
28372@@ -167,9 +167,9 @@ ENTRY(memmove)
28373 * Move data from 8 bytes to 15 bytes.
28374 */
28375 movq 0*8(%rsi), %r11
28376- movq -1*8(%rsi, %rdx), %r10
28377+ movq -1*8(%rsi, %rdx), %r9
28378 movq %r11, 0*8(%rdi)
28379- movq %r10, -1*8(%rdi, %rdx)
28380+ movq %r9, -1*8(%rdi, %rdx)
28381 jmp 13f
28382 10:
28383 cmpq $4, %rdx
28384@@ -178,9 +178,9 @@ ENTRY(memmove)
28385 * Move data from 4 bytes to 7 bytes.
28386 */
28387 movl (%rsi), %r11d
28388- movl -4(%rsi, %rdx), %r10d
28389+ movl -4(%rsi, %rdx), %r9d
28390 movl %r11d, (%rdi)
28391- movl %r10d, -4(%rdi, %rdx)
28392+ movl %r9d, -4(%rdi, %rdx)
28393 jmp 13f
28394 11:
28395 cmp $2, %rdx
28396@@ -189,9 +189,9 @@ ENTRY(memmove)
28397 * Move data from 2 bytes to 3 bytes.
28398 */
28399 movw (%rsi), %r11w
28400- movw -2(%rsi, %rdx), %r10w
28401+ movw -2(%rsi, %rdx), %r9w
28402 movw %r11w, (%rdi)
28403- movw %r10w, -2(%rdi, %rdx)
28404+ movw %r9w, -2(%rdi, %rdx)
28405 jmp 13f
28406 12:
28407 cmp $1, %rdx
28408@@ -202,14 +202,16 @@ ENTRY(memmove)
28409 movb (%rsi), %r11b
28410 movb %r11b, (%rdi)
28411 13:
28412+ pax_force_retaddr
28413 retq
28414 CFI_ENDPROC
28415
28416- .section .altinstr_replacement,"ax"
28417+ .section .altinstr_replacement,"a"
28418 .Lmemmove_begin_forward_efs:
28419 /* Forward moving data. */
28420 movq %rdx, %rcx
28421 rep movsb
28422+ pax_force_retaddr
28423 retq
28424 .Lmemmove_end_forward_efs:
28425 .previous
28426diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
28427index 2dcb380..50a78bc 100644
28428--- a/arch/x86/lib/memset_64.S
28429+++ b/arch/x86/lib/memset_64.S
28430@@ -16,7 +16,7 @@
28431 *
28432 * rax original destination
28433 */
28434- .section .altinstr_replacement, "ax", @progbits
28435+ .section .altinstr_replacement, "a", @progbits
28436 .Lmemset_c:
28437 movq %rdi,%r9
28438 movq %rdx,%rcx
28439@@ -30,6 +30,7 @@
28440 movl %edx,%ecx
28441 rep stosb
28442 movq %r9,%rax
28443+ pax_force_retaddr
28444 ret
28445 .Lmemset_e:
28446 .previous
28447@@ -45,13 +46,14 @@
28448 *
28449 * rax original destination
28450 */
28451- .section .altinstr_replacement, "ax", @progbits
28452+ .section .altinstr_replacement, "a", @progbits
28453 .Lmemset_c_e:
28454 movq %rdi,%r9
28455 movb %sil,%al
28456 movq %rdx,%rcx
28457 rep stosb
28458 movq %r9,%rax
28459+ pax_force_retaddr
28460 ret
28461 .Lmemset_e_e:
28462 .previous
28463@@ -59,7 +61,7 @@
28464 ENTRY(memset)
28465 ENTRY(__memset)
28466 CFI_STARTPROC
28467- movq %rdi,%r10
28468+ movq %rdi,%r11
28469
28470 /* expand byte value */
28471 movzbl %sil,%ecx
28472@@ -117,7 +119,8 @@ ENTRY(__memset)
28473 jnz .Lloop_1
28474
28475 .Lende:
28476- movq %r10,%rax
28477+ movq %r11,%rax
28478+ pax_force_retaddr
28479 ret
28480
28481 CFI_RESTORE_STATE
28482diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
28483index c9f2d9b..e7fd2c0 100644
28484--- a/arch/x86/lib/mmx_32.c
28485+++ b/arch/x86/lib/mmx_32.c
28486@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
28487 {
28488 void *p;
28489 int i;
28490+ unsigned long cr0;
28491
28492 if (unlikely(in_interrupt()))
28493 return __memcpy(to, from, len);
28494@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
28495 kernel_fpu_begin();
28496
28497 __asm__ __volatile__ (
28498- "1: prefetch (%0)\n" /* This set is 28 bytes */
28499- " prefetch 64(%0)\n"
28500- " prefetch 128(%0)\n"
28501- " prefetch 192(%0)\n"
28502- " prefetch 256(%0)\n"
28503+ "1: prefetch (%1)\n" /* This set is 28 bytes */
28504+ " prefetch 64(%1)\n"
28505+ " prefetch 128(%1)\n"
28506+ " prefetch 192(%1)\n"
28507+ " prefetch 256(%1)\n"
28508 "2: \n"
28509 ".section .fixup, \"ax\"\n"
28510- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
28511+ "3: \n"
28512+
28513+#ifdef CONFIG_PAX_KERNEXEC
28514+ " movl %%cr0, %0\n"
28515+ " movl %0, %%eax\n"
28516+ " andl $0xFFFEFFFF, %%eax\n"
28517+ " movl %%eax, %%cr0\n"
28518+#endif
28519+
28520+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
28521+
28522+#ifdef CONFIG_PAX_KERNEXEC
28523+ " movl %0, %%cr0\n"
28524+#endif
28525+
28526 " jmp 2b\n"
28527 ".previous\n"
28528 _ASM_EXTABLE(1b, 3b)
28529- : : "r" (from));
28530+ : "=&r" (cr0) : "r" (from) : "ax");
28531
28532 for ( ; i > 5; i--) {
28533 __asm__ __volatile__ (
28534- "1: prefetch 320(%0)\n"
28535- "2: movq (%0), %%mm0\n"
28536- " movq 8(%0), %%mm1\n"
28537- " movq 16(%0), %%mm2\n"
28538- " movq 24(%0), %%mm3\n"
28539- " movq %%mm0, (%1)\n"
28540- " movq %%mm1, 8(%1)\n"
28541- " movq %%mm2, 16(%1)\n"
28542- " movq %%mm3, 24(%1)\n"
28543- " movq 32(%0), %%mm0\n"
28544- " movq 40(%0), %%mm1\n"
28545- " movq 48(%0), %%mm2\n"
28546- " movq 56(%0), %%mm3\n"
28547- " movq %%mm0, 32(%1)\n"
28548- " movq %%mm1, 40(%1)\n"
28549- " movq %%mm2, 48(%1)\n"
28550- " movq %%mm3, 56(%1)\n"
28551+ "1: prefetch 320(%1)\n"
28552+ "2: movq (%1), %%mm0\n"
28553+ " movq 8(%1), %%mm1\n"
28554+ " movq 16(%1), %%mm2\n"
28555+ " movq 24(%1), %%mm3\n"
28556+ " movq %%mm0, (%2)\n"
28557+ " movq %%mm1, 8(%2)\n"
28558+ " movq %%mm2, 16(%2)\n"
28559+ " movq %%mm3, 24(%2)\n"
28560+ " movq 32(%1), %%mm0\n"
28561+ " movq 40(%1), %%mm1\n"
28562+ " movq 48(%1), %%mm2\n"
28563+ " movq 56(%1), %%mm3\n"
28564+ " movq %%mm0, 32(%2)\n"
28565+ " movq %%mm1, 40(%2)\n"
28566+ " movq %%mm2, 48(%2)\n"
28567+ " movq %%mm3, 56(%2)\n"
28568 ".section .fixup, \"ax\"\n"
28569- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
28570+ "3:\n"
28571+
28572+#ifdef CONFIG_PAX_KERNEXEC
28573+ " movl %%cr0, %0\n"
28574+ " movl %0, %%eax\n"
28575+ " andl $0xFFFEFFFF, %%eax\n"
28576+ " movl %%eax, %%cr0\n"
28577+#endif
28578+
28579+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
28580+
28581+#ifdef CONFIG_PAX_KERNEXEC
28582+ " movl %0, %%cr0\n"
28583+#endif
28584+
28585 " jmp 2b\n"
28586 ".previous\n"
28587 _ASM_EXTABLE(1b, 3b)
28588- : : "r" (from), "r" (to) : "memory");
28589+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
28590
28591 from += 64;
28592 to += 64;
28593@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
28594 static void fast_copy_page(void *to, void *from)
28595 {
28596 int i;
28597+ unsigned long cr0;
28598
28599 kernel_fpu_begin();
28600
28601@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
28602 * but that is for later. -AV
28603 */
28604 __asm__ __volatile__(
28605- "1: prefetch (%0)\n"
28606- " prefetch 64(%0)\n"
28607- " prefetch 128(%0)\n"
28608- " prefetch 192(%0)\n"
28609- " prefetch 256(%0)\n"
28610+ "1: prefetch (%1)\n"
28611+ " prefetch 64(%1)\n"
28612+ " prefetch 128(%1)\n"
28613+ " prefetch 192(%1)\n"
28614+ " prefetch 256(%1)\n"
28615 "2: \n"
28616 ".section .fixup, \"ax\"\n"
28617- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
28618+ "3: \n"
28619+
28620+#ifdef CONFIG_PAX_KERNEXEC
28621+ " movl %%cr0, %0\n"
28622+ " movl %0, %%eax\n"
28623+ " andl $0xFFFEFFFF, %%eax\n"
28624+ " movl %%eax, %%cr0\n"
28625+#endif
28626+
28627+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
28628+
28629+#ifdef CONFIG_PAX_KERNEXEC
28630+ " movl %0, %%cr0\n"
28631+#endif
28632+
28633 " jmp 2b\n"
28634 ".previous\n"
28635- _ASM_EXTABLE(1b, 3b) : : "r" (from));
28636+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
28637
28638 for (i = 0; i < (4096-320)/64; i++) {
28639 __asm__ __volatile__ (
28640- "1: prefetch 320(%0)\n"
28641- "2: movq (%0), %%mm0\n"
28642- " movntq %%mm0, (%1)\n"
28643- " movq 8(%0), %%mm1\n"
28644- " movntq %%mm1, 8(%1)\n"
28645- " movq 16(%0), %%mm2\n"
28646- " movntq %%mm2, 16(%1)\n"
28647- " movq 24(%0), %%mm3\n"
28648- " movntq %%mm3, 24(%1)\n"
28649- " movq 32(%0), %%mm4\n"
28650- " movntq %%mm4, 32(%1)\n"
28651- " movq 40(%0), %%mm5\n"
28652- " movntq %%mm5, 40(%1)\n"
28653- " movq 48(%0), %%mm6\n"
28654- " movntq %%mm6, 48(%1)\n"
28655- " movq 56(%0), %%mm7\n"
28656- " movntq %%mm7, 56(%1)\n"
28657+ "1: prefetch 320(%1)\n"
28658+ "2: movq (%1), %%mm0\n"
28659+ " movntq %%mm0, (%2)\n"
28660+ " movq 8(%1), %%mm1\n"
28661+ " movntq %%mm1, 8(%2)\n"
28662+ " movq 16(%1), %%mm2\n"
28663+ " movntq %%mm2, 16(%2)\n"
28664+ " movq 24(%1), %%mm3\n"
28665+ " movntq %%mm3, 24(%2)\n"
28666+ " movq 32(%1), %%mm4\n"
28667+ " movntq %%mm4, 32(%2)\n"
28668+ " movq 40(%1), %%mm5\n"
28669+ " movntq %%mm5, 40(%2)\n"
28670+ " movq 48(%1), %%mm6\n"
28671+ " movntq %%mm6, 48(%2)\n"
28672+ " movq 56(%1), %%mm7\n"
28673+ " movntq %%mm7, 56(%2)\n"
28674 ".section .fixup, \"ax\"\n"
28675- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
28676+ "3:\n"
28677+
28678+#ifdef CONFIG_PAX_KERNEXEC
28679+ " movl %%cr0, %0\n"
28680+ " movl %0, %%eax\n"
28681+ " andl $0xFFFEFFFF, %%eax\n"
28682+ " movl %%eax, %%cr0\n"
28683+#endif
28684+
28685+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
28686+
28687+#ifdef CONFIG_PAX_KERNEXEC
28688+ " movl %0, %%cr0\n"
28689+#endif
28690+
28691 " jmp 2b\n"
28692 ".previous\n"
28693- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
28694+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
28695
28696 from += 64;
28697 to += 64;
28698@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
28699 static void fast_copy_page(void *to, void *from)
28700 {
28701 int i;
28702+ unsigned long cr0;
28703
28704 kernel_fpu_begin();
28705
28706 __asm__ __volatile__ (
28707- "1: prefetch (%0)\n"
28708- " prefetch 64(%0)\n"
28709- " prefetch 128(%0)\n"
28710- " prefetch 192(%0)\n"
28711- " prefetch 256(%0)\n"
28712+ "1: prefetch (%1)\n"
28713+ " prefetch 64(%1)\n"
28714+ " prefetch 128(%1)\n"
28715+ " prefetch 192(%1)\n"
28716+ " prefetch 256(%1)\n"
28717 "2: \n"
28718 ".section .fixup, \"ax\"\n"
28719- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
28720+ "3: \n"
28721+
28722+#ifdef CONFIG_PAX_KERNEXEC
28723+ " movl %%cr0, %0\n"
28724+ " movl %0, %%eax\n"
28725+ " andl $0xFFFEFFFF, %%eax\n"
28726+ " movl %%eax, %%cr0\n"
28727+#endif
28728+
28729+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
28730+
28731+#ifdef CONFIG_PAX_KERNEXEC
28732+ " movl %0, %%cr0\n"
28733+#endif
28734+
28735 " jmp 2b\n"
28736 ".previous\n"
28737- _ASM_EXTABLE(1b, 3b) : : "r" (from));
28738+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
28739
28740 for (i = 0; i < 4096/64; i++) {
28741 __asm__ __volatile__ (
28742- "1: prefetch 320(%0)\n"
28743- "2: movq (%0), %%mm0\n"
28744- " movq 8(%0), %%mm1\n"
28745- " movq 16(%0), %%mm2\n"
28746- " movq 24(%0), %%mm3\n"
28747- " movq %%mm0, (%1)\n"
28748- " movq %%mm1, 8(%1)\n"
28749- " movq %%mm2, 16(%1)\n"
28750- " movq %%mm3, 24(%1)\n"
28751- " movq 32(%0), %%mm0\n"
28752- " movq 40(%0), %%mm1\n"
28753- " movq 48(%0), %%mm2\n"
28754- " movq 56(%0), %%mm3\n"
28755- " movq %%mm0, 32(%1)\n"
28756- " movq %%mm1, 40(%1)\n"
28757- " movq %%mm2, 48(%1)\n"
28758- " movq %%mm3, 56(%1)\n"
28759+ "1: prefetch 320(%1)\n"
28760+ "2: movq (%1), %%mm0\n"
28761+ " movq 8(%1), %%mm1\n"
28762+ " movq 16(%1), %%mm2\n"
28763+ " movq 24(%1), %%mm3\n"
28764+ " movq %%mm0, (%2)\n"
28765+ " movq %%mm1, 8(%2)\n"
28766+ " movq %%mm2, 16(%2)\n"
28767+ " movq %%mm3, 24(%2)\n"
28768+ " movq 32(%1), %%mm0\n"
28769+ " movq 40(%1), %%mm1\n"
28770+ " movq 48(%1), %%mm2\n"
28771+ " movq 56(%1), %%mm3\n"
28772+ " movq %%mm0, 32(%2)\n"
28773+ " movq %%mm1, 40(%2)\n"
28774+ " movq %%mm2, 48(%2)\n"
28775+ " movq %%mm3, 56(%2)\n"
28776 ".section .fixup, \"ax\"\n"
28777- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
28778+ "3:\n"
28779+
28780+#ifdef CONFIG_PAX_KERNEXEC
28781+ " movl %%cr0, %0\n"
28782+ " movl %0, %%eax\n"
28783+ " andl $0xFFFEFFFF, %%eax\n"
28784+ " movl %%eax, %%cr0\n"
28785+#endif
28786+
28787+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
28788+
28789+#ifdef CONFIG_PAX_KERNEXEC
28790+ " movl %0, %%cr0\n"
28791+#endif
28792+
28793 " jmp 2b\n"
28794 ".previous\n"
28795 _ASM_EXTABLE(1b, 3b)
28796- : : "r" (from), "r" (to) : "memory");
28797+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
28798
28799 from += 64;
28800 to += 64;
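
The fixup paths in this file are self-modifying: on a faulting prefetch they rewrite instruction 1b into a short jmp (0x1AEB is the byte pair "EB 1A", a jmp +26), which is a store into kernel text. KERNEXEC maps the text read-only, hence the added bracket that clears CR0.WP (bit 16; the andl $0xFFFEFFFF) around the store and then restores the saved CR0. Condensed into C (a sketch; interrupts are assumed off for the duration, as inside kernel_fpu_begin()):

	/* Condensed C version of the CONFIG_PAX_KERNEXEC bracket above. */
	static inline void text_poke_word_sketch(void *addr, unsigned short val)
	{
		unsigned long cr0;

		asm volatile("mov %%cr0, %0" : "=r" (cr0));
		asm volatile("mov %0, %%cr0" : : "r" (cr0 & ~0x10000UL)
			     : "memory");		/* WP off      */
		*(volatile unsigned short *)addr = val;	/* patch text  */
		asm volatile("mov %0, %%cr0" : : "r" (cr0)
			     : "memory");		/* WP back on  */
	}
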
28801diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
28802index f6d13ee..aca5f0b 100644
28803--- a/arch/x86/lib/msr-reg.S
28804+++ b/arch/x86/lib/msr-reg.S
28805@@ -3,6 +3,7 @@
28806 #include <asm/dwarf2.h>
28807 #include <asm/asm.h>
28808 #include <asm/msr.h>
28809+#include <asm/alternative-asm.h>
28810
28811 #ifdef CONFIG_X86_64
28812 /*
28813@@ -16,7 +17,7 @@ ENTRY(\op\()_safe_regs)
28814 CFI_STARTPROC
28815 pushq_cfi %rbx
28816 pushq_cfi %rbp
28817- movq %rdi, %r10 /* Save pointer */
28818+ movq %rdi, %r9 /* Save pointer */
28819 xorl %r11d, %r11d /* Return value */
28820 movl (%rdi), %eax
28821 movl 4(%rdi), %ecx
28822@@ -27,16 +28,17 @@ ENTRY(\op\()_safe_regs)
28823 movl 28(%rdi), %edi
28824 CFI_REMEMBER_STATE
28825 1: \op
28826-2: movl %eax, (%r10)
28827+2: movl %eax, (%r9)
28828 movl %r11d, %eax /* Return value */
28829- movl %ecx, 4(%r10)
28830- movl %edx, 8(%r10)
28831- movl %ebx, 12(%r10)
28832- movl %ebp, 20(%r10)
28833- movl %esi, 24(%r10)
28834- movl %edi, 28(%r10)
28835+ movl %ecx, 4(%r9)
28836+ movl %edx, 8(%r9)
28837+ movl %ebx, 12(%r9)
28838+ movl %ebp, 20(%r9)
28839+ movl %esi, 24(%r9)
28840+ movl %edi, 28(%r9)
28841 popq_cfi %rbp
28842 popq_cfi %rbx
28843+ pax_force_retaddr
28844 ret
28845 3:
28846 CFI_RESTORE_STATE
28847diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
28848index fc6ba17..d4d989d 100644
28849--- a/arch/x86/lib/putuser.S
28850+++ b/arch/x86/lib/putuser.S
28851@@ -16,7 +16,9 @@
28852 #include <asm/errno.h>
28853 #include <asm/asm.h>
28854 #include <asm/smap.h>
28855-
28856+#include <asm/segment.h>
28857+#include <asm/pgtable.h>
28858+#include <asm/alternative-asm.h>
28859
28860 /*
28861 * __put_user_X
28862@@ -30,57 +32,125 @@
28863 * as they get called from within inline assembly.
28864 */
28865
28866-#define ENTER CFI_STARTPROC ; \
28867- GET_THREAD_INFO(%_ASM_BX)
28868-#define EXIT ASM_CLAC ; \
28869- ret ; \
28870+#define ENTER CFI_STARTPROC
28871+#define EXIT ASM_CLAC ; \
28872+ pax_force_retaddr ; \
28873+ ret ; \
28874 CFI_ENDPROC
28875
28876+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
28877+#define _DEST %_ASM_CX,%_ASM_BX
28878+#else
28879+#define _DEST %_ASM_CX
28880+#endif
28881+
28882+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
28883+#define __copyuser_seg gs;
28884+#else
28885+#define __copyuser_seg
28886+#endif
28887+
28888 .text
28889 ENTRY(__put_user_1)
28890 ENTER
28891+
28892+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
28893+ GET_THREAD_INFO(%_ASM_BX)
28894 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
28895 jae bad_put_user
28896 ASM_STAC
28897-1: movb %al,(%_ASM_CX)
28898+
28899+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
28900+ mov pax_user_shadow_base,%_ASM_BX
28901+ cmp %_ASM_BX,%_ASM_CX
28902+ jb 1234f
28903+ xor %ebx,%ebx
28904+1234:
28905+#endif
28906+
28907+#endif
28908+
28909+1: __copyuser_seg movb %al,(_DEST)
28910 xor %eax,%eax
28911 EXIT
28912 ENDPROC(__put_user_1)
28913
28914 ENTRY(__put_user_2)
28915 ENTER
28916+
28917+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
28918+ GET_THREAD_INFO(%_ASM_BX)
28919 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
28920 sub $1,%_ASM_BX
28921 cmp %_ASM_BX,%_ASM_CX
28922 jae bad_put_user
28923 ASM_STAC
28924-2: movw %ax,(%_ASM_CX)
28925+
28926+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
28927+ mov pax_user_shadow_base,%_ASM_BX
28928+ cmp %_ASM_BX,%_ASM_CX
28929+ jb 1234f
28930+ xor %ebx,%ebx
28931+1234:
28932+#endif
28933+
28934+#endif
28935+
28936+2: __copyuser_seg movw %ax,(_DEST)
28937 xor %eax,%eax
28938 EXIT
28939 ENDPROC(__put_user_2)
28940
28941 ENTRY(__put_user_4)
28942 ENTER
28943+
28944+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
28945+ GET_THREAD_INFO(%_ASM_BX)
28946 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
28947 sub $3,%_ASM_BX
28948 cmp %_ASM_BX,%_ASM_CX
28949 jae bad_put_user
28950 ASM_STAC
28951-3: movl %eax,(%_ASM_CX)
28952+
28953+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
28954+ mov pax_user_shadow_base,%_ASM_BX
28955+ cmp %_ASM_BX,%_ASM_CX
28956+ jb 1234f
28957+ xor %ebx,%ebx
28958+1234:
28959+#endif
28960+
28961+#endif
28962+
28963+3: __copyuser_seg movl %eax,(_DEST)
28964 xor %eax,%eax
28965 EXIT
28966 ENDPROC(__put_user_4)
28967
28968 ENTRY(__put_user_8)
28969 ENTER
28970+
28971+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
28972+ GET_THREAD_INFO(%_ASM_BX)
28973 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
28974 sub $7,%_ASM_BX
28975 cmp %_ASM_BX,%_ASM_CX
28976 jae bad_put_user
28977 ASM_STAC
28978-4: mov %_ASM_AX,(%_ASM_CX)
28979+
28980+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
28981+ mov pax_user_shadow_base,%_ASM_BX
28982+ cmp %_ASM_BX,%_ASM_CX
28983+ jb 1234f
28984+ xor %ebx,%ebx
28985+1234:
28986+#endif
28987+
28988+#endif
28989+
28990+4: __copyuser_seg mov %_ASM_AX,(_DEST)
28991 #ifdef CONFIG_X86_32
28992-5: movl %edx,4(%_ASM_CX)
28993+5: __copyuser_seg movl %edx,4(_DEST)
28994 #endif
28995 xor %eax,%eax
28996 EXIT
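
Note the different fix-up style from getuser.S: __put_user_N leaves the user pointer in %_ASM_CX alone and instead parks the shadow offset, or zero for addresses already above the base, in %_ASM_BX, which the _DEST macro folds in through the (%_ASM_CX,%_ASM_BX) base+index form. A C analogue:

	extern unsigned long pax_user_shadow_base;	/* from this patch */

	/* C analogue of the _DEST addressing above (amd64 UDEREF case). */
	static inline void put_user_1_sketch(unsigned char val,
					     unsigned char *uptr)
	{
		unsigned long off = ((unsigned long)uptr < pax_user_shadow_base)
					? pax_user_shadow_base : 0;	/* %rbx */

		*(volatile unsigned char *)((unsigned long)uptr + off) = val;
	}
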
28997diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
28998index 1cad221..de671ee 100644
28999--- a/arch/x86/lib/rwlock.S
29000+++ b/arch/x86/lib/rwlock.S
29001@@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
29002 FRAME
29003 0: LOCK_PREFIX
29004 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
29005+
29006+#ifdef CONFIG_PAX_REFCOUNT
29007+ jno 1234f
29008+ LOCK_PREFIX
29009+ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
29010+ int $4
29011+1234:
29012+ _ASM_EXTABLE(1234b, 1234b)
29013+#endif
29014+
29015 1: rep; nop
29016 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
29017 jne 1b
29018 LOCK_PREFIX
29019 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
29020+
29021+#ifdef CONFIG_PAX_REFCOUNT
29022+ jno 1234f
29023+ LOCK_PREFIX
29024+ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
29025+ int $4
29026+1234:
29027+ _ASM_EXTABLE(1234b, 1234b)
29028+#endif
29029+
29030 jnz 0b
29031 ENDFRAME
29032+ pax_force_retaddr
29033 ret
29034 CFI_ENDPROC
29035 END(__write_lock_failed)
29036@@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
29037 FRAME
29038 0: LOCK_PREFIX
29039 READ_LOCK_SIZE(inc) (%__lock_ptr)
29040+
29041+#ifdef CONFIG_PAX_REFCOUNT
29042+ jno 1234f
29043+ LOCK_PREFIX
29044+ READ_LOCK_SIZE(dec) (%__lock_ptr)
29045+ int $4
29046+1234:
29047+ _ASM_EXTABLE(1234b, 1234b)
29048+#endif
29049+
29050 1: rep; nop
29051 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
29052 js 1b
29053 LOCK_PREFIX
29054 READ_LOCK_SIZE(dec) (%__lock_ptr)
29055+
29056+#ifdef CONFIG_PAX_REFCOUNT
29057+ jno 1234f
29058+ LOCK_PREFIX
29059+ READ_LOCK_SIZE(inc) (%__lock_ptr)
29060+ int $4
29061+1234:
29062+ _ASM_EXTABLE(1234b, 1234b)
29063+#endif
29064+
29065 js 0b
29066 ENDFRAME
29067+ pax_force_retaddr
29068 ret
29069 CFI_ENDPROC
29070 END(__read_lock_failed)
29071diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
29072index 5dff5f0..cadebf4 100644
29073--- a/arch/x86/lib/rwsem.S
29074+++ b/arch/x86/lib/rwsem.S
29075@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
29076 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
29077 CFI_RESTORE __ASM_REG(dx)
29078 restore_common_regs
29079+ pax_force_retaddr
29080 ret
29081 CFI_ENDPROC
29082 ENDPROC(call_rwsem_down_read_failed)
29083@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
29084 movq %rax,%rdi
29085 call rwsem_down_write_failed
29086 restore_common_regs
29087+ pax_force_retaddr
29088 ret
29089 CFI_ENDPROC
29090 ENDPROC(call_rwsem_down_write_failed)
29091@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
29092 movq %rax,%rdi
29093 call rwsem_wake
29094 restore_common_regs
29095-1: ret
29096+1: pax_force_retaddr
29097+ ret
29098 CFI_ENDPROC
29099 ENDPROC(call_rwsem_wake)
29100
29101@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
29102 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
29103 CFI_RESTORE __ASM_REG(dx)
29104 restore_common_regs
29105+ pax_force_retaddr
29106 ret
29107 CFI_ENDPROC
29108 ENDPROC(call_rwsem_downgrade_wake)
29109diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
29110index a63efd6..ccecad8 100644
29111--- a/arch/x86/lib/thunk_64.S
29112+++ b/arch/x86/lib/thunk_64.S
29113@@ -8,6 +8,7 @@
29114 #include <linux/linkage.h>
29115 #include <asm/dwarf2.h>
29116 #include <asm/calling.h>
29117+#include <asm/alternative-asm.h>
29118
29119 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
29120 .macro THUNK name, func, put_ret_addr_in_rdi=0
29121@@ -41,5 +42,6 @@
29122 SAVE_ARGS
29123 restore:
29124 RESTORE_ARGS
29125+ pax_force_retaddr
29126 ret
29127 CFI_ENDPROC
29128diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
29129index 3eb18ac..6890bc3 100644
29130--- a/arch/x86/lib/usercopy_32.c
29131+++ b/arch/x86/lib/usercopy_32.c
29132@@ -42,11 +42,13 @@ do { \
29133 int __d0; \
29134 might_fault(); \
29135 __asm__ __volatile__( \
29136+ __COPYUSER_SET_ES \
29137 ASM_STAC "\n" \
29138 "0: rep; stosl\n" \
29139 " movl %2,%0\n" \
29140 "1: rep; stosb\n" \
29141 "2: " ASM_CLAC "\n" \
29142+ __COPYUSER_RESTORE_ES \
29143 ".section .fixup,\"ax\"\n" \
29144 "3: lea 0(%2,%0,4),%0\n" \
29145 " jmp 2b\n" \
29146@@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user);
29147
29148 #ifdef CONFIG_X86_INTEL_USERCOPY
29149 static unsigned long
29150-__copy_user_intel(void __user *to, const void *from, unsigned long size)
29151+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
29152 {
29153 int d0, d1;
29154 __asm__ __volatile__(
29155@@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
29156 " .align 2,0x90\n"
29157 "3: movl 0(%4), %%eax\n"
29158 "4: movl 4(%4), %%edx\n"
29159- "5: movl %%eax, 0(%3)\n"
29160- "6: movl %%edx, 4(%3)\n"
29161+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
29162+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
29163 "7: movl 8(%4), %%eax\n"
29164 "8: movl 12(%4),%%edx\n"
29165- "9: movl %%eax, 8(%3)\n"
29166- "10: movl %%edx, 12(%3)\n"
29167+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
29168+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
29169 "11: movl 16(%4), %%eax\n"
29170 "12: movl 20(%4), %%edx\n"
29171- "13: movl %%eax, 16(%3)\n"
29172- "14: movl %%edx, 20(%3)\n"
29173+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
29174+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
29175 "15: movl 24(%4), %%eax\n"
29176 "16: movl 28(%4), %%edx\n"
29177- "17: movl %%eax, 24(%3)\n"
29178- "18: movl %%edx, 28(%3)\n"
29179+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
29180+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
29181 "19: movl 32(%4), %%eax\n"
29182 "20: movl 36(%4), %%edx\n"
29183- "21: movl %%eax, 32(%3)\n"
29184- "22: movl %%edx, 36(%3)\n"
29185+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
29186+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
29187 "23: movl 40(%4), %%eax\n"
29188 "24: movl 44(%4), %%edx\n"
29189- "25: movl %%eax, 40(%3)\n"
29190- "26: movl %%edx, 44(%3)\n"
29191+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
29192+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
29193 "27: movl 48(%4), %%eax\n"
29194 "28: movl 52(%4), %%edx\n"
29195- "29: movl %%eax, 48(%3)\n"
29196- "30: movl %%edx, 52(%3)\n"
29197+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
29198+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
29199 "31: movl 56(%4), %%eax\n"
29200 "32: movl 60(%4), %%edx\n"
29201- "33: movl %%eax, 56(%3)\n"
29202- "34: movl %%edx, 60(%3)\n"
29203+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
29204+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
29205 " addl $-64, %0\n"
29206 " addl $64, %4\n"
29207 " addl $64, %3\n"
29208@@ -149,10 +151,12 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
29209 " shrl $2, %0\n"
29210 " andl $3, %%eax\n"
29211 " cld\n"
29212+ __COPYUSER_SET_ES
29213 "99: rep; movsl\n"
29214 "36: movl %%eax, %0\n"
29215 "37: rep; movsb\n"
29216 "100:\n"
29217+ __COPYUSER_RESTORE_ES
29218 ".section .fixup,\"ax\"\n"
29219 "101: lea 0(%%eax,%0,4),%0\n"
29220 " jmp 100b\n"
29221@@ -202,46 +206,150 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
29222 }
29223
29224 static unsigned long
29225+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
29226+{
29227+ int d0, d1;
29228+ __asm__ __volatile__(
29229+ " .align 2,0x90\n"
29230+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
29231+ " cmpl $67, %0\n"
29232+ " jbe 3f\n"
29233+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
29234+ " .align 2,0x90\n"
29235+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
29236+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
29237+ "5: movl %%eax, 0(%3)\n"
29238+ "6: movl %%edx, 4(%3)\n"
29239+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
29240+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
29241+ "9: movl %%eax, 8(%3)\n"
29242+ "10: movl %%edx, 12(%3)\n"
29243+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
29244+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
29245+ "13: movl %%eax, 16(%3)\n"
29246+ "14: movl %%edx, 20(%3)\n"
29247+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
29248+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
29249+ "17: movl %%eax, 24(%3)\n"
29250+ "18: movl %%edx, 28(%3)\n"
29251+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
29252+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
29253+ "21: movl %%eax, 32(%3)\n"
29254+ "22: movl %%edx, 36(%3)\n"
29255+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
29256+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
29257+ "25: movl %%eax, 40(%3)\n"
29258+ "26: movl %%edx, 44(%3)\n"
29259+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
29260+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
29261+ "29: movl %%eax, 48(%3)\n"
29262+ "30: movl %%edx, 52(%3)\n"
29263+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
29264+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
29265+ "33: movl %%eax, 56(%3)\n"
29266+ "34: movl %%edx, 60(%3)\n"
29267+ " addl $-64, %0\n"
29268+ " addl $64, %4\n"
29269+ " addl $64, %3\n"
29270+ " cmpl $63, %0\n"
29271+ " ja 1b\n"
29272+ "35: movl %0, %%eax\n"
29273+ " shrl $2, %0\n"
29274+ " andl $3, %%eax\n"
29275+ " cld\n"
29276+ "99: rep; "__copyuser_seg" movsl\n"
29277+ "36: movl %%eax, %0\n"
29278+ "37: rep; "__copyuser_seg" movsb\n"
29279+ "100:\n"
29280+ ".section .fixup,\"ax\"\n"
29281+ "101: lea 0(%%eax,%0,4),%0\n"
29282+ " jmp 100b\n"
29283+ ".previous\n"
29284+ _ASM_EXTABLE(1b,100b)
29285+ _ASM_EXTABLE(2b,100b)
29286+ _ASM_EXTABLE(3b,100b)
29287+ _ASM_EXTABLE(4b,100b)
29288+ _ASM_EXTABLE(5b,100b)
29289+ _ASM_EXTABLE(6b,100b)
29290+ _ASM_EXTABLE(7b,100b)
29291+ _ASM_EXTABLE(8b,100b)
29292+ _ASM_EXTABLE(9b,100b)
29293+ _ASM_EXTABLE(10b,100b)
29294+ _ASM_EXTABLE(11b,100b)
29295+ _ASM_EXTABLE(12b,100b)
29296+ _ASM_EXTABLE(13b,100b)
29297+ _ASM_EXTABLE(14b,100b)
29298+ _ASM_EXTABLE(15b,100b)
29299+ _ASM_EXTABLE(16b,100b)
29300+ _ASM_EXTABLE(17b,100b)
29301+ _ASM_EXTABLE(18b,100b)
29302+ _ASM_EXTABLE(19b,100b)
29303+ _ASM_EXTABLE(20b,100b)
29304+ _ASM_EXTABLE(21b,100b)
29305+ _ASM_EXTABLE(22b,100b)
29306+ _ASM_EXTABLE(23b,100b)
29307+ _ASM_EXTABLE(24b,100b)
29308+ _ASM_EXTABLE(25b,100b)
29309+ _ASM_EXTABLE(26b,100b)
29310+ _ASM_EXTABLE(27b,100b)
29311+ _ASM_EXTABLE(28b,100b)
29312+ _ASM_EXTABLE(29b,100b)
29313+ _ASM_EXTABLE(30b,100b)
29314+ _ASM_EXTABLE(31b,100b)
29315+ _ASM_EXTABLE(32b,100b)
29316+ _ASM_EXTABLE(33b,100b)
29317+ _ASM_EXTABLE(34b,100b)
29318+ _ASM_EXTABLE(35b,100b)
29319+ _ASM_EXTABLE(36b,100b)
29320+ _ASM_EXTABLE(37b,100b)
29321+ _ASM_EXTABLE(99b,101b)
29322+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
29323+ : "1"(to), "2"(from), "0"(size)
29324+ : "eax", "edx", "memory");
29325+ return size;
29326+}
29327+
29328+static unsigned long __size_overflow(3)
29329 __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
29330 {
29331 int d0, d1;
29332 __asm__ __volatile__(
29333 " .align 2,0x90\n"
29334- "0: movl 32(%4), %%eax\n"
29335+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
29336 " cmpl $67, %0\n"
29337 " jbe 2f\n"
29338- "1: movl 64(%4), %%eax\n"
29339+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
29340 " .align 2,0x90\n"
29341- "2: movl 0(%4), %%eax\n"
29342- "21: movl 4(%4), %%edx\n"
29343+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
29344+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
29345 " movl %%eax, 0(%3)\n"
29346 " movl %%edx, 4(%3)\n"
29347- "3: movl 8(%4), %%eax\n"
29348- "31: movl 12(%4),%%edx\n"
29349+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
29350+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
29351 " movl %%eax, 8(%3)\n"
29352 " movl %%edx, 12(%3)\n"
29353- "4: movl 16(%4), %%eax\n"
29354- "41: movl 20(%4), %%edx\n"
29355+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
29356+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
29357 " movl %%eax, 16(%3)\n"
29358 " movl %%edx, 20(%3)\n"
29359- "10: movl 24(%4), %%eax\n"
29360- "51: movl 28(%4), %%edx\n"
29361+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
29362+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
29363 " movl %%eax, 24(%3)\n"
29364 " movl %%edx, 28(%3)\n"
29365- "11: movl 32(%4), %%eax\n"
29366- "61: movl 36(%4), %%edx\n"
29367+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
29368+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
29369 " movl %%eax, 32(%3)\n"
29370 " movl %%edx, 36(%3)\n"
29371- "12: movl 40(%4), %%eax\n"
29372- "71: movl 44(%4), %%edx\n"
29373+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
29374+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
29375 " movl %%eax, 40(%3)\n"
29376 " movl %%edx, 44(%3)\n"
29377- "13: movl 48(%4), %%eax\n"
29378- "81: movl 52(%4), %%edx\n"
29379+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
29380+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
29381 " movl %%eax, 48(%3)\n"
29382 " movl %%edx, 52(%3)\n"
29383- "14: movl 56(%4), %%eax\n"
29384- "91: movl 60(%4), %%edx\n"
29385+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
29386+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
29387 " movl %%eax, 56(%3)\n"
29388 " movl %%edx, 60(%3)\n"
29389 " addl $-64, %0\n"
29390@@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
29391 " shrl $2, %0\n"
29392 " andl $3, %%eax\n"
29393 " cld\n"
29394- "6: rep; movsl\n"
29395+ "6: rep; "__copyuser_seg" movsl\n"
29396 " movl %%eax,%0\n"
29397- "7: rep; movsb\n"
29398+ "7: rep; "__copyuser_seg" movsb\n"
29399 "8:\n"
29400 ".section .fixup,\"ax\"\n"
29401 "9: lea 0(%%eax,%0,4),%0\n"
29402@@ -298,48 +406,48 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
29403 * hyoshiok@miraclelinux.com
29404 */
29405
29406-static unsigned long __copy_user_zeroing_intel_nocache(void *to,
29407+static unsigned long __size_overflow(3) __copy_user_zeroing_intel_nocache(void *to,
29408 const void __user *from, unsigned long size)
29409 {
29410 int d0, d1;
29411
29412 __asm__ __volatile__(
29413 " .align 2,0x90\n"
29414- "0: movl 32(%4), %%eax\n"
29415+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
29416 " cmpl $67, %0\n"
29417 " jbe 2f\n"
29418- "1: movl 64(%4), %%eax\n"
29419+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
29420 " .align 2,0x90\n"
29421- "2: movl 0(%4), %%eax\n"
29422- "21: movl 4(%4), %%edx\n"
29423+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
29424+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
29425 " movnti %%eax, 0(%3)\n"
29426 " movnti %%edx, 4(%3)\n"
29427- "3: movl 8(%4), %%eax\n"
29428- "31: movl 12(%4),%%edx\n"
29429+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
29430+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
29431 " movnti %%eax, 8(%3)\n"
29432 " movnti %%edx, 12(%3)\n"
29433- "4: movl 16(%4), %%eax\n"
29434- "41: movl 20(%4), %%edx\n"
29435+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
29436+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
29437 " movnti %%eax, 16(%3)\n"
29438 " movnti %%edx, 20(%3)\n"
29439- "10: movl 24(%4), %%eax\n"
29440- "51: movl 28(%4), %%edx\n"
29441+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
29442+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
29443 " movnti %%eax, 24(%3)\n"
29444 " movnti %%edx, 28(%3)\n"
29445- "11: movl 32(%4), %%eax\n"
29446- "61: movl 36(%4), %%edx\n"
29447+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
29448+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
29449 " movnti %%eax, 32(%3)\n"
29450 " movnti %%edx, 36(%3)\n"
29451- "12: movl 40(%4), %%eax\n"
29452- "71: movl 44(%4), %%edx\n"
29453+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
29454+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
29455 " movnti %%eax, 40(%3)\n"
29456 " movnti %%edx, 44(%3)\n"
29457- "13: movl 48(%4), %%eax\n"
29458- "81: movl 52(%4), %%edx\n"
29459+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
29460+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
29461 " movnti %%eax, 48(%3)\n"
29462 " movnti %%edx, 52(%3)\n"
29463- "14: movl 56(%4), %%eax\n"
29464- "91: movl 60(%4), %%edx\n"
29465+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
29466+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
29467 " movnti %%eax, 56(%3)\n"
29468 " movnti %%edx, 60(%3)\n"
29469 " addl $-64, %0\n"
29470@@ -352,9 +460,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
29471 " shrl $2, %0\n"
29472 " andl $3, %%eax\n"
29473 " cld\n"
29474- "6: rep; movsl\n"
29475+ "6: rep; "__copyuser_seg" movsl\n"
29476 " movl %%eax,%0\n"
29477- "7: rep; movsb\n"
29478+ "7: rep; "__copyuser_seg" movsb\n"
29479 "8:\n"
29480 ".section .fixup,\"ax\"\n"
29481 "9: lea 0(%%eax,%0,4),%0\n"
29482@@ -392,48 +500,48 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
29483 return size;
29484 }
29485
29486-static unsigned long __copy_user_intel_nocache(void *to,
29487+static unsigned long __size_overflow(3) __copy_user_intel_nocache(void *to,
29488 const void __user *from, unsigned long size)
29489 {
29490 int d0, d1;
29491
29492 __asm__ __volatile__(
29493 " .align 2,0x90\n"
29494- "0: movl 32(%4), %%eax\n"
29495+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
29496 " cmpl $67, %0\n"
29497 " jbe 2f\n"
29498- "1: movl 64(%4), %%eax\n"
29499+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
29500 " .align 2,0x90\n"
29501- "2: movl 0(%4), %%eax\n"
29502- "21: movl 4(%4), %%edx\n"
29503+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
29504+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
29505 " movnti %%eax, 0(%3)\n"
29506 " movnti %%edx, 4(%3)\n"
29507- "3: movl 8(%4), %%eax\n"
29508- "31: movl 12(%4),%%edx\n"
29509+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
29510+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
29511 " movnti %%eax, 8(%3)\n"
29512 " movnti %%edx, 12(%3)\n"
29513- "4: movl 16(%4), %%eax\n"
29514- "41: movl 20(%4), %%edx\n"
29515+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
29516+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
29517 " movnti %%eax, 16(%3)\n"
29518 " movnti %%edx, 20(%3)\n"
29519- "10: movl 24(%4), %%eax\n"
29520- "51: movl 28(%4), %%edx\n"
29521+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
29522+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
29523 " movnti %%eax, 24(%3)\n"
29524 " movnti %%edx, 28(%3)\n"
29525- "11: movl 32(%4), %%eax\n"
29526- "61: movl 36(%4), %%edx\n"
29527+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
29528+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
29529 " movnti %%eax, 32(%3)\n"
29530 " movnti %%edx, 36(%3)\n"
29531- "12: movl 40(%4), %%eax\n"
29532- "71: movl 44(%4), %%edx\n"
29533+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
29534+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
29535 " movnti %%eax, 40(%3)\n"
29536 " movnti %%edx, 44(%3)\n"
29537- "13: movl 48(%4), %%eax\n"
29538- "81: movl 52(%4), %%edx\n"
29539+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
29540+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
29541 " movnti %%eax, 48(%3)\n"
29542 " movnti %%edx, 52(%3)\n"
29543- "14: movl 56(%4), %%eax\n"
29544- "91: movl 60(%4), %%edx\n"
29545+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
29546+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
29547 " movnti %%eax, 56(%3)\n"
29548 " movnti %%edx, 60(%3)\n"
29549 " addl $-64, %0\n"
29550@@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
29551 " shrl $2, %0\n"
29552 " andl $3, %%eax\n"
29553 " cld\n"
29554- "6: rep; movsl\n"
29555+ "6: rep; "__copyuser_seg" movsl\n"
29556 " movl %%eax,%0\n"
29557- "7: rep; movsb\n"
29558+ "7: rep; "__copyuser_seg" movsb\n"
29559 "8:\n"
29560 ".section .fixup,\"ax\"\n"
29561 "9: lea 0(%%eax,%0,4),%0\n"
29562@@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
29563 */
29564 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
29565 unsigned long size);
29566-unsigned long __copy_user_intel(void __user *to, const void *from,
29567+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
29568+ unsigned long size);
29569+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
29570 unsigned long size);
29571 unsigned long __copy_user_zeroing_intel_nocache(void *to,
29572 const void __user *from, unsigned long size);
29573 #endif /* CONFIG_X86_INTEL_USERCOPY */
29574
29575 /* Generic arbitrary sized copy. */
29576-#define __copy_user(to, from, size) \
29577+#define __copy_user(to, from, size, prefix, set, restore) \
29578 do { \
29579 int __d0, __d1, __d2; \
29580 __asm__ __volatile__( \
29581+ set \
29582 " cmp $7,%0\n" \
29583 " jbe 1f\n" \
29584 " movl %1,%0\n" \
29585 " negl %0\n" \
29586 " andl $7,%0\n" \
29587 " subl %0,%3\n" \
29588- "4: rep; movsb\n" \
29589+ "4: rep; "prefix"movsb\n" \
29590 " movl %3,%0\n" \
29591 " shrl $2,%0\n" \
29592 " andl $3,%3\n" \
29593 " .align 2,0x90\n" \
29594- "0: rep; movsl\n" \
29595+ "0: rep; "prefix"movsl\n" \
29596 " movl %3,%0\n" \
29597- "1: rep; movsb\n" \
29598+ "1: rep; "prefix"movsb\n" \
29599 "2:\n" \
29600+ restore \
29601 ".section .fixup,\"ax\"\n" \
29602 "5: addl %3,%0\n" \
29603 " jmp 2b\n" \
29604@@ -538,14 +650,14 @@ do { \
29605 " negl %0\n" \
29606 " andl $7,%0\n" \
29607 " subl %0,%3\n" \
29608- "4: rep; movsb\n" \
29609+ "4: rep; "__copyuser_seg"movsb\n" \
29610 " movl %3,%0\n" \
29611 " shrl $2,%0\n" \
29612 " andl $3,%3\n" \
29613 " .align 2,0x90\n" \
29614- "0: rep; movsl\n" \
29615+ "0: rep; "__copyuser_seg"movsl\n" \
29616 " movl %3,%0\n" \
29617- "1: rep; movsb\n" \
29618+ "1: rep; "__copyuser_seg"movsb\n" \
29619 "2:\n" \
29620 ".section .fixup,\"ax\"\n" \
29621 "5: addl %3,%0\n" \
29622@@ -572,9 +684,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
29623 {
29624 stac();
29625 if (movsl_is_ok(to, from, n))
29626- __copy_user(to, from, n);
29627+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
29628 else
29629- n = __copy_user_intel(to, from, n);
29630+ n = __generic_copy_to_user_intel(to, from, n);
29631 clac();
29632 return n;
29633 }
29634@@ -598,10 +710,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
29635 {
29636 stac();
29637 if (movsl_is_ok(to, from, n))
29638- __copy_user(to, from, n);
29639+ __copy_user(to, from, n, __copyuser_seg, "", "");
29640 else
29641- n = __copy_user_intel((void __user *)to,
29642- (const void *)from, n);
29643+ n = __generic_copy_from_user_intel(to, from, n);
29644 clac();
29645 return n;
29646 }
29647@@ -632,60 +743,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
29648 if (n > 64 && cpu_has_xmm2)
29649 n = __copy_user_intel_nocache(to, from, n);
29650 else
29651- __copy_user(to, from, n);
29652+ __copy_user(to, from, n, __copyuser_seg, "", "");
29653 #else
29654- __copy_user(to, from, n);
29655+ __copy_user(to, from, n, __copyuser_seg, "", "");
29656 #endif
29657 clac();
29658 return n;
29659 }
29660 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
29661
29662-/**
29663- * copy_to_user: - Copy a block of data into user space.
29664- * @to: Destination address, in user space.
29665- * @from: Source address, in kernel space.
29666- * @n: Number of bytes to copy.
29667- *
29668- * Context: User context only. This function may sleep.
29669- *
29670- * Copy data from kernel space to user space.
29671- *
29672- * Returns number of bytes that could not be copied.
29673- * On success, this will be zero.
29674- */
29675-unsigned long
29676-copy_to_user(void __user *to, const void *from, unsigned long n)
29677+#ifdef CONFIG_PAX_MEMORY_UDEREF
29678+void __set_fs(mm_segment_t x)
29679 {
29680- if (access_ok(VERIFY_WRITE, to, n))
29681- n = __copy_to_user(to, from, n);
29682- return n;
29683+ switch (x.seg) {
29684+ case 0:
29685+ loadsegment(gs, 0);
29686+ break;
29687+ case TASK_SIZE_MAX:
29688+ loadsegment(gs, __USER_DS);
29689+ break;
29690+ case -1UL:
29691+ loadsegment(gs, __KERNEL_DS);
29692+ break;
29693+ default:
29694+ BUG();
29695+ }
29696 }
29697-EXPORT_SYMBOL(copy_to_user);
29698+EXPORT_SYMBOL(__set_fs);
29699
29700-/**
29701- * copy_from_user: - Copy a block of data from user space.
29702- * @to: Destination address, in kernel space.
29703- * @from: Source address, in user space.
29704- * @n: Number of bytes to copy.
29705- *
29706- * Context: User context only. This function may sleep.
29707- *
29708- * Copy data from user space to kernel space.
29709- *
29710- * Returns number of bytes that could not be copied.
29711- * On success, this will be zero.
29712- *
29713- * If some data could not be copied, this function will pad the copied
29714- * data to the requested size using zero bytes.
29715- */
29716-unsigned long
29717-_copy_from_user(void *to, const void __user *from, unsigned long n)
29718+void set_fs(mm_segment_t x)
29719 {
29720- if (access_ok(VERIFY_READ, from, n))
29721- n = __copy_from_user(to, from, n);
29722- else
29723- memset(to, 0, n);
29724- return n;
29725+ current_thread_info()->addr_limit = x;
29726+ __set_fs(x);
29727 }
29728-EXPORT_SYMBOL(_copy_from_user);
29729+EXPORT_SYMBOL(set_fs);
29730+#endif
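[Note] Under UDEREF, set_fs() no longer just stores addr_limit: __set_fs() mirrors it into %gs, so the limit is enforced by segmentation rather than by software checks alone. The mapping implemented by the switch above, plus the unchanged caller-side idiom:

	/*
	 *	addr_limit	%gs selector	reachable through user accessors
	 *	0		null		nothing (userland closed off)
	 *	TASK_SIZE_MAX	__USER_DS	userland only
	 *	-1UL		__KERNEL_DS	everything (KERNEL_DS)
	 */
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);		/* now also reloads %gs with __KERNEL_DS */
	/* ... access a kernel pointer through the user accessors ... */
	set_fs(old_fs);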
29731diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
29732index 906fea3..0194a18 100644
29733--- a/arch/x86/lib/usercopy_64.c
29734+++ b/arch/x86/lib/usercopy_64.c
29735@@ -18,6 +18,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
29736 might_fault();
29737 /* no memory constraint because it doesn't change any memory gcc knows
29738 about */
29739+ pax_open_userland();
29740 stac();
29741 asm volatile(
29742 " testq %[size8],%[size8]\n"
29743@@ -39,9 +40,10 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
29744 _ASM_EXTABLE(0b,3b)
29745 _ASM_EXTABLE(1b,2b)
29746 : [size8] "=&c"(size), [dst] "=&D" (__d0)
29747- : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
29748+ : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)),
29749 [zero] "r" (0UL), [eight] "r" (8UL));
29750 clac();
29751+ pax_close_userland();
29752 return size;
29753 }
29754 EXPORT_SYMBOL(__clear_user);
29755@@ -54,12 +56,11 @@ unsigned long clear_user(void __user *to, unsigned long n)
29756 }
29757 EXPORT_SYMBOL(clear_user);
29758
29759-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
29760+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
29761 {
29762- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
29763- return copy_user_generic((__force void *)to, (__force void *)from, len);
29764- }
29765- return len;
29766+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
29767+ return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len);
29768+ return len;
29769 }
29770 EXPORT_SYMBOL(copy_in_user);
29771
29772@@ -69,11 +70,13 @@ EXPORT_SYMBOL(copy_in_user);
29773 * it is not necessary to optimize tail handling.
29774 */
29775 unsigned long
29776-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
29777+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
29778 {
29779 char c;
29780 unsigned zero_len;
29781
29782+ clac();
29783+ pax_close_userland();
29784 for (; len; --len, to++) {
29785 if (__get_user_nocheck(c, from++, sizeof(char)))
29786 break;
29787@@ -84,6 +87,5 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
29788 for (c = 0, zero_len = len; zerorest && zero_len; --zero_len)
29789 if (__put_user_nocheck(c, to++, sizeof(char)))
29790 break;
29791- clac();
29792 return len;
29793 }
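[Note] The ____m() wrapper that now appears around user pointers in __clear_user() and copy_in_user() is the amd64 side of UDEREF: userland is reachable from kernel mode only through a shadow mapping at pax_user_shadow_base, and ____m() rebases a user address into it. Its definition is not in this hunk; a sketch under that assumption (treat the exact form as a guess):

	#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
	#define ____m(x) ({						\
		unsigned long ____x = (unsigned long)(x);		\
		if (____x < pax_user_shadow_base)			\
			____x += pax_user_shadow_base;			\
		(typeof(x))____x;					\
	})
	#else
	#define ____m(x)	(x)
	#endif

This is also why the fault-handler hunk further down rewrites fault addresses below 2 * pax_user_shadow_base back into real user addresses.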
29794diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
29795index 23d8e5f..9ccc13a 100644
29796--- a/arch/x86/mm/Makefile
29797+++ b/arch/x86/mm/Makefile
29798@@ -28,3 +28,7 @@ obj-$(CONFIG_ACPI_NUMA) += srat.o
29799 obj-$(CONFIG_NUMA_EMU) += numa_emulation.o
29800
29801 obj-$(CONFIG_MEMTEST) += memtest.o
29802+
29803+quote:="
29804+obj-$(CONFIG_X86_64) += uderef_64.o
29805+CFLAGS_uderef_64.o := $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
29806diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
29807index 903ec1e..c4166b2 100644
29808--- a/arch/x86/mm/extable.c
29809+++ b/arch/x86/mm/extable.c
29810@@ -6,12 +6,24 @@
29811 static inline unsigned long
29812 ex_insn_addr(const struct exception_table_entry *x)
29813 {
29814- return (unsigned long)&x->insn + x->insn;
29815+ unsigned long reloc = 0;
29816+
29817+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
29818+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
29819+#endif
29820+
29821+ return (unsigned long)&x->insn + x->insn + reloc;
29822 }
29823 static inline unsigned long
29824 ex_fixup_addr(const struct exception_table_entry *x)
29825 {
29826- return (unsigned long)&x->fixup + x->fixup;
29827+ unsigned long reloc = 0;
29828+
29829+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
29830+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
29831+#endif
29832+
29833+ return (unsigned long)&x->fixup + x->fixup + reloc;
29834 }
29835
29836 int fixup_exception(struct pt_regs *regs)
29837@@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
29838 unsigned long new_ip;
29839
29840 #ifdef CONFIG_PNPBIOS
29841- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
29842+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
29843 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
29844 extern u32 pnp_bios_is_utter_crap;
29845 pnp_bios_is_utter_crap = 1;
29846@@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start,
29847 i += 4;
29848 p->fixup -= i;
29849 i += 4;
29850+
29851+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
29852+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT));
29853+ p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
29854+ p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
29855+#endif
29856+
29857 }
29858 }
29859
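[Note] Stock x86 exception tables have been relative since 3.5: each entry stores two signed 32-bit offsets measured from the entry's own fields. KERNEXEC relocates the 32-bit kernel by ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR, so the hunk adds that displacement in the runtime decoders and applies the inverse adjustment to the sorted entries, keeping both views consistent. The stock decode, for reference:

	struct exception_table_entry {
		int insn, fixup;	/* signed offsets from each field's own address */
	};

	static unsigned long ex_decode(const int *field)
	{
		return (unsigned long)field + *field;
	}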
29860diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
29861index 654be4a..a4a3da1 100644
29862--- a/arch/x86/mm/fault.c
29863+++ b/arch/x86/mm/fault.c
29864@@ -14,11 +14,18 @@
29865 #include <linux/hugetlb.h> /* hstate_index_to_shift */
29866 #include <linux/prefetch.h> /* prefetchw */
29867 #include <linux/context_tracking.h> /* exception_enter(), ... */
29868+#include <linux/unistd.h>
29869+#include <linux/compiler.h>
29870
29871 #include <asm/traps.h> /* dotraplinkage, ... */
29872 #include <asm/pgalloc.h> /* pgd_*(), ... */
29873 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
29874 #include <asm/fixmap.h> /* VSYSCALL_START */
29875+#include <asm/tlbflush.h>
29876+
29877+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
29878+#include <asm/stacktrace.h>
29879+#endif
29880
29881 /*
29882 * Page fault error code bits:
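[Note] For reference while reading the fault.c hunks below, the error-code bits they test are the stock ones this file defines right after the comment shown:

	enum x86_pf_error_code {
		PF_PROT		= 1 << 0,	/* 0: no page found, 1: protection fault */
		PF_WRITE	= 1 << 1,	/* write access */
		PF_USER		= 1 << 2,	/* fault taken in user mode */
		PF_RSVD		= 1 << 3,	/* reserved bit set in a paging entry */
		PF_INSTR	= 1 << 4,	/* instruction fetch (NX/SMEP) */
	};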
29883@@ -56,7 +63,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
29884 int ret = 0;
29885
29886 /* kprobe_running() needs smp_processor_id() */
29887- if (kprobes_built_in() && !user_mode_vm(regs)) {
29888+ if (kprobes_built_in() && !user_mode(regs)) {
29889 preempt_disable();
29890 if (kprobe_running() && kprobe_fault_handler(regs, 14))
29891 ret = 1;
29892@@ -117,7 +124,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
29893 return !instr_lo || (instr_lo>>1) == 1;
29894 case 0x00:
29895 /* Prefetch instruction is 0x0F0D or 0x0F18 */
29896- if (probe_kernel_address(instr, opcode))
29897+ if (user_mode(regs)) {
29898+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
29899+ return 0;
29900+ } else if (probe_kernel_address(instr, opcode))
29901 return 0;
29902
29903 *prefetch = (instr_lo == 0xF) &&
29904@@ -151,7 +161,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
29905 while (instr < max_instr) {
29906 unsigned char opcode;
29907
29908- if (probe_kernel_address(instr, opcode))
29909+ if (user_mode(regs)) {
29910+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
29911+ break;
29912+ } else if (probe_kernel_address(instr, opcode))
29913 break;
29914
29915 instr++;
29916@@ -182,6 +195,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
29917 force_sig_info(si_signo, &info, tsk);
29918 }
29919
29920+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
29921+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
29922+#endif
29923+
29924+#ifdef CONFIG_PAX_EMUTRAMP
29925+static int pax_handle_fetch_fault(struct pt_regs *regs);
29926+#endif
29927+
29928+#ifdef CONFIG_PAX_PAGEEXEC
29929+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
29930+{
29931+ pgd_t *pgd;
29932+ pud_t *pud;
29933+ pmd_t *pmd;
29934+
29935+ pgd = pgd_offset(mm, address);
29936+ if (!pgd_present(*pgd))
29937+ return NULL;
29938+ pud = pud_offset(pgd, address);
29939+ if (!pud_present(*pud))
29940+ return NULL;
29941+ pmd = pmd_offset(pud, address);
29942+ if (!pmd_present(*pmd))
29943+ return NULL;
29944+ return pmd;
29945+}
29946+#endif
29947+
29948 DEFINE_SPINLOCK(pgd_lock);
29949 LIST_HEAD(pgd_list);
29950
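[Note] pax_get_pmd() above is a plain pgd -> pud -> pmd descent that bails on the first non-present level; the PAGEEXEC handler added further down pairs it with pte_offset_map_lock() to reach a locked pte. The composition, as a sketch (helper name hypothetical):

	static pte_t *pax_lookup_pte(struct mm_struct *mm, unsigned long address,
				     spinlock_t **ptl)
	{
		pmd_t *pmd = pax_get_pmd(mm, address);

		if (!pmd)
			return NULL;
		return pte_offset_map_lock(mm, pmd, address, ptl);
	}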
29951@@ -232,10 +273,27 @@ void vmalloc_sync_all(void)
29952 for (address = VMALLOC_START & PMD_MASK;
29953 address >= TASK_SIZE && address < FIXADDR_TOP;
29954 address += PMD_SIZE) {
29955+
29956+#ifdef CONFIG_PAX_PER_CPU_PGD
29957+ unsigned long cpu;
29958+#else
29959 struct page *page;
29960+#endif
29961
29962 spin_lock(&pgd_lock);
29963+
29964+#ifdef CONFIG_PAX_PER_CPU_PGD
29965+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
29966+ pgd_t *pgd = get_cpu_pgd(cpu, user);
29967+ pmd_t *ret;
29968+
29969+ ret = vmalloc_sync_one(pgd, address);
29970+ if (!ret)
29971+ break;
29972+ pgd = get_cpu_pgd(cpu, kernel);
29973+#else
29974 list_for_each_entry(page, &pgd_list, lru) {
29975+ pgd_t *pgd;
29976 spinlock_t *pgt_lock;
29977 pmd_t *ret;
29978
29979@@ -243,8 +301,14 @@ void vmalloc_sync_all(void)
29980 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
29981
29982 spin_lock(pgt_lock);
29983- ret = vmalloc_sync_one(page_address(page), address);
29984+ pgd = page_address(page);
29985+#endif
29986+
29987+ ret = vmalloc_sync_one(pgd, address);
29988+
29989+#ifndef CONFIG_PAX_PER_CPU_PGD
29990 spin_unlock(pgt_lock);
29991+#endif
29992
29993 if (!ret)
29994 break;
29995@@ -278,6 +342,12 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
29996 * an interrupt in the middle of a task switch..
29997 */
29998 pgd_paddr = read_cr3();
29999+
30000+#ifdef CONFIG_PAX_PER_CPU_PGD
30001+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (pgd_paddr & __PHYSICAL_MASK));
30002+ vmalloc_sync_one(__va(pgd_paddr + PAGE_SIZE), address);
30003+#endif
30004+
30005 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
30006 if (!pmd_k)
30007 return -1;
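[Note] Several hunks in this file key off CONFIG_PAX_PER_CPU_PGD: instead of one shared swapper_pg_dir, every CPU owns a kernel/user pair of page directories that vmalloc faults must be synced into. The accessors' assumed shape (the real declarations are added elsewhere in the patch; the array layout here is a guess):

	enum cpu_pgd_type { kernel = 0, user = 1 };
	extern pgd_t cpu_pgd[NR_CPUS][2][PTRS_PER_PGD];

	#define get_cpu_pgd(cpu, type)	(cpu_pgd[(cpu)][(type)])
	#define pgd_offset_cpu(cpu, type, address) \
		(get_cpu_pgd(cpu, type) + pgd_index(address))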
30008@@ -373,11 +443,25 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
30009 * happen within a race in page table update. In the later
30010 * case just flush:
30011 */
30012- pgd = pgd_offset(current->active_mm, address);
30013+
30014 pgd_ref = pgd_offset_k(address);
30015 if (pgd_none(*pgd_ref))
30016 return -1;
30017
30018+#ifdef CONFIG_PAX_PER_CPU_PGD
30019+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (read_cr3() & __PHYSICAL_MASK));
30020+ pgd = pgd_offset_cpu(smp_processor_id(), user, address);
30021+ if (pgd_none(*pgd)) {
30022+ set_pgd(pgd, *pgd_ref);
30023+ arch_flush_lazy_mmu_mode();
30024+ } else {
30025+ BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
30026+ }
30027+ pgd = pgd_offset_cpu(smp_processor_id(), kernel, address);
30028+#else
30029+ pgd = pgd_offset(current->active_mm, address);
30030+#endif
30031+
30032 if (pgd_none(*pgd)) {
30033 set_pgd(pgd, *pgd_ref);
30034 arch_flush_lazy_mmu_mode();
30035@@ -543,7 +627,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
30036 static int is_errata100(struct pt_regs *regs, unsigned long address)
30037 {
30038 #ifdef CONFIG_X86_64
30039- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
30040+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
30041 return 1;
30042 #endif
30043 return 0;
30044@@ -570,7 +654,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
30045 }
30046
30047 static const char nx_warning[] = KERN_CRIT
30048-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
30049+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
30050
30051 static void
30052 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
30053@@ -579,15 +663,27 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
30054 if (!oops_may_print())
30055 return;
30056
30057- if (error_code & PF_INSTR) {
30058+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
30059 unsigned int level;
30060
30061 pte_t *pte = lookup_address(address, &level);
30062
30063 if (pte && pte_present(*pte) && !pte_exec(*pte))
30064- printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
30065+ printk(nx_warning, from_kuid_munged(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
30066 }
30067
30068+#ifdef CONFIG_PAX_KERNEXEC
30069+ if (init_mm.start_code <= address && address < init_mm.end_code) {
30070+ if (current->signal->curr_ip)
30071+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
30072+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
30073+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
30074+ else
30075+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
30076+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
30077+ }
30078+#endif
30079+
30080 printk(KERN_ALERT "BUG: unable to handle kernel ");
30081 if (address < PAGE_SIZE)
30082 printk(KERN_CONT "NULL pointer dereference");
30083@@ -750,6 +846,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
30084 return;
30085 }
30086 #endif
30087+
30088+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
30089+ if (pax_is_fetch_fault(regs, error_code, address)) {
30090+
30091+#ifdef CONFIG_PAX_EMUTRAMP
30092+ switch (pax_handle_fetch_fault(regs)) {
30093+ case 2:
30094+ return;
30095+ }
30096+#endif
30097+
30098+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
30099+ do_group_exit(SIGKILL);
30100+ }
30101+#endif
30102+
30103 /* Kernel addresses are always protection faults: */
30104 if (address >= TASK_SIZE)
30105 error_code |= PF_PROT;
30106@@ -835,7 +947,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
30107 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
30108 printk(KERN_ERR
30109 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
30110- tsk->comm, tsk->pid, address);
30111+ tsk->comm, task_pid_nr(tsk), address);
30112 code = BUS_MCEERR_AR;
30113 }
30114 #endif
30115@@ -898,6 +1010,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
30116 return 1;
30117 }
30118
30119+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
30120+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
30121+{
30122+ pte_t *pte;
30123+ pmd_t *pmd;
30124+ spinlock_t *ptl;
30125+ unsigned char pte_mask;
30126+
30127+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
30128+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
30129+ return 0;
30130+
30131+ /* PaX: it's our fault, let's handle it if we can */
30132+
30133+ /* PaX: take a look at read faults before acquiring any locks */
30134+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
30135+ /* instruction fetch attempt from a protected page in user mode */
30136+ up_read(&mm->mmap_sem);
30137+
30138+#ifdef CONFIG_PAX_EMUTRAMP
30139+ switch (pax_handle_fetch_fault(regs)) {
30140+ case 2:
30141+ return 1;
30142+ }
30143+#endif
30144+
30145+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
30146+ do_group_exit(SIGKILL);
30147+ }
30148+
30149+ pmd = pax_get_pmd(mm, address);
30150+ if (unlikely(!pmd))
30151+ return 0;
30152+
30153+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
30154+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
30155+ pte_unmap_unlock(pte, ptl);
30156+ return 0;
30157+ }
30158+
30159+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
30160+ /* write attempt to a protected page in user mode */
30161+ pte_unmap_unlock(pte, ptl);
30162+ return 0;
30163+ }
30164+
30165+#ifdef CONFIG_SMP
30166+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
30167+#else
30168+ if (likely(address > get_limit(regs->cs)))
30169+#endif
30170+ {
30171+ set_pte(pte, pte_mkread(*pte));
30172+ __flush_tlb_one(address);
30173+ pte_unmap_unlock(pte, ptl);
30174+ up_read(&mm->mmap_sem);
30175+ return 1;
30176+ }
30177+
30178+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
30179+
30180+ /*
30181+ * PaX: fill DTLB with user rights and retry
30182+ */
30183+ __asm__ __volatile__ (
30184+ "orb %2,(%1)\n"
30185+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
30186+/*
30187+ * PaX: let this uncommented 'invlpg' remind us of the behaviour of Intel's
30188+ * (and AMD's) TLBs: namely, they do not cache PTEs that would raise *any*
30189+ * page fault when examined during a TLB load attempt. this is true not only
30190+ * for PTEs holding a non-present entry but also for present entries that will
30191+ * raise a page fault (such as those set up by PaX, or the copy-on-write
30192+ * mechanism). in effect it means that we do *not* need to flush the TLBs
30193+ * for our target pages since their PTEs are simply not in the TLBs at all.
30194+ *
30195+ * the best part of omitting it is that we gain around 15-20% speed in the
30196+ * fast path of the page fault handler and can get rid of tracing since we
30197+ * can no longer flush unintended entries.
30198+ */
30199+ "invlpg (%0)\n"
30200+#endif
30201+ __copyuser_seg"testb $0,(%0)\n"
30202+ "xorb %3,(%1)\n"
30203+ :
30204+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
30205+ : "memory", "cc");
30206+ pte_unmap_unlock(pte, ptl);
30207+ up_read(&mm->mmap_sem);
30208+ return 1;
30209+}
30210+#endif
30211+
30212 /*
30213 * Handle a spurious fault caused by a stale TLB entry.
30214 *
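[Note] One line of the handler above is worth unpacking: with the stock bit layout (PF_WRITE == 1 << 1, _PAGE_BIT_DIRTY == 6) the shift converts a write fault directly into a pre-set dirty bit, sparing a second fault when the store is retried.

	/*
	 * pte_mask = _PAGE_ACCESSED | _PAGE_USER
	 *          | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY - 1));
	 *
	 * read  fault: error_code & PF_WRITE == 0
	 *              pte_mask = _PAGE_ACCESSED | _PAGE_USER
	 * write fault: error_code & PF_WRITE == 0x2, and 0x2 << 5 == 0x40 == _PAGE_DIRTY
	 *              pte_mask = _PAGE_ACCESSED | _PAGE_USER | _PAGE_DIRTY
	 *
	 * "orb %2,(%1)" sets exactly these low-byte bits in the pte before the
	 * probing load fills the DTLB, and "xorb %3,(%1)" flips _PAGE_USER
	 * back off afterwards.
	 */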
30215@@ -964,6 +1169,9 @@ int show_unhandled_signals = 1;
30216 static inline int
30217 access_error(unsigned long error_code, struct vm_area_struct *vma)
30218 {
30219+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
30220+ return 1;
30221+
30222 if (error_code & PF_WRITE) {
30223 /* write, present and write, not present: */
30224 if (unlikely(!(vma->vm_flags & VM_WRITE)))
30225@@ -992,7 +1200,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
30226 if (error_code & PF_USER)
30227 return false;
30228
30229- if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
30230+ if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
30231 return false;
30232
30233 return true;
30234@@ -1008,18 +1216,33 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
30235 {
30236 struct vm_area_struct *vma;
30237 struct task_struct *tsk;
30238- unsigned long address;
30239 struct mm_struct *mm;
30240 int fault;
30241 int write = error_code & PF_WRITE;
30242 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
30243 (write ? FAULT_FLAG_WRITE : 0);
30244
30245- tsk = current;
30246- mm = tsk->mm;
30247-
30248 /* Get the faulting address: */
30249- address = read_cr2();
30250+ unsigned long address = read_cr2();
30251+
30252+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30253+ if (!user_mode(regs) && address < 2 * pax_user_shadow_base) {
30254+ if (!search_exception_tables(regs->ip)) {
30255+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
30256+ bad_area_nosemaphore(regs, error_code, address);
30257+ return;
30258+ }
30259+ if (address < pax_user_shadow_base) {
30260+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
30261+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
30262+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
30263+ } else
30264+ address -= pax_user_shadow_base;
30265+ }
30266+#endif
30267+
30268+ tsk = current;
30269+ mm = tsk->mm;
30270
30271 /*
30272 * Detect and handle instructions that would cause a page fault for
30273@@ -1080,7 +1303,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
30274 * User-mode registers count as a user access even for any
30275 * potential system fault or CPU buglet:
30276 */
30277- if (user_mode_vm(regs)) {
30278+ if (user_mode(regs)) {
30279 local_irq_enable();
30280 error_code |= PF_USER;
30281 } else {
30282@@ -1142,6 +1365,11 @@ retry:
30283 might_sleep();
30284 }
30285
30286+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
30287+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
30288+ return;
30289+#endif
30290+
30291 vma = find_vma(mm, address);
30292 if (unlikely(!vma)) {
30293 bad_area(regs, error_code, address);
30294@@ -1153,18 +1381,24 @@ retry:
30295 bad_area(regs, error_code, address);
30296 return;
30297 }
30298- if (error_code & PF_USER) {
30299- /*
30300- * Accessing the stack below %sp is always a bug.
30301- * The large cushion allows instructions like enter
30302- * and pusha to work. ("enter $65535, $31" pushes
30303- * 32 pointers and then decrements %sp by 65535.)
30304- */
30305- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
30306- bad_area(regs, error_code, address);
30307- return;
30308- }
30309+ /*
30310+ * Accessing the stack below %sp is always a bug.
30311+ * The large cushion allows instructions like enter
30312+ * and pusha to work. ("enter $65535, $31" pushes
30313+ * 32 pointers and then decrements %sp by 65535.)
30314+ */
30315+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
30316+ bad_area(regs, error_code, address);
30317+ return;
30318 }
30319+
30320+#ifdef CONFIG_PAX_SEGMEXEC
30321+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
30322+ bad_area(regs, error_code, address);
30323+ return;
30324+ }
30325+#endif
30326+
30327 if (unlikely(expand_stack(vma, address))) {
30328 bad_area(regs, error_code, address);
30329 return;
30330@@ -1230,3 +1464,292 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
30331 __do_page_fault(regs, error_code);
30332 exception_exit(prev_state);
30333 }
30334+
30335+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
30336+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
30337+{
30338+ struct mm_struct *mm = current->mm;
30339+ unsigned long ip = regs->ip;
30340+
30341+ if (v8086_mode(regs))
30342+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
30343+
30344+#ifdef CONFIG_PAX_PAGEEXEC
30345+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
30346+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
30347+ return true;
30348+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
30349+ return true;
30350+ return false;
30351+ }
30352+#endif
30353+
30354+#ifdef CONFIG_PAX_SEGMEXEC
30355+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
30356+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
30357+ return true;
30358+ return false;
30359+ }
30360+#endif
30361+
30362+ return false;
30363+}
30364+#endif
30365+
30366+#ifdef CONFIG_PAX_EMUTRAMP
30367+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
30368+{
30369+ int err;
30370+
30371+ do { /* PaX: libffi trampoline emulation */
30372+ unsigned char mov, jmp;
30373+ unsigned int addr1, addr2;
30374+
30375+#ifdef CONFIG_X86_64
30376+ if ((regs->ip + 9) >> 32)
30377+ break;
30378+#endif
30379+
30380+ err = get_user(mov, (unsigned char __user *)regs->ip);
30381+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
30382+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
30383+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
30384+
30385+ if (err)
30386+ break;
30387+
30388+ if (mov == 0xB8 && jmp == 0xE9) {
30389+ regs->ax = addr1;
30390+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
30391+ return 2;
30392+ }
30393+ } while (0);
30394+
30395+ do { /* PaX: gcc trampoline emulation #1 */
30396+ unsigned char mov1, mov2;
30397+ unsigned short jmp;
30398+ unsigned int addr1, addr2;
30399+
30400+#ifdef CONFIG_X86_64
30401+ if ((regs->ip + 11) >> 32)
30402+ break;
30403+#endif
30404+
30405+ err = get_user(mov1, (unsigned char __user *)regs->ip);
30406+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
30407+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
30408+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
30409+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
30410+
30411+ if (err)
30412+ break;
30413+
30414+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
30415+ regs->cx = addr1;
30416+ regs->ax = addr2;
30417+ regs->ip = addr2;
30418+ return 2;
30419+ }
30420+ } while (0);
30421+
30422+ do { /* PaX: gcc trampoline emulation #2 */
30423+ unsigned char mov, jmp;
30424+ unsigned int addr1, addr2;
30425+
30426+#ifdef CONFIG_X86_64
30427+ if ((regs->ip + 9) >> 32)
30428+ break;
30429+#endif
30430+
30431+ err = get_user(mov, (unsigned char __user *)regs->ip);
30432+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
30433+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
30434+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
30435+
30436+ if (err)
30437+ break;
30438+
30439+ if (mov == 0xB9 && jmp == 0xE9) {
30440+ regs->cx = addr1;
30441+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
30442+ return 2;
30443+ }
30444+ } while (0);
30445+
30446+ return 1; /* PaX in action */
30447+}
30448+
30449+#ifdef CONFIG_X86_64
30450+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
30451+{
30452+ int err;
30453+
30454+ do { /* PaX: libffi trampoline emulation */
30455+ unsigned short mov1, mov2, jmp1;
30456+ unsigned char stcclc, jmp2;
30457+ unsigned long addr1, addr2;
30458+
30459+ err = get_user(mov1, (unsigned short __user *)regs->ip);
30460+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
30461+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
30462+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
30463+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
30464+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
30465+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
30466+
30467+ if (err)
30468+ break;
30469+
30470+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
30471+ regs->r11 = addr1;
30472+ regs->r10 = addr2;
30473+ if (stcclc == 0xF8)
30474+ regs->flags &= ~X86_EFLAGS_CF;
30475+ else
30476+ regs->flags |= X86_EFLAGS_CF;
30477+ regs->ip = addr1;
30478+ return 2;
30479+ }
30480+ } while (0);
30481+
30482+ do { /* PaX: gcc trampoline emulation #1 */
30483+ unsigned short mov1, mov2, jmp1;
30484+ unsigned char jmp2;
30485+ unsigned int addr1;
30486+ unsigned long addr2;
30487+
30488+ err = get_user(mov1, (unsigned short __user *)regs->ip);
30489+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
30490+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
30491+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
30492+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
30493+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
30494+
30495+ if (err)
30496+ break;
30497+
30498+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
30499+ regs->r11 = addr1;
30500+ regs->r10 = addr2;
30501+ regs->ip = addr1;
30502+ return 2;
30503+ }
30504+ } while (0);
30505+
30506+ do { /* PaX: gcc trampoline emulation #2 */
30507+ unsigned short mov1, mov2, jmp1;
30508+ unsigned char jmp2;
30509+ unsigned long addr1, addr2;
30510+
30511+ err = get_user(mov1, (unsigned short __user *)regs->ip);
30512+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
30513+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
30514+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
30515+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
30516+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
30517+
30518+ if (err)
30519+ break;
30520+
30521+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
30522+ regs->r11 = addr1;
30523+ regs->r10 = addr2;
30524+ regs->ip = addr1;
30525+ return 2;
30526+ }
30527+ } while (0);
30528+
30529+ return 1; /* PaX in action */
30530+}
30531+#endif
30532+
30533+/*
30534+ * PaX: decide what to do with offenders (regs->ip = fault address)
30535+ *
30536+ * returns 1 when the task should be killed
30537+ *         2 when a gcc trampoline was detected
30538+ */
30539+static int pax_handle_fetch_fault(struct pt_regs *regs)
30540+{
30541+ if (v8086_mode(regs))
30542+ return 1;
30543+
30544+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
30545+ return 1;
30546+
30547+#ifdef CONFIG_X86_32
30548+ return pax_handle_fetch_fault_32(regs);
30549+#else
30550+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
30551+ return pax_handle_fetch_fault_32(regs);
30552+ else
30553+ return pax_handle_fetch_fault_64(regs);
30554+#endif
30555+}
30556+#endif
30557+
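[Note] Each emulator above matches a fixed byte pattern at regs->ip; "gcc trampoline emulation #2", for instance, recognizes movl $imm32, %ecx (0xB9) followed by jmp rel32 (0xE9) and resumes at ip + 10 + addr2. The same match restated as a packed struct, purely as an illustration (the patch deliberately issues one get_user() per field so each access has its own fault fixup):

	struct gcc_tramp2 {
		unsigned char	mov;	/* 0xB9: movl $addr1, %ecx */
		unsigned int	addr1;
		unsigned char	jmp;	/* 0xE9: jmp rel32 */
		unsigned int	addr2;	/* target = ip + 10 + addr2 */
	} __packed;

	static bool looks_like_gcc_tramp2(const struct gcc_tramp2 *t)
	{
		return t->mov == 0xB9 && t->jmp == 0xE9;
	}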
30558+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
30559+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
30560+{
30561+ long i;
30562+
30563+ printk(KERN_ERR "PAX: bytes at PC: ");
30564+ for (i = 0; i < 20; i++) {
30565+ unsigned char c;
30566+ if (get_user(c, (unsigned char __force_user *)pc+i))
30567+ printk(KERN_CONT "?? ");
30568+ else
30569+ printk(KERN_CONT "%02x ", c);
30570+ }
30571+ printk("\n");
30572+
30573+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
30574+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
30575+ unsigned long c;
30576+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
30577+#ifdef CONFIG_X86_32
30578+ printk(KERN_CONT "???????? ");
30579+#else
30580+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
30581+ printk(KERN_CONT "???????? ???????? ");
30582+ else
30583+ printk(KERN_CONT "???????????????? ");
30584+#endif
30585+ } else {
30586+#ifdef CONFIG_X86_64
30587+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
30588+ printk(KERN_CONT "%08x ", (unsigned int)c);
30589+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
30590+ } else
30591+#endif
30592+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
30593+ }
30594+ }
30595+ printk("\n");
30596+}
30597+#endif
30598+
30599+/**
30600+ * probe_kernel_write(): safely attempt to write to a location
30601+ * @dst: address to write to
30602+ * @src: pointer to the data that shall be written
30603+ * @size: size of the data chunk
30604+ *
30605+ * Safely write to address @dst from the buffer at @src. If a kernel fault
30606+ * happens, handle that and return -EFAULT.
30607+ */
30608+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
30609+{
30610+ long ret;
30611+ mm_segment_t old_fs = get_fs();
30612+
30613+ set_fs(KERNEL_DS);
30614+ pagefault_disable();
30615+ pax_open_kernel();
30616+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
30617+ pax_close_kernel();
30618+ pagefault_enable();
30619+ set_fs(old_fs);
30620+
30621+ return ret ? -EFAULT : 0;
30622+}
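[Note] probe_kernel_write() stacks three protections so a bad destination produces -EFAULT rather than an oops or a silent KERNEXEC violation: KERNEL_DS widens the accessor limit, pagefault_disable() routes any fault to the fixup path, and pax_open_kernel() lifts the read-only mapping for the duration. A call-site sketch (hypothetical helper):

	static int patch_word(void *addr, u32 val)
	{
		/* 0: the write landed; -EFAULT: destination faulted */
		return probe_kernel_write(addr, &val, sizeof(val));
	}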
30623diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
30624index dd74e46..7d26398 100644
30625--- a/arch/x86/mm/gup.c
30626+++ b/arch/x86/mm/gup.c
30627@@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
30628 addr = start;
30629 len = (unsigned long) nr_pages << PAGE_SHIFT;
30630 end = start + len;
30631- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
30632+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
30633 (void __user *)start, len)))
30634 return 0;
30635
30636diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
30637index 252b8f5..4dcfdc1 100644
30638--- a/arch/x86/mm/highmem_32.c
30639+++ b/arch/x86/mm/highmem_32.c
30640@@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
30641 idx = type + KM_TYPE_NR*smp_processor_id();
30642 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
30643 BUG_ON(!pte_none(*(kmap_pte-idx)));
30644+
30645+ pax_open_kernel();
30646 set_pte(kmap_pte-idx, mk_pte(page, prot));
30647+ pax_close_kernel();
30648+
30649 arch_flush_lazy_mmu_mode();
30650
30651 return (void *)vaddr;
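[Note] With KERNEXEC the fixmap ptes live in read-only memory, hence the pax_open_kernel()/pax_close_kernel() bracket around set_pte(). On bare metal the pair plausibly toggles CR0.WP with preemption pinned; treat the sketch below as an assumption, since paravirt takes a different path:

	static inline void pax_open_kernel_sketch(void)
	{
		preempt_disable();
		barrier();
		write_cr0(read_cr0() & ~X86_CR0_WP);	/* supervisor writes ignore RO ptes */
	}

	static inline void pax_close_kernel_sketch(void)
	{
		write_cr0(read_cr0() | X86_CR0_WP);
		barrier();
		preempt_enable();
	}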
30652diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
30653index ae1aa71..d9bea75 100644
30654--- a/arch/x86/mm/hugetlbpage.c
30655+++ b/arch/x86/mm/hugetlbpage.c
30656@@ -271,23 +271,30 @@ follow_huge_pud(struct mm_struct *mm, unsigned long address,
30657 #ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
30658 static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
30659 unsigned long addr, unsigned long len,
30660- unsigned long pgoff, unsigned long flags)
30661+ unsigned long pgoff, unsigned long flags, unsigned long offset)
30662 {
30663 struct hstate *h = hstate_file(file);
30664 struct vm_unmapped_area_info info;
30665-
30666+
30667 info.flags = 0;
30668 info.length = len;
30669 info.low_limit = TASK_UNMAPPED_BASE;
30670+
30671+#ifdef CONFIG_PAX_RANDMMAP
30672+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
30673+ info.low_limit += current->mm->delta_mmap;
30674+#endif
30675+
30676 info.high_limit = TASK_SIZE;
30677 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
30678 info.align_offset = 0;
30679+ info.threadstack_offset = offset;
30680 return vm_unmapped_area(&info);
30681 }
30682
30683 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
30684 unsigned long addr0, unsigned long len,
30685- unsigned long pgoff, unsigned long flags)
30686+ unsigned long pgoff, unsigned long flags, unsigned long offset)
30687 {
30688 struct hstate *h = hstate_file(file);
30689 struct vm_unmapped_area_info info;
30690@@ -299,6 +306,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
30691 info.high_limit = current->mm->mmap_base;
30692 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
30693 info.align_offset = 0;
30694+ info.threadstack_offset = offset;
30695 addr = vm_unmapped_area(&info);
30696
30697 /*
30698@@ -311,6 +319,12 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
30699 VM_BUG_ON(addr != -ENOMEM);
30700 info.flags = 0;
30701 info.low_limit = TASK_UNMAPPED_BASE;
30702+
30703+#ifdef CONFIG_PAX_RANDMMAP
30704+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
30705+ info.low_limit += current->mm->delta_mmap;
30706+#endif
30707+
30708 info.high_limit = TASK_SIZE;
30709 addr = vm_unmapped_area(&info);
30710 }
30711@@ -325,10 +339,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
30712 struct hstate *h = hstate_file(file);
30713 struct mm_struct *mm = current->mm;
30714 struct vm_area_struct *vma;
30715+ unsigned long pax_task_size = TASK_SIZE;
30716+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
30717
30718 if (len & ~huge_page_mask(h))
30719 return -EINVAL;
30720- if (len > TASK_SIZE)
30721+
30722+#ifdef CONFIG_PAX_SEGMEXEC
30723+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
30724+ pax_task_size = SEGMEXEC_TASK_SIZE;
30725+#endif
30726+
30727+ pax_task_size -= PAGE_SIZE;
30728+
30729+ if (len > pax_task_size)
30730 return -ENOMEM;
30731
30732 if (flags & MAP_FIXED) {
30733@@ -337,19 +361,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
30734 return addr;
30735 }
30736
30737+#ifdef CONFIG_PAX_RANDMMAP
30738+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
30739+#endif
30740+
30741 if (addr) {
30742 addr = ALIGN(addr, huge_page_size(h));
30743 vma = find_vma(mm, addr);
30744- if (TASK_SIZE - len >= addr &&
30745- (!vma || addr + len <= vma->vm_start))
30746+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
30747 return addr;
30748 }
30749 if (mm->get_unmapped_area == arch_get_unmapped_area)
30750 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
30751- pgoff, flags);
30752+ pgoff, flags, offset);
30753 else
30754 return hugetlb_get_unmapped_area_topdown(file, addr, len,
30755- pgoff, flags);
30756+ pgoff, flags, offset);
30757 }
30758
30759 #endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/
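[Note] The pax_task_size adjustments above follow from SEGMEXEC's address-space split: data lives in the lower half and an executable mirror occupies the upper half, so every limit check must use the halved size. With the default 3G/1G split (SEGMEXEC_TASK_SIZE is TASK_SIZE / 2 elsewhere in the patch):

	/*
	 *	TASK_SIZE		0xC0000000	3 GiB of userland
	 *	SEGMEXEC_TASK_SIZE	0x60000000	1.5 GiB data half
	 *	0x60000000-0xBFFFFFFF			executable mirror half
	 *
	 * the trailing "pax_task_size -= PAGE_SIZE" appears to reserve a
	 * guard page at the very top of the usable range.
	 */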
30760diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
30761index 1f34e92..c97b98f 100644
30762--- a/arch/x86/mm/init.c
30763+++ b/arch/x86/mm/init.c
30764@@ -4,6 +4,7 @@
30765 #include <linux/swap.h>
30766 #include <linux/memblock.h>
30767 #include <linux/bootmem.h> /* for max_low_pfn */
30768+#include <linux/tboot.h>
30769
30770 #include <asm/cacheflush.h>
30771 #include <asm/e820.h>
30772@@ -17,6 +18,8 @@
30773 #include <asm/proto.h>
30774 #include <asm/dma.h> /* for MAX_DMA_PFN */
30775 #include <asm/microcode.h>
30776+#include <asm/desc.h>
30777+#include <asm/bios_ebda.h>
30778
30779 #include "mm_internal.h"
30780
30781@@ -465,7 +468,18 @@ void __init init_mem_mapping(void)
30782 early_ioremap_page_table_range_init();
30783 #endif
30784
30785+#ifdef CONFIG_PAX_PER_CPU_PGD
30786+ clone_pgd_range(get_cpu_pgd(0, kernel) + KERNEL_PGD_BOUNDARY,
30787+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
30788+ KERNEL_PGD_PTRS);
30789+ clone_pgd_range(get_cpu_pgd(0, user) + KERNEL_PGD_BOUNDARY,
30790+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
30791+ KERNEL_PGD_PTRS);
30792+ load_cr3(get_cpu_pgd(0, kernel));
30793+#else
30794 load_cr3(swapper_pg_dir);
30795+#endif
30796+
30797 __flush_tlb_all();
30798
30799 early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
30800@@ -481,10 +495,40 @@ void __init init_mem_mapping(void)
30801 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
30802 * mmio resources as well as potential bios/acpi data regions.
30803 */
30804+
30805+#ifdef CONFIG_GRKERNSEC_KMEM
30806+static unsigned int ebda_start __read_only;
30807+static unsigned int ebda_end __read_only;
30808+#endif
30809+
30810 int devmem_is_allowed(unsigned long pagenr)
30811 {
30812- if (pagenr < 256)
30813+#ifdef CONFIG_GRKERNSEC_KMEM
30814+ /* allow BDA */
30815+ if (!pagenr)
30816 return 1;
30817+ /* allow EBDA */
30818+ if (pagenr >= ebda_start && pagenr < ebda_end)
30819+ return 1;
30820+ /* if tboot is in use, allow access to its hardcoded serial log range */
30821+ if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT)))
30822+ return 1;
30823+#else
30824+ if (!pagenr)
30825+ return 1;
30826+#ifdef CONFIG_VM86
30827+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
30828+ return 1;
30829+#endif
30830+#endif
30831+
30832+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
30833+ return 1;
30834+#ifdef CONFIG_GRKERNSEC_KMEM
30835+ /* throw out everything else below 1MB */
30836+ if (pagenr <= 256)
30837+ return 0;
30838+#endif
30839 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
30840 return 0;
30841 if (!page_is_ram(pagenr))
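[Note] The rewritten devmem_is_allowed() reasons in page frame numbers; with 4 KiB pages the constants work out as follows (ISA_START_ADDRESS/ISA_END_ADDRESS are the stock 0xA0000/0x100000):

	/*
	 *	page 0				BDA, first 4 KiB of real-mode memory
	 *	ebda_start .. ebda_end		EBDA window probed by gr_init_ebda() below
	 *	0x60000 >> 12 = 0x60		tboot serial log start
	 *	0x68000 >> 12 = 0x68		tboot serial log end
	 *	0xA0000 >> 12 = 0xA0		legacy ISA/video hole start
	 *	0x100000 >> 12 = 0x100 = 256	end of the first megabyte -- the old
	 *					blanket "pagenr < 256" allowance
	 */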
30842@@ -538,8 +582,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
30843 #endif
30844 }
30845
30846+#ifdef CONFIG_GRKERNSEC_KMEM
30847+static inline void gr_init_ebda(void)
30848+{
30849+ unsigned int ebda_addr;
30850+ unsigned int ebda_size = 0;
30851+
30852+ ebda_addr = get_bios_ebda();
30853+ if (ebda_addr) {
30854+ ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
30855+ ebda_size <<= 10;
30856+ }
30857+ if (ebda_addr && ebda_size) {
30858+ ebda_start = ebda_addr >> PAGE_SHIFT;
30859+ ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
30860+ } else {
30861+ ebda_start = 0x9f000 >> PAGE_SHIFT;
30862+ ebda_end = 0xa0000 >> PAGE_SHIFT;
30863+ }
30864+}
30865+#else
30866+static inline void gr_init_ebda(void) { }
30867+#endif
30868+
30869 void free_initmem(void)
30870 {
30871+#ifdef CONFIG_PAX_KERNEXEC
30872+#ifdef CONFIG_X86_32
30873+ /* PaX: limit KERNEL_CS to actual size */
30874+ unsigned long addr, limit;
30875+ struct desc_struct d;
30876+ int cpu;
30877+#else
30878+ pgd_t *pgd;
30879+ pud_t *pud;
30880+ pmd_t *pmd;
30881+ unsigned long addr, end;
30882+#endif
30883+#endif
30884+
30885+ gr_init_ebda();
30886+
30887+#ifdef CONFIG_PAX_KERNEXEC
30888+#ifdef CONFIG_X86_32
30889+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
30890+ limit = (limit - 1UL) >> PAGE_SHIFT;
30891+
30892+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
30893+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
30894+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
30895+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
30896+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
30897+ }
30898+
30899+ /* PaX: make KERNEL_CS read-only */
30900+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
30901+ if (!paravirt_enabled())
30902+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
30903+/*
30904+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
30905+ pgd = pgd_offset_k(addr);
30906+ pud = pud_offset(pgd, addr);
30907+ pmd = pmd_offset(pud, addr);
30908+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
30909+ }
30910+*/
30911+#ifdef CONFIG_X86_PAE
30912+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
30913+/*
30914+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
30915+ pgd = pgd_offset_k(addr);
30916+ pud = pud_offset(pgd, addr);
30917+ pmd = pmd_offset(pud, addr);
30918+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
30919+ }
30920+*/
30921+#endif
30922+
30923+#ifdef CONFIG_MODULES
30924+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
30925+#endif
30926+
30927+#else
30928+ /* PaX: make kernel code/rodata read-only, rest non-executable */
30929+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
30930+ pgd = pgd_offset_k(addr);
30931+ pud = pud_offset(pgd, addr);
30932+ pmd = pmd_offset(pud, addr);
30933+ if (!pmd_present(*pmd))
30934+ continue;
30935+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
30936+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
30937+ else
30938+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
30939+ }
30940+
30941+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
30942+ end = addr + KERNEL_IMAGE_SIZE;
30943+ for (; addr < end; addr += PMD_SIZE) {
30944+ pgd = pgd_offset_k(addr);
30945+ pud = pud_offset(pgd, addr);
30946+ pmd = pmd_offset(pud, addr);
30947+ if (!pmd_present(*pmd))
30948+ continue;
30949+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
30950+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
30951+ }
30952+#endif
30953+
30954+ flush_tlb_all();
30955+#endif
30956+
30957 free_init_pages("unused kernel memory",
30958 (unsigned long)(&__init_begin),
30959 (unsigned long)(&__init_end));
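
With CONFIG_GRKERNSEC_KMEM the devmem_is_allowed() hunk above turns /dev/mem access below 1MB into a whitelist: page 0 (the BIOS Data Area), the EBDA range measured by gr_init_ebda() (or its 0x9f000-0xa0000 fallback), the tboot serial-log window, and the ISA hole at 0xa0000-0xfffff; every other low page is rejected before the generic iomem_is_exclusive()/page_is_ram() checks run. A minimal sketch of the resulting low-memory policy, ignoring the tboot special case (kmem_page_allowed() is a hypothetical name, not in the patch):

        #define LOW_PAGE_SHIFT  12
        #define ISA_FIRST_PAGE  (0xa0000UL >> LOW_PAGE_SHIFT)   /* 0xa0  */
        #define ISA_LAST_PAGE   (0x100000UL >> LOW_PAGE_SHIFT)  /* 0x100 */

        /* hypothetical mirror of the hardened low-memory policy above */
        static int kmem_page_allowed(unsigned long pagenr,
                                     unsigned long ebda_start,
                                     unsigned long ebda_end)
        {
                if (!pagenr)                                    /* BDA */
                        return 1;
                if (pagenr >= ebda_start && pagenr < ebda_end)  /* EBDA */
                        return 1;
                if (pagenr >= ISA_FIRST_PAGE && pagenr < ISA_LAST_PAGE)
                        return 1;                               /* ISA hole */
                /* everything else up to and including the 1MB page: denied;
                 * above that, the usual iomem/RAM checks still apply */
                return pagenr > 256;
        }
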
30960diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
30961index 3ac7e31..89611b7 100644
30962--- a/arch/x86/mm/init_32.c
30963+++ b/arch/x86/mm/init_32.c
30964@@ -62,33 +62,6 @@ static noinline int do_test_wp_bit(void);
30965 bool __read_mostly __vmalloc_start_set = false;
30966
30967 /*
30968- * Creates a middle page table and puts a pointer to it in the
30969- * given global directory entry. This only returns the gd entry
30970- * in non-PAE compilation mode, since the middle layer is folded.
30971- */
30972-static pmd_t * __init one_md_table_init(pgd_t *pgd)
30973-{
30974- pud_t *pud;
30975- pmd_t *pmd_table;
30976-
30977-#ifdef CONFIG_X86_PAE
30978- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
30979- pmd_table = (pmd_t *)alloc_low_page();
30980- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
30981- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
30982- pud = pud_offset(pgd, 0);
30983- BUG_ON(pmd_table != pmd_offset(pud, 0));
30984-
30985- return pmd_table;
30986- }
30987-#endif
30988- pud = pud_offset(pgd, 0);
30989- pmd_table = pmd_offset(pud, 0);
30990-
30991- return pmd_table;
30992-}
30993-
30994-/*
30995 * Create a page table and place a pointer to it in a middle page
30996 * directory entry:
30997 */
30998@@ -98,13 +71,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
30999 pte_t *page_table = (pte_t *)alloc_low_page();
31000
31001 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
31002+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
31003+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
31004+#else
31005 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
31006+#endif
31007 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
31008 }
31009
31010 return pte_offset_kernel(pmd, 0);
31011 }
31012
31013+static pmd_t * __init one_md_table_init(pgd_t *pgd)
31014+{
31015+ pud_t *pud;
31016+ pmd_t *pmd_table;
31017+
31018+ pud = pud_offset(pgd, 0);
31019+ pmd_table = pmd_offset(pud, 0);
31020+
31021+ return pmd_table;
31022+}
31023+
31024 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
31025 {
31026 int pgd_idx = pgd_index(vaddr);
31027@@ -208,6 +196,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
31028 int pgd_idx, pmd_idx;
31029 unsigned long vaddr;
31030 pgd_t *pgd;
31031+ pud_t *pud;
31032 pmd_t *pmd;
31033 pte_t *pte = NULL;
31034 unsigned long count = page_table_range_init_count(start, end);
31035@@ -222,8 +211,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
31036 pgd = pgd_base + pgd_idx;
31037
31038 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
31039- pmd = one_md_table_init(pgd);
31040- pmd = pmd + pmd_index(vaddr);
31041+ pud = pud_offset(pgd, vaddr);
31042+ pmd = pmd_offset(pud, vaddr);
31043+
31044+#ifdef CONFIG_X86_PAE
31045+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
31046+#endif
31047+
31048 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
31049 pmd++, pmd_idx++) {
31050 pte = page_table_kmap_check(one_page_table_init(pmd),
31051@@ -235,11 +229,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
31052 }
31053 }
31054
31055-static inline int is_kernel_text(unsigned long addr)
31056+static inline int is_kernel_text(unsigned long start, unsigned long end)
31057 {
31058- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
31059- return 1;
31060- return 0;
31061+ if ((start > ktla_ktva((unsigned long)_etext) ||
31062+ end <= ktla_ktva((unsigned long)_stext)) &&
31063+ (start > ktla_ktva((unsigned long)_einittext) ||
31064+ end <= ktla_ktva((unsigned long)_sinittext)) &&
31065+
31066+#ifdef CONFIG_ACPI_SLEEP
31067+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
31068+#endif
31069+
31070+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
31071+ return 0;
31072+ return 1;
31073 }
31074
31075 /*
31076@@ -256,9 +259,10 @@ kernel_physical_mapping_init(unsigned long start,
31077 unsigned long last_map_addr = end;
31078 unsigned long start_pfn, end_pfn;
31079 pgd_t *pgd_base = swapper_pg_dir;
31080- int pgd_idx, pmd_idx, pte_ofs;
31081+ unsigned int pgd_idx, pmd_idx, pte_ofs;
31082 unsigned long pfn;
31083 pgd_t *pgd;
31084+ pud_t *pud;
31085 pmd_t *pmd;
31086 pte_t *pte;
31087 unsigned pages_2m, pages_4k;
31088@@ -291,8 +295,13 @@ repeat:
31089 pfn = start_pfn;
31090 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
31091 pgd = pgd_base + pgd_idx;
31092- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
31093- pmd = one_md_table_init(pgd);
31094+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
31095+ pud = pud_offset(pgd, 0);
31096+ pmd = pmd_offset(pud, 0);
31097+
31098+#ifdef CONFIG_X86_PAE
31099+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
31100+#endif
31101
31102 if (pfn >= end_pfn)
31103 continue;
31104@@ -304,14 +313,13 @@ repeat:
31105 #endif
31106 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
31107 pmd++, pmd_idx++) {
31108- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
31109+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
31110
31111 /*
31112 * Map with big pages if possible, otherwise
31113 * create normal page tables:
31114 */
31115 if (use_pse) {
31116- unsigned int addr2;
31117 pgprot_t prot = PAGE_KERNEL_LARGE;
31118 /*
31119 * first pass will use the same initial
31120@@ -322,11 +330,7 @@ repeat:
31121 _PAGE_PSE);
31122
31123 pfn &= PMD_MASK >> PAGE_SHIFT;
31124- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
31125- PAGE_OFFSET + PAGE_SIZE-1;
31126-
31127- if (is_kernel_text(addr) ||
31128- is_kernel_text(addr2))
31129+ if (is_kernel_text(address, address + PMD_SIZE))
31130 prot = PAGE_KERNEL_LARGE_EXEC;
31131
31132 pages_2m++;
31133@@ -343,7 +347,7 @@ repeat:
31134 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
31135 pte += pte_ofs;
31136 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
31137- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
31138+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
31139 pgprot_t prot = PAGE_KERNEL;
31140 /*
31141 * first pass will use the same initial
31142@@ -351,7 +355,7 @@ repeat:
31143 */
31144 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
31145
31146- if (is_kernel_text(addr))
31147+ if (is_kernel_text(address, address + PAGE_SIZE))
31148 prot = PAGE_KERNEL_EXEC;
31149
31150 pages_4k++;
31151@@ -474,7 +478,7 @@ void __init native_pagetable_init(void)
31152
31153 pud = pud_offset(pgd, va);
31154 pmd = pmd_offset(pud, va);
31155- if (!pmd_present(*pmd))
31156+ if (!pmd_present(*pmd)) /* PAX TODO: || pmd_large(*pmd) */
31157 break;
31158
31159 /* should not be large page here */
31160@@ -532,12 +536,10 @@ void __init early_ioremap_page_table_range_init(void)
31161
31162 static void __init pagetable_init(void)
31163 {
31164- pgd_t *pgd_base = swapper_pg_dir;
31165-
31166- permanent_kmaps_init(pgd_base);
31167+ permanent_kmaps_init(swapper_pg_dir);
31168 }
31169
31170-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
31171+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
31172 EXPORT_SYMBOL_GPL(__supported_pte_mask);
31173
31174 /* user-defined highmem size */
31175@@ -772,7 +774,7 @@ void __init mem_init(void)
31176 after_bootmem = 1;
31177
31178 codesize = (unsigned long) &_etext - (unsigned long) &_text;
31179- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
31180+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
31181 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
31182
31183 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
31184@@ -813,10 +815,10 @@ void __init mem_init(void)
31185 ((unsigned long)&__init_end -
31186 (unsigned long)&__init_begin) >> 10,
31187
31188- (unsigned long)&_etext, (unsigned long)&_edata,
31189- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
31190+ (unsigned long)&_sdata, (unsigned long)&_edata,
31191+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
31192
31193- (unsigned long)&_text, (unsigned long)&_etext,
31194+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
31195 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
31196
31197 /*
31198@@ -906,6 +908,7 @@ void set_kernel_text_rw(void)
31199 if (!kernel_set_to_readonly)
31200 return;
31201
31202+ start = ktla_ktva(start);
31203 pr_debug("Set kernel text: %lx - %lx for read write\n",
31204 start, start+size);
31205
31206@@ -920,6 +923,7 @@ void set_kernel_text_ro(void)
31207 if (!kernel_set_to_readonly)
31208 return;
31209
31210+ start = ktla_ktva(start);
31211 pr_debug("Set kernel text: %lx - %lx for read only\n",
31212 start, start+size);
31213
31214@@ -948,6 +952,7 @@ void mark_rodata_ro(void)
31215 unsigned long start = PFN_ALIGN(_text);
31216 unsigned long size = PFN_ALIGN(_etext) - start;
31217
31218+ start = ktla_ktva(start);
31219 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
31220 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
31221 size >> 10);
31222diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
31223index bb00c46..bf91a67 100644
31224--- a/arch/x86/mm/init_64.c
31225+++ b/arch/x86/mm/init_64.c
31226@@ -151,7 +151,7 @@ early_param("gbpages", parse_direct_gbpages_on);
31227 * around without checking the pgd every time.
31228 */
31229
31230-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
31231+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
31232 EXPORT_SYMBOL_GPL(__supported_pte_mask);
31233
31234 int force_personality32;
31235@@ -184,12 +184,29 @@ void sync_global_pgds(unsigned long start, unsigned long end)
31236
31237 for (address = start; address <= end; address += PGDIR_SIZE) {
31238 const pgd_t *pgd_ref = pgd_offset_k(address);
31239+
31240+#ifdef CONFIG_PAX_PER_CPU_PGD
31241+ unsigned long cpu;
31242+#else
31243 struct page *page;
31244+#endif
31245
31246 if (pgd_none(*pgd_ref))
31247 continue;
31248
31249 spin_lock(&pgd_lock);
31250+
31251+#ifdef CONFIG_PAX_PER_CPU_PGD
31252+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
31253+ pgd_t *pgd = pgd_offset_cpu(cpu, user, address);
31254+
31255+ if (pgd_none(*pgd))
31256+ set_pgd(pgd, *pgd_ref);
31257+ else
31258+ BUG_ON(pgd_page_vaddr(*pgd)
31259+ != pgd_page_vaddr(*pgd_ref));
31260+ pgd = pgd_offset_cpu(cpu, kernel, address);
31261+#else
31262 list_for_each_entry(page, &pgd_list, lru) {
31263 pgd_t *pgd;
31264 spinlock_t *pgt_lock;
31265@@ -198,6 +215,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
31266 /* the pgt_lock only for Xen */
31267 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
31268 spin_lock(pgt_lock);
31269+#endif
31270
31271 if (pgd_none(*pgd))
31272 set_pgd(pgd, *pgd_ref);
31273@@ -205,7 +223,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
31274 BUG_ON(pgd_page_vaddr(*pgd)
31275 != pgd_page_vaddr(*pgd_ref));
31276
31277+#ifndef CONFIG_PAX_PER_CPU_PGD
31278 spin_unlock(pgt_lock);
31279+#endif
31280+
31281 }
31282 spin_unlock(&pgd_lock);
31283 }
31284@@ -238,7 +259,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
31285 {
31286 if (pgd_none(*pgd)) {
31287 pud_t *pud = (pud_t *)spp_getpage();
31288- pgd_populate(&init_mm, pgd, pud);
31289+ pgd_populate_kernel(&init_mm, pgd, pud);
31290 if (pud != pud_offset(pgd, 0))
31291 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
31292 pud, pud_offset(pgd, 0));
31293@@ -250,7 +271,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
31294 {
31295 if (pud_none(*pud)) {
31296 pmd_t *pmd = (pmd_t *) spp_getpage();
31297- pud_populate(&init_mm, pud, pmd);
31298+ pud_populate_kernel(&init_mm, pud, pmd);
31299 if (pmd != pmd_offset(pud, 0))
31300 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
31301 pmd, pmd_offset(pud, 0));
31302@@ -279,7 +300,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
31303 pmd = fill_pmd(pud, vaddr);
31304 pte = fill_pte(pmd, vaddr);
31305
31306+ pax_open_kernel();
31307 set_pte(pte, new_pte);
31308+ pax_close_kernel();
31309
31310 /*
31311 * It's enough to flush this one mapping.
31312@@ -338,14 +361,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
31313 pgd = pgd_offset_k((unsigned long)__va(phys));
31314 if (pgd_none(*pgd)) {
31315 pud = (pud_t *) spp_getpage();
31316- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
31317- _PAGE_USER));
31318+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
31319 }
31320 pud = pud_offset(pgd, (unsigned long)__va(phys));
31321 if (pud_none(*pud)) {
31322 pmd = (pmd_t *) spp_getpage();
31323- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
31324- _PAGE_USER));
31325+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
31326 }
31327 pmd = pmd_offset(pud, phys);
31328 BUG_ON(!pmd_none(*pmd));
31329@@ -586,7 +607,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
31330 prot);
31331
31332 spin_lock(&init_mm.page_table_lock);
31333- pud_populate(&init_mm, pud, pmd);
31334+ pud_populate_kernel(&init_mm, pud, pmd);
31335 spin_unlock(&init_mm.page_table_lock);
31336 }
31337 __flush_tlb_all();
31338@@ -627,7 +648,7 @@ kernel_physical_mapping_init(unsigned long start,
31339 page_size_mask);
31340
31341 spin_lock(&init_mm.page_table_lock);
31342- pgd_populate(&init_mm, pgd, pud);
31343+ pgd_populate_kernel(&init_mm, pgd, pud);
31344 spin_unlock(&init_mm.page_table_lock);
31345 pgd_changed = true;
31346 }
31347@@ -1221,8 +1242,8 @@ int kern_addr_valid(unsigned long addr)
31348 static struct vm_area_struct gate_vma = {
31349 .vm_start = VSYSCALL_START,
31350 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
31351- .vm_page_prot = PAGE_READONLY_EXEC,
31352- .vm_flags = VM_READ | VM_EXEC
31353+ .vm_page_prot = PAGE_READONLY,
31354+ .vm_flags = VM_READ
31355 };
31356
31357 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
31358@@ -1256,7 +1277,7 @@ int in_gate_area_no_mm(unsigned long addr)
31359
31360 const char *arch_vma_name(struct vm_area_struct *vma)
31361 {
31362- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
31363+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
31364 return "[vdso]";
31365 if (vma == &gate_vma)
31366 return "[vsyscall]";
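
Under CONFIG_PAX_PER_CPU_PGD the sync_global_pgds() hunk above stops walking pgd_list and instead pushes a newly populated kernel PGD slot into every CPU's private user and kernel directories. A reduced sketch of that inner loop, assuming the patch's pgd_offset_cpu() helper and pgd_lock already held by the caller:

        /* sketch only: propagate one kernel mapping to all per-CPU PGDs */
        static void sync_one_address(unsigned long address, const pgd_t *pgd_ref)
        {
                unsigned long cpu;

                for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
                        pgd_t *pgd = pgd_offset_cpu(cpu, user, address);

                        if (pgd_none(*pgd))
                                set_pgd(pgd, *pgd_ref);
                        else
                                BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));

                        pgd = pgd_offset_cpu(cpu, kernel, address);
                        if (pgd_none(*pgd))
                                set_pgd(pgd, *pgd_ref);
                        else
                                BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
                }
        }
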
31367diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
31368index 7b179b4..6bd17777 100644
31369--- a/arch/x86/mm/iomap_32.c
31370+++ b/arch/x86/mm/iomap_32.c
31371@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
31372 type = kmap_atomic_idx_push();
31373 idx = type + KM_TYPE_NR * smp_processor_id();
31374 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
31375+
31376+ pax_open_kernel();
31377 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
31378+ pax_close_kernel();
31379+
31380 arch_flush_lazy_mmu_mode();
31381
31382 return (void *)vaddr;
31383diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
31384index 9a1e658..da003f3 100644
31385--- a/arch/x86/mm/ioremap.c
31386+++ b/arch/x86/mm/ioremap.c
31387@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
31388 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
31389 int is_ram = page_is_ram(pfn);
31390
31391- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
31392+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
31393 return NULL;
31394 WARN_ON_ONCE(is_ram);
31395 }
31396@@ -256,7 +256,7 @@ EXPORT_SYMBOL(ioremap_prot);
31397 *
31398 * Caller must ensure there is only one unmapping for the same pointer.
31399 */
31400-void iounmap(volatile void __iomem *addr)
31401+void iounmap(const volatile void __iomem *addr)
31402 {
31403 struct vm_struct *p, *o;
31404
31405@@ -310,6 +310,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
31406
31407 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
31408 if (page_is_ram(start >> PAGE_SHIFT))
31409+#ifdef CONFIG_HIGHMEM
31410+ if ((start >> PAGE_SHIFT) < max_low_pfn)
31411+#endif
31412 return __va(phys);
31413
31414 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
31415@@ -322,6 +325,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
31416 void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
31417 {
31418 if (page_is_ram(phys >> PAGE_SHIFT))
31419+#ifdef CONFIG_HIGHMEM
31420+ if ((phys >> PAGE_SHIFT) < max_low_pfn)
31421+#endif
31422 return;
31423
31424 iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
31425@@ -339,7 +345,7 @@ static int __init early_ioremap_debug_setup(char *str)
31426 early_param("early_ioremap_debug", early_ioremap_debug_setup);
31427
31428 static __initdata int after_paging_init;
31429-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
31430+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
31431
31432 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
31433 {
31434@@ -376,8 +382,7 @@ void __init early_ioremap_init(void)
31435 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
31436
31437 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
31438- memset(bm_pte, 0, sizeof(bm_pte));
31439- pmd_populate_kernel(&init_mm, pmd, bm_pte);
31440+ pmd_populate_user(&init_mm, pmd, bm_pte);
31441
31442 /*
31443 * The boot-ioremap range spans multiple pmds, for which
31444diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
31445index d87dd6d..bf3fa66 100644
31446--- a/arch/x86/mm/kmemcheck/kmemcheck.c
31447+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
31448@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
31449 * memory (e.g. tracked pages)? For now, we need this to avoid
31450 * invoking kmemcheck for PnP BIOS calls.
31451 */
31452- if (regs->flags & X86_VM_MASK)
31453+ if (v8086_mode(regs))
31454 return false;
31455- if (regs->cs != __KERNEL_CS)
31456+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
31457 return false;
31458
31459 pte = kmemcheck_pte_lookup(address);
31460diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
31461index c1af323..4758dad 100644
31462--- a/arch/x86/mm/mmap.c
31463+++ b/arch/x86/mm/mmap.c
31464@@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
31465 * Leave an at least ~128 MB hole with possible stack randomization.
31466 */
31467 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
31468-#define MAX_GAP (TASK_SIZE/6*5)
31469+#define MAX_GAP (pax_task_size/6*5)
31470
31471 static int mmap_is_legacy(void)
31472 {
31473@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
31474 return rnd << PAGE_SHIFT;
31475 }
31476
31477-static unsigned long mmap_base(void)
31478+static unsigned long mmap_base(struct mm_struct *mm)
31479 {
31480 unsigned long gap = rlimit(RLIMIT_STACK);
31481+ unsigned long pax_task_size = TASK_SIZE;
31482+
31483+#ifdef CONFIG_PAX_SEGMEXEC
31484+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
31485+ pax_task_size = SEGMEXEC_TASK_SIZE;
31486+#endif
31487
31488 if (gap < MIN_GAP)
31489 gap = MIN_GAP;
31490 else if (gap > MAX_GAP)
31491 gap = MAX_GAP;
31492
31493- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
31494+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
31495 }
31496
31497 /*
31498 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
31499 * does, but not when emulating X86_32
31500 */
31501-unsigned long mmap_legacy_base(void)
31502+unsigned long mmap_legacy_base(struct mm_struct *mm)
31503 {
31504- if (mmap_is_ia32())
31505+ if (mmap_is_ia32()) {
31506+
31507+#ifdef CONFIG_PAX_SEGMEXEC
31508+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
31509+ return SEGMEXEC_TASK_UNMAPPED_BASE;
31510+ else
31511+#endif
31512+
31513 return TASK_UNMAPPED_BASE;
31514- else
31515+ } else
31516 return TASK_UNMAPPED_BASE + mmap_rnd();
31517 }
31518
31519@@ -113,11 +126,23 @@ unsigned long mmap_legacy_base(void)
31520 void arch_pick_mmap_layout(struct mm_struct *mm)
31521 {
31522 if (mmap_is_legacy()) {
31523- mm->mmap_base = mmap_legacy_base();
31524+ mm->mmap_base = mmap_legacy_base(mm);
31525+
31526+#ifdef CONFIG_PAX_RANDMMAP
31527+ if (mm->pax_flags & MF_PAX_RANDMMAP)
31528+ mm->mmap_base += mm->delta_mmap;
31529+#endif
31530+
31531 mm->get_unmapped_area = arch_get_unmapped_area;
31532 mm->unmap_area = arch_unmap_area;
31533 } else {
31534- mm->mmap_base = mmap_base();
31535+ mm->mmap_base = mmap_base(mm);
31536+
31537+#ifdef CONFIG_PAX_RANDMMAP
31538+ if (mm->pax_flags & MF_PAX_RANDMMAP)
31539+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
31540+#endif
31541+
31542 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
31543 mm->unmap_area = arch_unmap_area_topdown;
31544 }
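
The mmap.c changes thread the mm through mmap_base()/mmap_legacy_base() so the task size can shrink under SEGMEXEC and the PAX_RANDMMAP deltas can be applied per process; the gap arithmetic itself is unchanged: clamp the stack rlimit between MIN_GAP and MAX_GAP, then subtract it and the random offset from the top of the address space. Spelled out as a standalone sketch (parameter names are illustrative, not from the patch):

        /* sketch of the top-down base: clamp the stack gap, then subtract */
        static unsigned long topdown_mmap_base(unsigned long task_size,
                                               unsigned long stack_rlimit,
                                               unsigned long rnd)
        {
                unsigned long min_gap = 128UL * 1024 * 1024; /* + stack_maxrandom_size() */
                unsigned long max_gap = task_size / 6 * 5;
                unsigned long gap = stack_rlimit;

                if (gap < min_gap)
                        gap = min_gap;
                else if (gap > max_gap)
                        gap = max_gap;

                return PAGE_ALIGN(task_size - gap - rnd);
        }
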
31545diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
31546index dc0b727..f612039 100644
31547--- a/arch/x86/mm/mmio-mod.c
31548+++ b/arch/x86/mm/mmio-mod.c
31549@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
31550 break;
31551 default:
31552 {
31553- unsigned char *ip = (unsigned char *)instptr;
31554+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
31555 my_trace->opcode = MMIO_UNKNOWN_OP;
31556 my_trace->width = 0;
31557 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
31558@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
31559 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
31560 void __iomem *addr)
31561 {
31562- static atomic_t next_id;
31563+ static atomic_unchecked_t next_id;
31564 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
31565 /* These are page-unaligned. */
31566 struct mmiotrace_map map = {
31567@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
31568 .private = trace
31569 },
31570 .phys = offset,
31571- .id = atomic_inc_return(&next_id)
31572+ .id = atomic_inc_return_unchecked(&next_id)
31573 };
31574 map.map_id = trace->id;
31575
31576@@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
31577 ioremap_trace_core(offset, size, addr);
31578 }
31579
31580-static void iounmap_trace_core(volatile void __iomem *addr)
31581+static void iounmap_trace_core(const volatile void __iomem *addr)
31582 {
31583 struct mmiotrace_map map = {
31584 .phys = 0,
31585@@ -328,7 +328,7 @@ not_enabled:
31586 }
31587 }
31588
31589-void mmiotrace_iounmap(volatile void __iomem *addr)
31590+void mmiotrace_iounmap(const volatile void __iomem *addr)
31591 {
31592 might_sleep();
31593 if (is_enabled()) /* recheck and proper locking in *_core() */
31594diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
31595index a71c4e2..301ae44 100644
31596--- a/arch/x86/mm/numa.c
31597+++ b/arch/x86/mm/numa.c
31598@@ -474,7 +474,7 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
31599 return true;
31600 }
31601
31602-static int __init numa_register_memblks(struct numa_meminfo *mi)
31603+static int __init __intentional_overflow(-1) numa_register_memblks(struct numa_meminfo *mi)
31604 {
31605 unsigned long uninitialized_var(pfn_align);
31606 int i, nid;
31607diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
31608index d0b1773..4c3327c 100644
31609--- a/arch/x86/mm/pageattr-test.c
31610+++ b/arch/x86/mm/pageattr-test.c
31611@@ -36,7 +36,7 @@ enum {
31612
31613 static int pte_testbit(pte_t pte)
31614 {
31615- return pte_flags(pte) & _PAGE_UNUSED1;
31616+ return pte_flags(pte) & _PAGE_CPA_TEST;
31617 }
31618
31619 struct split_state {
31620diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
31621index bb32480..75f2f5e 100644
31622--- a/arch/x86/mm/pageattr.c
31623+++ b/arch/x86/mm/pageattr.c
31624@@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
31625 */
31626 #ifdef CONFIG_PCI_BIOS
31627 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
31628- pgprot_val(forbidden) |= _PAGE_NX;
31629+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
31630 #endif
31631
31632 /*
31633@@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
31634 * Does not cover __inittext since that is gone later on. On
31635 * 64bit we do not enforce !NX on the low mapping
31636 */
31637- if (within(address, (unsigned long)_text, (unsigned long)_etext))
31638- pgprot_val(forbidden) |= _PAGE_NX;
31639+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
31640+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
31641
31642+#ifdef CONFIG_DEBUG_RODATA
31643 /*
31644 * The .rodata section needs to be read-only. Using the pfn
31645 * catches all aliases.
31646@@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
31647 if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
31648 __pa_symbol(__end_rodata) >> PAGE_SHIFT))
31649 pgprot_val(forbidden) |= _PAGE_RW;
31650+#endif
31651
31652 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
31653 /*
31654@@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
31655 }
31656 #endif
31657
31658+#ifdef CONFIG_PAX_KERNEXEC
31659+ if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)), __pa((unsigned long)&_sdata))) {
31660+ pgprot_val(forbidden) |= _PAGE_RW;
31661+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
31662+ }
31663+#endif
31664+
31665 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
31666
31667 return prot;
31668@@ -400,23 +409,37 @@ EXPORT_SYMBOL_GPL(slow_virt_to_phys);
31669 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
31670 {
31671 /* change init_mm */
31672+ pax_open_kernel();
31673 set_pte_atomic(kpte, pte);
31674+
31675 #ifdef CONFIG_X86_32
31676 if (!SHARED_KERNEL_PMD) {
31677+
31678+#ifdef CONFIG_PAX_PER_CPU_PGD
31679+ unsigned long cpu;
31680+#else
31681 struct page *page;
31682+#endif
31683
31684+#ifdef CONFIG_PAX_PER_CPU_PGD
31685+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
31686+ pgd_t *pgd = get_cpu_pgd(cpu, kernel);
31687+#else
31688 list_for_each_entry(page, &pgd_list, lru) {
31689- pgd_t *pgd;
31690+ pgd_t *pgd = (pgd_t *)page_address(page);
31691+#endif
31692+
31693 pud_t *pud;
31694 pmd_t *pmd;
31695
31696- pgd = (pgd_t *)page_address(page) + pgd_index(address);
31697+ pgd += pgd_index(address);
31698 pud = pud_offset(pgd, address);
31699 pmd = pmd_offset(pud, address);
31700 set_pte_atomic((pte_t *)pmd, pte);
31701 }
31702 }
31703 #endif
31704+ pax_close_kernel();
31705 }
31706
31707 static int
31708diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
31709index 6574388..87e9bef 100644
31710--- a/arch/x86/mm/pat.c
31711+++ b/arch/x86/mm/pat.c
31712@@ -376,7 +376,7 @@ int free_memtype(u64 start, u64 end)
31713
31714 if (!entry) {
31715 printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
31716- current->comm, current->pid, start, end - 1);
31717+ current->comm, task_pid_nr(current), start, end - 1);
31718 return -EINVAL;
31719 }
31720
31721@@ -506,8 +506,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31722
31723 while (cursor < to) {
31724 if (!devmem_is_allowed(pfn)) {
31725- printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
31726- current->comm, from, to - 1);
31727+ printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx)\n",
31728+ current->comm, from, to - 1, cursor);
31729 return 0;
31730 }
31731 cursor += PAGE_SIZE;
31732@@ -577,7 +577,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
31733 if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
31734 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
31735 "for [mem %#010Lx-%#010Lx]\n",
31736- current->comm, current->pid,
31737+ current->comm, task_pid_nr(current),
31738 cattr_name(flags),
31739 base, (unsigned long long)(base + size-1));
31740 return -EINVAL;
31741@@ -612,7 +612,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
31742 flags = lookup_memtype(paddr);
31743 if (want_flags != flags) {
31744 printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
31745- current->comm, current->pid,
31746+ current->comm, task_pid_nr(current),
31747 cattr_name(want_flags),
31748 (unsigned long long)paddr,
31749 (unsigned long long)(paddr + size - 1),
31750@@ -634,7 +634,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
31751 free_memtype(paddr, paddr + size);
31752 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
31753 " for [mem %#010Lx-%#010Lx], got %s\n",
31754- current->comm, current->pid,
31755+ current->comm, task_pid_nr(current),
31756 cattr_name(want_flags),
31757 (unsigned long long)paddr,
31758 (unsigned long long)(paddr + size - 1),
31759diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
31760index 415f6c4..d319983 100644
31761--- a/arch/x86/mm/pat_rbtree.c
31762+++ b/arch/x86/mm/pat_rbtree.c
31763@@ -160,7 +160,7 @@ success:
31764
31765 failure:
31766 printk(KERN_INFO "%s:%d conflicting memory types "
31767- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, start,
31768+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), start,
31769 end, cattr_name(found_type), cattr_name(match->type));
31770 return -EBUSY;
31771 }
31772diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
31773index 9f0614d..92ae64a 100644
31774--- a/arch/x86/mm/pf_in.c
31775+++ b/arch/x86/mm/pf_in.c
31776@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
31777 int i;
31778 enum reason_type rv = OTHERS;
31779
31780- p = (unsigned char *)ins_addr;
31781+ p = (unsigned char *)ktla_ktva(ins_addr);
31782 p += skip_prefix(p, &prf);
31783 p += get_opcode(p, &opcode);
31784
31785@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
31786 struct prefix_bits prf;
31787 int i;
31788
31789- p = (unsigned char *)ins_addr;
31790+ p = (unsigned char *)ktla_ktva(ins_addr);
31791 p += skip_prefix(p, &prf);
31792 p += get_opcode(p, &opcode);
31793
31794@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
31795 struct prefix_bits prf;
31796 int i;
31797
31798- p = (unsigned char *)ins_addr;
31799+ p = (unsigned char *)ktla_ktva(ins_addr);
31800 p += skip_prefix(p, &prf);
31801 p += get_opcode(p, &opcode);
31802
31803@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
31804 struct prefix_bits prf;
31805 int i;
31806
31807- p = (unsigned char *)ins_addr;
31808+ p = (unsigned char *)ktla_ktva(ins_addr);
31809 p += skip_prefix(p, &prf);
31810 p += get_opcode(p, &opcode);
31811 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
31812@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
31813 struct prefix_bits prf;
31814 int i;
31815
31816- p = (unsigned char *)ins_addr;
31817+ p = (unsigned char *)ktla_ktva(ins_addr);
31818 p += skip_prefix(p, &prf);
31819 p += get_opcode(p, &opcode);
31820 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
31821diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
31822index 17fda6a..f7d54a0 100644
31823--- a/arch/x86/mm/pgtable.c
31824+++ b/arch/x86/mm/pgtable.c
31825@@ -91,10 +91,67 @@ static inline void pgd_list_del(pgd_t *pgd)
31826 list_del(&page->lru);
31827 }
31828
31829-#define UNSHARED_PTRS_PER_PGD \
31830- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
31831+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31832+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
31833
31834+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
31835+{
31836+ unsigned int count = USER_PGD_PTRS;
31837
31838+ if (!pax_user_shadow_base)
31839+ return;
31840+
31841+ while (count--)
31842+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
31843+}
31844+#endif
31845+
31846+#ifdef CONFIG_PAX_PER_CPU_PGD
31847+void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
31848+{
31849+ unsigned int count = USER_PGD_PTRS;
31850+
31851+ while (count--) {
31852+ pgd_t pgd;
31853+
31854+#ifdef CONFIG_X86_64
31855+ pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
31856+#else
31857+ pgd = *src++;
31858+#endif
31859+
31860+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31861+ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
31862+#endif
31863+
31864+ *dst++ = pgd;
31865+ }
31866+
31867+}
31868+#endif
31869+
31870+#ifdef CONFIG_X86_64
31871+#define pxd_t pud_t
31872+#define pyd_t pgd_t
31873+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
31874+#define pxd_free(mm, pud) pud_free((mm), (pud))
31875+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
31876+#define pyd_offset(mm, address) pgd_offset((mm), (address))
31877+#define PYD_SIZE PGDIR_SIZE
31878+#else
31879+#define pxd_t pmd_t
31880+#define pyd_t pud_t
31881+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
31882+#define pxd_free(mm, pud) pmd_free((mm), (pud))
31883+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
31884+#define pyd_offset(mm, address) pud_offset((mm), (address))
31885+#define PYD_SIZE PUD_SIZE
31886+#endif
31887+
31888+#ifdef CONFIG_PAX_PER_CPU_PGD
31889+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
31890+static inline void pgd_dtor(pgd_t *pgd) {}
31891+#else
31892 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
31893 {
31894 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
31895@@ -135,6 +192,7 @@ static void pgd_dtor(pgd_t *pgd)
31896 pgd_list_del(pgd);
31897 spin_unlock(&pgd_lock);
31898 }
31899+#endif
31900
31901 /*
31902 * List of all pgd's needed for non-PAE so it can invalidate entries
31903@@ -147,7 +205,7 @@ static void pgd_dtor(pgd_t *pgd)
31904 * -- nyc
31905 */
31906
31907-#ifdef CONFIG_X86_PAE
31908+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
31909 /*
31910 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
31911 * updating the top-level pagetable entries to guarantee the
31912@@ -159,7 +217,7 @@ static void pgd_dtor(pgd_t *pgd)
31913 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
31914 * and initialize the kernel pmds here.
31915 */
31916-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
31917+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
31918
31919 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
31920 {
31921@@ -177,36 +235,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
31922 */
31923 flush_tlb_mm(mm);
31924 }
31925+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
31926+#define PREALLOCATED_PXDS USER_PGD_PTRS
31927 #else /* !CONFIG_X86_PAE */
31928
31929 /* No need to prepopulate any pagetable entries in non-PAE modes. */
31930-#define PREALLOCATED_PMDS 0
31931+#define PREALLOCATED_PXDS 0
31932
31933 #endif /* CONFIG_X86_PAE */
31934
31935-static void free_pmds(pmd_t *pmds[])
31936+static void free_pxds(pxd_t *pxds[])
31937 {
31938 int i;
31939
31940- for(i = 0; i < PREALLOCATED_PMDS; i++)
31941- if (pmds[i])
31942- free_page((unsigned long)pmds[i]);
31943+ for(i = 0; i < PREALLOCATED_PXDS; i++)
31944+ if (pxds[i])
31945+ free_page((unsigned long)pxds[i]);
31946 }
31947
31948-static int preallocate_pmds(pmd_t *pmds[])
31949+static int preallocate_pxds(pxd_t *pxds[])
31950 {
31951 int i;
31952 bool failed = false;
31953
31954- for(i = 0; i < PREALLOCATED_PMDS; i++) {
31955- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
31956- if (pmd == NULL)
31957+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
31958+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
31959+ if (pxd == NULL)
31960 failed = true;
31961- pmds[i] = pmd;
31962+ pxds[i] = pxd;
31963 }
31964
31965 if (failed) {
31966- free_pmds(pmds);
31967+ free_pxds(pxds);
31968 return -ENOMEM;
31969 }
31970
31971@@ -219,51 +279,55 @@ static int preallocate_pmds(pmd_t *pmds[])
31972 * preallocate which never got a corresponding vma will need to be
31973 * freed manually.
31974 */
31975-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
31976+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
31977 {
31978 int i;
31979
31980- for(i = 0; i < PREALLOCATED_PMDS; i++) {
31981+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
31982 pgd_t pgd = pgdp[i];
31983
31984 if (pgd_val(pgd) != 0) {
31985- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
31986+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
31987
31988- pgdp[i] = native_make_pgd(0);
31989+ set_pgd(pgdp + i, native_make_pgd(0));
31990
31991- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
31992- pmd_free(mm, pmd);
31993+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
31994+ pxd_free(mm, pxd);
31995 }
31996 }
31997 }
31998
31999-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
32000+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
32001 {
32002- pud_t *pud;
32003+ pyd_t *pyd;
32004 unsigned long addr;
32005 int i;
32006
32007- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
32008+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
32009 return;
32010
32011- pud = pud_offset(pgd, 0);
32012+#ifdef CONFIG_X86_64
32013+ pyd = pyd_offset(mm, 0L);
32014+#else
32015+ pyd = pyd_offset(pgd, 0L);
32016+#endif
32017
32018- for (addr = i = 0; i < PREALLOCATED_PMDS;
32019- i++, pud++, addr += PUD_SIZE) {
32020- pmd_t *pmd = pmds[i];
32021+ for (addr = i = 0; i < PREALLOCATED_PXDS;
32022+ i++, pyd++, addr += PYD_SIZE) {
32023+ pxd_t *pxd = pxds[i];
32024
32025 if (i >= KERNEL_PGD_BOUNDARY)
32026- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
32027- sizeof(pmd_t) * PTRS_PER_PMD);
32028+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
32029+ sizeof(pxd_t) * PTRS_PER_PMD);
32030
32031- pud_populate(mm, pud, pmd);
32032+ pyd_populate(mm, pyd, pxd);
32033 }
32034 }
32035
32036 pgd_t *pgd_alloc(struct mm_struct *mm)
32037 {
32038 pgd_t *pgd;
32039- pmd_t *pmds[PREALLOCATED_PMDS];
32040+ pxd_t *pxds[PREALLOCATED_PXDS];
32041
32042 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
32043
32044@@ -272,11 +336,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
32045
32046 mm->pgd = pgd;
32047
32048- if (preallocate_pmds(pmds) != 0)
32049+ if (preallocate_pxds(pxds) != 0)
32050 goto out_free_pgd;
32051
32052 if (paravirt_pgd_alloc(mm) != 0)
32053- goto out_free_pmds;
32054+ goto out_free_pxds;
32055
32056 /*
32057 * Make sure that pre-populating the pmds is atomic with
32058@@ -286,14 +350,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
32059 spin_lock(&pgd_lock);
32060
32061 pgd_ctor(mm, pgd);
32062- pgd_prepopulate_pmd(mm, pgd, pmds);
32063+ pgd_prepopulate_pxd(mm, pgd, pxds);
32064
32065 spin_unlock(&pgd_lock);
32066
32067 return pgd;
32068
32069-out_free_pmds:
32070- free_pmds(pmds);
32071+out_free_pxds:
32072+ free_pxds(pxds);
32073 out_free_pgd:
32074 free_page((unsigned long)pgd);
32075 out:
32076@@ -302,7 +366,7 @@ out:
32077
32078 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
32079 {
32080- pgd_mop_up_pmds(mm, pgd);
32081+ pgd_mop_up_pxds(mm, pgd);
32082 pgd_dtor(pgd);
32083 paravirt_pgd_free(mm, pgd);
32084 free_page((unsigned long)pgd);
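
The pmd-to-pxd and pud-to-pyd renames above are a two-way macro abstraction: on 32-bit PAE a "pxd" is a pmd preallocated under the folded pud, while on x86-64 with CONFIG_PAX_PER_CPU_PGD it is a pud preallocated under the user half of the PGD (PREALLOCATED_PXDS == USER_PGD_PTRS). Roughly, pgd_prepopulate_pxd() reduces on the 64-bit side to the following sketch (prepopulate_user_puds() is not a name from the patch):

        /* sketch: the x86-64 expansion, where all user slots sit below
         * KERNEL_PGD_BOUNDARY so the swapper memcpy in the real loop
         * never fires */
        static void prepopulate_user_puds(struct mm_struct *mm, pud_t *puds[])
        {
                pgd_t *pgd = pgd_offset(mm, 0UL);       /* pyd_offset(mm, 0L) */
                unsigned long addr;
                int i;

                for (addr = i = 0; i < USER_PGD_PTRS; i++, pgd++, addr += PGDIR_SIZE)
                        pgd_populate(mm, pgd, puds[i]); /* pyd_populate() */
        }
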
32085diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
32086index a69bcb8..19068ab 100644
32087--- a/arch/x86/mm/pgtable_32.c
32088+++ b/arch/x86/mm/pgtable_32.c
32089@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
32090 return;
32091 }
32092 pte = pte_offset_kernel(pmd, vaddr);
32093+
32094+ pax_open_kernel();
32095 if (pte_val(pteval))
32096 set_pte_at(&init_mm, vaddr, pte, pteval);
32097 else
32098 pte_clear(&init_mm, vaddr, pte);
32099+ pax_close_kernel();
32100
32101 /*
32102 * It's enough to flush this one mapping.
32103diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
32104index e666cbb..61788c45 100644
32105--- a/arch/x86/mm/physaddr.c
32106+++ b/arch/x86/mm/physaddr.c
32107@@ -10,7 +10,7 @@
32108 #ifdef CONFIG_X86_64
32109
32110 #ifdef CONFIG_DEBUG_VIRTUAL
32111-unsigned long __phys_addr(unsigned long x)
32112+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
32113 {
32114 unsigned long y = x - __START_KERNEL_map;
32115
32116@@ -67,7 +67,7 @@ EXPORT_SYMBOL(__virt_addr_valid);
32117 #else
32118
32119 #ifdef CONFIG_DEBUG_VIRTUAL
32120-unsigned long __phys_addr(unsigned long x)
32121+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
32122 {
32123 unsigned long phys_addr = x - PAGE_OFFSET;
32124 /* VMALLOC_* aren't constants */
32125diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
32126index 410531d..0f16030 100644
32127--- a/arch/x86/mm/setup_nx.c
32128+++ b/arch/x86/mm/setup_nx.c
32129@@ -5,8 +5,10 @@
32130 #include <asm/pgtable.h>
32131 #include <asm/proto.h>
32132
32133+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
32134 static int disable_nx __cpuinitdata;
32135
32136+#ifndef CONFIG_PAX_PAGEEXEC
32137 /*
32138 * noexec = on|off
32139 *
32140@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
32141 return 0;
32142 }
32143 early_param("noexec", noexec_setup);
32144+#endif
32145+
32146+#endif
32147
32148 void __cpuinit x86_configure_nx(void)
32149 {
32150+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
32151 if (cpu_has_nx && !disable_nx)
32152 __supported_pte_mask |= _PAGE_NX;
32153 else
32154+#endif
32155 __supported_pte_mask &= ~_PAGE_NX;
32156 }
32157
32158diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
32159index 282375f..e03a98f 100644
32160--- a/arch/x86/mm/tlb.c
32161+++ b/arch/x86/mm/tlb.c
32162@@ -48,7 +48,11 @@ void leave_mm(int cpu)
32163 BUG();
32164 if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
32165 cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
32166+
32167+#ifndef CONFIG_PAX_PER_CPU_PGD
32168 load_cr3(swapper_pg_dir);
32169+#endif
32170+
32171 }
32172 }
32173 EXPORT_SYMBOL_GPL(leave_mm);
32174diff --git a/arch/x86/mm/uderef_64.c b/arch/x86/mm/uderef_64.c
32175new file mode 100644
32176index 0000000..dace51c
32177--- /dev/null
32178+++ b/arch/x86/mm/uderef_64.c
32179@@ -0,0 +1,37 @@
32180+#include <linux/mm.h>
32181+#include <asm/pgtable.h>
32182+#include <asm/uaccess.h>
32183+
32184+#ifdef CONFIG_PAX_MEMORY_UDEREF
32185+/* PaX: due to the special call convention these functions must
32186+ * - remain leaf functions under all configurations,
32187+ * - never be called directly, only dereferenced from the wrappers.
32188+ */
32189+void __pax_open_userland(void)
32190+{
32191+ unsigned int cpu;
32192+
32193+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
32194+ return;
32195+
32196+ cpu = raw_get_cpu();
32197+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_KERNEL);
32198+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
32199+ raw_put_cpu_no_resched();
32200+}
32201+EXPORT_SYMBOL(__pax_open_userland);
32202+
32203+void __pax_close_userland(void)
32204+{
32205+ unsigned int cpu;
32206+
32207+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
32208+ return;
32209+
32210+ cpu = raw_get_cpu();
32211+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_USER);
32212+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
32213+ raw_put_cpu_no_resched();
32214+}
32215+EXPORT_SYMBOL(__pax_close_userland);
32216+#endif
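
The new uderef_64.c pair is the slow path behind pax_open_userland()/pax_close_userland(): each switch rewrites cr3 from the per-CPU kernel PGD to the per-CPU user PGD (or back), tagging the load with a PCID and setting the no-flush bit so the other address space's TLB entries survive the round trip; the BUG_ON()s assert that the PCID currently in cr3 matches the direction of the switch. The cr3 encoding this relies on, as a sketch (the tag values are assumptions, not quoted from the patch; only the no-flush bit position is architectural):

        #define PCID_KERNEL     0x0UL           /* assumed tag for the kernel PGD */
        #define PCID_USER       0x1UL           /* assumed tag for the user PGD   */
        #define PCID_NOFLUSH    (1UL << 63)     /* CR3 bit 63: keep this PCID's TLB entries */

        /* sketch: compose a tagged, non-flushing cr3 value */
        static inline unsigned long tagged_cr3(unsigned long pgd_phys,
                                               unsigned long pcid)
        {
                return pgd_phys | pcid | PCID_NOFLUSH;  /* pgd_phys is page-aligned */
        }
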
32217diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
32218index 877b9a1..a8ecf42 100644
32219--- a/arch/x86/net/bpf_jit.S
32220+++ b/arch/x86/net/bpf_jit.S
32221@@ -9,6 +9,7 @@
32222 */
32223 #include <linux/linkage.h>
32224 #include <asm/dwarf2.h>
32225+#include <asm/alternative-asm.h>
32226
32227 /*
32228 * Calling convention :
32229@@ -35,6 +36,7 @@ sk_load_word_positive_offset:
32230 jle bpf_slow_path_word
32231 mov (SKBDATA,%rsi),%eax
32232 bswap %eax /* ntohl() */
32233+ pax_force_retaddr
32234 ret
32235
32236 sk_load_half:
32237@@ -52,6 +54,7 @@ sk_load_half_positive_offset:
32238 jle bpf_slow_path_half
32239 movzwl (SKBDATA,%rsi),%eax
32240 rol $8,%ax # ntohs()
32241+ pax_force_retaddr
32242 ret
32243
32244 sk_load_byte:
32245@@ -66,6 +69,7 @@ sk_load_byte_positive_offset:
32246 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
32247 jle bpf_slow_path_byte
32248 movzbl (SKBDATA,%rsi),%eax
32249+ pax_force_retaddr
32250 ret
32251
32252 /**
32253@@ -87,6 +91,7 @@ sk_load_byte_msh_positive_offset:
32254 movzbl (SKBDATA,%rsi),%ebx
32255 and $15,%bl
32256 shl $2,%bl
32257+ pax_force_retaddr
32258 ret
32259
32260 /* rsi contains offset and can be scratched */
32261@@ -109,6 +114,7 @@ bpf_slow_path_word:
32262 js bpf_error
32263 mov -12(%rbp),%eax
32264 bswap %eax
32265+ pax_force_retaddr
32266 ret
32267
32268 bpf_slow_path_half:
32269@@ -117,12 +123,14 @@ bpf_slow_path_half:
32270 mov -12(%rbp),%ax
32271 rol $8,%ax
32272 movzwl %ax,%eax
32273+ pax_force_retaddr
32274 ret
32275
32276 bpf_slow_path_byte:
32277 bpf_slow_path_common(1)
32278 js bpf_error
32279 movzbl -12(%rbp),%eax
32280+ pax_force_retaddr
32281 ret
32282
32283 bpf_slow_path_byte_msh:
32284@@ -133,6 +141,7 @@ bpf_slow_path_byte_msh:
32285 and $15,%al
32286 shl $2,%al
32287 xchg %eax,%ebx
32288+ pax_force_retaddr
32289 ret
32290
32291 #define sk_negative_common(SIZE) \
32292@@ -157,6 +166,7 @@ sk_load_word_negative_offset:
32293 sk_negative_common(4)
32294 mov (%rax), %eax
32295 bswap %eax
32296+ pax_force_retaddr
32297 ret
32298
32299 bpf_slow_path_half_neg:
32300@@ -168,6 +178,7 @@ sk_load_half_negative_offset:
32301 mov (%rax),%ax
32302 rol $8,%ax
32303 movzwl %ax,%eax
32304+ pax_force_retaddr
32305 ret
32306
32307 bpf_slow_path_byte_neg:
32308@@ -177,6 +188,7 @@ sk_load_byte_negative_offset:
32309 .globl sk_load_byte_negative_offset
32310 sk_negative_common(1)
32311 movzbl (%rax), %eax
32312+ pax_force_retaddr
32313 ret
32314
32315 bpf_slow_path_byte_msh_neg:
32316@@ -190,6 +202,7 @@ sk_load_byte_msh_negative_offset:
32317 and $15,%al
32318 shl $2,%al
32319 xchg %eax,%ebx
32320+ pax_force_retaddr
32321 ret
32322
32323 bpf_error:
32324@@ -197,4 +210,5 @@ bpf_error:
32325 xor %eax,%eax
32326 mov -8(%rbp),%rbx
32327 leaveq
32328+ pax_force_retaddr
32329 ret
32330diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
32331index f66b540..3e88dfb 100644
32332--- a/arch/x86/net/bpf_jit_comp.c
32333+++ b/arch/x86/net/bpf_jit_comp.c
32334@@ -12,6 +12,7 @@
32335 #include <linux/netdevice.h>
32336 #include <linux/filter.h>
32337 #include <linux/if_vlan.h>
32338+#include <linux/random.h>
32339
32340 /*
32341 * Conventions :
32342@@ -49,13 +50,90 @@ static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
32343 return ptr + len;
32344 }
32345
32346+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
32347+#define MAX_INSTR_CODE_SIZE 96
32348+#else
32349+#define MAX_INSTR_CODE_SIZE 64
32350+#endif
32351+
32352 #define EMIT(bytes, len) do { prog = emit_code(prog, bytes, len); } while (0)
32353
32354 #define EMIT1(b1) EMIT(b1, 1)
32355 #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
32356 #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
32357 #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
32358+
32359+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
32360+/* original constant will appear in ecx */
32361+#define DILUTE_CONST_SEQUENCE(_off, _key) \
32362+do { \
32363+ /* mov ecx, randkey */ \
32364+ EMIT1(0xb9); \
32365+ EMIT(_key, 4); \
32366+ /* xor ecx, randkey ^ off */ \
32367+ EMIT2(0x81, 0xf1); \
32368+ EMIT((_key) ^ (_off), 4); \
32369+} while (0)
32370+
32371+#define EMIT1_off32(b1, _off) \
32372+do { \
32373+ switch (b1) { \
32374+ case 0x05: /* add eax, imm32 */ \
32375+ case 0x2d: /* sub eax, imm32 */ \
32376+ case 0x25: /* and eax, imm32 */ \
32377+ case 0x0d: /* or eax, imm32 */ \
32378+ case 0xb8: /* mov eax, imm32 */ \
32379+ case 0x35: /* xor eax, imm32 */ \
32380+ case 0x3d: /* cmp eax, imm32 */ \
32381+ case 0xa9: /* test eax, imm32 */ \
32382+ DILUTE_CONST_SEQUENCE(_off, randkey); \
32383+ EMIT2((b1) - 4, 0xc8); /* convert imm instruction to eax, ecx */\
32384+ break; \
32385+ case 0xbb: /* mov ebx, imm32 */ \
32386+ DILUTE_CONST_SEQUENCE(_off, randkey); \
32387+ /* mov ebx, ecx */ \
32388+ EMIT2(0x89, 0xcb); \
32389+ break; \
32390+ case 0xbe: /* mov esi, imm32 */ \
32391+ DILUTE_CONST_SEQUENCE(_off, randkey); \
32392+ /* mov esi, ecx */ \
32393+ EMIT2(0x89, 0xce); \
32394+ break; \
32395+ case 0xe8: /* call rel imm32, always to known funcs */ \
32396+ EMIT1(b1); \
32397+ EMIT(_off, 4); \
32398+ break; \
32399+ case 0xe9: /* jmp rel imm32 */ \
32400+ EMIT1(b1); \
32401+ EMIT(_off, 4); \
32402+ /* prevent fall-through, we're not called if off = 0 */ \
32403+ EMIT(0xcccccccc, 4); \
32404+ EMIT(0xcccccccc, 4); \
32405+ break; \
32406+ default: \
32407+ BUILD_BUG(); \
32408+ } \
32409+} while (0)
32410+
32411+#define EMIT2_off32(b1, b2, _off) \
32412+do { \
32413+ if ((b1) == 0x8d && (b2) == 0xb3) { /* lea esi, [rbx+imm32] */ \
32414+ EMIT2(0x8d, 0xb3); /* lea esi, [rbx+randkey] */ \
32415+ EMIT(randkey, 4); \
32416+ EMIT2(0x8d, 0xb6); /* lea esi, [esi+off-randkey] */ \
32417+ EMIT((_off) - randkey, 4); \
32418+ } else if ((b1) == 0x69 && (b2) == 0xc0) { /* imul eax, imm32 */\
32419+ DILUTE_CONST_SEQUENCE(_off, randkey); \
32420+ /* imul eax, ecx */ \
32421+ EMIT3(0x0f, 0xaf, 0xc1); \
32422+ } else { \
32423+ BUILD_BUG(); \
32424+ } \
32425+} while (0)
32426+#else
32427 #define EMIT1_off32(b1, off) do { EMIT1(b1); EMIT(off, 4);} while (0)
32428+#define EMIT2_off32(b1, b2, off) do { EMIT2(b1, b2); EMIT(off, 4);} while (0)
32429+#endif
32430
32431 #define CLEAR_A() EMIT2(0x31, 0xc0) /* xor %eax,%eax */
32432 #define CLEAR_X() EMIT2(0x31, 0xdb) /* xor %ebx,%ebx */
32433@@ -90,6 +168,24 @@ do { \
32434 #define X86_JBE 0x76
32435 #define X86_JA 0x77
32436
32437+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
32438+#define APPEND_FLOW_VERIFY() \
32439+do { \
32440+ /* mov ecx, randkey */ \
32441+ EMIT1(0xb9); \
32442+ EMIT(randkey, 4); \
32443+ /* cmp ecx, randkey */ \
32444+ EMIT2(0x81, 0xf9); \
32445+ EMIT(randkey, 4); \
32446+ /* jz after 8 int 3s */ \
32447+ EMIT2(0x74, 0x08); \
32448+ EMIT(0xcccccccc, 4); \
32449+ EMIT(0xcccccccc, 4); \
32450+} while (0)
32451+#else
32452+#define APPEND_FLOW_VERIFY() do { } while (0)
32453+#endif
32454+
32455 #define EMIT_COND_JMP(op, offset) \
32456 do { \
32457 if (is_near(offset)) \
32458@@ -97,6 +193,7 @@ do { \
32459 else { \
32460 EMIT2(0x0f, op + 0x10); \
32461 EMIT(offset, 4); /* jxx .+off32 */ \
32462+ APPEND_FLOW_VERIFY(); \
32463 } \
32464 } while (0)
32465
32466@@ -121,6 +218,11 @@ static inline void bpf_flush_icache(void *start, void *end)
32467 set_fs(old_fs);
32468 }
32469
32470+struct bpf_jit_work {
32471+ struct work_struct work;
32472+ void *image;
32473+};
32474+
32475 #define CHOOSE_LOAD_FUNC(K, func) \
32476 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
32477
32478@@ -146,7 +248,7 @@ static int pkt_type_offset(void)
32479
32480 void bpf_jit_compile(struct sk_filter *fp)
32481 {
32482- u8 temp[64];
32483+ u8 temp[MAX_INSTR_CODE_SIZE];
32484 u8 *prog;
32485 unsigned int proglen, oldproglen = 0;
32486 int ilen, i;
32487@@ -159,6 +261,9 @@ void bpf_jit_compile(struct sk_filter *fp)
32488 unsigned int *addrs;
32489 const struct sock_filter *filter = fp->insns;
32490 int flen = fp->len;
32491+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
32492+ unsigned int randkey;
32493+#endif
32494
32495 if (!bpf_jit_enable)
32496 return;
32497@@ -167,11 +272,19 @@ void bpf_jit_compile(struct sk_filter *fp)
32498 if (addrs == NULL)
32499 return;
32500
32501+ fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
32502+ if (!fp->work)
32503+ goto out;
32504+
32505+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
32506+ randkey = get_random_int();
32507+#endif
32508+
32509 /* Before first pass, make a rough estimation of addrs[]
32510- * each bpf instruction is translated to less than 64 bytes
32511+ * each bpf instruction is translated to less than MAX_INSTR_CODE_SIZE bytes
32512 */
32513 for (proglen = 0, i = 0; i < flen; i++) {
32514- proglen += 64;
32515+ proglen += MAX_INSTR_CODE_SIZE;
32516 addrs[i] = proglen;
32517 }
32518 cleanup_addr = proglen; /* epilogue address */
32519@@ -282,10 +395,8 @@ void bpf_jit_compile(struct sk_filter *fp)
32520 case BPF_S_ALU_MUL_K: /* A *= K */
32521 if (is_imm8(K))
32522 EMIT3(0x6b, 0xc0, K); /* imul imm8,%eax,%eax */
32523- else {
32524- EMIT2(0x69, 0xc0); /* imul imm32,%eax */
32525- EMIT(K, 4);
32526- }
32527+ else
32528+ EMIT2_off32(0x69, 0xc0, K); /* imul imm32,%eax */
32529 break;
32530 case BPF_S_ALU_DIV_X: /* A /= X; */
32531 seen |= SEEN_XREG;
32532@@ -325,13 +436,23 @@ void bpf_jit_compile(struct sk_filter *fp)
32533 break;
32534 case BPF_S_ALU_MOD_K: /* A %= K; */
32535 EMIT2(0x31, 0xd2); /* xor %edx,%edx */
32536+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
32537+ DILUTE_CONST_SEQUENCE(K, randkey);
32538+#else
32539 EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
32540+#endif
32541 EMIT2(0xf7, 0xf1); /* div %ecx */
32542 EMIT2(0x89, 0xd0); /* mov %edx,%eax */
32543 break;
32544 case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
32545+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
32546+ DILUTE_CONST_SEQUENCE(K, randkey);
32547+ /* imul rax, rcx */
32548+ EMIT4(0x48, 0x0f, 0xaf, 0xc1);
32549+#else
32550 EMIT3(0x48, 0x69, 0xc0); /* imul imm32,%rax,%rax */
32551 EMIT(K, 4);
32552+#endif
32553 EMIT4(0x48, 0xc1, 0xe8, 0x20); /* shr $0x20,%rax */
32554 break;
32555 case BPF_S_ALU_AND_X:
32556@@ -602,8 +723,7 @@ common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG;
32557 if (is_imm8(K)) {
32558 EMIT3(0x8d, 0x73, K); /* lea imm8(%rbx), %esi */
32559 } else {
32560- EMIT2(0x8d, 0xb3); /* lea imm32(%rbx),%esi */
32561- EMIT(K, 4);
32562+ EMIT2_off32(0x8d, 0xb3, K); /* lea imm32(%rbx),%esi */
32563 }
32564 } else {
32565 EMIT2(0x89,0xde); /* mov %ebx,%esi */
32566@@ -686,17 +806,18 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
32567 break;
32568 default:
32569 /* hmm, too complex filter, give up with jit compiler */
32570- goto out;
32571+ goto error;
32572 }
32573 ilen = prog - temp;
32574 if (image) {
32575 if (unlikely(proglen + ilen > oldproglen)) {
32576 				pr_err("bpf_jit_compile fatal error\n");
32577- kfree(addrs);
32578- module_free(NULL, image);
32579- return;
32580+ module_free_exec(NULL, image);
32581+ goto error;
32582 }
32583+ pax_open_kernel();
32584 memcpy(image + proglen, temp, ilen);
32585+ pax_close_kernel();
32586 }
32587 proglen += ilen;
32588 addrs[i] = proglen;
32589@@ -717,11 +838,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
32590 break;
32591 }
32592 if (proglen == oldproglen) {
32593- image = module_alloc(max_t(unsigned int,
32594- proglen,
32595- sizeof(struct work_struct)));
32596+ image = module_alloc_exec(proglen);
32597 if (!image)
32598- goto out;
32599+ goto error;
32600 }
32601 oldproglen = proglen;
32602 }
32603@@ -732,7 +851,10 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
32604 if (image) {
32605 bpf_flush_icache(image, image + proglen);
32606 fp->bpf_func = (void *)image;
32607- }
32608+ } else
32609+error:
32610+ kfree(fp->work);
32611+
32612 out:
32613 kfree(addrs);
32614 return;
32615@@ -740,18 +862,20 @@ out:
32616
32617 static void jit_free_defer(struct work_struct *arg)
32618 {
32619- module_free(NULL, arg);
32620+ module_free_exec(NULL, ((struct bpf_jit_work *)arg)->image);
32621+ kfree(arg);
32622 }
32623
32624 /* run from softirq, we must use a work_struct to call
32625- * module_free() from process context
32626+ * module_free_exec() from process context
32627 */
32628 void bpf_jit_free(struct sk_filter *fp)
32629 {
32630 if (fp->bpf_func != sk_run_filter) {
32631- struct work_struct *work = (struct work_struct *)fp->bpf_func;
32632+ struct work_struct *work = &fp->work->work;
32633
32634 INIT_WORK(work, jit_free_defer);
32635+ fp->work->image = fp->bpf_func;
32636 schedule_work(work);
32637 }
32638 }
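
The freeing side shows why struct bpf_jit_work exists: bpf_jit_free() runs from softirq, where module_free_exec() must not be called, so the real free is bounced to process context through a work item. A minimal kernel-style sketch of the same pattern (struct deferred_free and its helpers are hypothetical names; vfree() stands in for module_free_exec()). The patch casts the work_struct pointer directly because work is the first member; container_of() below is the tidier equivalent:

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

struct deferred_free {
	struct work_struct work;       /* first member, like bpf_jit_work */
	void *image;                   /* executable memory to release */
};

static void deferred_free_fn(struct work_struct *w)
{
	struct deferred_free *df = container_of(w, struct deferred_free, work);

	vfree(df->image);              /* safe here: process context */
	kfree(df);
}

/* Callable from softirq: queue the free instead of performing it. */
static void free_image_atomic(struct deferred_free *df)
{
	INIT_WORK(&df->work, deferred_free_fn);
	schedule_work(&df->work);
}
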
32639diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
32640index d6aa6e8..266395a 100644
32641--- a/arch/x86/oprofile/backtrace.c
32642+++ b/arch/x86/oprofile/backtrace.c
32643@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
32644 struct stack_frame_ia32 *fp;
32645 unsigned long bytes;
32646
32647- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
32648+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
32649 if (bytes != sizeof(bufhead))
32650 return NULL;
32651
32652- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
32653+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
32654
32655 oprofile_add_trace(bufhead[0].return_address);
32656
32657@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
32658 struct stack_frame bufhead[2];
32659 unsigned long bytes;
32660
32661- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
32662+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
32663 if (bytes != sizeof(bufhead))
32664 return NULL;
32665
32666@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
32667 {
32668 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
32669
32670- if (!user_mode_vm(regs)) {
32671+ if (!user_mode(regs)) {
32672 unsigned long stack = kernel_stack_pointer(regs);
32673 if (depth)
32674 dump_trace(NULL, regs, (unsigned long *)stack, 0,
32675diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
32676index 48768df..ba9143c 100644
32677--- a/arch/x86/oprofile/nmi_int.c
32678+++ b/arch/x86/oprofile/nmi_int.c
32679@@ -23,6 +23,7 @@
32680 #include <asm/nmi.h>
32681 #include <asm/msr.h>
32682 #include <asm/apic.h>
32683+#include <asm/pgtable.h>
32684
32685 #include "op_counter.h"
32686 #include "op_x86_model.h"
32687@@ -774,8 +775,11 @@ int __init op_nmi_init(struct oprofile_operations *ops)
32688 if (ret)
32689 return ret;
32690
32691- if (!model->num_virt_counters)
32692- model->num_virt_counters = model->num_counters;
32693+ if (!model->num_virt_counters) {
32694+ pax_open_kernel();
32695+ *(unsigned int *)&model->num_virt_counters = model->num_counters;
32696+ pax_close_kernel();
32697+ }
32698
32699 mux_init(ops);
32700
32701diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
32702index b2b9443..be58856 100644
32703--- a/arch/x86/oprofile/op_model_amd.c
32704+++ b/arch/x86/oprofile/op_model_amd.c
32705@@ -519,9 +519,11 @@ static int op_amd_init(struct oprofile_operations *ops)
32706 num_counters = AMD64_NUM_COUNTERS;
32707 }
32708
32709- op_amd_spec.num_counters = num_counters;
32710- op_amd_spec.num_controls = num_counters;
32711- op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
32712+ pax_open_kernel();
32713+ *(unsigned int *)&op_amd_spec.num_counters = num_counters;
32714+ *(unsigned int *)&op_amd_spec.num_controls = num_counters;
32715+ *(unsigned int *)&op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
32716+ pax_close_kernel();
32717
32718 return 0;
32719 }
32720diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
32721index d90528e..0127e2b 100644
32722--- a/arch/x86/oprofile/op_model_ppro.c
32723+++ b/arch/x86/oprofile/op_model_ppro.c
32724@@ -19,6 +19,7 @@
32725 #include <asm/msr.h>
32726 #include <asm/apic.h>
32727 #include <asm/nmi.h>
32728+#include <asm/pgtable.h>
32729
32730 #include "op_x86_model.h"
32731 #include "op_counter.h"
32732@@ -221,8 +222,10 @@ static void arch_perfmon_setup_counters(void)
32733
32734 num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
32735
32736- op_arch_perfmon_spec.num_counters = num_counters;
32737- op_arch_perfmon_spec.num_controls = num_counters;
32738+ pax_open_kernel();
32739+ *(unsigned int *)&op_arch_perfmon_spec.num_counters = num_counters;
32740+ *(unsigned int *)&op_arch_perfmon_spec.num_controls = num_counters;
32741+ pax_close_kernel();
32742 }
32743
32744 static int arch_perfmon_init(struct oprofile_operations *ignore)
32745diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
32746index 71e8a67..6a313bb 100644
32747--- a/arch/x86/oprofile/op_x86_model.h
32748+++ b/arch/x86/oprofile/op_x86_model.h
32749@@ -52,7 +52,7 @@ struct op_x86_model_spec {
32750 void (*switch_ctrl)(struct op_x86_model_spec const *model,
32751 struct op_msrs const * const msrs);
32752 #endif
32753-};
32754+} __do_const;
32755
32756 struct op_counter_config;
32757
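
The __do_const attribute comes from PaX's constify gcc plugin and moves such ops structures into read-only memory; the few fields still computed at boot (the op_amd_init() and arch_perfmon_setup_counters() hunks above) are then written through pax_open_kernel()/pax_close_kernel(), which briefly lift kernel write protection. The recurring idiom, sketched with a hypothetical ops structure:

struct my_ops {
	unsigned int num_counters;     /* computed once at boot */
	int (*init)(void);             /* never changes after link time */
} __do_const;                          /* constify plugin: lands in .rodata */

static struct my_ops my_spec;

static int __init my_probe(unsigned int counters)
{
	pax_open_kernel();             /* e.g. temporarily clear CR0.WP */
	*(unsigned int *)&my_spec.num_counters = counters; /* cast away const */
	pax_close_kernel();
	return 0;
}
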
32758diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
32759index e9e6ed5..e47ae67 100644
32760--- a/arch/x86/pci/amd_bus.c
32761+++ b/arch/x86/pci/amd_bus.c
32762@@ -337,7 +337,7 @@ static int __cpuinit amd_cpu_notify(struct notifier_block *self,
32763 return NOTIFY_OK;
32764 }
32765
32766-static struct notifier_block __cpuinitdata amd_cpu_notifier = {
32767+static struct notifier_block amd_cpu_notifier = {
32768 .notifier_call = amd_cpu_notify,
32769 };
32770
32771diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
32772index 372e9b8..e775a6c 100644
32773--- a/arch/x86/pci/irq.c
32774+++ b/arch/x86/pci/irq.c
32775@@ -50,7 +50,7 @@ struct irq_router {
32776 struct irq_router_handler {
32777 u16 vendor;
32778 int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
32779-};
32780+} __do_const;
32781
32782 int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
32783 void (*pcibios_disable_irq)(struct pci_dev *dev) = NULL;
32784@@ -794,7 +794,7 @@ static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router
32785 return 0;
32786 }
32787
32788-static __initdata struct irq_router_handler pirq_routers[] = {
32789+static __initconst const struct irq_router_handler pirq_routers[] = {
32790 { PCI_VENDOR_ID_INTEL, intel_router_probe },
32791 { PCI_VENDOR_ID_AL, ali_router_probe },
32792 { PCI_VENDOR_ID_ITE, ite_router_probe },
32793@@ -821,7 +821,7 @@ static struct pci_dev *pirq_router_dev;
32794 static void __init pirq_find_router(struct irq_router *r)
32795 {
32796 struct irq_routing_table *rt = pirq_table;
32797- struct irq_router_handler *h;
32798+ const struct irq_router_handler *h;
32799
32800 #ifdef CONFIG_PCI_BIOS
32801 if (!rt->signature) {
32802@@ -1094,7 +1094,7 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d)
32803 return 0;
32804 }
32805
32806-static struct dmi_system_id __initdata pciirq_dmi_table[] = {
32807+static const struct dmi_system_id __initconst pciirq_dmi_table[] = {
32808 {
32809 .callback = fix_broken_hp_bios_irq9,
32810 .ident = "HP Pavilion N5400 Series Laptop",
32811diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
32812index 6eb18c4..20d83de 100644
32813--- a/arch/x86/pci/mrst.c
32814+++ b/arch/x86/pci/mrst.c
32815@@ -238,7 +238,9 @@ int __init pci_mrst_init(void)
32816 printk(KERN_INFO "Intel MID platform detected, using MID PCI ops\n");
32817 pci_mmcfg_late_init();
32818 pcibios_enable_irq = mrst_pci_irq_enable;
32819- pci_root_ops = pci_mrst_ops;
32820+ pax_open_kernel();
32821+ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
32822+ pax_close_kernel();
32823 pci_soc_mode = 1;
32824 /* Continue with standard init */
32825 return 1;
32826diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
32827index c77b24a..c979855 100644
32828--- a/arch/x86/pci/pcbios.c
32829+++ b/arch/x86/pci/pcbios.c
32830@@ -79,7 +79,7 @@ union bios32 {
32831 static struct {
32832 unsigned long address;
32833 unsigned short segment;
32834-} bios32_indirect = { 0, __KERNEL_CS };
32835+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
32836
32837 /*
32838 * Returns the entry point for the given service, NULL on error
32839@@ -92,37 +92,80 @@ static unsigned long bios32_service(unsigned long service)
32840 unsigned long length; /* %ecx */
32841 unsigned long entry; /* %edx */
32842 unsigned long flags;
32843+ struct desc_struct d, *gdt;
32844
32845 local_irq_save(flags);
32846- __asm__("lcall *(%%edi); cld"
32847+
32848+ gdt = get_cpu_gdt_table(smp_processor_id());
32849+
32850+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
32851+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
32852+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
32853+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
32854+
32855+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
32856 : "=a" (return_code),
32857 "=b" (address),
32858 "=c" (length),
32859 "=d" (entry)
32860 : "0" (service),
32861 "1" (0),
32862- "D" (&bios32_indirect));
32863+ "D" (&bios32_indirect),
32864+ "r"(__PCIBIOS_DS)
32865+ : "memory");
32866+
32867+ pax_open_kernel();
32868+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
32869+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
32870+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
32871+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
32872+ pax_close_kernel();
32873+
32874 local_irq_restore(flags);
32875
32876 switch (return_code) {
32877- case 0:
32878- return address + entry;
32879- case 0x80: /* Not present */
32880- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
32881- return 0;
32882- default: /* Shouldn't happen */
32883- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
32884- service, return_code);
32885+ case 0: {
32886+ int cpu;
32887+ unsigned char flags;
32888+
32889+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
32890+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
32891+ printk(KERN_WARNING "bios32_service: not valid\n");
32892 return 0;
32893+ }
32894+ address = address + PAGE_OFFSET;
32895+ length += 16UL; /* some BIOSs underreport this... */
32896+ flags = 4;
32897+ if (length >= 64*1024*1024) {
32898+ length >>= PAGE_SHIFT;
32899+ flags |= 8;
32900+ }
32901+
32902+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
32903+ gdt = get_cpu_gdt_table(cpu);
32904+ pack_descriptor(&d, address, length, 0x9b, flags);
32905+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
32906+ pack_descriptor(&d, address, length, 0x93, flags);
32907+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
32908+ }
32909+ return entry;
32910+ }
32911+ case 0x80: /* Not present */
32912+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
32913+ return 0;
32914+ default: /* Shouldn't happen */
32915+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
32916+ service, return_code);
32917+ return 0;
32918 }
32919 }
32920
32921 static struct {
32922 unsigned long address;
32923 unsigned short segment;
32924-} pci_indirect = { 0, __KERNEL_CS };
32925+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
32926
32927-static int pci_bios_present;
32928+static int pci_bios_present __read_only;
32929
32930 static int check_pcibios(void)
32931 {
32932@@ -131,11 +174,13 @@ static int check_pcibios(void)
32933 unsigned long flags, pcibios_entry;
32934
32935 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
32936- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
32937+ pci_indirect.address = pcibios_entry;
32938
32939 local_irq_save(flags);
32940- __asm__(
32941- "lcall *(%%edi); cld\n\t"
32942+ __asm__("movw %w6, %%ds\n\t"
32943+ "lcall *%%ss:(%%edi); cld\n\t"
32944+ "push %%ss\n\t"
32945+ "pop %%ds\n\t"
32946 "jc 1f\n\t"
32947 "xor %%ah, %%ah\n"
32948 "1:"
32949@@ -144,7 +189,8 @@ static int check_pcibios(void)
32950 "=b" (ebx),
32951 "=c" (ecx)
32952 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
32953- "D" (&pci_indirect)
32954+ "D" (&pci_indirect),
32955+ "r" (__PCIBIOS_DS)
32956 : "memory");
32957 local_irq_restore(flags);
32958
32959@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
32960
32961 switch (len) {
32962 case 1:
32963- __asm__("lcall *(%%esi); cld\n\t"
32964+ __asm__("movw %w6, %%ds\n\t"
32965+ "lcall *%%ss:(%%esi); cld\n\t"
32966+ "push %%ss\n\t"
32967+ "pop %%ds\n\t"
32968 "jc 1f\n\t"
32969 "xor %%ah, %%ah\n"
32970 "1:"
32971@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
32972 : "1" (PCIBIOS_READ_CONFIG_BYTE),
32973 "b" (bx),
32974 "D" ((long)reg),
32975- "S" (&pci_indirect));
32976+ "S" (&pci_indirect),
32977+ "r" (__PCIBIOS_DS));
32978 /*
32979 * Zero-extend the result beyond 8 bits, do not trust the
32980 * BIOS having done it:
32981@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
32982 *value &= 0xff;
32983 break;
32984 case 2:
32985- __asm__("lcall *(%%esi); cld\n\t"
32986+ __asm__("movw %w6, %%ds\n\t"
32987+ "lcall *%%ss:(%%esi); cld\n\t"
32988+ "push %%ss\n\t"
32989+ "pop %%ds\n\t"
32990 "jc 1f\n\t"
32991 "xor %%ah, %%ah\n"
32992 "1:"
32993@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
32994 : "1" (PCIBIOS_READ_CONFIG_WORD),
32995 "b" (bx),
32996 "D" ((long)reg),
32997- "S" (&pci_indirect));
32998+ "S" (&pci_indirect),
32999+ "r" (__PCIBIOS_DS));
33000 /*
33001 * Zero-extend the result beyond 16 bits, do not trust the
33002 * BIOS having done it:
33003@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
33004 *value &= 0xffff;
33005 break;
33006 case 4:
33007- __asm__("lcall *(%%esi); cld\n\t"
33008+ __asm__("movw %w6, %%ds\n\t"
33009+ "lcall *%%ss:(%%esi); cld\n\t"
33010+ "push %%ss\n\t"
33011+ "pop %%ds\n\t"
33012 "jc 1f\n\t"
33013 "xor %%ah, %%ah\n"
33014 "1:"
33015@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
33016 : "1" (PCIBIOS_READ_CONFIG_DWORD),
33017 "b" (bx),
33018 "D" ((long)reg),
33019- "S" (&pci_indirect));
33020+ "S" (&pci_indirect),
33021+ "r" (__PCIBIOS_DS));
33022 break;
33023 }
33024
33025@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
33026
33027 switch (len) {
33028 case 1:
33029- __asm__("lcall *(%%esi); cld\n\t"
33030+ __asm__("movw %w6, %%ds\n\t"
33031+ "lcall *%%ss:(%%esi); cld\n\t"
33032+ "push %%ss\n\t"
33033+ "pop %%ds\n\t"
33034 "jc 1f\n\t"
33035 "xor %%ah, %%ah\n"
33036 "1:"
33037@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
33038 "c" (value),
33039 "b" (bx),
33040 "D" ((long)reg),
33041- "S" (&pci_indirect));
33042+ "S" (&pci_indirect),
33043+ "r" (__PCIBIOS_DS));
33044 break;
33045 case 2:
33046- __asm__("lcall *(%%esi); cld\n\t"
33047+ __asm__("movw %w6, %%ds\n\t"
33048+ "lcall *%%ss:(%%esi); cld\n\t"
33049+ "push %%ss\n\t"
33050+ "pop %%ds\n\t"
33051 "jc 1f\n\t"
33052 "xor %%ah, %%ah\n"
33053 "1:"
33054@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
33055 "c" (value),
33056 "b" (bx),
33057 "D" ((long)reg),
33058- "S" (&pci_indirect));
33059+ "S" (&pci_indirect),
33060+ "r" (__PCIBIOS_DS));
33061 break;
33062 case 4:
33063- __asm__("lcall *(%%esi); cld\n\t"
33064+ __asm__("movw %w6, %%ds\n\t"
33065+ "lcall *%%ss:(%%esi); cld\n\t"
33066+ "push %%ss\n\t"
33067+ "pop %%ds\n\t"
33068 "jc 1f\n\t"
33069 "xor %%ah, %%ah\n"
33070 "1:"
33071@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
33072 "c" (value),
33073 "b" (bx),
33074 "D" ((long)reg),
33075- "S" (&pci_indirect));
33076+ "S" (&pci_indirect),
33077+ "r" (__PCIBIOS_DS));
33078 break;
33079 }
33080
33081@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
33082
33083 DBG("PCI: Fetching IRQ routing table... ");
33084 __asm__("push %%es\n\t"
33085+ "movw %w8, %%ds\n\t"
33086 "push %%ds\n\t"
33087 "pop %%es\n\t"
33088- "lcall *(%%esi); cld\n\t"
33089+ "lcall *%%ss:(%%esi); cld\n\t"
33090 "pop %%es\n\t"
33091+ "push %%ss\n\t"
33092+ "pop %%ds\n"
33093 "jc 1f\n\t"
33094 "xor %%ah, %%ah\n"
33095 "1:"
33096@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
33097 "1" (0),
33098 "D" ((long) &opt),
33099 "S" (&pci_indirect),
33100- "m" (opt)
33101+ "m" (opt),
33102+ "r" (__PCIBIOS_DS)
33103 : "memory");
33104 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
33105 if (ret & 0xff00)
33106@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
33107 {
33108 int ret;
33109
33110- __asm__("lcall *(%%esi); cld\n\t"
33111+ __asm__("movw %w5, %%ds\n\t"
33112+ "lcall *%%ss:(%%esi); cld\n\t"
33113+ "push %%ss\n\t"
33114+ "pop %%ds\n"
33115 "jc 1f\n\t"
33116 "xor %%ah, %%ah\n"
33117 "1:"
33118@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
33119 : "0" (PCIBIOS_SET_PCI_HW_INT),
33120 "b" ((dev->bus->number << 8) | dev->devfn),
33121 "c" ((irq << 8) | (pin + 10)),
33122- "S" (&pci_indirect));
33123+ "S" (&pci_indirect),
33124+ "r" (__PCIBIOS_DS));
33125 return !(ret & 0xff00);
33126 }
33127 EXPORT_SYMBOL(pcibios_set_irq_routing);
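
The thread running through all of these pcbios.c hunks: with KERNEXEC the kernel's CS is no longer a flat 4 GiB segment, so the BIOS can no longer borrow __KERNEL_CS/__KERNEL_DS. bios32_service() instead installs dedicated flat ring-0 code and data descriptors on every CPU, and each call site switches %ds to __PCIBIOS_DS around its lcall. Condensed to its core, assuming the usual arch/x86 descriptor helpers:

#include <asm/desc.h>

/* Map the discovered BIOS32 area with flat __PCIBIOS_CS/__PCIBIOS_DS
 * descriptors on every CPU (0x9b = present ring-0 code, 0x93 = data). */
static void install_pcibios_segments(unsigned long base, unsigned long limit,
				     unsigned char flags)
{
	struct desc_struct d;
	unsigned int cpu;

	for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
		struct desc_struct *gdt = get_cpu_gdt_table(cpu);

		pack_descriptor(&d, base, limit, 0x9b, flags);
		write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
		pack_descriptor(&d, base, limit, 0x93, flags);
		write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
	}
}
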
33128diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
33129index 40e4469..d915bf9 100644
33130--- a/arch/x86/platform/efi/efi_32.c
33131+++ b/arch/x86/platform/efi/efi_32.c
33132@@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
33133 {
33134 struct desc_ptr gdt_descr;
33135
33136+#ifdef CONFIG_PAX_KERNEXEC
33137+ struct desc_struct d;
33138+#endif
33139+
33140 local_irq_save(efi_rt_eflags);
33141
33142 load_cr3(initial_page_table);
33143 __flush_tlb_all();
33144
33145+#ifdef CONFIG_PAX_KERNEXEC
33146+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
33147+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
33148+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
33149+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
33150+#endif
33151+
33152 gdt_descr.address = __pa(get_cpu_gdt_table(0));
33153 gdt_descr.size = GDT_SIZE - 1;
33154 load_gdt(&gdt_descr);
33155@@ -58,11 +69,24 @@ void efi_call_phys_epilog(void)
33156 {
33157 struct desc_ptr gdt_descr;
33158
33159+#ifdef CONFIG_PAX_KERNEXEC
33160+ struct desc_struct d;
33161+
33162+ memset(&d, 0, sizeof d);
33163+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
33164+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
33165+#endif
33166+
33167 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
33168 gdt_descr.size = GDT_SIZE - 1;
33169 load_gdt(&gdt_descr);
33170
33171+#ifdef CONFIG_PAX_PER_CPU_PGD
33172+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
33173+#else
33174 load_cr3(swapper_pg_dir);
33175+#endif
33176+
33177 __flush_tlb_all();
33178
33179 local_irq_restore(efi_rt_eflags);
33180diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
33181index 39a0e7f1..872396e 100644
33182--- a/arch/x86/platform/efi/efi_64.c
33183+++ b/arch/x86/platform/efi/efi_64.c
33184@@ -76,6 +76,11 @@ void __init efi_call_phys_prelog(void)
33185 vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
33186 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
33187 }
33188+
33189+#ifdef CONFIG_PAX_PER_CPU_PGD
33190+ load_cr3(swapper_pg_dir);
33191+#endif
33192+
33193 __flush_tlb_all();
33194 }
33195
33196@@ -89,6 +94,11 @@ void __init efi_call_phys_epilog(void)
33197 for (pgd = 0; pgd < n_pgds; pgd++)
33198 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
33199 kfree(save_pgd);
33200+
33201+#ifdef CONFIG_PAX_PER_CPU_PGD
33202+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
33203+#endif
33204+
33205 __flush_tlb_all();
33206 local_irq_restore(efi_flags);
33207 early_code_mapping_set_exec(0);
33208diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
33209index fbe66e6..eae5e38 100644
33210--- a/arch/x86/platform/efi/efi_stub_32.S
33211+++ b/arch/x86/platform/efi/efi_stub_32.S
33212@@ -6,7 +6,9 @@
33213 */
33214
33215 #include <linux/linkage.h>
33216+#include <linux/init.h>
33217 #include <asm/page_types.h>
33218+#include <asm/segment.h>
33219
33220 /*
33221 * efi_call_phys(void *, ...) is a function with variable parameters.
33222@@ -20,7 +22,7 @@
33223 * service functions will comply with gcc calling convention, too.
33224 */
33225
33226-.text
33227+__INIT
33228 ENTRY(efi_call_phys)
33229 /*
33230 * 0. The function can only be called in Linux kernel. So CS has been
33231@@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
33232 * The mapping of lower virtual memory has been created in prelog and
33233 * epilog.
33234 */
33235- movl $1f, %edx
33236- subl $__PAGE_OFFSET, %edx
33237- jmp *%edx
33238+#ifdef CONFIG_PAX_KERNEXEC
33239+ movl $(__KERNEXEC_EFI_DS), %edx
33240+ mov %edx, %ds
33241+ mov %edx, %es
33242+ mov %edx, %ss
33243+ addl $2f,(1f)
33244+ ljmp *(1f)
33245+
33246+__INITDATA
33247+1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
33248+.previous
33249+
33250+2:
33251+ subl $2b,(1b)
33252+#else
33253+ jmp 1f-__PAGE_OFFSET
33254 1:
33255+#endif
33256
33257 /*
33258 * 2. Now on the top of stack is the return
33259@@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
33260 * parameter 2, ..., param n. To make things easy, we save the return
33261 * address of efi_call_phys in a global variable.
33262 */
33263- popl %edx
33264- movl %edx, saved_return_addr
33265- /* get the function pointer into ECX*/
33266- popl %ecx
33267- movl %ecx, efi_rt_function_ptr
33268- movl $2f, %edx
33269- subl $__PAGE_OFFSET, %edx
33270- pushl %edx
33271+ popl (saved_return_addr)
33272+ popl (efi_rt_function_ptr)
33273
33274 /*
33275 * 3. Clear PG bit in %CR0.
33276@@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
33277 /*
33278 * 5. Call the physical function.
33279 */
33280- jmp *%ecx
33281+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
33282
33283-2:
33284 /*
33285 * 6. After EFI runtime service returns, control will return to
33286 * following instruction. We'd better readjust stack pointer first.
33287@@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
33288 movl %cr0, %edx
33289 orl $0x80000000, %edx
33290 movl %edx, %cr0
33291- jmp 1f
33292-1:
33293+
33294 /*
33295 * 8. Now restore the virtual mode from flat mode by
33296 * adding EIP with PAGE_OFFSET.
33297 */
33298- movl $1f, %edx
33299- jmp *%edx
33300+#ifdef CONFIG_PAX_KERNEXEC
33301+ movl $(__KERNEL_DS), %edx
33302+ mov %edx, %ds
33303+ mov %edx, %es
33304+ mov %edx, %ss
33305+ ljmp $(__KERNEL_CS),$1f
33306+#else
33307+ jmp 1f+__PAGE_OFFSET
33308+#endif
33309 1:
33310
33311 /*
33312 * 9. Balance the stack. And because EAX contain the return value,
33313 * we'd better not clobber it.
33314 */
33315- leal efi_rt_function_ptr, %edx
33316- movl (%edx), %ecx
33317- pushl %ecx
33318+ pushl (efi_rt_function_ptr)
33319
33320 /*
33321- * 10. Push the saved return address onto the stack and return.
33322+ * 10. Return to the saved return address.
33323 */
33324- leal saved_return_addr, %edx
33325- movl (%edx), %ecx
33326- pushl %ecx
33327- ret
33328+ jmpl *(saved_return_addr)
33329 ENDPROC(efi_call_phys)
33330 .previous
33331
33332-.data
33333+__INITDATA
33334 saved_return_addr:
33335 .long 0
33336 efi_rt_function_ptr:
33337diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
33338index 4c07cca..2c8427d 100644
33339--- a/arch/x86/platform/efi/efi_stub_64.S
33340+++ b/arch/x86/platform/efi/efi_stub_64.S
33341@@ -7,6 +7,7 @@
33342 */
33343
33344 #include <linux/linkage.h>
33345+#include <asm/alternative-asm.h>
33346
33347 #define SAVE_XMM \
33348 mov %rsp, %rax; \
33349@@ -40,6 +41,7 @@ ENTRY(efi_call0)
33350 call *%rdi
33351 addq $32, %rsp
33352 RESTORE_XMM
33353+ pax_force_retaddr 0, 1
33354 ret
33355 ENDPROC(efi_call0)
33356
33357@@ -50,6 +52,7 @@ ENTRY(efi_call1)
33358 call *%rdi
33359 addq $32, %rsp
33360 RESTORE_XMM
33361+ pax_force_retaddr 0, 1
33362 ret
33363 ENDPROC(efi_call1)
33364
33365@@ -60,6 +63,7 @@ ENTRY(efi_call2)
33366 call *%rdi
33367 addq $32, %rsp
33368 RESTORE_XMM
33369+ pax_force_retaddr 0, 1
33370 ret
33371 ENDPROC(efi_call2)
33372
33373@@ -71,6 +75,7 @@ ENTRY(efi_call3)
33374 call *%rdi
33375 addq $32, %rsp
33376 RESTORE_XMM
33377+ pax_force_retaddr 0, 1
33378 ret
33379 ENDPROC(efi_call3)
33380
33381@@ -83,6 +88,7 @@ ENTRY(efi_call4)
33382 call *%rdi
33383 addq $32, %rsp
33384 RESTORE_XMM
33385+ pax_force_retaddr 0, 1
33386 ret
33387 ENDPROC(efi_call4)
33388
33389@@ -96,6 +102,7 @@ ENTRY(efi_call5)
33390 call *%rdi
33391 addq $48, %rsp
33392 RESTORE_XMM
33393+ pax_force_retaddr 0, 1
33394 ret
33395 ENDPROC(efi_call5)
33396
33397@@ -112,5 +119,6 @@ ENTRY(efi_call6)
33398 call *%rdi
33399 addq $48, %rsp
33400 RESTORE_XMM
33401+ pax_force_retaddr 0, 1
33402 ret
33403 ENDPROC(efi_call6)
33404diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
33405index a0a0a43..a48e233 100644
33406--- a/arch/x86/platform/mrst/mrst.c
33407+++ b/arch/x86/platform/mrst/mrst.c
33408@@ -78,13 +78,15 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
33409 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
33410 int sfi_mrtc_num;
33411
33412-static void mrst_power_off(void)
33413+static __noreturn void mrst_power_off(void)
33414 {
33415+ BUG();
33416 }
33417
33418-static void mrst_reboot(void)
33419+static __noreturn void mrst_reboot(void)
33420 {
33421 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
33422+ BUG();
33423 }
33424
33425 /* parse all the mtimer info to a static mtimer array */
33426diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
33427index d6ee929..3637cb5 100644
33428--- a/arch/x86/platform/olpc/olpc_dt.c
33429+++ b/arch/x86/platform/olpc/olpc_dt.c
33430@@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned long size)
33431 return res;
33432 }
33433
33434-static struct of_pdt_ops prom_olpc_ops __initdata = {
33435+static struct of_pdt_ops prom_olpc_ops __initconst = {
33436 .nextprop = olpc_dt_nextprop,
33437 .getproplen = olpc_dt_getproplen,
33438 .getproperty = olpc_dt_getproperty,
33439diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
33440index 1cf5b30..fd45732 100644
33441--- a/arch/x86/power/cpu.c
33442+++ b/arch/x86/power/cpu.c
33443@@ -137,11 +137,8 @@ static void do_fpu_end(void)
33444 static void fix_processor_context(void)
33445 {
33446 int cpu = smp_processor_id();
33447- struct tss_struct *t = &per_cpu(init_tss, cpu);
33448-#ifdef CONFIG_X86_64
33449- struct desc_struct *desc = get_cpu_gdt_table(cpu);
33450- tss_desc tss;
33451-#endif
33452+ struct tss_struct *t = init_tss + cpu;
33453+
33454 set_tss_desc(cpu, t); /*
33455 * This just modifies memory; should not be
33456 * necessary. But... This is necessary, because
33457@@ -150,10 +147,6 @@ static void fix_processor_context(void)
33458 */
33459
33460 #ifdef CONFIG_X86_64
33461- memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
33462- tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91 */
33463- write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
33464-
33465 syscall_init(); /* This sets MSR_*STAR and related */
33466 #endif
33467 load_TR_desc(); /* This does ltr */
33468diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
33469index a44f457..9140171 100644
33470--- a/arch/x86/realmode/init.c
33471+++ b/arch/x86/realmode/init.c
33472@@ -70,7 +70,13 @@ void __init setup_real_mode(void)
33473 __va(real_mode_header->trampoline_header);
33474
33475 #ifdef CONFIG_X86_32
33476- trampoline_header->start = __pa_symbol(startup_32_smp);
33477+ trampoline_header->start = __pa_symbol(ktla_ktva(startup_32_smp));
33478+
33479+#ifdef CONFIG_PAX_KERNEXEC
33480+ trampoline_header->start -= LOAD_PHYSICAL_ADDR;
33481+#endif
33482+
33483+ trampoline_header->boot_cs = __BOOT_CS;
33484 trampoline_header->gdt_limit = __BOOT_DS + 7;
33485 trampoline_header->gdt_base = __pa_symbol(boot_gdt);
33486 #else
33487@@ -86,7 +92,7 @@ void __init setup_real_mode(void)
33488 *trampoline_cr4_features = read_cr4();
33489
33490 trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
33491- trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
33492+ trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd & ~_PAGE_NX;
33493 trampoline_pgd[511] = init_level4_pgt[511].pgd;
33494 #endif
33495 }
33496diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
33497index 8869287..d577672 100644
33498--- a/arch/x86/realmode/rm/Makefile
33499+++ b/arch/x86/realmode/rm/Makefile
33500@@ -78,5 +78,8 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ -D_WAKEUP \
33501 $(call cc-option, -fno-unit-at-a-time)) \
33502 $(call cc-option, -fno-stack-protector) \
33503 $(call cc-option, -mpreferred-stack-boundary=2)
33504+ifdef CONSTIFY_PLUGIN
33505+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
33506+endif
33507 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
33508 GCOV_PROFILE := n
33509diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
33510index a28221d..93c40f1 100644
33511--- a/arch/x86/realmode/rm/header.S
33512+++ b/arch/x86/realmode/rm/header.S
33513@@ -30,7 +30,9 @@ GLOBAL(real_mode_header)
33514 #endif
33515 /* APM/BIOS reboot */
33516 .long pa_machine_real_restart_asm
33517-#ifdef CONFIG_X86_64
33518+#ifdef CONFIG_X86_32
33519+ .long __KERNEL_CS
33520+#else
33521 .long __KERNEL32_CS
33522 #endif
33523 END(real_mode_header)
33524diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
33525index c1b2791..f9e31c7 100644
33526--- a/arch/x86/realmode/rm/trampoline_32.S
33527+++ b/arch/x86/realmode/rm/trampoline_32.S
33528@@ -25,6 +25,12 @@
33529 #include <asm/page_types.h>
33530 #include "realmode.h"
33531
33532+#ifdef CONFIG_PAX_KERNEXEC
33533+#define ta(X) (X)
33534+#else
33535+#define ta(X) (pa_ ## X)
33536+#endif
33537+
33538 .text
33539 .code16
33540
33541@@ -39,8 +45,6 @@ ENTRY(trampoline_start)
33542
33543 cli # We should be safe anyway
33544
33545- movl tr_start, %eax # where we need to go
33546-
33547 movl $0xA5A5A5A5, trampoline_status
33548 					# write marker so the master knows we're running
33549
33550@@ -56,7 +60,7 @@ ENTRY(trampoline_start)
33551 movw $1, %dx # protected mode (PE) bit
33552 lmsw %dx # into protected mode
33553
33554- ljmpl $__BOOT_CS, $pa_startup_32
33555+ ljmpl *(trampoline_header)
33556
33557 .section ".text32","ax"
33558 .code32
33559@@ -67,7 +71,7 @@ ENTRY(startup_32) # note: also used from wakeup_asm.S
33560 .balign 8
33561 GLOBAL(trampoline_header)
33562 tr_start: .space 4
33563- tr_gdt_pad: .space 2
33564+ tr_boot_cs: .space 2
33565 tr_gdt: .space 6
33566 END(trampoline_header)
33567
33568diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
33569index bb360dc..d0fd8f8 100644
33570--- a/arch/x86/realmode/rm/trampoline_64.S
33571+++ b/arch/x86/realmode/rm/trampoline_64.S
33572@@ -94,6 +94,7 @@ ENTRY(startup_32)
33573 movl %edx, %gs
33574
33575 movl pa_tr_cr4, %eax
33576+ andl $~X86_CR4_PCIDE, %eax
33577 movl %eax, %cr4 # Enable PAE mode
33578
33579 # Setup trampoline 4 level pagetables
33580@@ -107,7 +108,7 @@ ENTRY(startup_32)
33581 wrmsr
33582
33583 # Enable paging and in turn activate Long Mode
33584- movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
33585+ movl $(X86_CR0_PG | X86_CR0_PE), %eax
33586 movl %eax, %cr0
33587
33588 /*
33589diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
33590index e812034..c747134 100644
33591--- a/arch/x86/tools/Makefile
33592+++ b/arch/x86/tools/Makefile
33593@@ -37,7 +37,7 @@ $(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/in
33594
33595 $(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
33596
33597-HOST_EXTRACFLAGS += -I$(srctree)/tools/include
33598+HOST_EXTRACFLAGS += -I$(srctree)/tools/include -ggdb
33599 hostprogs-y += relocs
33600 relocs-objs := relocs_32.o relocs_64.o relocs_common.o
33601 relocs: $(obj)/relocs
33602diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
33603index f7bab68..b6d9886 100644
33604--- a/arch/x86/tools/relocs.c
33605+++ b/arch/x86/tools/relocs.c
33606@@ -1,5 +1,7 @@
33607 /* This is included from relocs_32/64.c */
33608
33609+#include "../../../include/generated/autoconf.h"
33610+
33611 #define ElfW(type) _ElfW(ELF_BITS, type)
33612 #define _ElfW(bits, type) __ElfW(bits, type)
33613 #define __ElfW(bits, type) Elf##bits##_##type
33614@@ -11,6 +13,7 @@
33615 #define Elf_Sym ElfW(Sym)
33616
33617 static Elf_Ehdr ehdr;
33618+static Elf_Phdr *phdr;
33619
33620 struct relocs {
33621 uint32_t *offset;
33622@@ -383,9 +386,39 @@ static void read_ehdr(FILE *fp)
33623 }
33624 }
33625
33626+static void read_phdrs(FILE *fp)
33627+{
33628+ unsigned int i;
33629+
33630+ phdr = calloc(ehdr.e_phnum, sizeof(Elf_Phdr));
33631+ if (!phdr) {
33632+ die("Unable to allocate %d program headers\n",
33633+ ehdr.e_phnum);
33634+ }
33635+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
33636+ die("Seek to %d failed: %s\n",
33637+ ehdr.e_phoff, strerror(errno));
33638+ }
33639+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
33640+ die("Cannot read ELF program headers: %s\n",
33641+ strerror(errno));
33642+ }
33643+ for(i = 0; i < ehdr.e_phnum; i++) {
33644+ phdr[i].p_type = elf_word_to_cpu(phdr[i].p_type);
33645+ phdr[i].p_offset = elf_off_to_cpu(phdr[i].p_offset);
33646+ phdr[i].p_vaddr = elf_addr_to_cpu(phdr[i].p_vaddr);
33647+ phdr[i].p_paddr = elf_addr_to_cpu(phdr[i].p_paddr);
33648+ phdr[i].p_filesz = elf_word_to_cpu(phdr[i].p_filesz);
33649+ phdr[i].p_memsz = elf_word_to_cpu(phdr[i].p_memsz);
33650+ phdr[i].p_flags = elf_word_to_cpu(phdr[i].p_flags);
33651+ phdr[i].p_align = elf_word_to_cpu(phdr[i].p_align);
33652+ }
33653+
33654+}
33655+
33656 static void read_shdrs(FILE *fp)
33657 {
33658- int i;
33659+ unsigned int i;
33660 Elf_Shdr shdr;
33661
33662 secs = calloc(ehdr.e_shnum, sizeof(struct section));
33663@@ -420,7 +453,7 @@ static void read_shdrs(FILE *fp)
33664
33665 static void read_strtabs(FILE *fp)
33666 {
33667- int i;
33668+ unsigned int i;
33669 for (i = 0; i < ehdr.e_shnum; i++) {
33670 struct section *sec = &secs[i];
33671 if (sec->shdr.sh_type != SHT_STRTAB) {
33672@@ -445,7 +478,7 @@ static void read_strtabs(FILE *fp)
33673
33674 static void read_symtabs(FILE *fp)
33675 {
33676- int i,j;
33677+ unsigned int i,j;
33678 for (i = 0; i < ehdr.e_shnum; i++) {
33679 struct section *sec = &secs[i];
33680 if (sec->shdr.sh_type != SHT_SYMTAB) {
33681@@ -476,9 +509,11 @@ static void read_symtabs(FILE *fp)
33682 }
33683
33684
33685-static void read_relocs(FILE *fp)
33686+static void read_relocs(FILE *fp, int use_real_mode)
33687 {
33688- int i,j;
33689+ unsigned int i,j;
33690+ uint32_t base;
33691+
33692 for (i = 0; i < ehdr.e_shnum; i++) {
33693 struct section *sec = &secs[i];
33694 if (sec->shdr.sh_type != SHT_REL_TYPE) {
33695@@ -498,9 +533,22 @@ static void read_relocs(FILE *fp)
33696 die("Cannot read symbol table: %s\n",
33697 strerror(errno));
33698 }
33699+ base = 0;
33700+
33701+#ifdef CONFIG_X86_32
33702+ for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
33703+ if (phdr[j].p_type != PT_LOAD )
33704+ continue;
33705+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
33706+ continue;
33707+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
33708+ break;
33709+ }
33710+#endif
33711+
33712 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
33713 Elf_Rel *rel = &sec->reltab[j];
33714- rel->r_offset = elf_addr_to_cpu(rel->r_offset);
33715+ rel->r_offset = elf_addr_to_cpu(rel->r_offset) + base;
33716 rel->r_info = elf_xword_to_cpu(rel->r_info);
33717 #if (SHT_REL_TYPE == SHT_RELA)
33718 rel->r_addend = elf_xword_to_cpu(rel->r_addend);
33719@@ -512,7 +560,7 @@ static void read_relocs(FILE *fp)
33720
33721 static void print_absolute_symbols(void)
33722 {
33723- int i;
33724+ unsigned int i;
33725 const char *format;
33726
33727 if (ELF_BITS == 64)
33728@@ -525,7 +573,7 @@ static void print_absolute_symbols(void)
33729 for (i = 0; i < ehdr.e_shnum; i++) {
33730 struct section *sec = &secs[i];
33731 char *sym_strtab;
33732- int j;
33733+ unsigned int j;
33734
33735 if (sec->shdr.sh_type != SHT_SYMTAB) {
33736 continue;
33737@@ -552,7 +600,7 @@ static void print_absolute_symbols(void)
33738
33739 static void print_absolute_relocs(void)
33740 {
33741- int i, printed = 0;
33742+ unsigned int i, printed = 0;
33743 const char *format;
33744
33745 if (ELF_BITS == 64)
33746@@ -565,7 +613,7 @@ static void print_absolute_relocs(void)
33747 struct section *sec_applies, *sec_symtab;
33748 char *sym_strtab;
33749 Elf_Sym *sh_symtab;
33750- int j;
33751+ unsigned int j;
33752 if (sec->shdr.sh_type != SHT_REL_TYPE) {
33753 continue;
33754 }
33755@@ -642,13 +690,13 @@ static void add_reloc(struct relocs *r, uint32_t offset)
33756 static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
33757 Elf_Sym *sym, const char *symname))
33758 {
33759- int i;
33760+ unsigned int i;
33761 /* Walk through the relocations */
33762 for (i = 0; i < ehdr.e_shnum; i++) {
33763 char *sym_strtab;
33764 Elf_Sym *sh_symtab;
33765 struct section *sec_applies, *sec_symtab;
33766- int j;
33767+ unsigned int j;
33768 struct section *sec = &secs[i];
33769
33770 if (sec->shdr.sh_type != SHT_REL_TYPE) {
33771@@ -812,6 +860,23 @@ static int do_reloc32(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
33772 {
33773 unsigned r_type = ELF32_R_TYPE(rel->r_info);
33774 int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname);
33775+ char *sym_strtab = sec->link->link->strtab;
33776+
33777+	/* Don't relocate actual per-cpu variables; they are absolute indices, not addresses */
33778+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
33779+ return 0;
33780+
33781+#ifdef CONFIG_PAX_KERNEXEC
33782+	/* Don't relocate actual code; it is relocated implicitly by the base address of KERNEL_CS */
33783+ if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
33784+ return 0;
33785+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
33786+ return 0;
33787+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
33788+ return 0;
33789+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
33790+ return 0;
33791+#endif
33792
33793 switch (r_type) {
33794 case R_386_NONE:
33795@@ -950,7 +1015,7 @@ static int write32_as_text(uint32_t v, FILE *f)
33796
33797 static void emit_relocs(int as_text, int use_real_mode)
33798 {
33799- int i;
33800+ unsigned int i;
33801 int (*write_reloc)(uint32_t, FILE *) = write32;
33802 int (*do_reloc)(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
33803 const char *symname);
33804@@ -1026,10 +1091,11 @@ void process(FILE *fp, int use_real_mode, int as_text,
33805 {
33806 regex_init(use_real_mode);
33807 read_ehdr(fp);
33808+ read_phdrs(fp);
33809 read_shdrs(fp);
33810 read_strtabs(fp);
33811 read_symtabs(fp);
33812- read_relocs(fp);
33813+ read_relocs(fp, use_real_mode);
33814 if (ELF_BITS == 64)
33815 percpu_init();
33816 if (show_absolute_syms) {
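
read_relocs() now biases every r_offset by the virtual base of the PT_LOAD segment containing the relocation's section, so a KERNEXEC i386 kernel's relocations come out as runtime addresses (CONFIG_PAGE_OFFSET + p_paddr - p_vaddr). The lookup is plain interval arithmetic over the program headers; a runnable sketch with a hypothetical one-segment table:

#include <stdint.h>
#include <stdio.h>

struct phdr { uint32_t p_type, p_offset, p_vaddr, p_paddr, p_filesz; };
#define PT_LOAD     1
#define PAGE_OFFSET 0xc0000000u        /* stand-in for CONFIG_PAGE_OFFSET */

/* Bias for addresses in the segment covering file offset sh_offset. */
static uint32_t load_base(const struct phdr *ph, int n, uint32_t sh_offset)
{
	for (int i = 0; i < n; i++) {
		if (ph[i].p_type != PT_LOAD)
			continue;
		if (sh_offset < ph[i].p_offset ||
		    sh_offset >= ph[i].p_offset + ph[i].p_filesz)
			continue;
		return PAGE_OFFSET + ph[i].p_paddr - ph[i].p_vaddr;
	}
	return 0;
}

int main(void)
{
	struct phdr ph[] = {
		/* hypothetical KERNEXEC layout: code linked at a shifted vaddr */
		{ PT_LOAD, 0x1000, 0xc1a00000u, 0x01000000u, 0x400000u },
	};
	uint32_t base = load_base(ph, 1, 0x2000);
	uint32_t r_offset = 0xc1a01234u;   /* link-time address */

	/* relocated == PAGE_OFFSET + physical address of that byte */
	printf("base=%#x relocated=%#x\n", base, r_offset + base);
	return 0;
}
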
33817diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c
33818index 80ffa5b..a33bd15 100644
33819--- a/arch/x86/um/tls_32.c
33820+++ b/arch/x86/um/tls_32.c
33821@@ -260,7 +260,7 @@ out:
33822 if (unlikely(task == current &&
33823 !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
33824 printk(KERN_ERR "get_tls_entry: task with pid %d got here "
33825- "without flushed TLS.", current->pid);
33826+ "without flushed TLS.", task_pid_nr(current));
33827 }
33828
33829 return 0;
33830diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
33831index fd14be1..e3c79c0 100644
33832--- a/arch/x86/vdso/Makefile
33833+++ b/arch/x86/vdso/Makefile
33834@@ -181,7 +181,7 @@ quiet_cmd_vdso = VDSO $@
33835 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
33836 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
33837
33838-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
33839+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
33840 GCOV_PROFILE := n
33841
33842 #
33843diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
33844index 0faad64..39ef157 100644
33845--- a/arch/x86/vdso/vdso32-setup.c
33846+++ b/arch/x86/vdso/vdso32-setup.c
33847@@ -25,6 +25,7 @@
33848 #include <asm/tlbflush.h>
33849 #include <asm/vdso.h>
33850 #include <asm/proto.h>
33851+#include <asm/mman.h>
33852
33853 enum {
33854 VDSO_DISABLED = 0,
33855@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
33856 void enable_sep_cpu(void)
33857 {
33858 int cpu = get_cpu();
33859- struct tss_struct *tss = &per_cpu(init_tss, cpu);
33860+ struct tss_struct *tss = init_tss + cpu;
33861
33862 if (!boot_cpu_has(X86_FEATURE_SEP)) {
33863 put_cpu();
33864@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
33865 gate_vma.vm_start = FIXADDR_USER_START;
33866 gate_vma.vm_end = FIXADDR_USER_END;
33867 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
33868- gate_vma.vm_page_prot = __P101;
33869+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
33870
33871 return 0;
33872 }
33873@@ -330,14 +331,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
33874 if (compat)
33875 addr = VDSO_HIGH_BASE;
33876 else {
33877- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
33878+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
33879 if (IS_ERR_VALUE(addr)) {
33880 ret = addr;
33881 goto up_fail;
33882 }
33883 }
33884
33885- current->mm->context.vdso = (void *)addr;
33886+ current->mm->context.vdso = addr;
33887
33888 if (compat_uses_vma || !compat) {
33889 /*
33890@@ -353,11 +354,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
33891 }
33892
33893 current_thread_info()->sysenter_return =
33894- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
33895+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
33896
33897 up_fail:
33898 if (ret)
33899- current->mm->context.vdso = NULL;
33900+ current->mm->context.vdso = 0;
33901
33902 up_write(&mm->mmap_sem);
33903
33904@@ -404,8 +405,14 @@ __initcall(ia32_binfmt_init);
33905
33906 const char *arch_vma_name(struct vm_area_struct *vma)
33907 {
33908- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
33909+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
33910 return "[vdso]";
33911+
33912+#ifdef CONFIG_PAX_SEGMEXEC
33913+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
33914+ return "[vdso]";
33915+#endif
33916+
33917 return NULL;
33918 }
33919
33920@@ -415,7 +422,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
33921 * Check to see if the corresponding task was created in compat vdso
33922 * mode.
33923 */
33924- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
33925+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
33926 return &gate_vma;
33927 return NULL;
33928 }
33929diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
33930index 431e875..cbb23f3 100644
33931--- a/arch/x86/vdso/vma.c
33932+++ b/arch/x86/vdso/vma.c
33933@@ -16,8 +16,6 @@
33934 #include <asm/vdso.h>
33935 #include <asm/page.h>
33936
33937-unsigned int __read_mostly vdso_enabled = 1;
33938-
33939 extern char vdso_start[], vdso_end[];
33940 extern unsigned short vdso_sync_cpuid;
33941
33942@@ -141,7 +139,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
33943 * unaligned here as a result of stack start randomization.
33944 */
33945 addr = PAGE_ALIGN(addr);
33946- addr = align_vdso_addr(addr);
33947
33948 return addr;
33949 }
33950@@ -154,30 +151,31 @@ static int setup_additional_pages(struct linux_binprm *bprm,
33951 unsigned size)
33952 {
33953 struct mm_struct *mm = current->mm;
33954- unsigned long addr;
33955+ unsigned long addr = 0;
33956 int ret;
33957
33958- if (!vdso_enabled)
33959- return 0;
33960-
33961 down_write(&mm->mmap_sem);
33962+
33963+#ifdef CONFIG_PAX_RANDMMAP
33964+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
33965+#endif
33966+
33967 addr = vdso_addr(mm->start_stack, size);
33968+ addr = align_vdso_addr(addr);
33969 addr = get_unmapped_area(NULL, addr, size, 0, 0);
33970 if (IS_ERR_VALUE(addr)) {
33971 ret = addr;
33972 goto up_fail;
33973 }
33974
33975- current->mm->context.vdso = (void *)addr;
33976+ mm->context.vdso = addr;
33977
33978 ret = install_special_mapping(mm, addr, size,
33979 VM_READ|VM_EXEC|
33980 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
33981 pages);
33982- if (ret) {
33983- current->mm->context.vdso = NULL;
33984- goto up_fail;
33985- }
33986+ if (ret)
33987+ mm->context.vdso = 0;
33988
33989 up_fail:
33990 up_write(&mm->mmap_sem);
33991@@ -197,10 +195,3 @@ int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
33992 vdsox32_size);
33993 }
33994 #endif
33995-
33996-static __init int vdso_setup(char *s)
33997-{
33998- vdso_enabled = simple_strtoul(s, NULL, 0);
33999- return 0;
34000-}
34001-__setup("vdso=", vdso_setup);
34002diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
34003index a492be2..08678da 100644
34004--- a/arch/x86/xen/enlighten.c
34005+++ b/arch/x86/xen/enlighten.c
34006@@ -123,8 +123,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
34007
34008 struct shared_info xen_dummy_shared_info;
34009
34010-void *xen_initial_gdt;
34011-
34012 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
34013 __read_mostly int xen_have_vector_callback;
34014 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
34015@@ -542,8 +540,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
34016 {
34017 unsigned long va = dtr->address;
34018 unsigned int size = dtr->size + 1;
34019- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
34020- unsigned long frames[pages];
34021+ unsigned long frames[65536 / PAGE_SIZE];
34022 int f;
34023
34024 /*
34025@@ -591,8 +588,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
34026 {
34027 unsigned long va = dtr->address;
34028 unsigned int size = dtr->size + 1;
34029- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
34030- unsigned long frames[pages];
34031+ unsigned long frames[(GDT_SIZE + PAGE_SIZE - 1) / PAGE_SIZE];
34032 int f;
34033
34034 /*
34035@@ -600,7 +596,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
34036 * 8-byte entries, or 16 4k pages..
34037 */
34038
34039- BUG_ON(size > 65536);
34040+ BUG_ON(size > GDT_SIZE);
34041 BUG_ON(va & ~PAGE_MASK);
34042
34043 for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
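
Both xen_load_gdt variants above stop sizing frames[] with a runtime value, a variable-length array on the kernel stack derived from a descriptor-supplied size, and use the architectural worst case instead: a GDT is at most 64 KiB, so 65536/PAGE_SIZE page frames always suffice and BUG_ON catches anything larger. The shape of the fix, as a runnable sketch:

#include <string.h>

#define PAGE_SIZE 4096u
#define GDT_MAX   65536u               /* 8192 descriptors x 8 bytes */

static int collect_frames(unsigned int size)
{
	/* Fixed bound replaces: unsigned long frames[pages]; (a VLA) */
	unsigned long frames[GDT_MAX / PAGE_SIZE];
	unsigned int pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;

	if (size > GDT_MAX)            /* was: BUG_ON(size > 65536) */
		return -1;
	memset(frames, 0, pages * sizeof(frames[0]));
	return (int)pages;
}

int main(void)
{
	return collect_frames(8 * 1024) == 2 ? 0 : 1;
}
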
34044@@ -985,7 +981,7 @@ static u32 xen_safe_apic_wait_icr_idle(void)
34045 return 0;
34046 }
34047
34048-static void set_xen_basic_apic_ops(void)
34049+static void __init set_xen_basic_apic_ops(void)
34050 {
34051 apic->read = xen_apic_read;
34052 apic->write = xen_apic_write;
34053@@ -1290,30 +1286,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
34054 #endif
34055 };
34056
34057-static void xen_reboot(int reason)
34058+static __noreturn void xen_reboot(int reason)
34059 {
34060 struct sched_shutdown r = { .reason = reason };
34061
34062- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
34063- BUG();
34064+ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
34065+ BUG();
34066 }
34067
34068-static void xen_restart(char *msg)
34069+static __noreturn void xen_restart(char *msg)
34070 {
34071 xen_reboot(SHUTDOWN_reboot);
34072 }
34073
34074-static void xen_emergency_restart(void)
34075+static __noreturn void xen_emergency_restart(void)
34076 {
34077 xen_reboot(SHUTDOWN_reboot);
34078 }
34079
34080-static void xen_machine_halt(void)
34081+static __noreturn void xen_machine_halt(void)
34082 {
34083 xen_reboot(SHUTDOWN_poweroff);
34084 }
34085
34086-static void xen_machine_power_off(void)
34087+static __noreturn void xen_machine_power_off(void)
34088 {
34089 if (pm_power_off)
34090 pm_power_off();
34091@@ -1464,7 +1460,17 @@ asmlinkage void __init xen_start_kernel(void)
34092 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
34093
34094 /* Work out if we support NX */
34095- x86_configure_nx();
34096+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
34097+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
34098+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
34099+ unsigned l, h;
34100+
34101+ __supported_pte_mask |= _PAGE_NX;
34102+ rdmsr(MSR_EFER, l, h);
34103+ l |= EFER_NX;
34104+ wrmsr(MSR_EFER, l, h);
34105+ }
34106+#endif
34107
34108 xen_setup_features();
34109
34110@@ -1495,13 +1501,6 @@ asmlinkage void __init xen_start_kernel(void)
34111
34112 machine_ops = xen_machine_ops;
34113
34114- /*
34115- * The only reliable way to retain the initial address of the
34116- * percpu gdt_page is to remember it here, so we can go and
34117- * mark it RW later, when the initial percpu area is freed.
34118- */
34119- xen_initial_gdt = &per_cpu(gdt_page, 0);
34120-
34121 xen_smp_init();
34122
34123 #ifdef CONFIG_ACPI_NUMA
34124@@ -1700,7 +1699,7 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
34125 return NOTIFY_OK;
34126 }
34127
34128-static struct notifier_block xen_hvm_cpu_notifier __cpuinitdata = {
34129+static struct notifier_block xen_hvm_cpu_notifier = {
34130 .notifier_call = xen_hvm_cpu_notify,
34131 };
34132
34133diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
34134index fdc3ba2..3daee39 100644
34135--- a/arch/x86/xen/mmu.c
34136+++ b/arch/x86/xen/mmu.c
34137@@ -1894,6 +1894,9 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
34138 /* L3_k[510] -> level2_kernel_pgt
34139 * L3_i[511] -> level2_fixmap_pgt */
34140 convert_pfn_mfn(level3_kernel_pgt);
34141+ convert_pfn_mfn(level3_vmalloc_start_pgt);
34142+ convert_pfn_mfn(level3_vmalloc_end_pgt);
34143+ convert_pfn_mfn(level3_vmemmap_pgt);
34144
34145 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
34146 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
34147@@ -1923,8 +1926,12 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
34148 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
34149 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
34150 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
34151+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
34152+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
34153+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
34154 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
34155 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
34156+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
34157 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
34158 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
34159
34160@@ -2108,6 +2115,7 @@ static void __init xen_post_allocator_init(void)
34161 pv_mmu_ops.set_pud = xen_set_pud;
34162 #if PAGETABLE_LEVELS == 4
34163 pv_mmu_ops.set_pgd = xen_set_pgd;
34164+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
34165 #endif
34166
34167 /* This will work as long as patching hasn't happened yet
34168@@ -2186,6 +2194,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
34169 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
34170 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
34171 .set_pgd = xen_set_pgd_hyper,
34172+ .set_pgd_batched = xen_set_pgd_hyper,
34173
34174 .alloc_pud = xen_alloc_pmd_init,
34175 .release_pud = xen_release_pmd_init,
34176diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
34177index d99cae8..18401e1 100644
34178--- a/arch/x86/xen/smp.c
34179+++ b/arch/x86/xen/smp.c
34180@@ -240,11 +240,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
34181 {
34182 BUG_ON(smp_processor_id() != 0);
34183 native_smp_prepare_boot_cpu();
34184-
34185- /* We've switched to the "real" per-cpu gdt, so make sure the
34186- old memory can be recycled */
34187- make_lowmem_page_readwrite(xen_initial_gdt);
34188-
34189 xen_filter_cpu_maps();
34190 xen_setup_vcpu_info_placement();
34191 }
34192@@ -314,7 +309,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
34193 ctxt->user_regs.ss = __KERNEL_DS;
34194 #ifdef CONFIG_X86_32
34195 ctxt->user_regs.fs = __KERNEL_PERCPU;
34196- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
34197+ savesegment(gs, ctxt->user_regs.gs);
34198 #else
34199 ctxt->gs_base_kernel = per_cpu_offset(cpu);
34200 #endif
34201@@ -324,8 +319,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
34202
34203 {
34204 ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
34205- ctxt->user_regs.ds = __USER_DS;
34206- ctxt->user_regs.es = __USER_DS;
34207+ ctxt->user_regs.ds = __KERNEL_DS;
34208+ ctxt->user_regs.es = __KERNEL_DS;
34209
34210 xen_copy_trap_info(ctxt->trap_ctxt);
34211
34212@@ -370,13 +365,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu, struct task_struct *idle)
34213 int rc;
34214
34215 per_cpu(current_task, cpu) = idle;
34216+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
34217 #ifdef CONFIG_X86_32
34218 irq_ctx_init(cpu);
34219 #else
34220 clear_tsk_thread_flag(idle, TIF_FORK);
34221- per_cpu(kernel_stack, cpu) =
34222- (unsigned long)task_stack_page(idle) -
34223- KERNEL_STACK_OFFSET + THREAD_SIZE;
34224+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
34225 #endif
34226 xen_setup_runstate_info(cpu);
34227 xen_setup_timer(cpu);
34228@@ -651,7 +645,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
34229
34230 void __init xen_smp_init(void)
34231 {
34232- smp_ops = xen_smp_ops;
34233+ memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);
34234 xen_fill_possible_map();
34235 xen_init_spinlocks();
34236 }
34237diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
34238index 33ca6e4..0ded929 100644
34239--- a/arch/x86/xen/xen-asm_32.S
34240+++ b/arch/x86/xen/xen-asm_32.S
34241@@ -84,14 +84,14 @@ ENTRY(xen_iret)
34242 ESP_OFFSET=4 # bytes pushed onto stack
34243
34244 /*
34245- * Store vcpu_info pointer for easy access. Do it this way to
34246- * avoid having to reload %fs
34247+ * Store vcpu_info pointer for easy access.
34248 */
34249 #ifdef CONFIG_SMP
34250- GET_THREAD_INFO(%eax)
34251- movl %ss:TI_cpu(%eax), %eax
34252- movl %ss:__per_cpu_offset(,%eax,4), %eax
34253- mov %ss:xen_vcpu(%eax), %eax
34254+ push %fs
34255+ mov $(__KERNEL_PERCPU), %eax
34256+ mov %eax, %fs
34257+ mov PER_CPU_VAR(xen_vcpu), %eax
34258+ pop %fs
34259 #else
34260 movl %ss:xen_vcpu, %eax
34261 #endif
34262diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
34263index 7faed58..ba4427c 100644
34264--- a/arch/x86/xen/xen-head.S
34265+++ b/arch/x86/xen/xen-head.S
34266@@ -19,6 +19,17 @@ ENTRY(startup_xen)
34267 #ifdef CONFIG_X86_32
34268 mov %esi,xen_start_info
34269 mov $init_thread_union+THREAD_SIZE,%esp
34270+#ifdef CONFIG_SMP
34271+ movl $cpu_gdt_table,%edi
34272+ movl $__per_cpu_load,%eax
34273+ movw %ax,__KERNEL_PERCPU + 2(%edi)
34274+ rorl $16,%eax
34275+ movb %al,__KERNEL_PERCPU + 4(%edi)
34276+ movb %ah,__KERNEL_PERCPU + 7(%edi)
34277+ movl $__per_cpu_end - 1,%eax
34278+ subl $__per_cpu_start,%eax
34279+ movw %ax,__KERNEL_PERCPU + 0(%edi)
34280+#endif
34281 #else
34282 mov %rsi,xen_start_info
34283 mov $init_thread_union+THREAD_SIZE,%rsp
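
On 32-bit SMP the stub must make __KERNEL_PERCPU usable before any C code runs, so it patches the descriptor in cpu_gdt_table by hand: limit 15..0 goes into bytes 0..1, base 15..0 into bytes 2..3, base 23..16 into byte 4, and base 31..24 into byte 7 (the rorl/movb pair). The same byte-splitting in runnable C; cpu_gdt_table and the __per_cpu_* symbols are what the asm assumes:

#include <stdint.h>
#include <stdio.h>

/* Patch base and a sub-64KiB limit into an 8-byte GDT descriptor,
 * leaving the access byte (5) and the flags/limit byte (6) exactly
 * as the static table initialized them -- as the asm does. */
static void set_base_limit(uint8_t d[8], uint32_t base, uint16_t limit)
{
	d[0] = limit & 0xff;           /* movw %ax, 0(%edi): limit 15..0 */
	d[1] = limit >> 8;
	d[2] = base & 0xff;            /* movw %ax, 2(%edi): base 15..0  */
	d[3] = (base >> 8) & 0xff;
	d[4] = (base >> 16) & 0xff;    /* movb %al, 4(%edi): base 23..16 */
	d[7] = base >> 24;             /* movb %ah, 7(%edi): base 31..24 */
}

int main(void)
{
	/* hypothetical data descriptor: byte-granular, 32-bit (0x40) */
	uint8_t d[8] = { 0, 0, 0, 0, 0, 0x92, 0x40, 0 };

	set_base_limit(d, 0xc1a00000u, 0x7fffu);
	for (int i = 0; i < 8; i++)
		printf("%02x%c", d[i], i == 7 ? '\n' : ' ');
	return 0;
}
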
34284diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
34285index a95b417..b6dbd0b 100644
34286--- a/arch/x86/xen/xen-ops.h
34287+++ b/arch/x86/xen/xen-ops.h
34288@@ -10,8 +10,6 @@
34289 extern const char xen_hypervisor_callback[];
34290 extern const char xen_failsafe_callback[];
34291
34292-extern void *xen_initial_gdt;
34293-
34294 struct trap_info;
34295 void xen_copy_trap_info(struct trap_info *traps);
34296
34297diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
34298index 525bd3d..ef888b1 100644
34299--- a/arch/xtensa/variants/dc232b/include/variant/core.h
34300+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
34301@@ -119,9 +119,9 @@
34302 ----------------------------------------------------------------------*/
34303
34304 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
34305-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
34306 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
34307 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
34308+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
34309
34310 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
34311 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
34312diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
34313index 2f33760..835e50a 100644
34314--- a/arch/xtensa/variants/fsf/include/variant/core.h
34315+++ b/arch/xtensa/variants/fsf/include/variant/core.h
34316@@ -11,6 +11,7 @@
34317 #ifndef _XTENSA_CORE_H
34318 #define _XTENSA_CORE_H
34319
34320+#include <linux/const.h>
34321
34322 /****************************************************************************
34323 Parameters Useful for Any Code, USER or PRIVILEGED
34324@@ -112,9 +113,9 @@
34325 ----------------------------------------------------------------------*/
34326
34327 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
34328-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
34329 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
34330 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
34331+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
34332
34333 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
34334 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
34335diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
34336index af00795..2bb8105 100644
34337--- a/arch/xtensa/variants/s6000/include/variant/core.h
34338+++ b/arch/xtensa/variants/s6000/include/variant/core.h
34339@@ -11,6 +11,7 @@
34340 #ifndef _XTENSA_CORE_CONFIGURATION_H
34341 #define _XTENSA_CORE_CONFIGURATION_H
34342
34343+#include <linux/const.h>
34344
34345 /****************************************************************************
34346 Parameters Useful for Any Code, USER or PRIVILEGED
34347@@ -118,9 +119,9 @@
34348 ----------------------------------------------------------------------*/
34349
34350 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
34351-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
34352 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
34353 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
34354+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
34355
34356 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
34357 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
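
All three xtensa variant headers get the same treatment: pull in <linux/const.h> and derive XCHAL_DCACHE_LINESIZE from XCHAL_DCACHE_LINEWIDTH via _AC(), so the constant is a typed unsigned long in C code, remains a bare literal in assembly (where a UL suffix would not parse), and can no longer drift out of sync with the line width. The mechanism, reduced to a runnable sketch:

#include <stdio.h>

/* _AC()/__AC() as defined in include/uapi/linux/const.h */
#ifdef __ASSEMBLY__
# define _AC(X, Y)  X
#else
# define __AC(X, Y) (X##Y)
# define _AC(X, Y)  __AC(X, Y)
#endif

#define XCHAL_DCACHE_LINEWIDTH 5
#define XCHAL_DCACHE_LINESIZE  (_AC(1, UL) << XCHAL_DCACHE_LINEWIDTH)

int main(void)
{
    /* 1UL << 5 == 32, and the value is unsigned long, so address
     * masks built from it later do not truncate to int */
    printf("%lu\n", XCHAL_DCACHE_LINESIZE);
    return 0;
}
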
34358diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
34359index 58916af..eb9dbcf6 100644
34360--- a/block/blk-iopoll.c
34361+++ b/block/blk-iopoll.c
34362@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
34363 }
34364 EXPORT_SYMBOL(blk_iopoll_complete);
34365
34366-static void blk_iopoll_softirq(struct softirq_action *h)
34367+static void blk_iopoll_softirq(void)
34368 {
34369 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
34370 int rearm = 0, budget = blk_iopoll_budget;
34371@@ -209,7 +209,7 @@ static int __cpuinit blk_iopoll_cpu_notify(struct notifier_block *self,
34372 return NOTIFY_OK;
34373 }
34374
34375-static struct notifier_block __cpuinitdata blk_iopoll_cpu_notifier = {
34376+static struct notifier_block blk_iopoll_cpu_notifier = {
34377 .notifier_call = blk_iopoll_cpu_notify,
34378 };
34379
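
blk_iopoll_softirq() never used its struct softirq_action argument; grsecurity narrows softirq handlers to void (*)(void) tree-wide (blk_done_softirq() below gets the identical change), so every handler reachable through the softirq vector matches the exact type the dispatcher calls, a useful property for function-pointer integrity checking. That rationale is a reading of the patch, not something it states. A sketch of such an exact-prototype dispatch table, with hypothetical names:

#include <stdio.h>

/* narrowed handler type: no unused argument, so the table and the
 * handlers agree exactly */
typedef void (*softirq_handler_t)(void);

static void blk_done_sketch(void)   { puts("BLOCK_SOFTIRQ"); }
static void blk_iopoll_sketch(void) { puts("BLOCK_IOPOLL_SOFTIRQ"); }

static softirq_handler_t softirq_vec_sketch[] = {
    blk_done_sketch,
    blk_iopoll_sketch,
};

int main(void)
{
    for (unsigned i = 0; i < 2; i++)
        softirq_vec_sketch[i]();    /* call site matches the type */
    return 0;
}
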
34380diff --git a/block/blk-map.c b/block/blk-map.c
34381index 623e1cd..ca1e109 100644
34382--- a/block/blk-map.c
34383+++ b/block/blk-map.c
34384@@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
34385 if (!len || !kbuf)
34386 return -EINVAL;
34387
34388- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
34389+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
34390 if (do_copy)
34391 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
34392 else
34393diff --git a/block/blk-softirq.c b/block/blk-softirq.c
34394index 467c8de..f3628c5 100644
34395--- a/block/blk-softirq.c
34396+++ b/block/blk-softirq.c
34397@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
34398 * Softirq action handler - move entries to local list and loop over them
34399 * while passing them to the queue registered handler.
34400 */
34401-static void blk_done_softirq(struct softirq_action *h)
34402+static void blk_done_softirq(void)
34403 {
34404 struct list_head *cpu_list, local_list;
34405
34406@@ -98,7 +98,7 @@ static int __cpuinit blk_cpu_notify(struct notifier_block *self,
34407 return NOTIFY_OK;
34408 }
34409
34410-static struct notifier_block __cpuinitdata blk_cpu_notifier = {
34411+static struct notifier_block blk_cpu_notifier = {
34412 .notifier_call = blk_cpu_notify,
34413 };
34414
34415diff --git a/block/bsg.c b/block/bsg.c
34416index 420a5a9..23834aa 100644
34417--- a/block/bsg.c
34418+++ b/block/bsg.c
34419@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
34420 struct sg_io_v4 *hdr, struct bsg_device *bd,
34421 fmode_t has_write_perm)
34422 {
34423+ unsigned char tmpcmd[sizeof(rq->__cmd)];
34424+ unsigned char *cmdptr;
34425+
34426 if (hdr->request_len > BLK_MAX_CDB) {
34427 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
34428 if (!rq->cmd)
34429 return -ENOMEM;
34430- }
34431+ cmdptr = rq->cmd;
34432+ } else
34433+ cmdptr = tmpcmd;
34434
34435- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
34436+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
34437 hdr->request_len))
34438 return -EFAULT;
34439
34440+ if (cmdptr != rq->cmd)
34441+ memcpy(rq->cmd, cmdptr, hdr->request_len);
34442+
34443 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
34444 if (blk_verify_command(rq->cmd, has_write_perm))
34445 return -EPERM;
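
The bsg hunk stops copy_from_user() from targeting rq->cmd directly when it points into the request's embedded __cmd[] array: the user bytes are staged in a same-sized stack buffer and memcpy()'d over afterwards. Under hardened usercopy a copy into a small array buried inside a larger heap object is hard to bounds-check, while a fixed-size local is trivially checkable. A user-space sketch of the staging idiom (user_read() stands in for copy_from_user(), and the struct is a stand-in, not the real struct request):

#include <stdio.h>
#include <string.h>

struct request_sketch {        /* stand-in for struct request */
    unsigned char *cmd;        /* points at __cmd or a kmalloc'd blob */
    unsigned char __cmd[16];
};

static int user_read(void *dst, const void *src, size_t len)
{
    memcpy(dst, src, len);     /* stands in for copy_from_user() */
    return 0;
}

static int fill_cmd(struct request_sketch *rq, const void *ubuf, size_t len)
{
    unsigned char tmpcmd[sizeof(rq->__cmd)];
    unsigned char *cmdptr = (rq->cmd != rq->__cmd) ? rq->cmd : tmpcmd;

    if (cmdptr == tmpcmd && len > sizeof(tmpcmd))
        return -1;
    if (user_read(cmdptr, ubuf, len))   /* lands in a checkable buffer */
        return -1;
    if (cmdptr != rq->cmd)              /* staged: commit afterwards */
        memcpy(rq->cmd, cmdptr, len);
    return 0;
}

int main(void)
{
    struct request_sketch rq = { 0 };
    unsigned char ucmd[6] = { 0x12, 0, 0, 0, 36, 0 };  /* INQUIRY CDB */

    rq.cmd = rq.__cmd;
    printf("fill_cmd -> %d\n", fill_cmd(&rq, ucmd, sizeof(ucmd)));
    return 0;
}
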
34446diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
34447index 7c668c8..db3521c 100644
34448--- a/block/compat_ioctl.c
34449+++ b/block/compat_ioctl.c
34450@@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
34451 err |= __get_user(f->spec1, &uf->spec1);
34452 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
34453 err |= __get_user(name, &uf->name);
34454- f->name = compat_ptr(name);
34455+ f->name = (void __force_kernel *)compat_ptr(name);
34456 if (err) {
34457 err = -EFAULT;
34458 goto out;
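
compat_ptr() yields a user-space pointer, and f->name is typed as a kernel pointer; PaX's checker plugin (an extension of sparse's address-space rules) flags the assignment, and the __force_kernel cast records that the crossing is deliberate. For comparison, the mainline sparse annotations it extends look like this:

/* from <linux/compiler.h>, active only when sparse (__CHECKER__) runs */
#ifdef __CHECKER__
# define __user   __attribute__((noderef, address_space(1)))
# define __kernel __attribute__((address_space(0)))
# define __force  __attribute__((force))
#else
# define __user
# define __kernel
# define __force
#endif
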
34459diff --git a/block/genhd.c b/block/genhd.c
34460index cdeb527..10aa34db 100644
34461--- a/block/genhd.c
34462+++ b/block/genhd.c
34463@@ -467,21 +467,24 @@ static char *bdevt_str(dev_t devt, char *buf)
34464
34465 /*
34466 * Register device numbers dev..(dev+range-1)
34467- * range must be nonzero
34468+ * Noop if @range is zero.
34469 * The hash chain is sorted on range, so that subranges can override.
34470 */
34471 void blk_register_region(dev_t devt, unsigned long range, struct module *module,
34472 struct kobject *(*probe)(dev_t, int *, void *),
34473 int (*lock)(dev_t, void *), void *data)
34474 {
34475- kobj_map(bdev_map, devt, range, module, probe, lock, data);
34476+ if (range)
34477+ kobj_map(bdev_map, devt, range, module, probe, lock, data);
34478 }
34479
34480 EXPORT_SYMBOL(blk_register_region);
34481
34482+/* undo blk_register_region(), noop if @range is zero */
34483 void blk_unregister_region(dev_t devt, unsigned long range)
34484 {
34485- kobj_unmap(bdev_map, devt, range);
34486+ if (range)
34487+ kobj_unmap(bdev_map, devt, range);
34488 }
34489
34490 EXPORT_SYMBOL(blk_unregister_region);
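
blk_register_region()'s old comment declared "range must be nonzero" without enforcing it; the hunk downgrades that precondition to a checked no-op in both register and unregister, so a caller that computes a zero range never reaches kobj_map()/kobj_unmap() with it. The shape is an ordinary guard clause:

/* guard-clause shape used above: enforce the documented precondition */
void register_region_sketch(unsigned long range)
{
    if (!range)
        return;        /* previously fell through to kobj_map() */
    /* ... actual registration ... */
}
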
34491diff --git a/block/partitions/efi.c b/block/partitions/efi.c
34492index c85fc89..51e690b 100644
34493--- a/block/partitions/efi.c
34494+++ b/block/partitions/efi.c
34495@@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
34496 if (!gpt)
34497 return NULL;
34498
34499+ if (!le32_to_cpu(gpt->num_partition_entries))
34500+ return NULL;
34501+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
34502+ if (!pte)
34503+ return NULL;
34504+
34505 count = le32_to_cpu(gpt->num_partition_entries) *
34506 le32_to_cpu(gpt->sizeof_partition_entry);
34507- if (!count)
34508- return NULL;
34509- pte = kmalloc(count, GFP_KERNEL);
34510- if (!pte)
34511- return NULL;
34512-
34513 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
34514 (u8 *) pte,
34515 count) < count) {
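
In alloc_read_gpt_entries() both factors come from the on-disk GPT header, so num_partition_entries * sizeof_partition_entry is attacker-influenced 32-bit math that can overflow before kmalloc() ever sees it, yielding an undersized buffer that read_lba() then overruns. kcalloc() performs the same multiplication with an overflow check and fails cleanly (and zeroes the buffer). The check it adds reduces to:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* the essence of kcalloc()'s guard, in libc terms */
static void *checked_alloc(size_t n, size_t size)
{
    if (size != 0 && n > SIZE_MAX / size)
        return NULL;            /* n * size would overflow */
    return calloc(n, size);     /* zeroing, as kcalloc does */
}

int main(void)
{
    /* a GPT header claiming 2^31 entries of 128 bytes: refused or
     * satisfied honestly, never silently undersized */
    void *pte = checked_alloc((size_t)1 << 31, 128);
    printf("%s\n", pte ? "allocated" : "refused");
    free(pte);
    return 0;
}
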
34516diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
34517index a5ffcc9..3cedc9c 100644
34518--- a/block/scsi_ioctl.c
34519+++ b/block/scsi_ioctl.c
34520@@ -224,8 +224,20 @@ EXPORT_SYMBOL(blk_verify_command);
34521 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
34522 struct sg_io_hdr *hdr, fmode_t mode)
34523 {
34524- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
34525+ unsigned char tmpcmd[sizeof(rq->__cmd)];
34526+ unsigned char *cmdptr;
34527+
34528+ if (rq->cmd != rq->__cmd)
34529+ cmdptr = rq->cmd;
34530+ else
34531+ cmdptr = tmpcmd;
34532+
34533+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
34534 return -EFAULT;
34535+
34536+ if (cmdptr != rq->cmd)
34537+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
34538+
34539 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
34540 return -EPERM;
34541
34542@@ -434,6 +446,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
34543 int err;
34544 unsigned int in_len, out_len, bytes, opcode, cmdlen;
34545 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
34546+ unsigned char tmpcmd[sizeof(rq->__cmd)];
34547+ unsigned char *cmdptr;
34548
34549 if (!sic)
34550 return -EINVAL;
34551@@ -467,9 +481,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
34552 */
34553 err = -EFAULT;
34554 rq->cmd_len = cmdlen;
34555- if (copy_from_user(rq->cmd, sic->data, cmdlen))
34556+
34557+ if (rq->cmd != rq->__cmd)
34558+ cmdptr = rq->cmd;
34559+ else
34560+ cmdptr = tmpcmd;
34561+
34562+ if (copy_from_user(cmdptr, sic->data, cmdlen))
34563 goto error;
34564
34565+ if (rq->cmd != cmdptr)
34566+ memcpy(rq->cmd, cmdptr, cmdlen);
34567+
34568 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
34569 goto error;
34570
34571diff --git a/crypto/cryptd.c b/crypto/cryptd.c
34572index 7bdd61b..afec999 100644
34573--- a/crypto/cryptd.c
34574+++ b/crypto/cryptd.c
34575@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
34576
34577 struct cryptd_blkcipher_request_ctx {
34578 crypto_completion_t complete;
34579-};
34580+} __no_const;
34581
34582 struct cryptd_hash_ctx {
34583 struct crypto_shash *child;
34584@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
34585
34586 struct cryptd_aead_request_ctx {
34587 crypto_completion_t complete;
34588-};
34589+} __no_const;
34590
34591 static void cryptd_queue_worker(struct work_struct *work);
34592
34593diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
34594index b2c99dc..476c9fb 100644
34595--- a/crypto/pcrypt.c
34596+++ b/crypto/pcrypt.c
34597@@ -440,7 +440,7 @@ static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
34598 int ret;
34599
34600 pinst->kobj.kset = pcrypt_kset;
34601- ret = kobject_add(&pinst->kobj, NULL, name);
34602+ ret = kobject_add(&pinst->kobj, NULL, "%s", name);
34603 if (!ret)
34604 kobject_uevent(&pinst->kobj, KOBJ_ADD);
34605
34606@@ -455,8 +455,8 @@ static int pcrypt_init_padata(struct padata_pcrypt *pcrypt,
34607
34608 get_online_cpus();
34609
34610- pcrypt->wq = alloc_workqueue(name,
34611- WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
34612+ pcrypt->wq = alloc_workqueue("%s",
34613+ WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1, name);
34614 if (!pcrypt->wq)
34615 goto err;
34616
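
Both pcrypt hunks are format-string fixes: kobject_add() and alloc_workqueue() take printf-style formats, so passing a caller-supplied name directly means any '%' in it is parsed as a conversion. Routing it through "%s" makes the name pure data. The user-space classic:

#include <stdio.h>

int main(void)
{
    const char *name = "pcrypt_%x_%n";  /* imagine attacker-chosen */

    /* printf(name);     BAD: '%' in the name drives the parser */
    printf("%s\n", name);  /* GOOD: the name is data, never a format */
    return 0;
}
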
34617diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
34618index f220d64..d359ad6 100644
34619--- a/drivers/acpi/apei/apei-internal.h
34620+++ b/drivers/acpi/apei/apei-internal.h
34621@@ -20,7 +20,7 @@ typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
34622 struct apei_exec_ins_type {
34623 u32 flags;
34624 apei_exec_ins_func_t run;
34625-};
34626+} __do_const;
34627
34628 struct apei_exec_context {
34629 u32 ip;
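
These two annotations are the escape hatches of PaX's constify plugin, which makes structures consisting only of function pointers const by default: __no_const (seen in the cryptd hunks above) keeps a type writable because its completion callback is assigned at request time, while __do_const forces constification of apei_exec_ins_type, whose non-pointer flags field would otherwise exempt it. Roughly how they are wired up; the attribute spellings below are reconstructed from memory, and both are inert without the plugin:

#ifdef CONSTIFY_PLUGIN
# define __no_const __attribute__((no_const))
# define __do_const __attribute__((do_const))
#else
# define __no_const
# define __do_const
#endif
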
34630diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
34631index 33dc6a0..4b24b47 100644
34632--- a/drivers/acpi/apei/cper.c
34633+++ b/drivers/acpi/apei/cper.c
34634@@ -39,12 +39,12 @@
34635 */
34636 u64 cper_next_record_id(void)
34637 {
34638- static atomic64_t seq;
34639+ static atomic64_unchecked_t seq;
34640
34641- if (!atomic64_read(&seq))
34642- atomic64_set(&seq, ((u64)get_seconds()) << 32);
34643+ if (!atomic64_read_unchecked(&seq))
34644+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
34645
34646- return atomic64_inc_return(&seq);
34647+ return atomic64_inc_return_unchecked(&seq);
34648 }
34649 EXPORT_SYMBOL_GPL(cper_next_record_id);
34650
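
Under PaX's REFCOUNT feature the standard atomic ops detect overflow and trap, turning reference-count wraparounds into clean stops instead of use-after-frees. Counters that are supposed to wrap, like this record-ID sequence and the ATM statistics later in the patch, migrate to the *_unchecked variants, which keep the vanilla wrapping behaviour. A single-threaded user-space sketch of the checked/unchecked split (the real versions are arch-specific atomics; __builtin_add_overflow needs gcc 5+/clang):

#include <stdio.h>
#include <stdlib.h>

typedef struct { long counter; } atomic_sketch_t;  /* stand-in type */

/* checked: the REFCOUNT default -- trap instead of wrapping */
static long inc_checked(atomic_sketch_t *v)
{
    long next;

    if (__builtin_add_overflow(v->counter, 1L, &next)) {
        fprintf(stderr, "counter overflow detected\n");
        abort();
    }
    return v->counter = next;
}

/* unchecked: wrapping is expected (sequence numbers, statistics);
 * wrap via unsigned math so the sketch itself stays UB-free */
static long inc_unchecked(atomic_sketch_t *v)
{
    v->counter = (long)((unsigned long)v->counter + 1);
    return v->counter;
}

int main(void)
{
    atomic_sketch_t seq = { 0 };

    printf("%ld %ld\n", inc_checked(&seq), inc_unchecked(&seq));
    return 0;
}
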
34651diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
34652index be60399..778b33e8 100644
34653--- a/drivers/acpi/bgrt.c
34654+++ b/drivers/acpi/bgrt.c
34655@@ -87,8 +87,10 @@ static int __init bgrt_init(void)
34656 return -ENODEV;
34657
34658 sysfs_bin_attr_init(&image_attr);
34659- image_attr.private = bgrt_image;
34660- image_attr.size = bgrt_image_size;
34661+ pax_open_kernel();
34662+ *(void **)&image_attr.private = bgrt_image;
34663+ *(size_t *)&image_attr.size = bgrt_image_size;
34664+ pax_close_kernel();
34665
34666 bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
34667 if (!bgrt_kobj)
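
struct bin_attribute is constified, so bgrt_init() can no longer assign to image_attr's fields directly; pax_open_kernel()/pax_close_kernel() temporarily lift kernel write protection (on x86, by toggling CR0.WP) around writes performed through casts. An mprotect()-based user-space analogue, assuming a page-sized stand-in object and eliding most error handling:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    long pagesz = sysconf(_SC_PAGESIZE);

    /* stand-in for a constified kernel object: one read-only page */
    char *attr = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (attr == MAP_FAILED)
        return 1;
    strcpy(attr, "boot-time value");
    mprotect(attr, pagesz, PROT_READ);              /* "constified" */

    mprotect(attr, pagesz, PROT_READ | PROT_WRITE); /* pax_open_kernel() */
    strcpy(attr, "updated value");                  /* write via cast */
    mprotect(attr, pagesz, PROT_READ);              /* pax_close_kernel() */

    puts(attr);
    return 0;
}
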
34668diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
34669index cb96296..b81293b 100644
34670--- a/drivers/acpi/blacklist.c
34671+++ b/drivers/acpi/blacklist.c
34672@@ -52,7 +52,7 @@ struct acpi_blacklist_item {
34673 u32 is_critical_error;
34674 };
34675
34676-static struct dmi_system_id acpi_osi_dmi_table[] __initdata;
34677+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst;
34678
34679 /*
34680 * POLICY: If *anything* doesn't work, put it on the blacklist.
34681@@ -193,7 +193,7 @@ static int __init dmi_disable_osi_win7(const struct dmi_system_id *d)
34682 return 0;
34683 }
34684
34685-static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
34686+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
34687 {
34688 .callback = dmi_disable_osi_vista,
34689 .ident = "Fujitsu Siemens",
34690diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
34691index 7586544..636a2f0 100644
34692--- a/drivers/acpi/ec_sys.c
34693+++ b/drivers/acpi/ec_sys.c
34694@@ -12,6 +12,7 @@
34695 #include <linux/acpi.h>
34696 #include <linux/debugfs.h>
34697 #include <linux/module.h>
34698+#include <linux/uaccess.h>
34699 #include "internal.h"
34700
34701 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
34702@@ -34,7 +35,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
34703 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
34704 */
34705 unsigned int size = EC_SPACE_SIZE;
34706- u8 *data = (u8 *) buf;
34707+ u8 data;
34708 loff_t init_off = *off;
34709 int err = 0;
34710
34711@@ -47,9 +48,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
34712 size = count;
34713
34714 while (size) {
34715- err = ec_read(*off, &data[*off - init_off]);
34716+ err = ec_read(*off, &data);
34717 if (err)
34718 return err;
34719+ if (put_user(data, &buf[*off - init_off]))
34720+ return -EFAULT;
34721 *off += 1;
34722 size--;
34723 }
34724@@ -65,7 +68,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
34725
34726 unsigned int size = count;
34727 loff_t init_off = *off;
34728- u8 *data = (u8 *) buf;
34729 int err = 0;
34730
34731 if (*off >= EC_SPACE_SIZE)
34732@@ -76,7 +78,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
34733 }
34734
34735 while (size) {
34736- u8 byte_write = data[*off - init_off];
34737+ u8 byte_write;
34738+ if (get_user(byte_write, &buf[*off - init_off]))
34739+ return -EFAULT;
34740 err = ec_write(*off, byte_write);
34741 if (err)
34742 return err;
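
The ec_sys hunks fix an outright bug rather than a hardening nit: both handlers cast the __user buffer to u8 * and dereferenced it from kernel context, which is never valid and oopses (or worse) under UDEREF/SMAP-style protections. The fix stages each byte in a kernel local and crosses the boundary with put_user()/get_user(). A runnable sketch of the corrected read path, with the uaccess and EC calls mocked out:

#include <stdio.h>
#include <string.h>

typedef unsigned char u8;

/* put_user() stand-in: in the kernel this validates the address and
 * handles faults; here it only copies one byte */
static int put_user_sketch(u8 val, u8 *uaddr)
{
    memcpy(uaddr, &val, 1);
    return 0;
}

/* ec_read() stand-in: fabricate a register value per offset */
static int ec_read_sketch(unsigned off, u8 *out)
{
    *out = (u8)off;
    return 0;
}

/* the fixed shape: device -> kernel local -> user, never a direct
 * kernel-mode dereference of the user pointer */
static long read_io_sketch(u8 *ubuf, size_t count, size_t off)
{
    size_t init_off = off;

    while (count--) {
        u8 data;

        if (ec_read_sketch(off, &data))
            return -1;
        if (put_user_sketch(data, &ubuf[off - init_off]))
            return -1;
        off++;
    }
    return (long)(off - init_off);
}

int main(void)
{
    u8 buf[4];
    long n = read_io_sketch(buf, sizeof(buf), 0x10);

    printf("read %ld bytes, first = 0x%02x\n", n, buf[0]);
    return 0;
}
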
34743diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
34744index eb133c7..f571552 100644
34745--- a/drivers/acpi/processor_idle.c
34746+++ b/drivers/acpi/processor_idle.c
34747@@ -994,7 +994,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
34748 {
34749 int i, count = CPUIDLE_DRIVER_STATE_START;
34750 struct acpi_processor_cx *cx;
34751- struct cpuidle_state *state;
34752+ cpuidle_state_no_const *state;
34753 struct cpuidle_driver *drv = &acpi_idle_driver;
34754
34755 if (!pr->flags.power_setup_done)
34756diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
34757index fcae5fa..e9f71ea 100644
34758--- a/drivers/acpi/sysfs.c
34759+++ b/drivers/acpi/sysfs.c
34760@@ -423,11 +423,11 @@ static u32 num_counters;
34761 static struct attribute **all_attrs;
34762 static u32 acpi_gpe_count;
34763
34764-static struct attribute_group interrupt_stats_attr_group = {
34765+static attribute_group_no_const interrupt_stats_attr_group = {
34766 .name = "interrupts",
34767 };
34768
34769-static struct kobj_attribute *counter_attrs;
34770+static kobj_attribute_no_const *counter_attrs;
34771
34772 static void delete_gpe_attr_array(void)
34773 {
34774diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
34775index 7b9bdd8..37638ca 100644
34776--- a/drivers/ata/libahci.c
34777+++ b/drivers/ata/libahci.c
34778@@ -1230,7 +1230,7 @@ int ahci_kick_engine(struct ata_port *ap)
34779 }
34780 EXPORT_SYMBOL_GPL(ahci_kick_engine);
34781
34782-static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
34783+static int __intentional_overflow(-1) ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
34784 struct ata_taskfile *tf, int is_cmd, u16 flags,
34785 unsigned long timeout_msec)
34786 {
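
__intentional_overflow() is consumed by grsecurity's size_overflow GCC plugin: it marks functions, here the polled-command timeout math, whose integer arithmetic is allowed to wrap, so the plugin's inserted overflow checks stay quiet. Like the constify annotations it compiles away without the plugin; the definition below is an approximation from memory, not a quote of the patch's compiler headers:

#ifdef SIZE_OVERFLOW_PLUGIN    /* guard name approximated */
# define __intentional_overflow(...) \
    __attribute__((intentional_overflow(__VA_ARGS__)))
#else
# define __intentional_overflow(...)
#endif
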
34787diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
34788index adf002a..39bb8f9 100644
34789--- a/drivers/ata/libata-core.c
34790+++ b/drivers/ata/libata-core.c
34791@@ -4792,7 +4792,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
34792 struct ata_port *ap;
34793 unsigned int tag;
34794
34795- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
34796+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
34797 ap = qc->ap;
34798
34799 qc->flags = 0;
34800@@ -4808,7 +4808,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
34801 struct ata_port *ap;
34802 struct ata_link *link;
34803
34804- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
34805+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
34806 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
34807 ap = qc->ap;
34808 link = qc->dev->link;
34809@@ -5926,6 +5926,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
34810 return;
34811
34812 spin_lock(&lock);
34813+ pax_open_kernel();
34814
34815 for (cur = ops->inherits; cur; cur = cur->inherits) {
34816 void **inherit = (void **)cur;
34817@@ -5939,8 +5940,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
34818 if (IS_ERR(*pp))
34819 *pp = NULL;
34820
34821- ops->inherits = NULL;
34822+ *(struct ata_port_operations **)&ops->inherits = NULL;
34823
34824+ pax_close_kernel();
34825 spin_unlock(&lock);
34826 }
34827
34828diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
34829index 7638121..357a965 100644
34830--- a/drivers/ata/pata_arasan_cf.c
34831+++ b/drivers/ata/pata_arasan_cf.c
34832@@ -865,7 +865,9 @@ static int arasan_cf_probe(struct platform_device *pdev)
34833 /* Handle platform specific quirks */
34834 if (quirk) {
34835 if (quirk & CF_BROKEN_PIO) {
34836- ap->ops->set_piomode = NULL;
34837+ pax_open_kernel();
34838+ *(void **)&ap->ops->set_piomode = NULL;
34839+ pax_close_kernel();
34840 ap->pio_mask = 0;
34841 }
34842 if (quirk & CF_BROKEN_MWDMA)
34843diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
34844index f9b983a..887b9d8 100644
34845--- a/drivers/atm/adummy.c
34846+++ b/drivers/atm/adummy.c
34847@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
34848 vcc->pop(vcc, skb);
34849 else
34850 dev_kfree_skb_any(skb);
34851- atomic_inc(&vcc->stats->tx);
34852+ atomic_inc_unchecked(&vcc->stats->tx);
34853
34854 return 0;
34855 }
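
From adummy above through zatm below, the long run of ATM driver hunks (ambassador, atmtcp, eni, firestream, fore200e, he, horizon, idt77252, iphase, lanai, nicstar, solos-pci, suni, uPD98402, zatm) is one mechanical conversion: the per-VCC rx/tx/rx_err/tx_err/rx_drop statistics become atomic_unchecked_t, with the matching _unchecked read/set/add calls, because traffic counters are expected to wrap and must not trip REFCOUNT's overflow detection (see the cper.c note above). The type simply mirrors atomic_t:

/* as in PaX's atomic headers: same layout as atomic_t, wrapping ops */
typedef struct { int counter; } atomic_unchecked_t;
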
34856diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
34857index 77a7480d..05cde58 100644
34858--- a/drivers/atm/ambassador.c
34859+++ b/drivers/atm/ambassador.c
34860@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
34861 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
34862
34863 // VC layer stats
34864- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
34865+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
34866
34867 // free the descriptor
34868 kfree (tx_descr);
34869@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
34870 dump_skb ("<<<", vc, skb);
34871
34872 // VC layer stats
34873- atomic_inc(&atm_vcc->stats->rx);
34874+ atomic_inc_unchecked(&atm_vcc->stats->rx);
34875 __net_timestamp(skb);
34876 // end of our responsibility
34877 atm_vcc->push (atm_vcc, skb);
34878@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
34879 } else {
34880 PRINTK (KERN_INFO, "dropped over-size frame");
34881 // should we count this?
34882- atomic_inc(&atm_vcc->stats->rx_drop);
34883+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
34884 }
34885
34886 } else {
34887@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
34888 }
34889
34890 if (check_area (skb->data, skb->len)) {
34891- atomic_inc(&atm_vcc->stats->tx_err);
34892+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
34893 return -ENOMEM; // ?
34894 }
34895
34896diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
34897index 0e3f8f9..765a7a5 100644
34898--- a/drivers/atm/atmtcp.c
34899+++ b/drivers/atm/atmtcp.c
34900@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
34901 if (vcc->pop) vcc->pop(vcc,skb);
34902 else dev_kfree_skb(skb);
34903 if (dev_data) return 0;
34904- atomic_inc(&vcc->stats->tx_err);
34905+ atomic_inc_unchecked(&vcc->stats->tx_err);
34906 return -ENOLINK;
34907 }
34908 size = skb->len+sizeof(struct atmtcp_hdr);
34909@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
34910 if (!new_skb) {
34911 if (vcc->pop) vcc->pop(vcc,skb);
34912 else dev_kfree_skb(skb);
34913- atomic_inc(&vcc->stats->tx_err);
34914+ atomic_inc_unchecked(&vcc->stats->tx_err);
34915 return -ENOBUFS;
34916 }
34917 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
34918@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
34919 if (vcc->pop) vcc->pop(vcc,skb);
34920 else dev_kfree_skb(skb);
34921 out_vcc->push(out_vcc,new_skb);
34922- atomic_inc(&vcc->stats->tx);
34923- atomic_inc(&out_vcc->stats->rx);
34924+ atomic_inc_unchecked(&vcc->stats->tx);
34925+ atomic_inc_unchecked(&out_vcc->stats->rx);
34926 return 0;
34927 }
34928
34929@@ -299,7 +299,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
34930 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
34931 read_unlock(&vcc_sklist_lock);
34932 if (!out_vcc) {
34933- atomic_inc(&vcc->stats->tx_err);
34934+ atomic_inc_unchecked(&vcc->stats->tx_err);
34935 goto done;
34936 }
34937 skb_pull(skb,sizeof(struct atmtcp_hdr));
34938@@ -311,8 +311,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
34939 __net_timestamp(new_skb);
34940 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
34941 out_vcc->push(out_vcc,new_skb);
34942- atomic_inc(&vcc->stats->tx);
34943- atomic_inc(&out_vcc->stats->rx);
34944+ atomic_inc_unchecked(&vcc->stats->tx);
34945+ atomic_inc_unchecked(&out_vcc->stats->rx);
34946 done:
34947 if (vcc->pop) vcc->pop(vcc,skb);
34948 else dev_kfree_skb(skb);
34949diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
34950index b1955ba..b179940 100644
34951--- a/drivers/atm/eni.c
34952+++ b/drivers/atm/eni.c
34953@@ -522,7 +522,7 @@ static int rx_aal0(struct atm_vcc *vcc)
34954 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
34955 vcc->dev->number);
34956 length = 0;
34957- atomic_inc(&vcc->stats->rx_err);
34958+ atomic_inc_unchecked(&vcc->stats->rx_err);
34959 }
34960 else {
34961 length = ATM_CELL_SIZE-1; /* no HEC */
34962@@ -577,7 +577,7 @@ static int rx_aal5(struct atm_vcc *vcc)
34963 size);
34964 }
34965 eff = length = 0;
34966- atomic_inc(&vcc->stats->rx_err);
34967+ atomic_inc_unchecked(&vcc->stats->rx_err);
34968 }
34969 else {
34970 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
34971@@ -594,7 +594,7 @@ static int rx_aal5(struct atm_vcc *vcc)
34972 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
34973 vcc->dev->number,vcc->vci,length,size << 2,descr);
34974 length = eff = 0;
34975- atomic_inc(&vcc->stats->rx_err);
34976+ atomic_inc_unchecked(&vcc->stats->rx_err);
34977 }
34978 }
34979 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
34980@@ -767,7 +767,7 @@ rx_dequeued++;
34981 vcc->push(vcc,skb);
34982 pushed++;
34983 }
34984- atomic_inc(&vcc->stats->rx);
34985+ atomic_inc_unchecked(&vcc->stats->rx);
34986 }
34987 wake_up(&eni_dev->rx_wait);
34988 }
34989@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
34990 PCI_DMA_TODEVICE);
34991 if (vcc->pop) vcc->pop(vcc,skb);
34992 else dev_kfree_skb_irq(skb);
34993- atomic_inc(&vcc->stats->tx);
34994+ atomic_inc_unchecked(&vcc->stats->tx);
34995 wake_up(&eni_dev->tx_wait);
34996 dma_complete++;
34997 }
34998diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
34999index b41c948..a002b17 100644
35000--- a/drivers/atm/firestream.c
35001+++ b/drivers/atm/firestream.c
35002@@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
35003 }
35004 }
35005
35006- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
35007+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
35008
35009 fs_dprintk (FS_DEBUG_TXMEM, "i");
35010 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
35011@@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
35012 #endif
35013 skb_put (skb, qe->p1 & 0xffff);
35014 ATM_SKB(skb)->vcc = atm_vcc;
35015- atomic_inc(&atm_vcc->stats->rx);
35016+ atomic_inc_unchecked(&atm_vcc->stats->rx);
35017 __net_timestamp(skb);
35018 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
35019 atm_vcc->push (atm_vcc, skb);
35020@@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
35021 kfree (pe);
35022 }
35023 if (atm_vcc)
35024- atomic_inc(&atm_vcc->stats->rx_drop);
35025+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
35026 break;
35027 case 0x1f: /* Reassembly abort: no buffers. */
35028 /* Silently increment error counter. */
35029 if (atm_vcc)
35030- atomic_inc(&atm_vcc->stats->rx_drop);
35031+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
35032 break;
35033 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
35034 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
35035diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
35036index 204814e..cede831 100644
35037--- a/drivers/atm/fore200e.c
35038+++ b/drivers/atm/fore200e.c
35039@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
35040 #endif
35041 /* check error condition */
35042 if (*entry->status & STATUS_ERROR)
35043- atomic_inc(&vcc->stats->tx_err);
35044+ atomic_inc_unchecked(&vcc->stats->tx_err);
35045 else
35046- atomic_inc(&vcc->stats->tx);
35047+ atomic_inc_unchecked(&vcc->stats->tx);
35048 }
35049 }
35050
35051@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
35052 if (skb == NULL) {
35053 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
35054
35055- atomic_inc(&vcc->stats->rx_drop);
35056+ atomic_inc_unchecked(&vcc->stats->rx_drop);
35057 return -ENOMEM;
35058 }
35059
35060@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
35061
35062 dev_kfree_skb_any(skb);
35063
35064- atomic_inc(&vcc->stats->rx_drop);
35065+ atomic_inc_unchecked(&vcc->stats->rx_drop);
35066 return -ENOMEM;
35067 }
35068
35069 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
35070
35071 vcc->push(vcc, skb);
35072- atomic_inc(&vcc->stats->rx);
35073+ atomic_inc_unchecked(&vcc->stats->rx);
35074
35075 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
35076
35077@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
35078 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
35079 fore200e->atm_dev->number,
35080 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
35081- atomic_inc(&vcc->stats->rx_err);
35082+ atomic_inc_unchecked(&vcc->stats->rx_err);
35083 }
35084 }
35085
35086@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
35087 goto retry_here;
35088 }
35089
35090- atomic_inc(&vcc->stats->tx_err);
35091+ atomic_inc_unchecked(&vcc->stats->tx_err);
35092
35093 fore200e->tx_sat++;
35094 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
35095diff --git a/drivers/atm/he.c b/drivers/atm/he.c
35096index 507362a..a845e57 100644
35097--- a/drivers/atm/he.c
35098+++ b/drivers/atm/he.c
35099@@ -1698,7 +1698,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
35100
35101 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
35102 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
35103- atomic_inc(&vcc->stats->rx_drop);
35104+ atomic_inc_unchecked(&vcc->stats->rx_drop);
35105 goto return_host_buffers;
35106 }
35107
35108@@ -1725,7 +1725,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
35109 RBRQ_LEN_ERR(he_dev->rbrq_head)
35110 ? "LEN_ERR" : "",
35111 vcc->vpi, vcc->vci);
35112- atomic_inc(&vcc->stats->rx_err);
35113+ atomic_inc_unchecked(&vcc->stats->rx_err);
35114 goto return_host_buffers;
35115 }
35116
35117@@ -1777,7 +1777,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
35118 vcc->push(vcc, skb);
35119 spin_lock(&he_dev->global_lock);
35120
35121- atomic_inc(&vcc->stats->rx);
35122+ atomic_inc_unchecked(&vcc->stats->rx);
35123
35124 return_host_buffers:
35125 ++pdus_assembled;
35126@@ -2103,7 +2103,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
35127 tpd->vcc->pop(tpd->vcc, tpd->skb);
35128 else
35129 dev_kfree_skb_any(tpd->skb);
35130- atomic_inc(&tpd->vcc->stats->tx_err);
35131+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
35132 }
35133 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
35134 return;
35135@@ -2515,7 +2515,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
35136 vcc->pop(vcc, skb);
35137 else
35138 dev_kfree_skb_any(skb);
35139- atomic_inc(&vcc->stats->tx_err);
35140+ atomic_inc_unchecked(&vcc->stats->tx_err);
35141 return -EINVAL;
35142 }
35143
35144@@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
35145 vcc->pop(vcc, skb);
35146 else
35147 dev_kfree_skb_any(skb);
35148- atomic_inc(&vcc->stats->tx_err);
35149+ atomic_inc_unchecked(&vcc->stats->tx_err);
35150 return -EINVAL;
35151 }
35152 #endif
35153@@ -2538,7 +2538,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
35154 vcc->pop(vcc, skb);
35155 else
35156 dev_kfree_skb_any(skb);
35157- atomic_inc(&vcc->stats->tx_err);
35158+ atomic_inc_unchecked(&vcc->stats->tx_err);
35159 spin_unlock_irqrestore(&he_dev->global_lock, flags);
35160 return -ENOMEM;
35161 }
35162@@ -2580,7 +2580,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
35163 vcc->pop(vcc, skb);
35164 else
35165 dev_kfree_skb_any(skb);
35166- atomic_inc(&vcc->stats->tx_err);
35167+ atomic_inc_unchecked(&vcc->stats->tx_err);
35168 spin_unlock_irqrestore(&he_dev->global_lock, flags);
35169 return -ENOMEM;
35170 }
35171@@ -2611,7 +2611,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
35172 __enqueue_tpd(he_dev, tpd, cid);
35173 spin_unlock_irqrestore(&he_dev->global_lock, flags);
35174
35175- atomic_inc(&vcc->stats->tx);
35176+ atomic_inc_unchecked(&vcc->stats->tx);
35177
35178 return 0;
35179 }
35180diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
35181index 1dc0519..1aadaf7 100644
35182--- a/drivers/atm/horizon.c
35183+++ b/drivers/atm/horizon.c
35184@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
35185 {
35186 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
35187 // VC layer stats
35188- atomic_inc(&vcc->stats->rx);
35189+ atomic_inc_unchecked(&vcc->stats->rx);
35190 __net_timestamp(skb);
35191 // end of our responsibility
35192 vcc->push (vcc, skb);
35193@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
35194 dev->tx_iovec = NULL;
35195
35196 // VC layer stats
35197- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
35198+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
35199
35200 // free the skb
35201 hrz_kfree_skb (skb);
35202diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
35203index 272f009..a18ba55 100644
35204--- a/drivers/atm/idt77252.c
35205+++ b/drivers/atm/idt77252.c
35206@@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
35207 else
35208 dev_kfree_skb(skb);
35209
35210- atomic_inc(&vcc->stats->tx);
35211+ atomic_inc_unchecked(&vcc->stats->tx);
35212 }
35213
35214 atomic_dec(&scq->used);
35215@@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
35216 if ((sb = dev_alloc_skb(64)) == NULL) {
35217 printk("%s: Can't allocate buffers for aal0.\n",
35218 card->name);
35219- atomic_add(i, &vcc->stats->rx_drop);
35220+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
35221 break;
35222 }
35223 if (!atm_charge(vcc, sb->truesize)) {
35224 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
35225 card->name);
35226- atomic_add(i - 1, &vcc->stats->rx_drop);
35227+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
35228 dev_kfree_skb(sb);
35229 break;
35230 }
35231@@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
35232 ATM_SKB(sb)->vcc = vcc;
35233 __net_timestamp(sb);
35234 vcc->push(vcc, sb);
35235- atomic_inc(&vcc->stats->rx);
35236+ atomic_inc_unchecked(&vcc->stats->rx);
35237
35238 cell += ATM_CELL_PAYLOAD;
35239 }
35240@@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
35241 "(CDC: %08x)\n",
35242 card->name, len, rpp->len, readl(SAR_REG_CDC));
35243 recycle_rx_pool_skb(card, rpp);
35244- atomic_inc(&vcc->stats->rx_err);
35245+ atomic_inc_unchecked(&vcc->stats->rx_err);
35246 return;
35247 }
35248 if (stat & SAR_RSQE_CRC) {
35249 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
35250 recycle_rx_pool_skb(card, rpp);
35251- atomic_inc(&vcc->stats->rx_err);
35252+ atomic_inc_unchecked(&vcc->stats->rx_err);
35253 return;
35254 }
35255 if (skb_queue_len(&rpp->queue) > 1) {
35256@@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
35257 RXPRINTK("%s: Can't alloc RX skb.\n",
35258 card->name);
35259 recycle_rx_pool_skb(card, rpp);
35260- atomic_inc(&vcc->stats->rx_err);
35261+ atomic_inc_unchecked(&vcc->stats->rx_err);
35262 return;
35263 }
35264 if (!atm_charge(vcc, skb->truesize)) {
35265@@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
35266 __net_timestamp(skb);
35267
35268 vcc->push(vcc, skb);
35269- atomic_inc(&vcc->stats->rx);
35270+ atomic_inc_unchecked(&vcc->stats->rx);
35271
35272 return;
35273 }
35274@@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
35275 __net_timestamp(skb);
35276
35277 vcc->push(vcc, skb);
35278- atomic_inc(&vcc->stats->rx);
35279+ atomic_inc_unchecked(&vcc->stats->rx);
35280
35281 if (skb->truesize > SAR_FB_SIZE_3)
35282 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
35283@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
35284 if (vcc->qos.aal != ATM_AAL0) {
35285 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
35286 card->name, vpi, vci);
35287- atomic_inc(&vcc->stats->rx_drop);
35288+ atomic_inc_unchecked(&vcc->stats->rx_drop);
35289 goto drop;
35290 }
35291
35292 if ((sb = dev_alloc_skb(64)) == NULL) {
35293 printk("%s: Can't allocate buffers for AAL0.\n",
35294 card->name);
35295- atomic_inc(&vcc->stats->rx_err);
35296+ atomic_inc_unchecked(&vcc->stats->rx_err);
35297 goto drop;
35298 }
35299
35300@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
35301 ATM_SKB(sb)->vcc = vcc;
35302 __net_timestamp(sb);
35303 vcc->push(vcc, sb);
35304- atomic_inc(&vcc->stats->rx);
35305+ atomic_inc_unchecked(&vcc->stats->rx);
35306
35307 drop:
35308 skb_pull(queue, 64);
35309@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
35310
35311 if (vc == NULL) {
35312 printk("%s: NULL connection in send().\n", card->name);
35313- atomic_inc(&vcc->stats->tx_err);
35314+ atomic_inc_unchecked(&vcc->stats->tx_err);
35315 dev_kfree_skb(skb);
35316 return -EINVAL;
35317 }
35318 if (!test_bit(VCF_TX, &vc->flags)) {
35319 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
35320- atomic_inc(&vcc->stats->tx_err);
35321+ atomic_inc_unchecked(&vcc->stats->tx_err);
35322 dev_kfree_skb(skb);
35323 return -EINVAL;
35324 }
35325@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
35326 break;
35327 default:
35328 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
35329- atomic_inc(&vcc->stats->tx_err);
35330+ atomic_inc_unchecked(&vcc->stats->tx_err);
35331 dev_kfree_skb(skb);
35332 return -EINVAL;
35333 }
35334
35335 if (skb_shinfo(skb)->nr_frags != 0) {
35336 printk("%s: No scatter-gather yet.\n", card->name);
35337- atomic_inc(&vcc->stats->tx_err);
35338+ atomic_inc_unchecked(&vcc->stats->tx_err);
35339 dev_kfree_skb(skb);
35340 return -EINVAL;
35341 }
35342@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
35343
35344 err = queue_skb(card, vc, skb, oam);
35345 if (err) {
35346- atomic_inc(&vcc->stats->tx_err);
35347+ atomic_inc_unchecked(&vcc->stats->tx_err);
35348 dev_kfree_skb(skb);
35349 return err;
35350 }
35351@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
35352 skb = dev_alloc_skb(64);
35353 if (!skb) {
35354 printk("%s: Out of memory in send_oam().\n", card->name);
35355- atomic_inc(&vcc->stats->tx_err);
35356+ atomic_inc_unchecked(&vcc->stats->tx_err);
35357 return -ENOMEM;
35358 }
35359 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
35360diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
35361index 4217f29..88f547a 100644
35362--- a/drivers/atm/iphase.c
35363+++ b/drivers/atm/iphase.c
35364@@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
35365 status = (u_short) (buf_desc_ptr->desc_mode);
35366 if (status & (RX_CER | RX_PTE | RX_OFL))
35367 {
35368- atomic_inc(&vcc->stats->rx_err);
35369+ atomic_inc_unchecked(&vcc->stats->rx_err);
35370 IF_ERR(printk("IA: bad packet, dropping it");)
35371 if (status & RX_CER) {
35372 IF_ERR(printk(" cause: packet CRC error\n");)
35373@@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
35374 len = dma_addr - buf_addr;
35375 if (len > iadev->rx_buf_sz) {
35376 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
35377- atomic_inc(&vcc->stats->rx_err);
35378+ atomic_inc_unchecked(&vcc->stats->rx_err);
35379 goto out_free_desc;
35380 }
35381
35382@@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
35383 ia_vcc = INPH_IA_VCC(vcc);
35384 if (ia_vcc == NULL)
35385 {
35386- atomic_inc(&vcc->stats->rx_err);
35387+ atomic_inc_unchecked(&vcc->stats->rx_err);
35388 atm_return(vcc, skb->truesize);
35389 dev_kfree_skb_any(skb);
35390 goto INCR_DLE;
35391@@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
35392 if ((length > iadev->rx_buf_sz) || (length >
35393 (skb->len - sizeof(struct cpcs_trailer))))
35394 {
35395- atomic_inc(&vcc->stats->rx_err);
35396+ atomic_inc_unchecked(&vcc->stats->rx_err);
35397 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
35398 length, skb->len);)
35399 atm_return(vcc, skb->truesize);
35400@@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
35401
35402 IF_RX(printk("rx_dle_intr: skb push");)
35403 vcc->push(vcc,skb);
35404- atomic_inc(&vcc->stats->rx);
35405+ atomic_inc_unchecked(&vcc->stats->rx);
35406 iadev->rx_pkt_cnt++;
35407 }
35408 INCR_DLE:
35409@@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
35410 {
35411 struct k_sonet_stats *stats;
35412 stats = &PRIV(_ia_dev[board])->sonet_stats;
35413- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
35414- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
35415- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
35416- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
35417- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
35418- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
35419- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
35420- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
35421- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
35422+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
35423+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
35424+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
35425+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
35426+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
35427+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
35428+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
35429+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
35430+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
35431 }
35432 ia_cmds.status = 0;
35433 break;
35434@@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
35435 if ((desc == 0) || (desc > iadev->num_tx_desc))
35436 {
35437 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
35438- atomic_inc(&vcc->stats->tx);
35439+ atomic_inc_unchecked(&vcc->stats->tx);
35440 if (vcc->pop)
35441 vcc->pop(vcc, skb);
35442 else
35443@@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
35444 ATM_DESC(skb) = vcc->vci;
35445 skb_queue_tail(&iadev->tx_dma_q, skb);
35446
35447- atomic_inc(&vcc->stats->tx);
35448+ atomic_inc_unchecked(&vcc->stats->tx);
35449 iadev->tx_pkt_cnt++;
35450 /* Increment transaction counter */
35451 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
35452
35453 #if 0
35454 /* add flow control logic */
35455- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
35456+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
35457 if (iavcc->vc_desc_cnt > 10) {
35458 vcc->tx_quota = vcc->tx_quota * 3 / 4;
35459 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
35460diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
35461index fa7d701..1e404c7 100644
35462--- a/drivers/atm/lanai.c
35463+++ b/drivers/atm/lanai.c
35464@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
35465 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
35466 lanai_endtx(lanai, lvcc);
35467 lanai_free_skb(lvcc->tx.atmvcc, skb);
35468- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
35469+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
35470 }
35471
35472 /* Try to fill the buffer - don't call unless there is backlog */
35473@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
35474 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
35475 __net_timestamp(skb);
35476 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
35477- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
35478+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
35479 out:
35480 lvcc->rx.buf.ptr = end;
35481 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
35482@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
35483 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
35484 "vcc %d\n", lanai->number, (unsigned int) s, vci);
35485 lanai->stats.service_rxnotaal5++;
35486- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
35487+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
35488 return 0;
35489 }
35490 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
35491@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
35492 int bytes;
35493 read_unlock(&vcc_sklist_lock);
35494 DPRINTK("got trashed rx pdu on vci %d\n", vci);
35495- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
35496+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
35497 lvcc->stats.x.aal5.service_trash++;
35498 bytes = (SERVICE_GET_END(s) * 16) -
35499 (((unsigned long) lvcc->rx.buf.ptr) -
35500@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
35501 }
35502 if (s & SERVICE_STREAM) {
35503 read_unlock(&vcc_sklist_lock);
35504- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
35505+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
35506 lvcc->stats.x.aal5.service_stream++;
35507 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
35508 "PDU on VCI %d!\n", lanai->number, vci);
35509@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
35510 return 0;
35511 }
35512 DPRINTK("got rx crc error on vci %d\n", vci);
35513- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
35514+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
35515 lvcc->stats.x.aal5.service_rxcrc++;
35516 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
35517 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
35518diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
35519index 6587dc2..149833d 100644
35520--- a/drivers/atm/nicstar.c
35521+++ b/drivers/atm/nicstar.c
35522@@ -1641,7 +1641,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
35523 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
35524 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
35525 card->index);
35526- atomic_inc(&vcc->stats->tx_err);
35527+ atomic_inc_unchecked(&vcc->stats->tx_err);
35528 dev_kfree_skb_any(skb);
35529 return -EINVAL;
35530 }
35531@@ -1649,7 +1649,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
35532 if (!vc->tx) {
35533 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
35534 card->index);
35535- atomic_inc(&vcc->stats->tx_err);
35536+ atomic_inc_unchecked(&vcc->stats->tx_err);
35537 dev_kfree_skb_any(skb);
35538 return -EINVAL;
35539 }
35540@@ -1657,14 +1657,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
35541 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
35542 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
35543 card->index);
35544- atomic_inc(&vcc->stats->tx_err);
35545+ atomic_inc_unchecked(&vcc->stats->tx_err);
35546 dev_kfree_skb_any(skb);
35547 return -EINVAL;
35548 }
35549
35550 if (skb_shinfo(skb)->nr_frags != 0) {
35551 printk("nicstar%d: No scatter-gather yet.\n", card->index);
35552- atomic_inc(&vcc->stats->tx_err);
35553+ atomic_inc_unchecked(&vcc->stats->tx_err);
35554 dev_kfree_skb_any(skb);
35555 return -EINVAL;
35556 }
35557@@ -1712,11 +1712,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
35558 }
35559
35560 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
35561- atomic_inc(&vcc->stats->tx_err);
35562+ atomic_inc_unchecked(&vcc->stats->tx_err);
35563 dev_kfree_skb_any(skb);
35564 return -EIO;
35565 }
35566- atomic_inc(&vcc->stats->tx);
35567+ atomic_inc_unchecked(&vcc->stats->tx);
35568
35569 return 0;
35570 }
35571@@ -2033,14 +2033,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
35572 printk
35573 ("nicstar%d: Can't allocate buffers for aal0.\n",
35574 card->index);
35575- atomic_add(i, &vcc->stats->rx_drop);
35576+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
35577 break;
35578 }
35579 if (!atm_charge(vcc, sb->truesize)) {
35580 RXPRINTK
35581 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
35582 card->index);
35583- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
35584+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
35585 dev_kfree_skb_any(sb);
35586 break;
35587 }
35588@@ -2055,7 +2055,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
35589 ATM_SKB(sb)->vcc = vcc;
35590 __net_timestamp(sb);
35591 vcc->push(vcc, sb);
35592- atomic_inc(&vcc->stats->rx);
35593+ atomic_inc_unchecked(&vcc->stats->rx);
35594 cell += ATM_CELL_PAYLOAD;
35595 }
35596
35597@@ -2072,7 +2072,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
35598 if (iovb == NULL) {
35599 printk("nicstar%d: Out of iovec buffers.\n",
35600 card->index);
35601- atomic_inc(&vcc->stats->rx_drop);
35602+ atomic_inc_unchecked(&vcc->stats->rx_drop);
35603 recycle_rx_buf(card, skb);
35604 return;
35605 }
35606@@ -2096,7 +2096,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
35607 small or large buffer itself. */
35608 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
35609 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
35610- atomic_inc(&vcc->stats->rx_err);
35611+ atomic_inc_unchecked(&vcc->stats->rx_err);
35612 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
35613 NS_MAX_IOVECS);
35614 NS_PRV_IOVCNT(iovb) = 0;
35615@@ -2116,7 +2116,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
35616 ("nicstar%d: Expected a small buffer, and this is not one.\n",
35617 card->index);
35618 which_list(card, skb);
35619- atomic_inc(&vcc->stats->rx_err);
35620+ atomic_inc_unchecked(&vcc->stats->rx_err);
35621 recycle_rx_buf(card, skb);
35622 vc->rx_iov = NULL;
35623 recycle_iov_buf(card, iovb);
35624@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
35625 ("nicstar%d: Expected a large buffer, and this is not one.\n",
35626 card->index);
35627 which_list(card, skb);
35628- atomic_inc(&vcc->stats->rx_err);
35629+ atomic_inc_unchecked(&vcc->stats->rx_err);
35630 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
35631 NS_PRV_IOVCNT(iovb));
35632 vc->rx_iov = NULL;
35633@@ -2152,7 +2152,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
35634 printk(" - PDU size mismatch.\n");
35635 else
35636 printk(".\n");
35637- atomic_inc(&vcc->stats->rx_err);
35638+ atomic_inc_unchecked(&vcc->stats->rx_err);
35639 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
35640 NS_PRV_IOVCNT(iovb));
35641 vc->rx_iov = NULL;
35642@@ -2166,7 +2166,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
35643 /* skb points to a small buffer */
35644 if (!atm_charge(vcc, skb->truesize)) {
35645 push_rxbufs(card, skb);
35646- atomic_inc(&vcc->stats->rx_drop);
35647+ atomic_inc_unchecked(&vcc->stats->rx_drop);
35648 } else {
35649 skb_put(skb, len);
35650 dequeue_sm_buf(card, skb);
35651@@ -2176,7 +2176,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
35652 ATM_SKB(skb)->vcc = vcc;
35653 __net_timestamp(skb);
35654 vcc->push(vcc, skb);
35655- atomic_inc(&vcc->stats->rx);
35656+ atomic_inc_unchecked(&vcc->stats->rx);
35657 }
35658 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
35659 struct sk_buff *sb;
35660@@ -2187,7 +2187,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
35661 if (len <= NS_SMBUFSIZE) {
35662 if (!atm_charge(vcc, sb->truesize)) {
35663 push_rxbufs(card, sb);
35664- atomic_inc(&vcc->stats->rx_drop);
35665+ atomic_inc_unchecked(&vcc->stats->rx_drop);
35666 } else {
35667 skb_put(sb, len);
35668 dequeue_sm_buf(card, sb);
35669@@ -2197,7 +2197,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
35670 ATM_SKB(sb)->vcc = vcc;
35671 __net_timestamp(sb);
35672 vcc->push(vcc, sb);
35673- atomic_inc(&vcc->stats->rx);
35674+ atomic_inc_unchecked(&vcc->stats->rx);
35675 }
35676
35677 push_rxbufs(card, skb);
35678@@ -2206,7 +2206,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
35679
35680 if (!atm_charge(vcc, skb->truesize)) {
35681 push_rxbufs(card, skb);
35682- atomic_inc(&vcc->stats->rx_drop);
35683+ atomic_inc_unchecked(&vcc->stats->rx_drop);
35684 } else {
35685 dequeue_lg_buf(card, skb);
35686 #ifdef NS_USE_DESTRUCTORS
35687@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
35688 ATM_SKB(skb)->vcc = vcc;
35689 __net_timestamp(skb);
35690 vcc->push(vcc, skb);
35691- atomic_inc(&vcc->stats->rx);
35692+ atomic_inc_unchecked(&vcc->stats->rx);
35693 }
35694
35695 push_rxbufs(card, sb);
35696@@ -2240,7 +2240,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
35697 printk
35698 ("nicstar%d: Out of huge buffers.\n",
35699 card->index);
35700- atomic_inc(&vcc->stats->rx_drop);
35701+ atomic_inc_unchecked(&vcc->stats->rx_drop);
35702 recycle_iovec_rx_bufs(card,
35703 (struct iovec *)
35704 iovb->data,
35705@@ -2291,7 +2291,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
35706 card->hbpool.count++;
35707 } else
35708 dev_kfree_skb_any(hb);
35709- atomic_inc(&vcc->stats->rx_drop);
35710+ atomic_inc_unchecked(&vcc->stats->rx_drop);
35711 } else {
35712 /* Copy the small buffer to the huge buffer */
35713 sb = (struct sk_buff *)iov->iov_base;
35714@@ -2328,7 +2328,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
35715 #endif /* NS_USE_DESTRUCTORS */
35716 __net_timestamp(hb);
35717 vcc->push(vcc, hb);
35718- atomic_inc(&vcc->stats->rx);
35719+ atomic_inc_unchecked(&vcc->stats->rx);
35720 }
35721 }
35722
35723diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
35724index 32784d1..4a8434a 100644
35725--- a/drivers/atm/solos-pci.c
35726+++ b/drivers/atm/solos-pci.c
35727@@ -838,7 +838,7 @@ void solos_bh(unsigned long card_arg)
35728 }
35729 atm_charge(vcc, skb->truesize);
35730 vcc->push(vcc, skb);
35731- atomic_inc(&vcc->stats->rx);
35732+ atomic_inc_unchecked(&vcc->stats->rx);
35733 break;
35734
35735 case PKT_STATUS:
35736@@ -1116,7 +1116,7 @@ static uint32_t fpga_tx(struct solos_card *card)
35737 vcc = SKB_CB(oldskb)->vcc;
35738
35739 if (vcc) {
35740- atomic_inc(&vcc->stats->tx);
35741+ atomic_inc_unchecked(&vcc->stats->tx);
35742 solos_pop(vcc, oldskb);
35743 } else {
35744 dev_kfree_skb_irq(oldskb);
35745diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
35746index 0215934..ce9f5b1 100644
35747--- a/drivers/atm/suni.c
35748+++ b/drivers/atm/suni.c
35749@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
35750
35751
35752 #define ADD_LIMITED(s,v) \
35753- atomic_add((v),&stats->s); \
35754- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
35755+ atomic_add_unchecked((v),&stats->s); \
35756+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
35757
35758
35759 static void suni_hz(unsigned long from_timer)
35760diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
35761index 5120a96..e2572bd 100644
35762--- a/drivers/atm/uPD98402.c
35763+++ b/drivers/atm/uPD98402.c
35764@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
35765 struct sonet_stats tmp;
35766 int error = 0;
35767
35768- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
35769+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
35770 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
35771 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
35772 if (zero && !error) {
35773@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
35774
35775
35776 #define ADD_LIMITED(s,v) \
35777- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
35778- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
35779- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
35780+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
35781+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
35782+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
35783
35784
35785 static void stat_event(struct atm_dev *dev)
35786@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
35787 if (reason & uPD98402_INT_PFM) stat_event(dev);
35788 if (reason & uPD98402_INT_PCO) {
35789 (void) GET(PCOCR); /* clear interrupt cause */
35790- atomic_add(GET(HECCT),
35791+ atomic_add_unchecked(GET(HECCT),
35792 &PRIV(dev)->sonet_stats.uncorr_hcs);
35793 }
35794 if ((reason & uPD98402_INT_RFO) &&
35795@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
35796 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
35797 uPD98402_INT_LOS),PIMR); /* enable them */
35798 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
35799- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
35800- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
35801- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
35802+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
35803+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
35804+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
35805 return 0;
35806 }
35807
35808diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
35809index 969c3c2..9b72956 100644
35810--- a/drivers/atm/zatm.c
35811+++ b/drivers/atm/zatm.c
35812@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
35813 }
35814 if (!size) {
35815 dev_kfree_skb_irq(skb);
35816- if (vcc) atomic_inc(&vcc->stats->rx_err);
35817+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
35818 continue;
35819 }
35820 if (!atm_charge(vcc,skb->truesize)) {
35821@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
35822 skb->len = size;
35823 ATM_SKB(skb)->vcc = vcc;
35824 vcc->push(vcc,skb);
35825- atomic_inc(&vcc->stats->rx);
35826+ atomic_inc_unchecked(&vcc->stats->rx);
35827 }
35828 zout(pos & 0xffff,MTA(mbx));
35829 #if 0 /* probably a stupid idea */
35830@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
35831 skb_queue_head(&zatm_vcc->backlog,skb);
35832 break;
35833 }
35834- atomic_inc(&vcc->stats->tx);
35835+ atomic_inc_unchecked(&vcc->stats->tx);
35836 wake_up(&zatm_vcc->tx_wait);
35837 }
35838
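The ATM driver hunks above all make one substitution: device statistics move from atomic_inc()/atomic_add() to the *_unchecked variants, and the same change recurs throughout the patch (wakeup event counters, DRBD epoch sizes, IPMI stats). Under PaX's REFCOUNT hardening, ordinary atomic_t arithmetic is instrumented to trap on overflow so a wrapped reference count cannot be exploited; counters that are mere statistics and may legitimately wrap are retyped atomic_unchecked_t to opt out. A minimal userspace sketch of the distinction, with invented names (the real versions are per-arch atomic instructions):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { int counter; } atomic_sketch_t;

/* "Checked" add: traps on overflow, the way PaX REFCOUNT instruments atomic_t. */
static void atomic_add_checked(int v, atomic_sketch_t *a)
{
        int r;

        if (__builtin_add_overflow(a->counter, v, &r)) {
                fprintf(stderr, "refcount overflow, aborting\n");
                abort();        /* the kernel would kill the offender instead */
        }
        a->counter = r;
}

/* "Unchecked" add: wrapping is tolerated, suitable for statistics. */
static void atomic_add_unchecked_sketch(int v, atomic_sketch_t *a)
{
        (void)__builtin_add_overflow(a->counter, v, &a->counter);
}

int main(void)
{
        atomic_sketch_t rx = { INT_MAX };

        atomic_add_unchecked_sketch(1, &rx);    /* wraps quietly */
        printf("rx wrapped to %d\n", rx.counter);

        rx.counter = INT_MAX;
        atomic_add_checked(1, &rx);             /* aborts */
        return 0;
}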
35839diff --git a/drivers/base/attribute_container.c b/drivers/base/attribute_container.c
35840index d78b204..ecc1929 100644
35841--- a/drivers/base/attribute_container.c
35842+++ b/drivers/base/attribute_container.c
35843@@ -167,7 +167,7 @@ attribute_container_add_device(struct device *dev,
35844 ic->classdev.parent = get_device(dev);
35845 ic->classdev.class = cont->class;
35846 cont->class->dev_release = attribute_container_release;
35847- dev_set_name(&ic->classdev, dev_name(dev));
35848+ dev_set_name(&ic->classdev, "%s", dev_name(dev));
35849 if (fn)
35850 fn(cont, dev, &ic->classdev);
35851 else
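This one-line change is a format-string fix: dev_name(dev) was being passed as the format argument of dev_set_name(), so a device name containing '%' would be parsed for conversions. Routing it through "%s" makes the name pure data. The patch applies the same fix in several later hunks (sprintf() in rtpm_status_show(), printk() in intel-rng, scnprintf() in cdrom_print_info(), device_create() in drivers/char/mem.c). The hazard in miniature:

#include <stdio.h>

int main(void)
{
        const char *name = "weird%sdevice";     /* externally influenced */

        /* Unsafe shape: printf(name); -- the '%s' would be interpreted
         * and read a nonexistent variadic argument (undefined behaviour). */

        /* Safe shape, as in the patched call sites: */
        printf("%s\n", name);
        return 0;
}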
35852diff --git a/drivers/base/bus.c b/drivers/base/bus.c
35853index d414331..b4dd4ba 100644
35854--- a/drivers/base/bus.c
35855+++ b/drivers/base/bus.c
35856@@ -1163,7 +1163,7 @@ int subsys_interface_register(struct subsys_interface *sif)
35857 return -EINVAL;
35858
35859 mutex_lock(&subsys->p->mutex);
35860- list_add_tail(&sif->node, &subsys->p->interfaces);
35861+ pax_list_add_tail((struct list_head *)&sif->node, &subsys->p->interfaces);
35862 if (sif->add_dev) {
35863 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
35864 while ((dev = subsys_dev_iter_next(&iter)))
35865@@ -1188,7 +1188,7 @@ void subsys_interface_unregister(struct subsys_interface *sif)
35866 subsys = sif->subsys;
35867
35868 mutex_lock(&subsys->p->mutex);
35869- list_del_init(&sif->node);
35870+ pax_list_del_init((struct list_head *)&sif->node);
35871 if (sif->remove_dev) {
35872 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
35873 while ((dev = subsys_dev_iter_next(&iter)))
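The list_add_tail()/list_del_init() calls become pax_list_* helpers because elsewhere this patch constifies structures like struct subsys_interface: the object as a whole becomes read-only after init, and only its embedded list node still needs to change. The pax_list_* helpers perform the same pointer surgery inside a brief kernel write window (the mechanism is sketched after the cdrom.c hunk further down), and the (struct list_head *) cast discards the const. The same conversion is applied to syscore_ops below. A sketch of the list half, with illustrative names:

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

/* An interface object the hardening would otherwise make read-only;
 * only the embedded node mutates after registration. */
struct iface_sketch {
        const char *name;
        struct list_head node;
};

/* Same body as list_add_tail(); in the kernel this would be bracketed
 * by pax_open_kernel()/pax_close_kernel(). */
static void pax_list_add_tail_sketch(struct list_head *new,
                                     struct list_head *head)
{
        new->prev = head->prev;
        new->next = head;
        head->prev->next = new;
        head->prev = new;
}

int main(void)
{
        struct list_head interfaces = { &interfaces, &interfaces };
        struct iface_sketch sif = { "demo", { 0, 0 } };

        pax_list_add_tail_sketch(&sif.node, &interfaces);
        printf("registered: %d\n", interfaces.next == &sif.node);
        return 0;
}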
35874diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
35875index 7413d06..79155fa 100644
35876--- a/drivers/base/devtmpfs.c
35877+++ b/drivers/base/devtmpfs.c
35878@@ -354,7 +354,7 @@ int devtmpfs_mount(const char *mntdir)
35879 if (!thread)
35880 return 0;
35881
35882- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
35883+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
35884 if (err)
35885 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
35886 else
35887@@ -380,11 +380,11 @@ static int devtmpfsd(void *p)
35888 *err = sys_unshare(CLONE_NEWNS);
35889 if (*err)
35890 goto out;
35891- *err = sys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options);
35892+ *err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)"/", (char __force_user *)"devtmpfs", MS_SILENT, (char __force_user *)options);
35893 if (*err)
35894 goto out;
35895- sys_chdir("/.."); /* will traverse into overmounted root */
35896- sys_chroot(".");
35897+ sys_chdir((char __force_user *)"/.."); /* will traverse into overmounted root */
35898+ sys_chroot((char __force_user *)".");
35899 complete(&setup_done);
35900 while (1) {
35901 spin_lock(&req_lock);
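devtmpfs invokes sys_mount(), sys_chdir() and sys_chroot() from kernel context with kernel string literals, while the syscall prototypes take __user pointers. With grsecurity/PaX's stricter user/kernel address-space separation (and under sparse checking), every such deliberate crossing is spelled out with a __force_user cast; the same annotation shows up later for loop.c's f_op->write() call and the DRBD setsockopt path. Roughly how the annotations look to the sparse checker (a sketch; the real macros live in compiler.h):

/* Under sparse these attributes enforce address-space separation;
 * in a normal build they compile away. */
#ifdef __CHECKER__
# define __user         __attribute__((noderef, address_space(1)))
# define __force        __attribute__((force))
#else
# define __user
# define __force
#endif
#define __force_user    __force __user

/* Stub standing in for the syscall's __user-typed prototype. */
static long sys_mount_sketch(char __user *dev_name, char __user *dir_name)
{
        (void)dev_name; (void)dir_name;
        return 0;
}

int main(void)
{
        /* Kernel pointers handed to a __user parameter on purpose:
         * the cast documents (and silences) the crossing. */
        return (int)sys_mount_sketch((char __force_user *)"devtmpfs",
                                     (char __force_user *)"/dev");
}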
35902diff --git a/drivers/base/node.c b/drivers/base/node.c
35903index 7616a77c..8f57f51 100644
35904--- a/drivers/base/node.c
35905+++ b/drivers/base/node.c
35906@@ -626,7 +626,7 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
35907 struct node_attr {
35908 struct device_attribute attr;
35909 enum node_states state;
35910-};
35911+} __do_const;
35912
35913 static ssize_t show_node_state(struct device *dev,
35914 struct device_attribute *attr, char *buf)
35915diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
35916index 7072404..76dcebd 100644
35917--- a/drivers/base/power/domain.c
35918+++ b/drivers/base/power/domain.c
35919@@ -1850,7 +1850,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
35920 {
35921 struct cpuidle_driver *cpuidle_drv;
35922 struct gpd_cpu_data *cpu_data;
35923- struct cpuidle_state *idle_state;
35924+ cpuidle_state_no_const *idle_state;
35925 int ret = 0;
35926
35927 if (IS_ERR_OR_NULL(genpd) || state < 0)
35928@@ -1918,7 +1918,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state)
35929 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
35930 {
35931 struct gpd_cpu_data *cpu_data;
35932- struct cpuidle_state *idle_state;
35933+ cpuidle_state_no_const *idle_state;
35934 int ret = 0;
35935
35936 if (IS_ERR_OR_NULL(genpd))
35937diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
35938index a53ebd2..8f73eeb 100644
35939--- a/drivers/base/power/sysfs.c
35940+++ b/drivers/base/power/sysfs.c
35941@@ -185,7 +185,7 @@ static ssize_t rtpm_status_show(struct device *dev,
35942 return -EIO;
35943 }
35944 }
35945- return sprintf(buf, p);
35946+ return sprintf(buf, "%s", p);
35947 }
35948
35949 static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
35950diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
35951index 79715e7..df06b3b 100644
35952--- a/drivers/base/power/wakeup.c
35953+++ b/drivers/base/power/wakeup.c
35954@@ -29,14 +29,14 @@ bool events_check_enabled __read_mostly;
35955 * They need to be modified together atomically, so it's better to use one
35956 * atomic variable to hold them both.
35957 */
35958-static atomic_t combined_event_count = ATOMIC_INIT(0);
35959+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
35960
35961 #define IN_PROGRESS_BITS (sizeof(int) * 4)
35962 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
35963
35964 static void split_counters(unsigned int *cnt, unsigned int *inpr)
35965 {
35966- unsigned int comb = atomic_read(&combined_event_count);
35967+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
35968
35969 *cnt = (comb >> IN_PROGRESS_BITS);
35970 *inpr = comb & MAX_IN_PROGRESS;
35971@@ -395,7 +395,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
35972 ws->start_prevent_time = ws->last_time;
35973
35974 /* Increment the counter of events in progress. */
35975- cec = atomic_inc_return(&combined_event_count);
35976+ cec = atomic_inc_return_unchecked(&combined_event_count);
35977
35978 trace_wakeup_source_activate(ws->name, cec);
35979 }
35980@@ -521,7 +521,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
35981 * Increment the counter of registered wakeup events and decrement the
35982 * couter of wakeup events in progress simultaneously.
35983 */
35984- cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
35985+ cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
35986 trace_wakeup_source_deactivate(ws->name, cec);
35987
35988 split_counters(&cnt, &inpr);
35989diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
35990index e8d11b6..7b1b36f 100644
35991--- a/drivers/base/syscore.c
35992+++ b/drivers/base/syscore.c
35993@@ -21,7 +21,7 @@ static DEFINE_MUTEX(syscore_ops_lock);
35994 void register_syscore_ops(struct syscore_ops *ops)
35995 {
35996 mutex_lock(&syscore_ops_lock);
35997- list_add_tail(&ops->node, &syscore_ops_list);
35998+ pax_list_add_tail((struct list_head *)&ops->node, &syscore_ops_list);
35999 mutex_unlock(&syscore_ops_lock);
36000 }
36001 EXPORT_SYMBOL_GPL(register_syscore_ops);
36002@@ -33,7 +33,7 @@ EXPORT_SYMBOL_GPL(register_syscore_ops);
36003 void unregister_syscore_ops(struct syscore_ops *ops)
36004 {
36005 mutex_lock(&syscore_ops_lock);
36006- list_del(&ops->node);
36007+ pax_list_del((struct list_head *)&ops->node);
36008 mutex_unlock(&syscore_ops_lock);
36009 }
36010 EXPORT_SYMBOL_GPL(unregister_syscore_ops);
36011diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
36012index 62b6c2c..4a11354 100644
36013--- a/drivers/block/cciss.c
36014+++ b/drivers/block/cciss.c
36015@@ -1189,6 +1189,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
36016 int err;
36017 u32 cp;
36018
36019+ memset(&arg64, 0, sizeof(arg64));
36020+
36021 err = 0;
36022 err |=
36023 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
36024@@ -3010,7 +3012,7 @@ static void start_io(ctlr_info_t *h)
36025 while (!list_empty(&h->reqQ)) {
36026 c = list_entry(h->reqQ.next, CommandList_struct, list);
36027 /* can't do anything if fifo is full */
36028- if ((h->access.fifo_full(h))) {
36029+ if ((h->access->fifo_full(h))) {
36030 dev_warn(&h->pdev->dev, "fifo full\n");
36031 break;
36032 }
36033@@ -3020,7 +3022,7 @@ static void start_io(ctlr_info_t *h)
36034 h->Qdepth--;
36035
36036 /* Tell the controller execute command */
36037- h->access.submit_command(h, c);
36038+ h->access->submit_command(h, c);
36039
36040 /* Put job onto the completed Q */
36041 addQ(&h->cmpQ, c);
36042@@ -3446,17 +3448,17 @@ startio:
36043
36044 static inline unsigned long get_next_completion(ctlr_info_t *h)
36045 {
36046- return h->access.command_completed(h);
36047+ return h->access->command_completed(h);
36048 }
36049
36050 static inline int interrupt_pending(ctlr_info_t *h)
36051 {
36052- return h->access.intr_pending(h);
36053+ return h->access->intr_pending(h);
36054 }
36055
36056 static inline long interrupt_not_for_us(ctlr_info_t *h)
36057 {
36058- return ((h->access.intr_pending(h) == 0) ||
36059+ return ((h->access->intr_pending(h) == 0) ||
36060 (h->interrupts_enabled == 0));
36061 }
36062
36063@@ -3489,7 +3491,7 @@ static inline u32 next_command(ctlr_info_t *h)
36064 u32 a;
36065
36066 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
36067- return h->access.command_completed(h);
36068+ return h->access->command_completed(h);
36069
36070 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
36071 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
36072@@ -4046,7 +4048,7 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
36073 trans_support & CFGTBL_Trans_use_short_tags);
36074
36075 /* Change the access methods to the performant access methods */
36076- h->access = SA5_performant_access;
36077+ h->access = &SA5_performant_access;
36078 h->transMethod = CFGTBL_Trans_Performant;
36079
36080 return;
36081@@ -4319,7 +4321,7 @@ static int cciss_pci_init(ctlr_info_t *h)
36082 if (prod_index < 0)
36083 return -ENODEV;
36084 h->product_name = products[prod_index].product_name;
36085- h->access = *(products[prod_index].access);
36086+ h->access = products[prod_index].access;
36087
36088 if (cciss_board_disabled(h)) {
36089 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
36090@@ -5051,7 +5053,7 @@ reinit_after_soft_reset:
36091 }
36092
36093 /* make sure the board interrupts are off */
36094- h->access.set_intr_mask(h, CCISS_INTR_OFF);
36095+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
36096 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
36097 if (rc)
36098 goto clean2;
36099@@ -5101,7 +5103,7 @@ reinit_after_soft_reset:
36100 * fake ones to scoop up any residual completions.
36101 */
36102 spin_lock_irqsave(&h->lock, flags);
36103- h->access.set_intr_mask(h, CCISS_INTR_OFF);
36104+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
36105 spin_unlock_irqrestore(&h->lock, flags);
36106 free_irq(h->intr[h->intr_mode], h);
36107 rc = cciss_request_irq(h, cciss_msix_discard_completions,
36108@@ -5121,9 +5123,9 @@ reinit_after_soft_reset:
36109 dev_info(&h->pdev->dev, "Board READY.\n");
36110 dev_info(&h->pdev->dev,
36111 "Waiting for stale completions to drain.\n");
36112- h->access.set_intr_mask(h, CCISS_INTR_ON);
36113+ h->access->set_intr_mask(h, CCISS_INTR_ON);
36114 msleep(10000);
36115- h->access.set_intr_mask(h, CCISS_INTR_OFF);
36116+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
36117
36118 rc = controller_reset_failed(h->cfgtable);
36119 if (rc)
36120@@ -5146,7 +5148,7 @@ reinit_after_soft_reset:
36121 cciss_scsi_setup(h);
36122
36123 /* Turn the interrupts on so we can service requests */
36124- h->access.set_intr_mask(h, CCISS_INTR_ON);
36125+ h->access->set_intr_mask(h, CCISS_INTR_ON);
36126
36127 /* Get the firmware version */
36128 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
36129@@ -5218,7 +5220,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
36130 kfree(flush_buf);
36131 if (return_code != IO_OK)
36132 dev_warn(&h->pdev->dev, "Error flushing cache\n");
36133- h->access.set_intr_mask(h, CCISS_INTR_OFF);
36134+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
36135 free_irq(h->intr[h->intr_mode], h);
36136 }
36137
36138diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
36139index 7fda30e..eb5dfe0 100644
36140--- a/drivers/block/cciss.h
36141+++ b/drivers/block/cciss.h
36142@@ -101,7 +101,7 @@ struct ctlr_info
36143 /* information about each logical volume */
36144 drive_info_struct *drv[CISS_MAX_LUN];
36145
36146- struct access_method access;
36147+ struct access_method *access;
36148
36149 /* queue and queue Info */
36150 struct list_head reqQ;
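All of the cciss (and, below, cpqarray) churn follows from the one-line change in the header just above: struct ctlr_info used to embed a writable copy of struct access_method, a table of controller function pointers, and now holds a pointer to it, so every h->access.fn() becomes h->access->fn(). With a single shared table per controller type, the table itself can live in read-only memory where its function pointers cannot be overwritten. The shape of the change, with invented names:

#include <stdio.h>

struct access_method_sketch {
        void (*submit_command)(int ctlr);
        int  (*intr_pending)(int ctlr);
};

static void sa5_submit(int ctlr) { printf("submit on ctlr %d\n", ctlr); }
static int  sa5_pending(int ctlr) { (void)ctlr; return 0; }

/* One shared table in read-only storage... */
static const struct access_method_sketch SA5_access = {
        .submit_command = sa5_submit,
        .intr_pending   = sa5_pending,
};

struct ctlr_info_sketch {
        /* ...instead of a writable per-controller copy. */
        const struct access_method_sketch *access;
};

int main(void)
{
        struct ctlr_info_sketch h = { .access = &SA5_access };

        h.access->submit_command(0);    /* was h.access.submit_command(0) */
        return h.access->intr_pending(0);
}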
36151diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
36152index 639d26b..fd6ad1f 100644
36153--- a/drivers/block/cpqarray.c
36154+++ b/drivers/block/cpqarray.c
36155@@ -404,7 +404,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
36156 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
36157 goto Enomem4;
36158 }
36159- hba[i]->access.set_intr_mask(hba[i], 0);
36160+ hba[i]->access->set_intr_mask(hba[i], 0);
36161 if (request_irq(hba[i]->intr, do_ida_intr,
36162 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
36163 {
36164@@ -459,7 +459,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
36165 add_timer(&hba[i]->timer);
36166
36167 /* Enable IRQ now that spinlock and rate limit timer are set up */
36168- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
36169+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
36170
36171 for(j=0; j<NWD; j++) {
36172 struct gendisk *disk = ida_gendisk[i][j];
36173@@ -694,7 +694,7 @@ DBGINFO(
36174 for(i=0; i<NR_PRODUCTS; i++) {
36175 if (board_id == products[i].board_id) {
36176 c->product_name = products[i].product_name;
36177- c->access = *(products[i].access);
36178+ c->access = products[i].access;
36179 break;
36180 }
36181 }
36182@@ -792,7 +792,7 @@ static int cpqarray_eisa_detect(void)
36183 hba[ctlr]->intr = intr;
36184 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
36185 hba[ctlr]->product_name = products[j].product_name;
36186- hba[ctlr]->access = *(products[j].access);
36187+ hba[ctlr]->access = products[j].access;
36188 hba[ctlr]->ctlr = ctlr;
36189 hba[ctlr]->board_id = board_id;
36190 hba[ctlr]->pci_dev = NULL; /* not PCI */
36191@@ -978,7 +978,7 @@ static void start_io(ctlr_info_t *h)
36192
36193 while((c = h->reqQ) != NULL) {
36194 /* Can't do anything if we're busy */
36195- if (h->access.fifo_full(h) == 0)
36196+ if (h->access->fifo_full(h) == 0)
36197 return;
36198
36199 /* Get the first entry from the request Q */
36200@@ -986,7 +986,7 @@ static void start_io(ctlr_info_t *h)
36201 h->Qdepth--;
36202
36203 /* Tell the controller to do our bidding */
36204- h->access.submit_command(h, c);
36205+ h->access->submit_command(h, c);
36206
36207 /* Get onto the completion Q */
36208 addQ(&h->cmpQ, c);
36209@@ -1048,7 +1048,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
36210 unsigned long flags;
36211 __u32 a,a1;
36212
36213- istat = h->access.intr_pending(h);
36214+ istat = h->access->intr_pending(h);
36215 /* Is this interrupt for us? */
36216 if (istat == 0)
36217 return IRQ_NONE;
36218@@ -1059,7 +1059,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
36219 */
36220 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
36221 if (istat & FIFO_NOT_EMPTY) {
36222- while((a = h->access.command_completed(h))) {
36223+ while((a = h->access->command_completed(h))) {
36224 a1 = a; a &= ~3;
36225 if ((c = h->cmpQ) == NULL)
36226 {
36227@@ -1193,6 +1193,7 @@ out_passthru:
36228 ida_pci_info_struct pciinfo;
36229
36230 if (!arg) return -EINVAL;
36231+ memset(&pciinfo, 0, sizeof(pciinfo));
36232 pciinfo.bus = host->pci_dev->bus->number;
36233 pciinfo.dev_fn = host->pci_dev->devfn;
36234 pciinfo.board_id = host->board_id;
36235@@ -1447,11 +1448,11 @@ static int sendcmd(
36236 /*
36237 * Disable interrupt
36238 */
36239- info_p->access.set_intr_mask(info_p, 0);
36240+ info_p->access->set_intr_mask(info_p, 0);
36241 /* Make sure there is room in the command FIFO */
36242 /* Actually it should be completely empty at this time. */
36243 for (i = 200000; i > 0; i--) {
36244- temp = info_p->access.fifo_full(info_p);
36245+ temp = info_p->access->fifo_full(info_p);
36246 if (temp != 0) {
36247 break;
36248 }
36249@@ -1464,7 +1465,7 @@ DBG(
36250 /*
36251 * Send the cmd
36252 */
36253- info_p->access.submit_command(info_p, c);
36254+ info_p->access->submit_command(info_p, c);
36255 complete = pollcomplete(ctlr);
36256
36257 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
36258@@ -1547,9 +1548,9 @@ static int revalidate_allvol(ctlr_info_t *host)
36259 * we check the new geometry. Then turn interrupts back on when
36260 * we're done.
36261 */
36262- host->access.set_intr_mask(host, 0);
36263+ host->access->set_intr_mask(host, 0);
36264 getgeometry(ctlr);
36265- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
36266+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
36267
36268 for(i=0; i<NWD; i++) {
36269 struct gendisk *disk = ida_gendisk[ctlr][i];
36270@@ -1589,7 +1590,7 @@ static int pollcomplete(int ctlr)
36271 /* Wait (up to 2 seconds) for a command to complete */
36272
36273 for (i = 200000; i > 0; i--) {
36274- done = hba[ctlr]->access.command_completed(hba[ctlr]);
36275+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
36276 if (done == 0) {
36277 udelay(10); /* a short fixed delay */
36278 } else
36279diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
36280index be73e9d..7fbf140 100644
36281--- a/drivers/block/cpqarray.h
36282+++ b/drivers/block/cpqarray.h
36283@@ -99,7 +99,7 @@ struct ctlr_info {
36284 drv_info_t drv[NWD];
36285 struct proc_dir_entry *proc;
36286
36287- struct access_method access;
36288+ struct access_method *access;
36289
36290 cmdlist_t *reqQ;
36291 cmdlist_t *cmpQ;
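Besides the method-table conversion, the cciss and cpqarray hunks add a memset() before filling structures that are later copied to user space (arg64 in cciss_ioctl32_passthru(), pciinfo in the ida ioctl); genrtc and mwave receive the same treatment further down. Without the zeroing, compiler-inserted padding and any fields the handler leaves unset carry stale kernel stack bytes out through copy_to_user(), a classic infoleak. The pattern:

#include <string.h>

struct pci_info_sketch {
        unsigned char bus;
        /* three padding bytes here would leak stack contents */
        unsigned int  dev_fn;
        unsigned int  board_id;
};

/* Hypothetical ioctl-style handler filling a struct destined for
 * copy_to_user(). */
static void fill_pci_info(struct pci_info_sketch *out)
{
        memset(out, 0, sizeof(*out));   /* clears padding and unset fields */
        out->bus      = 1;
        out->dev_fn   = 0;
        out->board_id = 0x0e114040;
}

int main(void)
{
        struct pci_info_sketch info;

        fill_pci_info(&info);
        /* copy_to_user(arg, &info, sizeof(info)) now leaks nothing */
        return 0;
}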
36292diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
36293index f943aac..99bfd19 100644
36294--- a/drivers/block/drbd/drbd_int.h
36295+++ b/drivers/block/drbd/drbd_int.h
36296@@ -582,7 +582,7 @@ struct drbd_epoch {
36297 struct drbd_tconn *tconn;
36298 struct list_head list;
36299 unsigned int barrier_nr;
36300- atomic_t epoch_size; /* increased on every request added. */
36301+ atomic_unchecked_t epoch_size; /* increased on every request added. */
36302 atomic_t active; /* increased on every req. added, and dec on every finished. */
36303 unsigned long flags;
36304 };
36305@@ -1021,7 +1021,7 @@ struct drbd_conf {
36306 unsigned int al_tr_number;
36307 int al_tr_cycle;
36308 wait_queue_head_t seq_wait;
36309- atomic_t packet_seq;
36310+ atomic_unchecked_t packet_seq;
36311 unsigned int peer_seq;
36312 spinlock_t peer_seq_lock;
36313 unsigned int minor;
36314@@ -1562,7 +1562,7 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
36315 char __user *uoptval;
36316 int err;
36317
36318- uoptval = (char __user __force *)optval;
36319+ uoptval = (char __force_user *)optval;
36320
36321 set_fs(KERNEL_DS);
36322 if (level == SOL_SOCKET)
36323diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
36324index a5dca6a..bb27967 100644
36325--- a/drivers/block/drbd/drbd_main.c
36326+++ b/drivers/block/drbd/drbd_main.c
36327@@ -1317,7 +1317,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
36328 p->sector = sector;
36329 p->block_id = block_id;
36330 p->blksize = blksize;
36331- p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
36332+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&mdev->packet_seq));
36333 return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
36334 }
36335
36336@@ -1619,7 +1619,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
36337 return -EIO;
36338 p->sector = cpu_to_be64(req->i.sector);
36339 p->block_id = (unsigned long)req;
36340- p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
36341+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&mdev->packet_seq));
36342 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
36343 if (mdev->state.conn >= C_SYNC_SOURCE &&
36344 mdev->state.conn <= C_PAUSED_SYNC_T)
36345@@ -2574,8 +2574,8 @@ void conn_destroy(struct kref *kref)
36346 {
36347 struct drbd_tconn *tconn = container_of(kref, struct drbd_tconn, kref);
36348
36349- if (atomic_read(&tconn->current_epoch->epoch_size) != 0)
36350- conn_err(tconn, "epoch_size:%d\n", atomic_read(&tconn->current_epoch->epoch_size));
36351+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size) != 0)
36352+ conn_err(tconn, "epoch_size:%d\n", atomic_read_unchecked(&tconn->current_epoch->epoch_size));
36353 kfree(tconn->current_epoch);
36354
36355 idr_destroy(&tconn->volumes);
36356diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
36357index 4222aff..1f79506 100644
36358--- a/drivers/block/drbd/drbd_receiver.c
36359+++ b/drivers/block/drbd/drbd_receiver.c
36360@@ -834,7 +834,7 @@ int drbd_connected(struct drbd_conf *mdev)
36361 {
36362 int err;
36363
36364- atomic_set(&mdev->packet_seq, 0);
36365+ atomic_set_unchecked(&mdev->packet_seq, 0);
36366 mdev->peer_seq = 0;
36367
36368 mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ?
36369@@ -1193,7 +1193,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
36370 do {
36371 next_epoch = NULL;
36372
36373- epoch_size = atomic_read(&epoch->epoch_size);
36374+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
36375
36376 switch (ev & ~EV_CLEANUP) {
36377 case EV_PUT:
36378@@ -1233,7 +1233,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
36379 rv = FE_DESTROYED;
36380 } else {
36381 epoch->flags = 0;
36382- atomic_set(&epoch->epoch_size, 0);
36383+ atomic_set_unchecked(&epoch->epoch_size, 0);
36384 /* atomic_set(&epoch->active, 0); is already zero */
36385 if (rv == FE_STILL_LIVE)
36386 rv = FE_RECYCLED;
36387@@ -1451,7 +1451,7 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
36388 conn_wait_active_ee_empty(tconn);
36389 drbd_flush(tconn);
36390
36391- if (atomic_read(&tconn->current_epoch->epoch_size)) {
36392+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size)) {
36393 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
36394 if (epoch)
36395 break;
36396@@ -1464,11 +1464,11 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
36397 }
36398
36399 epoch->flags = 0;
36400- atomic_set(&epoch->epoch_size, 0);
36401+ atomic_set_unchecked(&epoch->epoch_size, 0);
36402 atomic_set(&epoch->active, 0);
36403
36404 spin_lock(&tconn->epoch_lock);
36405- if (atomic_read(&tconn->current_epoch->epoch_size)) {
36406+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size)) {
36407 list_add(&epoch->list, &tconn->current_epoch->list);
36408 tconn->current_epoch = epoch;
36409 tconn->epochs++;
36410@@ -2172,7 +2172,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
36411
36412 err = wait_for_and_update_peer_seq(mdev, peer_seq);
36413 drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
36414- atomic_inc(&tconn->current_epoch->epoch_size);
36415+ atomic_inc_unchecked(&tconn->current_epoch->epoch_size);
36416 err2 = drbd_drain_block(mdev, pi->size);
36417 if (!err)
36418 err = err2;
36419@@ -2206,7 +2206,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
36420
36421 spin_lock(&tconn->epoch_lock);
36422 peer_req->epoch = tconn->current_epoch;
36423- atomic_inc(&peer_req->epoch->epoch_size);
36424+ atomic_inc_unchecked(&peer_req->epoch->epoch_size);
36425 atomic_inc(&peer_req->epoch->active);
36426 spin_unlock(&tconn->epoch_lock);
36427
36428@@ -4347,7 +4347,7 @@ struct data_cmd {
36429 int expect_payload;
36430 size_t pkt_size;
36431 int (*fn)(struct drbd_tconn *, struct packet_info *);
36432-};
36433+} __do_const;
36434
36435 static struct data_cmd drbd_cmd_handler[] = {
36436 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
36437@@ -4467,7 +4467,7 @@ static void conn_disconnect(struct drbd_tconn *tconn)
36438 if (!list_empty(&tconn->current_epoch->list))
36439 conn_err(tconn, "ASSERTION FAILED: tconn->current_epoch->list not empty\n");
36440 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
36441- atomic_set(&tconn->current_epoch->epoch_size, 0);
36442+ atomic_set_unchecked(&tconn->current_epoch->epoch_size, 0);
36443 tconn->send.seen_any_write_yet = false;
36444
36445 conn_info(tconn, "Connection closed\n");
36446@@ -5223,7 +5223,7 @@ static int tconn_finish_peer_reqs(struct drbd_tconn *tconn)
36447 struct asender_cmd {
36448 size_t pkt_size;
36449 int (*fn)(struct drbd_tconn *tconn, struct packet_info *);
36450-};
36451+} __do_const;
36452
36453 static struct asender_cmd asender_tbl[] = {
36454 [P_PING] = { 0, got_Ping },
36455diff --git a/drivers/block/loop.c b/drivers/block/loop.c
36456index d92d50f..a7e9d97 100644
36457--- a/drivers/block/loop.c
36458+++ b/drivers/block/loop.c
36459@@ -232,7 +232,7 @@ static int __do_lo_send_write(struct file *file,
36460
36461 file_start_write(file);
36462 set_fs(get_ds());
36463- bw = file->f_op->write(file, buf, len, &pos);
36464+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
36465 set_fs(old_fs);
36466 file_end_write(file);
36467 if (likely(bw == len))
36468diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
36469index f5d0ea1..c62380a 100644
36470--- a/drivers/block/pktcdvd.c
36471+++ b/drivers/block/pktcdvd.c
36472@@ -84,7 +84,7 @@
36473 #define MAX_SPEED 0xffff
36474
36475 #define ZONE(sector, pd) (((sector) + (pd)->offset) & \
36476- ~(sector_t)((pd)->settings.size - 1))
36477+ ~(sector_t)((pd)->settings.size - 1UL))
36478
36479 static DEFINE_MUTEX(pktcdvd_mutex);
36480 static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
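The pktcdvd hunk changes only the width of a constant. settings.size is a 32-bit field, so `size - 1` was computed in 32-bit arithmetic and widened to sector_t only afterwards; with `1UL` the subtraction happens at unsigned long width. Presumably this keeps the ZONE() mask arithmetic (and grsecurity's size_overflow integer tracking) consistent at 64 bits, and the two forms really do differ at the boundary:

#include <stdio.h>

int main(void)
{
        unsigned int size = 0;  /* boundary case for the mask */

        /* 32-bit subtraction, widened afterwards: */
        unsigned long long m32 = ~(unsigned long long)(size - 1);
        /* subtraction promoted to unsigned long (on 64-bit targets): */
        unsigned long long m64 = ~(unsigned long long)(size - 1UL);

        printf("%016llx vs %016llx\n", m32, m64);       /* differ */
        return 0;
}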
36481diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
36482index 8a3aff7..d7538c2 100644
36483--- a/drivers/cdrom/cdrom.c
36484+++ b/drivers/cdrom/cdrom.c
36485@@ -416,7 +416,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
36486 ENSURE(reset, CDC_RESET);
36487 ENSURE(generic_packet, CDC_GENERIC_PACKET);
36488 cdi->mc_flags = 0;
36489- cdo->n_minors = 0;
36490 cdi->options = CDO_USE_FFLAGS;
36491
36492 if (autoclose==1 && CDROM_CAN(CDC_CLOSE_TRAY))
36493@@ -436,8 +435,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
36494 else
36495 cdi->cdda_method = CDDA_OLD;
36496
36497- if (!cdo->generic_packet)
36498- cdo->generic_packet = cdrom_dummy_generic_packet;
36499+ if (!cdo->generic_packet) {
36500+ pax_open_kernel();
36501+ *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
36502+ pax_close_kernel();
36503+ }
36504
36505 cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
36506 mutex_lock(&cdrom_mutex);
36507@@ -458,7 +460,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
36508 if (cdi->exit)
36509 cdi->exit(cdi);
36510
36511- cdi->ops->n_minors--;
36512 cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
36513 }
36514
36515@@ -2107,7 +2108,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
36516 */
36517 nr = nframes;
36518 do {
36519- cgc.buffer = kmalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
36520+ cgc.buffer = kzalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
36521 if (cgc.buffer)
36522 break;
36523
36524@@ -3429,7 +3430,7 @@ static int cdrom_print_info(const char *header, int val, char *info,
36525 struct cdrom_device_info *cdi;
36526 int ret;
36527
36528- ret = scnprintf(info + *pos, max_size - *pos, header);
36529+ ret = scnprintf(info + *pos, max_size - *pos, "%s", header);
36530 if (!ret)
36531 return 1;
36532
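register_cdrom() still needs to install a fallback ->generic_packet handler, but the ops tables this patch constifies are no longer writable through a plain assignment; pax_open_kernel()/pax_close_kernel() open a short write window (on x86 by flipping CR0.WP or the page protections) and the *(void **)& cast strips the const for that single store. The same idiom appears below in the socfpga clock and acpi-cpufreq hunks. A rough userspace analogue using mprotect(), assuming 4 KiB pages and Linux semantics for mprotect() on static storage (names are illustrative, not the PaX implementation):

#include <stdio.h>
#include <sys/mman.h>

struct cdrom_ops_sketch {
        int (*generic_packet)(void);
};

static int dummy_generic_packet(void) { return 0; }

/* Page-aligned stand-in for a table living in read-only memory. */
static struct cdrom_ops_sketch cdo __attribute__((aligned(4096)));

static void open_kernel_sketch(void *p)         /* ~ pax_open_kernel() */
{
        mprotect((void *)((unsigned long)p & ~4095UL), 4096,
                 PROT_READ | PROT_WRITE);
}

static void close_kernel_sketch(void *p)        /* ~ pax_close_kernel() */
{
        mprotect((void *)((unsigned long)p & ~4095UL), 4096, PROT_READ);
}

int main(void)
{
        close_kernel_sketch(&cdo);      /* simulate post-init read-only */

        if (!cdo.generic_packet) {
                open_kernel_sketch(&cdo);
                cdo.generic_packet = dummy_generic_packet; /* guarded store */
                close_kernel_sketch(&cdo);
        }
        printf("handler installed: %d\n", cdo.generic_packet != NULL);
        return 0;
}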
36533diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
36534index 4afcb65..a68a32d 100644
36535--- a/drivers/cdrom/gdrom.c
36536+++ b/drivers/cdrom/gdrom.c
36537@@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = {
36538 .audio_ioctl = gdrom_audio_ioctl,
36539 .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
36540 CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
36541- .n_minors = 1,
36542 };
36543
36544 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
36545diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
36546index 3bb6fa3..34013fb 100644
36547--- a/drivers/char/Kconfig
36548+++ b/drivers/char/Kconfig
36549@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
36550
36551 config DEVKMEM
36552 bool "/dev/kmem virtual device support"
36553- default y
36554+ default n
36555+ depends on !GRKERNSEC_KMEM
36556 help
36557 Say Y here if you want to support the /dev/kmem device. The
36558 /dev/kmem device is rarely used, but can be used for certain
36559@@ -582,6 +583,7 @@ config DEVPORT
36560 bool
36561 depends on !M68K
36562 depends on ISA || PCI
36563+ depends on !GRKERNSEC_KMEM
36564 default y
36565
36566 source "drivers/s390/char/Kconfig"
36567diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
36568index a48e05b..6bac831 100644
36569--- a/drivers/char/agp/compat_ioctl.c
36570+++ b/drivers/char/agp/compat_ioctl.c
36571@@ -108,7 +108,7 @@ static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user
36572 return -ENOMEM;
36573 }
36574
36575- if (copy_from_user(usegment, (void __user *) ureserve.seg_list,
36576+ if (copy_from_user(usegment, (void __force_user *) ureserve.seg_list,
36577 sizeof(*usegment) * ureserve.seg_count)) {
36578 kfree(usegment);
36579 kfree(ksegment);
36580diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
36581index 2e04433..771f2cc 100644
36582--- a/drivers/char/agp/frontend.c
36583+++ b/drivers/char/agp/frontend.c
36584@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
36585 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
36586 return -EFAULT;
36587
36588- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
36589+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
36590 return -EFAULT;
36591
36592 client = agp_find_client_by_pid(reserve.pid);
36593@@ -847,7 +847,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
36594 if (segment == NULL)
36595 return -ENOMEM;
36596
36597- if (copy_from_user(segment, (void __user *) reserve.seg_list,
36598+ if (copy_from_user(segment, (void __force_user *) reserve.seg_list,
36599 sizeof(struct agp_segment) * reserve.seg_count)) {
36600 kfree(segment);
36601 return -EFAULT;
36602diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
36603index 4f94375..413694e 100644
36604--- a/drivers/char/genrtc.c
36605+++ b/drivers/char/genrtc.c
36606@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
36607 switch (cmd) {
36608
36609 case RTC_PLL_GET:
36610+ memset(&pll, 0, sizeof(pll));
36611 if (get_rtc_pll(&pll))
36612 return -EINVAL;
36613 else
36614diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
36615index d784650..e8bfd69 100644
36616--- a/drivers/char/hpet.c
36617+++ b/drivers/char/hpet.c
36618@@ -559,7 +559,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
36619 }
36620
36621 static int
36622-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
36623+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
36624 struct hpet_info *info)
36625 {
36626 struct hpet_timer __iomem *timer;
36627diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c
36628index 86fe45c..c0ea948 100644
36629--- a/drivers/char/hw_random/intel-rng.c
36630+++ b/drivers/char/hw_random/intel-rng.c
36631@@ -314,7 +314,7 @@ PFX "RNG, try using the 'no_fwh_detect' option.\n";
36632
36633 if (no_fwh_detect)
36634 return -ENODEV;
36635- printk(warning);
36636+ printk("%s", warning);
36637 return -EBUSY;
36638 }
36639
36640diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
36641index 4445fa1..7c6de37 100644
36642--- a/drivers/char/ipmi/ipmi_msghandler.c
36643+++ b/drivers/char/ipmi/ipmi_msghandler.c
36644@@ -420,7 +420,7 @@ struct ipmi_smi {
36645 struct proc_dir_entry *proc_dir;
36646 char proc_dir_name[10];
36647
36648- atomic_t stats[IPMI_NUM_STATS];
36649+ atomic_unchecked_t stats[IPMI_NUM_STATS];
36650
36651 /*
36652 * run_to_completion duplicate of smb_info, smi_info
36653@@ -453,9 +453,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
36654
36655
36656 #define ipmi_inc_stat(intf, stat) \
36657- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
36658+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
36659 #define ipmi_get_stat(intf, stat) \
36660- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
36661+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
36662
36663 static int is_lan_addr(struct ipmi_addr *addr)
36664 {
36665@@ -2883,7 +2883,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
36666 INIT_LIST_HEAD(&intf->cmd_rcvrs);
36667 init_waitqueue_head(&intf->waitq);
36668 for (i = 0; i < IPMI_NUM_STATS; i++)
36669- atomic_set(&intf->stats[i], 0);
36670+ atomic_set_unchecked(&intf->stats[i], 0);
36671
36672 intf->proc_dir = NULL;
36673
36674diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
36675index af4b23f..79806fc 100644
36676--- a/drivers/char/ipmi/ipmi_si_intf.c
36677+++ b/drivers/char/ipmi/ipmi_si_intf.c
36678@@ -275,7 +275,7 @@ struct smi_info {
36679 unsigned char slave_addr;
36680
36681 /* Counters and things for the proc filesystem. */
36682- atomic_t stats[SI_NUM_STATS];
36683+ atomic_unchecked_t stats[SI_NUM_STATS];
36684
36685 struct task_struct *thread;
36686
36687@@ -284,9 +284,9 @@ struct smi_info {
36688 };
36689
36690 #define smi_inc_stat(smi, stat) \
36691- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
36692+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
36693 #define smi_get_stat(smi, stat) \
36694- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
36695+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
36696
36697 #define SI_MAX_PARMS 4
36698
36699@@ -3258,7 +3258,7 @@ static int try_smi_init(struct smi_info *new_smi)
36700 atomic_set(&new_smi->req_events, 0);
36701 new_smi->run_to_completion = 0;
36702 for (i = 0; i < SI_NUM_STATS; i++)
36703- atomic_set(&new_smi->stats[i], 0);
36704+ atomic_set_unchecked(&new_smi->stats[i], 0);
36705
36706 new_smi->interrupt_disabled = 1;
36707 atomic_set(&new_smi->stop_operation, 0);
36708diff --git a/drivers/char/mem.c b/drivers/char/mem.c
36709index 1ccbe94..6ad651a 100644
36710--- a/drivers/char/mem.c
36711+++ b/drivers/char/mem.c
36712@@ -18,6 +18,7 @@
36713 #include <linux/raw.h>
36714 #include <linux/tty.h>
36715 #include <linux/capability.h>
36716+#include <linux/security.h>
36717 #include <linux/ptrace.h>
36718 #include <linux/device.h>
36719 #include <linux/highmem.h>
36720@@ -38,6 +39,10 @@
36721
36722 #define DEVPORT_MINOR 4
36723
36724+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
36725+extern const struct file_operations grsec_fops;
36726+#endif
36727+
36728 static inline unsigned long size_inside_page(unsigned long start,
36729 unsigned long size)
36730 {
36731@@ -69,9 +74,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
36732
36733 while (cursor < to) {
36734 if (!devmem_is_allowed(pfn)) {
36735+#ifdef CONFIG_GRKERNSEC_KMEM
36736+ gr_handle_mem_readwrite(from, to);
36737+#else
36738 printk(KERN_INFO
36739 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
36740 current->comm, from, to);
36741+#endif
36742 return 0;
36743 }
36744 cursor += PAGE_SIZE;
36745@@ -79,6 +88,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
36746 }
36747 return 1;
36748 }
36749+#elif defined(CONFIG_GRKERNSEC_KMEM)
36750+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
36751+{
36752+ return 0;
36753+}
36754 #else
36755 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
36756 {
36757@@ -121,6 +135,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
36758
36759 while (count > 0) {
36760 unsigned long remaining;
36761+ char *temp;
36762
36763 sz = size_inside_page(p, count);
36764
36765@@ -136,7 +151,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
36766 if (!ptr)
36767 return -EFAULT;
36768
36769- remaining = copy_to_user(buf, ptr, sz);
36770+#ifdef CONFIG_PAX_USERCOPY
36771+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
36772+ if (!temp) {
36773+ unxlate_dev_mem_ptr(p, ptr);
36774+ return -ENOMEM;
36775+ }
36776+ memcpy(temp, ptr, sz);
36777+#else
36778+ temp = ptr;
36779+#endif
36780+
36781+ remaining = copy_to_user(buf, temp, sz);
36782+
36783+#ifdef CONFIG_PAX_USERCOPY
36784+ kfree(temp);
36785+#endif
36786+
36787 unxlate_dev_mem_ptr(p, ptr);
36788 if (remaining)
36789 return -EFAULT;
36790@@ -379,7 +410,7 @@ static ssize_t read_oldmem(struct file *file, char __user *buf,
36791 else
36792 csize = count;
36793
36794- rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
36795+ rc = copy_oldmem_page(pfn, (char __force_kernel *)buf, csize, offset, 1);
36796 if (rc < 0)
36797 return rc;
36798 buf += csize;
36799@@ -399,9 +430,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
36800 size_t count, loff_t *ppos)
36801 {
36802 unsigned long p = *ppos;
36803- ssize_t low_count, read, sz;
36804+ ssize_t low_count, read, sz, err = 0;
36805 char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
36806- int err = 0;
36807
36808 read = 0;
36809 if (p < (unsigned long) high_memory) {
36810@@ -423,6 +453,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
36811 }
36812 #endif
36813 while (low_count > 0) {
36814+ char *temp;
36815+
36816 sz = size_inside_page(p, low_count);
36817
36818 /*
36819@@ -432,7 +464,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
36820 */
36821 kbuf = xlate_dev_kmem_ptr((char *)p);
36822
36823- if (copy_to_user(buf, kbuf, sz))
36824+#ifdef CONFIG_PAX_USERCOPY
36825+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
36826+ if (!temp)
36827+ return -ENOMEM;
36828+ memcpy(temp, kbuf, sz);
36829+#else
36830+ temp = kbuf;
36831+#endif
36832+
36833+ err = copy_to_user(buf, temp, sz);
36834+
36835+#ifdef CONFIG_PAX_USERCOPY
36836+ kfree(temp);
36837+#endif
36838+
36839+ if (err)
36840 return -EFAULT;
36841 buf += sz;
36842 p += sz;
36843@@ -869,6 +916,9 @@ static const struct memdev {
36844 #ifdef CONFIG_CRASH_DUMP
36845 [12] = { "oldmem", 0, &oldmem_fops, NULL },
36846 #endif
36847+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
36848+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
36849+#endif
36850 };
36851
36852 static int memory_open(struct inode *inode, struct file *filp)
36853@@ -940,7 +990,7 @@ static int __init chr_dev_init(void)
36854 continue;
36855
36856 device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
36857- NULL, devlist[minor].name);
36858+ NULL, "%s", devlist[minor].name);
36859 }
36860
36861 return tty_init();
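The read_mem()/read_kmem() changes wrap the outbound copy in a bounce buffer when CONFIG_PAX_USERCOPY is enabled: rather than copy_to_user() straight from an arbitrary kernel mapping, the bytes are first staged into a freshly kmalloc'd buffer (allocated with GFP_USERCOPY) so the USERCOPY checks can validate the copy against the bounds of one known heap object. Related hardening nearby: nvram_read() and random's extract_entropy_user() gain explicit length guards before their copies. The shape of the bounce, with malloc() standing in for kmalloc():

#include <stdlib.h>
#include <string.h>

/* Stand-in: PAX_USERCOPY would verify 'src' sits wholly inside one
 * heap object before permitting the copy. */
static long copy_to_user_sketch(void *dst, const void *src, size_t n)
{
        memcpy(dst, src, n);
        return 0;
}

static long bounce_read(void *ubuf, const void *kptr, size_t sz)
{
        long err;
        char *temp = malloc(sz);  /* kmalloc(sz, GFP_KERNEL|GFP_USERCOPY) */

        if (!temp)
                return -1;              /* -ENOMEM in the real code */
        memcpy(temp, kptr, sz);         /* arbitrary mapping -> heap object */
        err = copy_to_user_sketch(ubuf, temp, sz);
        free(temp);
        return err;
}

int main(void)
{
        char src[8] = "devmem", dst[8];

        return (int)bounce_read(dst, src, sizeof(src));
}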
36862diff --git a/drivers/char/mwave/tp3780i.c b/drivers/char/mwave/tp3780i.c
36863index c689697..04e6d6a2 100644
36864--- a/drivers/char/mwave/tp3780i.c
36865+++ b/drivers/char/mwave/tp3780i.c
36866@@ -479,6 +479,7 @@ int tp3780I_QueryAbilities(THINKPAD_BD_DATA * pBDData, MW_ABILITIES * pAbilities
36867 PRINTK_2(TRACE_TP3780I,
36868 "tp3780i::tp3780I_QueryAbilities entry pBDData %p\n", pBDData);
36869
36870+ memset(pAbilities, 0, sizeof(*pAbilities));
36871 /* fill out standard constant fields */
36872 pAbilities->instr_per_sec = pBDData->rDspSettings.uIps;
36873 pAbilities->data_size = pBDData->rDspSettings.uDStoreSize;
36874diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
36875index 9df78e2..01ba9ae 100644
36876--- a/drivers/char/nvram.c
36877+++ b/drivers/char/nvram.c
36878@@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
36879
36880 spin_unlock_irq(&rtc_lock);
36881
36882- if (copy_to_user(buf, contents, tmp - contents))
36883+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
36884 return -EFAULT;
36885
36886 *ppos = i;
36887diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
36888index 5c5cc00..ac9edb7 100644
36889--- a/drivers/char/pcmcia/synclink_cs.c
36890+++ b/drivers/char/pcmcia/synclink_cs.c
36891@@ -2345,9 +2345,9 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
36892
36893 if (debug_level >= DEBUG_LEVEL_INFO)
36894 printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
36895- __FILE__, __LINE__, info->device_name, port->count);
36896+ __FILE__, __LINE__, info->device_name, atomic_read(&port->count));
36897
36898- WARN_ON(!port->count);
36899+ WARN_ON(!atomic_read(&port->count));
36900
36901 if (tty_port_close_start(port, tty, filp) == 0)
36902 goto cleanup;
36903@@ -2365,7 +2365,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
36904 cleanup:
36905 if (debug_level >= DEBUG_LEVEL_INFO)
36906 printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__, __LINE__,
36907- tty->driver->name, port->count);
36908+ tty->driver->name, atomic_read(&port->count));
36909 }
36910
36911 /* Wait until the transmitter is empty.
36912@@ -2507,7 +2507,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
36913
36914 if (debug_level >= DEBUG_LEVEL_INFO)
36915 printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
36916- __FILE__, __LINE__, tty->driver->name, port->count);
36917+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
36918
36919 /* If port is closing, signal caller to try again */
36920 if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING){
36921@@ -2527,11 +2527,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
36922 goto cleanup;
36923 }
36924 spin_lock(&port->lock);
36925- port->count++;
36926+ atomic_inc(&port->count);
36927 spin_unlock(&port->lock);
36928 spin_unlock_irqrestore(&info->netlock, flags);
36929
36930- if (port->count == 1) {
36931+ if (atomic_read(&port->count) == 1) {
36932 /* 1st open on this device, init hardware */
36933 retval = startup(info, tty);
36934 if (retval < 0)
36935@@ -3920,7 +3920,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
36936 unsigned short new_crctype;
36937
36938 /* return error if TTY interface open */
36939- if (info->port.count)
36940+ if (atomic_read(&info->port.count))
36941 return -EBUSY;
36942
36943 switch (encoding)
36944@@ -4024,7 +4024,7 @@ static int hdlcdev_open(struct net_device *dev)
36945
36946 /* arbitrate between network and tty opens */
36947 spin_lock_irqsave(&info->netlock, flags);
36948- if (info->port.count != 0 || info->netcount != 0) {
36949+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
36950 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
36951 spin_unlock_irqrestore(&info->netlock, flags);
36952 return -EBUSY;
36953@@ -4114,7 +4114,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
36954 printk("%s:hdlcdev_ioctl(%s)\n", __FILE__, dev->name);
36955
36956 /* return error if TTY interface open */
36957- if (info->port.count)
36958+ if (atomic_read(&info->port.count))
36959 return -EBUSY;
36960
36961 if (cmd != SIOCWANDEV)
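The synclink_cs conversion of port->count to atomic_t (and sonypi's open_count to local_t just below) targets counters that are written under a lock but read from paths that do not take it: the hdlcdev_* handlers test the open count while holding only netlock. Making the counter itself atomic keeps those lockless reads coherent. In miniature, with C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int open_count;   /* ~ the patched port->count */

static void device_open(void)
{
        if (atomic_fetch_add(&open_count, 1) == 0)
                puts("first open: init hardware");
}

/* e.g. hdlcdev_open(): checked without the port lock held */
static int tty_side_busy(void)
{
        return atomic_load(&open_count) != 0;
}

int main(void)
{
        device_open();
        printf("busy: %d\n", tty_side_busy());
        return 0;
}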
36962diff --git a/drivers/char/random.c b/drivers/char/random.c
36963index 35487e8..dac8bd1 100644
36964--- a/drivers/char/random.c
36965+++ b/drivers/char/random.c
36966@@ -272,8 +272,13 @@
36967 /*
36968 * Configuration information
36969 */
36970+#ifdef CONFIG_GRKERNSEC_RANDNET
36971+#define INPUT_POOL_WORDS 512
36972+#define OUTPUT_POOL_WORDS 128
36973+#else
36974 #define INPUT_POOL_WORDS 128
36975 #define OUTPUT_POOL_WORDS 32
36976+#endif
36977 #define SEC_XFER_SIZE 512
36978 #define EXTRACT_SIZE 10
36979
36980@@ -313,10 +318,17 @@ static struct poolinfo {
36981 int poolwords;
36982 int tap1, tap2, tap3, tap4, tap5;
36983 } poolinfo_table[] = {
36984+#ifdef CONFIG_GRKERNSEC_RANDNET
36985+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
36986+ { 512, 411, 308, 208, 104, 1 },
36987+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
36988+ { 128, 103, 76, 51, 25, 1 },
36989+#else
36990 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
36991 { 128, 103, 76, 51, 25, 1 },
36992 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
36993 { 32, 26, 20, 14, 7, 1 },
36994+#endif
36995 #if 0
36996 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
36997 { 2048, 1638, 1231, 819, 411, 1 },
36998@@ -524,8 +536,8 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
36999 input_rotate += i ? 7 : 14;
37000 }
37001
37002- ACCESS_ONCE(r->input_rotate) = input_rotate;
37003- ACCESS_ONCE(r->add_ptr) = i;
37004+ ACCESS_ONCE_RW(r->input_rotate) = input_rotate;
37005+ ACCESS_ONCE_RW(r->add_ptr) = i;
37006 smp_wmb();
37007
37008 if (out)
37009@@ -1032,7 +1044,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
37010
37011 extract_buf(r, tmp);
37012 i = min_t(int, nbytes, EXTRACT_SIZE);
37013- if (copy_to_user(buf, tmp, i)) {
37014+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
37015 ret = -EFAULT;
37016 break;
37017 }
37018@@ -1368,7 +1380,7 @@ EXPORT_SYMBOL(generate_random_uuid);
37019 #include <linux/sysctl.h>
37020
37021 static int min_read_thresh = 8, min_write_thresh;
37022-static int max_read_thresh = INPUT_POOL_WORDS * 32;
37023+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
37024 static int max_write_thresh = INPUT_POOL_WORDS * 32;
37025 static char sysctl_bootid[16];
37026
37027@@ -1384,7 +1396,7 @@ static char sysctl_bootid[16];
37028 static int proc_do_uuid(ctl_table *table, int write,
37029 void __user *buffer, size_t *lenp, loff_t *ppos)
37030 {
37031- ctl_table fake_table;
37032+ ctl_table_no_const fake_table;
37033 unsigned char buf[64], tmp_uuid[16], *uuid;
37034
37035 uuid = table->data;
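Under GRKERNSEC_RANDNET the input and output entropy pools are quadrupled (512 and 128 words) and a matching poolinfo row is added; as the comments in the table note, the tap offsets are the exponents of a primitive polynomial over GF(2), which is what guarantees the feedback walk covers the whole pool. A toy tap-based mixer using the { 128, 103, 76, 51, 25, 1 } row (deliberately not the kernel's twisted-GFSR implementation):

#include <stdint.h>
#include <stdio.h>

#define POOLWORDS 128
/* Tap offsets from the poolinfo row above (sketch only). */
static const int taps[] = { 103, 76, 51, 25, 1 };
static uint32_t pool[POOLWORDS];

static void mix_word_sketch(uint32_t in, int *pos)
{
        uint32_t w = in ^ pool[*pos];
        size_t i;

        for (i = 0; i < sizeof(taps) / sizeof(taps[0]); i++)
                w ^= pool[(*pos + taps[i]) % POOLWORDS];
        /* cheap diffusion in place of the kernel's twist table: */
        pool[*pos] = (w >> 3) ^ (w << 29);
        *pos = (*pos + POOLWORDS - 1) % POOLWORDS;
}

int main(void)
{
        int pos = 0;

        mix_word_sketch(0xdeadbeef, &pos);
        printf("pool[0] = %08x\n", pool[0]);
        return 0;
}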
37036diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
37037index bf2349db..5456d53 100644
37038--- a/drivers/char/sonypi.c
37039+++ b/drivers/char/sonypi.c
37040@@ -54,6 +54,7 @@
37041
37042 #include <asm/uaccess.h>
37043 #include <asm/io.h>
37044+#include <asm/local.h>
37045
37046 #include <linux/sonypi.h>
37047
37048@@ -490,7 +491,7 @@ static struct sonypi_device {
37049 spinlock_t fifo_lock;
37050 wait_queue_head_t fifo_proc_list;
37051 struct fasync_struct *fifo_async;
37052- int open_count;
37053+ local_t open_count;
37054 int model;
37055 struct input_dev *input_jog_dev;
37056 struct input_dev *input_key_dev;
37057@@ -897,7 +898,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
37058 static int sonypi_misc_release(struct inode *inode, struct file *file)
37059 {
37060 mutex_lock(&sonypi_device.lock);
37061- sonypi_device.open_count--;
37062+ local_dec(&sonypi_device.open_count);
37063 mutex_unlock(&sonypi_device.lock);
37064 return 0;
37065 }
37066@@ -906,9 +907,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
37067 {
37068 mutex_lock(&sonypi_device.lock);
37069 /* Flush input queue on first open */
37070- if (!sonypi_device.open_count)
37071+ if (!local_read(&sonypi_device.open_count))
37072 kfifo_reset(&sonypi_device.fifo);
37073- sonypi_device.open_count++;
37074+ local_inc(&sonypi_device.open_count);
37075 mutex_unlock(&sonypi_device.lock);
37076
37077 return 0;
37078diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
37079index 64420b3..5c40b56 100644
37080--- a/drivers/char/tpm/tpm_acpi.c
37081+++ b/drivers/char/tpm/tpm_acpi.c
37082@@ -98,11 +98,12 @@ int read_log(struct tpm_bios_log *log)
37083 virt = acpi_os_map_memory(start, len);
37084 if (!virt) {
37085 kfree(log->bios_event_log);
37086+ log->bios_event_log = NULL;
37087 printk("%s: ERROR - Unable to map memory\n", __func__);
37088 return -EIO;
37089 }
37090
37091- memcpy_fromio(log->bios_event_log, virt, len);
37092+ memcpy_fromio(log->bios_event_log, (const char __force_kernel *)virt, len);
37093
37094 acpi_os_unmap_memory(virt, len);
37095 return 0;
37096diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
37097index 84ddc55..1d32f1e 100644
37098--- a/drivers/char/tpm/tpm_eventlog.c
37099+++ b/drivers/char/tpm/tpm_eventlog.c
37100@@ -95,7 +95,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
37101 event = addr;
37102
37103 if ((event->event_type == 0 && event->event_size == 0) ||
37104- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
37105+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
37106 return NULL;
37107
37108 return addr;
37109@@ -120,7 +120,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
37110 return NULL;
37111
37112 if ((event->event_type == 0 && event->event_size == 0) ||
37113- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
37114+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
37115 return NULL;
37116
37117 (*pos)++;
37118@@ -213,7 +213,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
37119 int i;
37120
37121 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
37122- seq_putc(m, data[i]);
37123+ if (!seq_putc(m, data[i]))
37124+ return -EFAULT;
37125
37126 return 0;
37127 }
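The tpm_eventlog rewrite is an overflow-proof bounds check. event_size comes from the (untrusted) log, so `addr + sizeof(struct tcpa_event) + event_size >= limit` can wrap for huge values and let an out-of-bounds event pass; rearranging it as `event_size >= limit - addr - sizeof(...)` compares against the room actually remaining, keeping every intermediate value in range (given addr <= limit and space for the header, which the driver establishes first). In integer form:

#include <stdint.h>
#include <stdio.h>

/* Assumes base <= limit and limit - base >= hdr, as the caller checks. */
static int event_in_bounds(uint32_t base, uint32_t limit,
                           uint32_t hdr, uint32_t event_size)
{
        /* BAD:  base + hdr + event_size >= limit
         *       -> the sum wraps for large event_size and passes. */

        /* GOOD: compare the claimed size with the space that is left. */
        return event_size < limit - base - hdr;
}

int main(void)
{
        printf("%d\n", event_in_bounds(0x1000, 0x2000, 32, 64));         /* 1 */
        printf("%d\n", event_in_bounds(0x1000, 0x2000, 32, 0xffffff00)); /* 0 */
        return 0;
}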
37128diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
37129index fc45567..fa2a590 100644
37130--- a/drivers/char/virtio_console.c
37131+++ b/drivers/char/virtio_console.c
37132@@ -682,7 +682,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
37133 if (to_user) {
37134 ssize_t ret;
37135
37136- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
37137+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
37138 if (ret)
37139 return -EFAULT;
37140 } else {
37141@@ -785,7 +785,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
37142 if (!port_has_data(port) && !port->host_connected)
37143 return 0;
37144
37145- return fill_readbuf(port, ubuf, count, true);
37146+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
37147 }
37148
37149 static int wait_port_writable(struct port *port, bool nonblock)
37150diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
37151index a33f46f..a720eed 100644
37152--- a/drivers/clk/clk-composite.c
37153+++ b/drivers/clk/clk-composite.c
37154@@ -122,7 +122,7 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
37155 struct clk *clk;
37156 struct clk_init_data init;
37157 struct clk_composite *composite;
37158- struct clk_ops *clk_composite_ops;
37159+ clk_ops_no_const *clk_composite_ops;
37160
37161 composite = kzalloc(sizeof(*composite), GFP_KERNEL);
37162 if (!composite) {
37163diff --git a/drivers/clk/socfpga/clk.c b/drivers/clk/socfpga/clk.c
37164index bd11315..7f87098 100644
37165--- a/drivers/clk/socfpga/clk.c
37166+++ b/drivers/clk/socfpga/clk.c
37167@@ -22,6 +22,7 @@
37168 #include <linux/clk-provider.h>
37169 #include <linux/io.h>
37170 #include <linux/of.h>
37171+#include <asm/pgtable.h>
37172
37173 /* Clock Manager offsets */
37174 #define CLKMGR_CTRL 0x0
37175@@ -135,8 +136,10 @@ static __init struct clk *socfpga_clk_init(struct device_node *node,
37176 if (strcmp(clk_name, "main_pll") || strcmp(clk_name, "periph_pll") ||
37177 strcmp(clk_name, "sdram_pll")) {
37178 socfpga_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA;
37179- clk_pll_ops.enable = clk_gate_ops.enable;
37180- clk_pll_ops.disable = clk_gate_ops.disable;
37181+ pax_open_kernel();
37182+ *(void **)&clk_pll_ops.enable = clk_gate_ops.enable;
37183+ *(void **)&clk_pll_ops.disable = clk_gate_ops.disable;
37184+ pax_close_kernel();
37185 }
37186
37187 clk = clk_register(NULL, &socfpga_clk->hw.hw);
37188diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
37189index a2b2541..bc1e7ff 100644
37190--- a/drivers/clocksource/arm_arch_timer.c
37191+++ b/drivers/clocksource/arm_arch_timer.c
37192@@ -264,7 +264,7 @@ static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
37193 return NOTIFY_OK;
37194 }
37195
37196-static struct notifier_block arch_timer_cpu_nb __cpuinitdata = {
37197+static struct notifier_block arch_timer_cpu_nb = {
37198 .notifier_call = arch_timer_cpu_notify,
37199 };
37200
37201diff --git a/drivers/clocksource/bcm_kona_timer.c b/drivers/clocksource/bcm_kona_timer.c
37202index 350f493..489479e 100644
37203--- a/drivers/clocksource/bcm_kona_timer.c
37204+++ b/drivers/clocksource/bcm_kona_timer.c
37205@@ -199,7 +199,7 @@ static struct irqaction kona_timer_irq = {
37206 .handler = kona_timer_interrupt,
37207 };
37208
37209-static void __init kona_timer_init(void)
37210+static void __init kona_timer_init(struct device_node *np)
37211 {
37212 kona_timers_init();
37213 kona_timer_clockevents_init();
37214diff --git a/drivers/clocksource/metag_generic.c b/drivers/clocksource/metag_generic.c
37215index ade7513..069445f 100644
37216--- a/drivers/clocksource/metag_generic.c
37217+++ b/drivers/clocksource/metag_generic.c
37218@@ -169,7 +169,7 @@ static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
37219 return NOTIFY_OK;
37220 }
37221
37222-static struct notifier_block __cpuinitdata arch_timer_cpu_nb = {
37223+static struct notifier_block arch_timer_cpu_nb = {
37224 .notifier_call = arch_timer_cpu_notify,
37225 };
37226
37227diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
37228index edc089e..bc7c0bc 100644
37229--- a/drivers/cpufreq/acpi-cpufreq.c
37230+++ b/drivers/cpufreq/acpi-cpufreq.c
37231@@ -172,7 +172,7 @@ static ssize_t show_global_boost(struct kobject *kobj,
37232 return sprintf(buf, "%u\n", boost_enabled);
37233 }
37234
37235-static struct global_attr global_boost = __ATTR(boost, 0644,
37236+static global_attr_no_const global_boost = __ATTR(boost, 0644,
37237 show_global_boost,
37238 store_global_boost);
37239
37240@@ -705,8 +705,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
37241 data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
37242 per_cpu(acfreq_data, cpu) = data;
37243
37244- if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
37245- acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
37246+ if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
37247+ pax_open_kernel();
37248+ *(u8 *)&acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
37249+ pax_close_kernel();
37250+ }
37251
37252 result = acpi_processor_register_performance(data->acpi_data, cpu);
37253 if (result)
37254@@ -832,7 +835,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
37255 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
37256 break;
37257 case ACPI_ADR_SPACE_FIXED_HARDWARE:
37258- acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
37259+ pax_open_kernel();
37260+ *(void **)&acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
37261+ pax_close_kernel();
37262 policy->cur = get_cur_freq_on_cpu(cpu);
37263 break;
37264 default:
37265@@ -843,8 +848,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
37266 acpi_processor_notify_smm(THIS_MODULE);
37267
37268 /* Check for APERF/MPERF support in hardware */
37269- if (boot_cpu_has(X86_FEATURE_APERFMPERF))
37270- acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;
37271+ if (boot_cpu_has(X86_FEATURE_APERFMPERF)) {
37272+ pax_open_kernel();
37273+ *(void **)&acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;
37274+ pax_close_kernel();
37275+ }
37276
37277 pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
37278 for (i = 0; i < perf->state_count; i++)
37279diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
37280index 6485547..477033e 100644
37281--- a/drivers/cpufreq/cpufreq.c
37282+++ b/drivers/cpufreq/cpufreq.c
37283@@ -1854,7 +1854,7 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
37284 return NOTIFY_OK;
37285 }
37286
37287-static struct notifier_block __refdata cpufreq_cpu_notifier = {
37288+static struct notifier_block cpufreq_cpu_notifier = {
37289 .notifier_call = cpufreq_cpu_callback,
37290 };
37291
37292@@ -1886,8 +1886,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
37293
37294 pr_debug("trying to register driver %s\n", driver_data->name);
37295
37296- if (driver_data->setpolicy)
37297- driver_data->flags |= CPUFREQ_CONST_LOOPS;
37298+ if (driver_data->setpolicy) {
37299+ pax_open_kernel();
37300+ *(u8 *)&driver_data->flags |= CPUFREQ_CONST_LOOPS;
37301+ pax_close_kernel();
37302+ }
37303
37304 write_lock_irqsave(&cpufreq_driver_lock, flags);
37305 if (cpufreq_driver) {
37306diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
37307index a86ff72..aad2b03 100644
37308--- a/drivers/cpufreq/cpufreq_governor.c
37309+++ b/drivers/cpufreq/cpufreq_governor.c
37310@@ -235,7 +235,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
37311 struct dbs_data *dbs_data;
37312 struct od_cpu_dbs_info_s *od_dbs_info = NULL;
37313 struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
37314- struct od_ops *od_ops = NULL;
37315+ const struct od_ops *od_ops = NULL;
37316 struct od_dbs_tuners *od_tuners = NULL;
37317 struct cs_dbs_tuners *cs_tuners = NULL;
37318 struct cpu_dbs_common_info *cpu_cdbs;
37319@@ -298,7 +298,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
37320
37321 if ((cdata->governor == GOV_CONSERVATIVE) &&
37322 (!policy->governor->initialized)) {
37323- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
37324+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
37325
37326 cpufreq_register_notifier(cs_ops->notifier_block,
37327 CPUFREQ_TRANSITION_NOTIFIER);
37328@@ -315,7 +315,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
37329
37330 if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
37331 (policy->governor->initialized == 1)) {
37332- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
37333+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
37334
37335 cpufreq_unregister_notifier(cs_ops->notifier_block,
37336 CPUFREQ_TRANSITION_NOTIFIER);
37337diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
37338index 0d9e6be..461fd3b 100644
37339--- a/drivers/cpufreq/cpufreq_governor.h
37340+++ b/drivers/cpufreq/cpufreq_governor.h
37341@@ -204,7 +204,7 @@ struct common_dbs_data {
37342 void (*exit)(struct dbs_data *dbs_data);
37343
37344 /* Governor specific ops, see below */
37345- void *gov_ops;
37346+ const void *gov_ops;
37347 };
37348
37349 /* Governer Per policy data */
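The gov_ops constification above is the compile-time half of the same hardening: once the generic pointer is const void *, the per-governor ops tables can be declared const (and so placed in .rodata), and any write through the generic pointer becomes a compile error rather than a runtime hazard. A small sketch with illustrative names:

    /* Sketch: constified governor ops retrieved through a const void *.
     * Illustrative types; not the kernel's real declarations. */
    #include <stdio.h>

    struct cs_ops_sketch { void (*notify)(int); };

    static void cs_notify(int ev) { printf("event %d\n", ev); }

    static const struct cs_ops_sketch cs_ops = { .notify = cs_notify };

    struct common_dbs_sketch {
            const void *gov_ops;             /* was: void *gov_ops */
    };

    int main(void)
    {
            struct common_dbs_sketch d = { .gov_ops = &cs_ops };
            const struct cs_ops_sketch *ops = d.gov_ops;   /* const-correct */

            ops->notify(1);
            /* ops->notify = NULL;   -- would now fail to compile */
            return 0;
    }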
37350diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
37351index c087347..dad6268 100644
37352--- a/drivers/cpufreq/cpufreq_ondemand.c
37353+++ b/drivers/cpufreq/cpufreq_ondemand.c
37354@@ -615,14 +615,18 @@ void od_register_powersave_bias_handler(unsigned int (*f)
37355 (struct cpufreq_policy *, unsigned int, unsigned int),
37356 unsigned int powersave_bias)
37357 {
37358- od_ops.powersave_bias_target = f;
37359+ pax_open_kernel();
37360+ *(void **)&od_ops.powersave_bias_target = f;
37361+ pax_close_kernel();
37362 od_set_powersave_bias(powersave_bias);
37363 }
37364 EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);
37365
37366 void od_unregister_powersave_bias_handler(void)
37367 {
37368- od_ops.powersave_bias_target = generic_powersave_bias_target;
37369+ pax_open_kernel();
37370+ *(void **)&od_ops.powersave_bias_target = generic_powersave_bias_target;
37371+ pax_close_kernel();
37372 od_set_powersave_bias(0);
37373 }
37374 EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
37375diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
37376index bfd6273..e39dd63 100644
37377--- a/drivers/cpufreq/cpufreq_stats.c
37378+++ b/drivers/cpufreq/cpufreq_stats.c
37379@@ -365,7 +365,7 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
37380 }
37381
37382 /* priority=1 so this will get called before cpufreq_remove_dev */
37383-static struct notifier_block cpufreq_stat_cpu_notifier __refdata = {
37384+static struct notifier_block cpufreq_stat_cpu_notifier = {
37385 .notifier_call = cpufreq_stat_cpu_callback,
37386 .priority = 1,
37387 };
37388diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
37389index 421ef37..e708530c 100644
37390--- a/drivers/cpufreq/p4-clockmod.c
37391+++ b/drivers/cpufreq/p4-clockmod.c
37392@@ -160,10 +160,14 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
37393 case 0x0F: /* Core Duo */
37394 case 0x16: /* Celeron Core */
37395 case 0x1C: /* Atom */
37396- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
37397+ pax_open_kernel();
37398+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
37399+ pax_close_kernel();
37400 return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
37401 case 0x0D: /* Pentium M (Dothan) */
37402- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
37403+ pax_open_kernel();
37404+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
37405+ pax_close_kernel();
37406 /* fall through */
37407 case 0x09: /* Pentium M (Banias) */
37408 return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
37409@@ -175,7 +179,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
37410
37411 /* on P-4s, the TSC runs with constant frequency independent whether
37412 * throttling is active or not. */
37413- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
37414+ pax_open_kernel();
37415+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
37416+ pax_close_kernel();
37417
37418 if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
37419 printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
37420diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
37421index c71ee14..7c2e183 100644
37422--- a/drivers/cpufreq/sparc-us3-cpufreq.c
37423+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
37424@@ -18,14 +18,12 @@
37425 #include <asm/head.h>
37426 #include <asm/timer.h>
37427
37428-static struct cpufreq_driver *cpufreq_us3_driver;
37429-
37430 struct us3_freq_percpu_info {
37431 struct cpufreq_frequency_table table[4];
37432 };
37433
37434 /* Indexed by cpu number. */
37435-static struct us3_freq_percpu_info *us3_freq_table;
37436+static struct us3_freq_percpu_info us3_freq_table[NR_CPUS];
37437
37438 /* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled
37439 * in the Safari config register.
37440@@ -186,12 +184,25 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
37441
37442 static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
37443 {
37444- if (cpufreq_us3_driver)
37445- us3_set_cpu_divider_index(policy, 0);
37446+ us3_set_cpu_divider_index(policy->cpu, 0);
37447
37448 return 0;
37449 }
37450
37451+static int __init us3_freq_init(void);
37452+static void __exit us3_freq_exit(void);
37453+
37454+static struct cpufreq_driver cpufreq_us3_driver = {
37455+ .init = us3_freq_cpu_init,
37456+ .verify = us3_freq_verify,
37457+ .target = us3_freq_target,
37458+ .get = us3_freq_get,
37459+ .exit = us3_freq_cpu_exit,
37460+ .owner = THIS_MODULE,
37461+ .name = "UltraSPARC-III",
37462+
37463+};
37464+
37465 static int __init us3_freq_init(void)
37466 {
37467 unsigned long manuf, impl, ver;
37468@@ -208,57 +219,15 @@ static int __init us3_freq_init(void)
37469 (impl == CHEETAH_IMPL ||
37470 impl == CHEETAH_PLUS_IMPL ||
37471 impl == JAGUAR_IMPL ||
37472- impl == PANTHER_IMPL)) {
37473- struct cpufreq_driver *driver;
37474-
37475- ret = -ENOMEM;
37476- driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL);
37477- if (!driver)
37478- goto err_out;
37479-
37480- us3_freq_table = kzalloc(
37481- (NR_CPUS * sizeof(struct us3_freq_percpu_info)),
37482- GFP_KERNEL);
37483- if (!us3_freq_table)
37484- goto err_out;
37485-
37486- driver->init = us3_freq_cpu_init;
37487- driver->verify = us3_freq_verify;
37488- driver->target = us3_freq_target;
37489- driver->get = us3_freq_get;
37490- driver->exit = us3_freq_cpu_exit;
37491- driver->owner = THIS_MODULE,
37492- strcpy(driver->name, "UltraSPARC-III");
37493-
37494- cpufreq_us3_driver = driver;
37495- ret = cpufreq_register_driver(driver);
37496- if (ret)
37497- goto err_out;
37498-
37499- return 0;
37500-
37501-err_out:
37502- if (driver) {
37503- kfree(driver);
37504- cpufreq_us3_driver = NULL;
37505- }
37506- kfree(us3_freq_table);
37507- us3_freq_table = NULL;
37508- return ret;
37509- }
37510+ impl == PANTHER_IMPL))
37511+ return cpufreq_register_driver(&cpufreq_us3_driver);
37512
37513 return -ENODEV;
37514 }
37515
37516 static void __exit us3_freq_exit(void)
37517 {
37518- if (cpufreq_us3_driver) {
37519- cpufreq_unregister_driver(cpufreq_us3_driver);
37520- kfree(cpufreq_us3_driver);
37521- cpufreq_us3_driver = NULL;
37522- kfree(us3_freq_table);
37523- us3_freq_table = NULL;
37524- }
37525+ cpufreq_unregister_driver(&cpufreq_us3_driver);
37526 }
37527
37528 MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
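The UltraSPARC-III rework above swaps a kzalloc()ed cpufreq_driver, whose function pointers sat in writable heap memory, for a single statically initialized instance that the constification machinery can protect; the error-path cleanup disappears along with the allocation. A compilable sketch of the before/after shape (types and names are stand-ins):

    /* Sketch: register a statically initialized driver instead of a
     * heap-allocated one. Static tables can be write-protected and need
     * no error-path cleanup. Illustrative types and names. */
    #include <stdio.h>

    struct driver_sketch {
            int  (*init)(void);
            const char *name;
    };

    static int us3_init(void) { return 0; }

    static int register_driver(struct driver_sketch *d)
    {
            printf("registered %s\n", d->name);
            return d->init();
    }

    /* After the patch: one static instance, fixed at build time. */
    static struct driver_sketch us3_driver = {
            .init = us3_init,
            .name = "UltraSPARC-III",
    };

    int main(void)
    {
            /* Before: kzalloc() + field-by-field assignment + kfree() on
             * error. After: a single registration call, nothing to unwind. */
            return register_driver(&us3_driver);
    }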
37529diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
37530index 618e6f4..e89d915 100644
37531--- a/drivers/cpufreq/speedstep-centrino.c
37532+++ b/drivers/cpufreq/speedstep-centrino.c
37533@@ -353,8 +353,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
37534 !cpu_has(cpu, X86_FEATURE_EST))
37535 return -ENODEV;
37536
37537- if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
37538- centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
37539+ if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) {
37540+ pax_open_kernel();
37541+ *(u8 *)&centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
37542+ pax_close_kernel();
37543+ }
37544
37545 if (policy->cpu != 0)
37546 return -ENODEV;
37547diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
37548index c3a93fe..e808f24 100644
37549--- a/drivers/cpuidle/cpuidle.c
37550+++ b/drivers/cpuidle/cpuidle.c
37551@@ -254,7 +254,7 @@ static int poll_idle(struct cpuidle_device *dev,
37552
37553 static void poll_idle_init(struct cpuidle_driver *drv)
37554 {
37555- struct cpuidle_state *state = &drv->states[0];
37556+ cpuidle_state_no_const *state = &drv->states[0];
37557
37558 snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
37559 snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
37560diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
37561index ea2f8e7..70ac501 100644
37562--- a/drivers/cpuidle/governor.c
37563+++ b/drivers/cpuidle/governor.c
37564@@ -87,7 +87,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
37565 mutex_lock(&cpuidle_lock);
37566 if (__cpuidle_find_governor(gov->name) == NULL) {
37567 ret = 0;
37568- list_add_tail(&gov->governor_list, &cpuidle_governors);
37569+ pax_list_add_tail((struct list_head *)&gov->governor_list, &cpuidle_governors);
37570 if (!cpuidle_curr_governor ||
37571 cpuidle_curr_governor->rating < gov->rating)
37572 cpuidle_switch_governor(gov);
37573@@ -135,7 +135,7 @@ void cpuidle_unregister_governor(struct cpuidle_governor *gov)
37574 new_gov = cpuidle_replace_governor(gov->rating);
37575 cpuidle_switch_governor(new_gov);
37576 }
37577- list_del(&gov->governor_list);
37578+ pax_list_del((struct list_head *)&gov->governor_list);
37579 mutex_unlock(&cpuidle_lock);
37580 }
37581
37582diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
37583index 428754a..8bdf9cc 100644
37584--- a/drivers/cpuidle/sysfs.c
37585+++ b/drivers/cpuidle/sysfs.c
37586@@ -131,7 +131,7 @@ static struct attribute *cpuidle_switch_attrs[] = {
37587 NULL
37588 };
37589
37590-static struct attribute_group cpuidle_attr_group = {
37591+static attribute_group_no_const cpuidle_attr_group = {
37592 .attrs = cpuidle_default_attrs,
37593 .name = "cpuidle",
37594 };
37595diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
37596index 3b36797..db0b0c0 100644
37597--- a/drivers/devfreq/devfreq.c
37598+++ b/drivers/devfreq/devfreq.c
37599@@ -477,7 +477,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
37600 GFP_KERNEL);
37601 devfreq->last_stat_updated = jiffies;
37602
37603- dev_set_name(&devfreq->dev, dev_name(dev));
37604+ dev_set_name(&devfreq->dev, "%s", dev_name(dev));
37605 err = device_register(&devfreq->dev);
37606 if (err) {
37607 put_device(&devfreq->dev);
37608@@ -588,7 +588,7 @@ int devfreq_add_governor(struct devfreq_governor *governor)
37609 goto err_out;
37610 }
37611
37612- list_add(&governor->node, &devfreq_governor_list);
37613+ pax_list_add((struct list_head *)&governor->node, &devfreq_governor_list);
37614
37615 list_for_each_entry(devfreq, &devfreq_list, node) {
37616 int ret = 0;
37617@@ -676,7 +676,7 @@ int devfreq_remove_governor(struct devfreq_governor *governor)
37618 }
37619 }
37620
37621- list_del(&governor->node);
37622+ pax_list_del((struct list_head *)&governor->node);
37623 err_out:
37624 mutex_unlock(&devfreq_list_lock);
37625
37626diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdma.c
37627index b70709b..1d8d02a 100644
37628--- a/drivers/dma/sh/shdma.c
37629+++ b/drivers/dma/sh/shdma.c
37630@@ -476,7 +476,7 @@ static int sh_dmae_nmi_handler(struct notifier_block *self,
37631 return ret;
37632 }
37633
37634-static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
37635+static struct notifier_block sh_dmae_nmi_notifier = {
37636 .notifier_call = sh_dmae_nmi_handler,
37637
37638 /* Run before NMI debug handler and KGDB */
37639diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
37640index c4d700a..0b57abd 100644
37641--- a/drivers/edac/edac_mc_sysfs.c
37642+++ b/drivers/edac/edac_mc_sysfs.c
37643@@ -148,7 +148,7 @@ static const char * const edac_caps[] = {
37644 struct dev_ch_attribute {
37645 struct device_attribute attr;
37646 int channel;
37647-};
37648+} __do_const;
37649
37650 #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
37651 struct dev_ch_attribute dev_attr_legacy_##_name = \
37652@@ -1005,14 +1005,16 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
37653 }
37654
37655 if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) {
37656+ pax_open_kernel();
37657 if (mci->get_sdram_scrub_rate) {
37658- dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
37659- dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
37660+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
37661+ *(void **)&dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
37662 }
37663 if (mci->set_sdram_scrub_rate) {
37664- dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
37665- dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
37666+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
37667+ *(void **)&dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
37668 }
37669+ pax_close_kernel();
37670 err = device_create_file(&mci->dev,
37671 &dev_attr_sdram_scrub_rate);
37672 if (err) {
37673diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
37674index e8658e4..22746d6 100644
37675--- a/drivers/edac/edac_pci_sysfs.c
37676+++ b/drivers/edac/edac_pci_sysfs.c
37677@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
37678 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
37679 static int edac_pci_poll_msec = 1000; /* one second workq period */
37680
37681-static atomic_t pci_parity_count = ATOMIC_INIT(0);
37682-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
37683+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
37684+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
37685
37686 static struct kobject *edac_pci_top_main_kobj;
37687 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
37688@@ -235,7 +235,7 @@ struct edac_pci_dev_attribute {
37689 void *value;
37690 ssize_t(*show) (void *, char *);
37691 ssize_t(*store) (void *, const char *, size_t);
37692-};
37693+} __do_const;
37694
37695 /* Set of show/store abstract level functions for PCI Parity object */
37696 static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
37697@@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
37698 edac_printk(KERN_CRIT, EDAC_PCI,
37699 "Signaled System Error on %s\n",
37700 pci_name(dev));
37701- atomic_inc(&pci_nonparity_count);
37702+ atomic_inc_unchecked(&pci_nonparity_count);
37703 }
37704
37705 if (status & (PCI_STATUS_PARITY)) {
37706@@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
37707 "Master Data Parity Error on %s\n",
37708 pci_name(dev));
37709
37710- atomic_inc(&pci_parity_count);
37711+ atomic_inc_unchecked(&pci_parity_count);
37712 }
37713
37714 if (status & (PCI_STATUS_DETECTED_PARITY)) {
37715@@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
37716 "Detected Parity Error on %s\n",
37717 pci_name(dev));
37718
37719- atomic_inc(&pci_parity_count);
37720+ atomic_inc_unchecked(&pci_parity_count);
37721 }
37722 }
37723
37724@@ -618,7 +618,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
37725 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
37726 "Signaled System Error on %s\n",
37727 pci_name(dev));
37728- atomic_inc(&pci_nonparity_count);
37729+ atomic_inc_unchecked(&pci_nonparity_count);
37730 }
37731
37732 if (status & (PCI_STATUS_PARITY)) {
37733@@ -626,7 +626,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
37734 "Master Data Parity Error on "
37735 "%s\n", pci_name(dev));
37736
37737- atomic_inc(&pci_parity_count);
37738+ atomic_inc_unchecked(&pci_parity_count);
37739 }
37740
37741 if (status & (PCI_STATUS_DETECTED_PARITY)) {
37742@@ -634,7 +634,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
37743 "Detected Parity Error on %s\n",
37744 pci_name(dev));
37745
37746- atomic_inc(&pci_parity_count);
37747+ atomic_inc_unchecked(&pci_parity_count);
37748 }
37749 }
37750 }
37751@@ -672,7 +672,7 @@ void edac_pci_do_parity_check(void)
37752 if (!check_pci_errors)
37753 return;
37754
37755- before_count = atomic_read(&pci_parity_count);
37756+ before_count = atomic_read_unchecked(&pci_parity_count);
37757
37758 /* scan all PCI devices looking for a Parity Error on devices and
37759 * bridges.
37760@@ -684,7 +684,7 @@ void edac_pci_do_parity_check(void)
37761 /* Only if operator has selected panic on PCI Error */
37762 if (edac_pci_get_panic_on_pe()) {
37763 /* If the count is different 'after' from 'before' */
37764- if (before_count != atomic_read(&pci_parity_count))
37765+ if (before_count != atomic_read_unchecked(&pci_parity_count))
37766 panic("EDAC: PCI Parity Error");
37767 }
37768 }
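The atomic_unchecked_t conversions above reflect grsecurity's PAX_REFCOUNT split: ordinary atomic_t increments trap on overflow to block reference-count exploits, while pure statistics such as pci_parity_count may wrap harmlessly and therefore use the unchecked variants. A C11 sketch of the unchecked side, mirroring the before/after comparison in edac_pci_do_parity_check(); names are illustrative:

    /* Sketch: a wrapping statistics counter, the role played by
     * atomic_unchecked_t / atomic_inc_unchecked() in the patch. */
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_uint parity_count;         /* stats: wrapping is fine */

    static void stat_inc(atomic_uint *v)     /* ~ atomic_inc_unchecked() */
    {
            atomic_fetch_add_explicit(v, 1, memory_order_relaxed);
    }

    int main(void)
    {
            unsigned before = atomic_load(&parity_count);

            stat_inc(&parity_count);         /* one "parity error" observed */

            if (before != atomic_load(&parity_count))
                    puts("EDAC: PCI Parity Error");   /* mirrors the panic check */
            return 0;
    }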
37769diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
37770index 51b7e3a..aa8a3e8 100644
37771--- a/drivers/edac/mce_amd.h
37772+++ b/drivers/edac/mce_amd.h
37773@@ -77,7 +77,7 @@ struct amd_decoder_ops {
37774 bool (*mc0_mce)(u16, u8);
37775 bool (*mc1_mce)(u16, u8);
37776 bool (*mc2_mce)(u16, u8);
37777-};
37778+} __no_const;
37779
37780 void amd_report_gart_errors(bool);
37781 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
37782diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
37783index 57ea7f4..789e3c3 100644
37784--- a/drivers/firewire/core-card.c
37785+++ b/drivers/firewire/core-card.c
37786@@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
37787
37788 void fw_core_remove_card(struct fw_card *card)
37789 {
37790- struct fw_card_driver dummy_driver = dummy_driver_template;
37791+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
37792
37793 card->driver->update_phy_reg(card, 4,
37794 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
37795diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
37796index 664a6ff..af13580 100644
37797--- a/drivers/firewire/core-device.c
37798+++ b/drivers/firewire/core-device.c
37799@@ -232,7 +232,7 @@ EXPORT_SYMBOL(fw_device_enable_phys_dma);
37800 struct config_rom_attribute {
37801 struct device_attribute attr;
37802 u32 key;
37803-};
37804+} __do_const;
37805
37806 static ssize_t show_immediate(struct device *dev,
37807 struct device_attribute *dattr, char *buf)
37808diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
37809index 28a94c7..58da63a 100644
37810--- a/drivers/firewire/core-transaction.c
37811+++ b/drivers/firewire/core-transaction.c
37812@@ -38,6 +38,7 @@
37813 #include <linux/timer.h>
37814 #include <linux/types.h>
37815 #include <linux/workqueue.h>
37816+#include <linux/sched.h>
37817
37818 #include <asm/byteorder.h>
37819
37820diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
37821index 515a42c..5ecf3ba 100644
37822--- a/drivers/firewire/core.h
37823+++ b/drivers/firewire/core.h
37824@@ -111,6 +111,7 @@ struct fw_card_driver {
37825
37826 int (*stop_iso)(struct fw_iso_context *ctx);
37827 };
37828+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
37829
37830 void fw_card_initialize(struct fw_card *card,
37831 const struct fw_card_driver *driver, struct device *device);
37832diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
37833index 94a58a0..f5eba42 100644
37834--- a/drivers/firmware/dmi-id.c
37835+++ b/drivers/firmware/dmi-id.c
37836@@ -16,7 +16,7 @@
37837 struct dmi_device_attribute{
37838 struct device_attribute dev_attr;
37839 int field;
37840-};
37841+} __do_const;
37842 #define to_dmi_dev_attr(_dev_attr) \
37843 container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
37844
37845diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
37846index b95159b..841ae55 100644
37847--- a/drivers/firmware/dmi_scan.c
37848+++ b/drivers/firmware/dmi_scan.c
37849@@ -497,11 +497,6 @@ void __init dmi_scan_machine(void)
37850 }
37851 }
37852 else {
37853- /*
37854- * no iounmap() for that ioremap(); it would be a no-op, but
37855- * it's so early in setup that sucker gets confused into doing
37856- * what it shouldn't if we actually call it.
37857- */
37858 p = dmi_ioremap(0xF0000, 0x10000);
37859 if (p == NULL)
37860 goto error;
37861@@ -786,7 +781,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
37862 if (buf == NULL)
37863 return -1;
37864
37865- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
37866+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
37867
37868 iounmap(buf);
37869 return 0;
37870diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
37871index 5145fa3..0d3babd 100644
37872--- a/drivers/firmware/efi/efi.c
37873+++ b/drivers/firmware/efi/efi.c
37874@@ -65,14 +65,16 @@ static struct attribute_group efi_subsys_attr_group = {
37875 };
37876
37877 static struct efivars generic_efivars;
37878-static struct efivar_operations generic_ops;
37879+static efivar_operations_no_const generic_ops __read_only;
37880
37881 static int generic_ops_register(void)
37882 {
37883- generic_ops.get_variable = efi.get_variable;
37884- generic_ops.set_variable = efi.set_variable;
37885- generic_ops.get_next_variable = efi.get_next_variable;
37886- generic_ops.query_variable_store = efi_query_variable_store;
37887+ pax_open_kernel();
37888+ *(void **)&generic_ops.get_variable = efi.get_variable;
37889+ *(void **)&generic_ops.set_variable = efi.set_variable;
37890+ *(void **)&generic_ops.get_next_variable = efi.get_next_variable;
37891+ *(void **)&generic_ops.query_variable_store = efi_query_variable_store;
37892+ pax_close_kernel();
37893
37894 return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
37895 }
37896diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
37897index 8bd1bb6..c48b0c6 100644
37898--- a/drivers/firmware/efi/efivars.c
37899+++ b/drivers/firmware/efi/efivars.c
37900@@ -452,7 +452,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
37901 static int
37902 create_efivars_bin_attributes(void)
37903 {
37904- struct bin_attribute *attr;
37905+ bin_attribute_no_const *attr;
37906 int error;
37907
37908 /* new_var */
37909diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
37910index 2a90ba6..07f3733 100644
37911--- a/drivers/firmware/google/memconsole.c
37912+++ b/drivers/firmware/google/memconsole.c
37913@@ -147,7 +147,9 @@ static int __init memconsole_init(void)
37914 if (!found_memconsole())
37915 return -ENODEV;
37916
37917- memconsole_bin_attr.size = memconsole_length;
37918+ pax_open_kernel();
37919+ *(size_t *)&memconsole_bin_attr.size = memconsole_length;
37920+ pax_close_kernel();
37921
37922 ret = sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
37923
37924diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
37925index e16d932..f0206ef 100644
37926--- a/drivers/gpio/gpio-ich.c
37927+++ b/drivers/gpio/gpio-ich.c
37928@@ -69,7 +69,7 @@ struct ichx_desc {
37929 /* Some chipsets have quirks, let these use their own request/get */
37930 int (*request)(struct gpio_chip *chip, unsigned offset);
37931 int (*get)(struct gpio_chip *chip, unsigned offset);
37932-};
37933+} __do_const;
37934
37935 static struct {
37936 spinlock_t lock;
37937diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
37938index 9902732..64b62dd 100644
37939--- a/drivers/gpio/gpio-vr41xx.c
37940+++ b/drivers/gpio/gpio-vr41xx.c
37941@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
37942 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
37943 maskl, pendl, maskh, pendh);
37944
37945- atomic_inc(&irq_err_count);
37946+ atomic_inc_unchecked(&irq_err_count);
37947
37948 return -EINVAL;
37949 }
37950diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
37951index ed1334e..ee0dd42 100644
37952--- a/drivers/gpu/drm/drm_crtc_helper.c
37953+++ b/drivers/gpu/drm/drm_crtc_helper.c
37954@@ -321,7 +321,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
37955 struct drm_crtc *tmp;
37956 int crtc_mask = 1;
37957
37958- WARN(!crtc, "checking null crtc?\n");
37959+ BUG_ON(!crtc);
37960
37961 dev = crtc->dev;
37962
37963diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
37964index 9cc247f..36aa285 100644
37965--- a/drivers/gpu/drm/drm_drv.c
37966+++ b/drivers/gpu/drm/drm_drv.c
37967@@ -306,7 +306,7 @@ module_exit(drm_core_exit);
37968 /**
37969 * Copy and IOCTL return string to user space
37970 */
37971-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
37972+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
37973 {
37974 int len;
37975
37976@@ -376,7 +376,7 @@ long drm_ioctl(struct file *filp,
37977 struct drm_file *file_priv = filp->private_data;
37978 struct drm_device *dev;
37979 const struct drm_ioctl_desc *ioctl = NULL;
37980- drm_ioctl_t *func;
37981+ drm_ioctl_no_const_t func;
37982 unsigned int nr = DRM_IOCTL_NR(cmd);
37983 int retcode = -EINVAL;
37984 char stack_kdata[128];
37985@@ -389,7 +389,7 @@ long drm_ioctl(struct file *filp,
37986 return -ENODEV;
37987
37988 atomic_inc(&dev->ioctl_count);
37989- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
37990+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
37991 ++file_priv->ioctl_count;
37992
37993 if ((nr >= DRM_CORE_IOCTL_COUNT) &&
37994diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
37995index 429e07d..e681a2c 100644
37996--- a/drivers/gpu/drm/drm_fops.c
37997+++ b/drivers/gpu/drm/drm_fops.c
37998@@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
37999 }
38000
38001 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
38002- atomic_set(&dev->counts[i], 0);
38003+ atomic_set_unchecked(&dev->counts[i], 0);
38004
38005 dev->sigdata.lock = NULL;
38006
38007@@ -135,7 +135,7 @@ int drm_open(struct inode *inode, struct file *filp)
38008 if (drm_device_is_unplugged(dev))
38009 return -ENODEV;
38010
38011- if (!dev->open_count++)
38012+ if (local_inc_return(&dev->open_count) == 1)
38013 need_setup = 1;
38014 mutex_lock(&dev->struct_mutex);
38015 old_imapping = inode->i_mapping;
38016@@ -151,7 +151,7 @@ int drm_open(struct inode *inode, struct file *filp)
38017 retcode = drm_open_helper(inode, filp, dev);
38018 if (retcode)
38019 goto err_undo;
38020- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
38021+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
38022 if (need_setup) {
38023 retcode = drm_setup(dev);
38024 if (retcode)
38025@@ -166,7 +166,7 @@ err_undo:
38026 iput(container_of(dev->dev_mapping, struct inode, i_data));
38027 dev->dev_mapping = old_mapping;
38028 mutex_unlock(&dev->struct_mutex);
38029- dev->open_count--;
38030+ local_dec(&dev->open_count);
38031 return retcode;
38032 }
38033 EXPORT_SYMBOL(drm_open);
38034@@ -441,7 +441,7 @@ int drm_release(struct inode *inode, struct file *filp)
38035
38036 mutex_lock(&drm_global_mutex);
38037
38038- DRM_DEBUG("open_count = %d\n", dev->open_count);
38039+ DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
38040
38041 if (dev->driver->preclose)
38042 dev->driver->preclose(dev, file_priv);
38043@@ -450,10 +450,10 @@ int drm_release(struct inode *inode, struct file *filp)
38044 * Begin inline drm_release
38045 */
38046
38047- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
38048+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
38049 task_pid_nr(current),
38050 (long)old_encode_dev(file_priv->minor->device),
38051- dev->open_count);
38052+ local_read(&dev->open_count));
38053
38054 /* Release any auth tokens that might point to this file_priv,
38055 (do that under the drm_global_mutex) */
38056@@ -550,8 +550,8 @@ int drm_release(struct inode *inode, struct file *filp)
38057 * End inline drm_release
38058 */
38059
38060- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
38061- if (!--dev->open_count) {
38062+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
38063+ if (local_dec_and_test(&dev->open_count)) {
38064 if (atomic_read(&dev->ioctl_count)) {
38065 DRM_ERROR("Device busy: %d\n",
38066 atomic_read(&dev->ioctl_count));
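The drm_fops changes above convert the bare int open_count, previously modified only under locks, into a local_t updated with local_inc_return()/local_dec_and_test(), which keeps the lock-free read in drm_unplug_dev() (the drm_stub.c hunk further down) coherent. A C11 stand-in for the increment-and-test idiom, with illustrative names:

    /* Sketch of the open_count conversion: replace `if (!dev->open_count++)`
     * with an atomic increment-and-test. atomic_long stands in for the
     * kernel's local_t here. */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_long open_count;

    static bool drm_open_sketch(void)
    {
            /* first opener performs one-time setup */
            return atomic_fetch_add(&open_count, 1) + 1 == 1;
    }

    static bool drm_release_sketch(void)
    {
            /* last closer tears the device down */
            return atomic_fetch_sub(&open_count, 1) - 1 == 0;
    }

    int main(void)
    {
            printf("need_setup=%d\n", drm_open_sketch());   /* 1: first open */
            printf("teardown=%d\n",  drm_release_sketch()); /* 1: last close */
            return 0;
    }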
38067diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
38068index f731116..629842c 100644
38069--- a/drivers/gpu/drm/drm_global.c
38070+++ b/drivers/gpu/drm/drm_global.c
38071@@ -36,7 +36,7 @@
38072 struct drm_global_item {
38073 struct mutex mutex;
38074 void *object;
38075- int refcount;
38076+ atomic_t refcount;
38077 };
38078
38079 static struct drm_global_item glob[DRM_GLOBAL_NUM];
38080@@ -49,7 +49,7 @@ void drm_global_init(void)
38081 struct drm_global_item *item = &glob[i];
38082 mutex_init(&item->mutex);
38083 item->object = NULL;
38084- item->refcount = 0;
38085+ atomic_set(&item->refcount, 0);
38086 }
38087 }
38088
38089@@ -59,7 +59,7 @@ void drm_global_release(void)
38090 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
38091 struct drm_global_item *item = &glob[i];
38092 BUG_ON(item->object != NULL);
38093- BUG_ON(item->refcount != 0);
38094+ BUG_ON(atomic_read(&item->refcount) != 0);
38095 }
38096 }
38097
38098@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
38099 void *object;
38100
38101 mutex_lock(&item->mutex);
38102- if (item->refcount == 0) {
38103+ if (atomic_read(&item->refcount) == 0) {
38104 item->object = kzalloc(ref->size, GFP_KERNEL);
38105 if (unlikely(item->object == NULL)) {
38106 ret = -ENOMEM;
38107@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
38108 goto out_err;
38109
38110 }
38111- ++item->refcount;
38112+ atomic_inc(&item->refcount);
38113 ref->object = item->object;
38114 object = item->object;
38115 mutex_unlock(&item->mutex);
38116@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
38117 struct drm_global_item *item = &glob[ref->global_type];
38118
38119 mutex_lock(&item->mutex);
38120- BUG_ON(item->refcount == 0);
38121+ BUG_ON(atomic_read(&item->refcount) == 0);
38122 BUG_ON(ref->object != item->object);
38123- if (--item->refcount == 0) {
38124+ if (atomic_dec_and_test(&item->refcount)) {
38125 ref->release(ref);
38126 item->object = NULL;
38127 }
38128diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
38129index d4b20ce..77a8d41 100644
38130--- a/drivers/gpu/drm/drm_info.c
38131+++ b/drivers/gpu/drm/drm_info.c
38132@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
38133 struct drm_local_map *map;
38134 struct drm_map_list *r_list;
38135
38136- /* Hardcoded from _DRM_FRAME_BUFFER,
38137- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
38138- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
38139- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
38140+ static const char * const types[] = {
38141+ [_DRM_FRAME_BUFFER] = "FB",
38142+ [_DRM_REGISTERS] = "REG",
38143+ [_DRM_SHM] = "SHM",
38144+ [_DRM_AGP] = "AGP",
38145+ [_DRM_SCATTER_GATHER] = "SG",
38146+ [_DRM_CONSISTENT] = "PCI",
38147+ [_DRM_GEM] = "GEM" };
38148 const char *type;
38149 int i;
38150
38151@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
38152 map = r_list->map;
38153 if (!map)
38154 continue;
38155- if (map->type < 0 || map->type > 5)
38156+ if (map->type >= ARRAY_SIZE(types))
38157 type = "??";
38158 else
38159 type = types[map->type];
38160@@ -253,7 +257,11 @@ int drm_vma_info(struct seq_file *m, void *data)
38161 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
38162 vma->vm_flags & VM_LOCKED ? 'l' : '-',
38163 vma->vm_flags & VM_IO ? 'i' : '-',
38164+#ifdef CONFIG_GRKERNSEC_HIDESYM
38165+ 0);
38166+#else
38167 vma->vm_pgoff);
38168+#endif
38169
38170 #if defined(__i386__)
38171 pgprot = pgprot_val(vma->vm_page_prot);
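The drm_info.c hunk above replaces a positional string array and a hard-coded `type > 5` bound with designated initializers plus an ARRAY_SIZE() check, so adding a map type can no longer silently index past the table. In miniature, with illustrative enum values:

    /* Sketch: designated array initializers with an ARRAY_SIZE() bound. */
    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    enum map_type { MAP_FB, MAP_REG, MAP_SHM, MAP_AGP, MAP_SG, MAP_PCI, MAP_GEM };

    static const char * const types[] = {
            [MAP_FB]  = "FB",  [MAP_REG] = "REG", [MAP_SHM] = "SHM",
            [MAP_AGP] = "AGP", [MAP_SG]  = "SG",  [MAP_PCI] = "PCI",
            [MAP_GEM] = "GEM",
    };

    static const char *type_name(unsigned t)
    {
            /* unsigned compare covers both negative and too-large inputs */
            return t >= ARRAY_SIZE(types) || !types[t] ? "??" : types[t];
    }

    int main(void)
    {
            printf("%s %s\n", type_name(MAP_GEM), type_name(42));  /* GEM ?? */
            return 0;
    }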
38172diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
38173index 2f4c434..dd12cd2 100644
38174--- a/drivers/gpu/drm/drm_ioc32.c
38175+++ b/drivers/gpu/drm/drm_ioc32.c
38176@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
38177 request = compat_alloc_user_space(nbytes);
38178 if (!access_ok(VERIFY_WRITE, request, nbytes))
38179 return -EFAULT;
38180- list = (struct drm_buf_desc *) (request + 1);
38181+ list = (struct drm_buf_desc __user *) (request + 1);
38182
38183 if (__put_user(count, &request->count)
38184 || __put_user(list, &request->list))
38185@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
38186 request = compat_alloc_user_space(nbytes);
38187 if (!access_ok(VERIFY_WRITE, request, nbytes))
38188 return -EFAULT;
38189- list = (struct drm_buf_pub *) (request + 1);
38190+ list = (struct drm_buf_pub __user *) (request + 1);
38191
38192 if (__put_user(count, &request->count)
38193 || __put_user(list, &request->list))
38194@@ -1016,7 +1016,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
38195 return 0;
38196 }
38197
38198-drm_ioctl_compat_t *drm_compat_ioctls[] = {
38199+drm_ioctl_compat_t drm_compat_ioctls[] = {
38200 [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
38201 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
38202 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
38203@@ -1062,7 +1062,6 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = {
38204 long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
38205 {
38206 unsigned int nr = DRM_IOCTL_NR(cmd);
38207- drm_ioctl_compat_t *fn;
38208 int ret;
38209
38210 /* Assume that ioctls without an explicit compat routine will just
38211@@ -1072,10 +1071,8 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
38212 if (nr >= ARRAY_SIZE(drm_compat_ioctls))
38213 return drm_ioctl(filp, cmd, arg);
38214
38215- fn = drm_compat_ioctls[nr];
38216-
38217- if (fn != NULL)
38218- ret = (*fn) (filp, cmd, arg);
38219+ if (drm_compat_ioctls[nr] != NULL)
38220+ ret = (*drm_compat_ioctls[nr]) (filp, cmd, arg);
38221 else
38222 ret = drm_ioctl(filp, cmd, arg);
38223
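Note the declaration change above: drm_compat_ioctls[] goes from an array of drm_ioctl_compat_t * to an array of drm_ioctl_compat_t values. That only compiles if the typedef is, or is elsewhere redefined by this patch to be, a pointer-to-function type rather than a bare function type, since C forbids arrays of functions; the typedef change itself is not in the hunks shown here, so treat this as an assumption. The two typedef styles in a sketch:

    /* Sketch: function-type vs function-pointer typedefs. Assumes the
     * patch redefines the typedef as a pointer elsewhere. */
    #include <stdio.h>

    typedef int fn_t(int);          /* function type  (original style) */
    typedef int (*fnp_t)(int);      /* pointer type   (patched style)  */

    static int add1(int x) { return x + 1; }

    static fn_t  *tbl_old[] = { add1 };   /* array of pointers to fn_t */
    static fnp_t  tbl_new[] = { add1 };   /* array of fnp_t values     */

    int main(void)
    {
            printf("%d %d\n", tbl_old[0](1), tbl_new[0](1));  /* 2 2 */
            return 0;
    }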
38224diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
38225index e77bd8b..1571b85 100644
38226--- a/drivers/gpu/drm/drm_ioctl.c
38227+++ b/drivers/gpu/drm/drm_ioctl.c
38228@@ -252,7 +252,7 @@ int drm_getstats(struct drm_device *dev, void *data,
38229 stats->data[i].value =
38230 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
38231 else
38232- stats->data[i].value = atomic_read(&dev->counts[i]);
38233+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
38234 stats->data[i].type = dev->types[i];
38235 }
38236
38237diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
38238index d752c96..fe08455 100644
38239--- a/drivers/gpu/drm/drm_lock.c
38240+++ b/drivers/gpu/drm/drm_lock.c
38241@@ -86,7 +86,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
38242 if (drm_lock_take(&master->lock, lock->context)) {
38243 master->lock.file_priv = file_priv;
38244 master->lock.lock_time = jiffies;
38245- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
38246+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
38247 break; /* Got lock */
38248 }
38249
38250@@ -157,7 +157,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
38251 return -EINVAL;
38252 }
38253
38254- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
38255+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
38256
38257 if (drm_lock_free(&master->lock, lock->context)) {
38258 /* FIXME: Should really bail out here. */
38259diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
38260index 16f3ec5..b28f9ca 100644
38261--- a/drivers/gpu/drm/drm_stub.c
38262+++ b/drivers/gpu/drm/drm_stub.c
38263@@ -501,7 +501,7 @@ void drm_unplug_dev(struct drm_device *dev)
38264
38265 drm_device_set_unplugged(dev);
38266
38267- if (dev->open_count == 0) {
38268+ if (local_read(&dev->open_count) == 0) {
38269 drm_put_dev(dev);
38270 }
38271 mutex_unlock(&drm_global_mutex);
38272diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
38273index 0229665..f61329c 100644
38274--- a/drivers/gpu/drm/drm_sysfs.c
38275+++ b/drivers/gpu/drm/drm_sysfs.c
38276@@ -499,7 +499,7 @@ EXPORT_SYMBOL(drm_sysfs_hotplug_event);
38277 int drm_sysfs_device_add(struct drm_minor *minor)
38278 {
38279 int err;
38280- char *minor_str;
38281+ const char *minor_str;
38282
38283 minor->kdev.parent = minor->dev->dev;
38284
38285diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
38286index 004ecdf..db1f6e0 100644
38287--- a/drivers/gpu/drm/i810/i810_dma.c
38288+++ b/drivers/gpu/drm/i810/i810_dma.c
38289@@ -945,8 +945,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
38290 dma->buflist[vertex->idx],
38291 vertex->discard, vertex->used);
38292
38293- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
38294- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
38295+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
38296+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
38297 sarea_priv->last_enqueue = dev_priv->counter - 1;
38298 sarea_priv->last_dispatch = (int)hw_status[5];
38299
38300@@ -1106,8 +1106,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
38301 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
38302 mc->last_render);
38303
38304- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
38305- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
38306+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
38307+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
38308 sarea_priv->last_enqueue = dev_priv->counter - 1;
38309 sarea_priv->last_dispatch = (int)hw_status[5];
38310
38311diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
38312index 6e0acad..93c8289 100644
38313--- a/drivers/gpu/drm/i810/i810_drv.h
38314+++ b/drivers/gpu/drm/i810/i810_drv.h
38315@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
38316 int page_flipping;
38317
38318 wait_queue_head_t irq_queue;
38319- atomic_t irq_received;
38320- atomic_t irq_emitted;
38321+ atomic_unchecked_t irq_received;
38322+ atomic_unchecked_t irq_emitted;
38323
38324 int front_offset;
38325 } drm_i810_private_t;
38326diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
38327index e913d32..4d9b351 100644
38328--- a/drivers/gpu/drm/i915/i915_debugfs.c
38329+++ b/drivers/gpu/drm/i915/i915_debugfs.c
38330@@ -499,7 +499,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
38331 I915_READ(GTIMR));
38332 }
38333 seq_printf(m, "Interrupts received: %d\n",
38334- atomic_read(&dev_priv->irq_received));
38335+ atomic_read_unchecked(&dev_priv->irq_received));
38336 for_each_ring(ring, dev_priv, i) {
38337 if (IS_GEN6(dev) || IS_GEN7(dev)) {
38338 seq_printf(m,
38339diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
38340index 17d9b0b..860e6d9 100644
38341--- a/drivers/gpu/drm/i915/i915_dma.c
38342+++ b/drivers/gpu/drm/i915/i915_dma.c
38343@@ -1259,7 +1259,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
38344 bool can_switch;
38345
38346 spin_lock(&dev->count_lock);
38347- can_switch = (dev->open_count == 0);
38348+ can_switch = (local_read(&dev->open_count) == 0);
38349 spin_unlock(&dev->count_lock);
38350 return can_switch;
38351 }
38352diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
38353index 47d8b68..52f5d8d 100644
38354--- a/drivers/gpu/drm/i915/i915_drv.h
38355+++ b/drivers/gpu/drm/i915/i915_drv.h
38356@@ -916,7 +916,7 @@ typedef struct drm_i915_private {
38357 drm_dma_handle_t *status_page_dmah;
38358 struct resource mch_res;
38359
38360- atomic_t irq_received;
38361+ atomic_unchecked_t irq_received;
38362
38363 /* protects the irq masks */
38364 spinlock_t irq_lock;
38365@@ -1813,7 +1813,7 @@ extern struct i2c_adapter *intel_gmbus_get_adapter(
38366 struct drm_i915_private *dev_priv, unsigned port);
38367 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
38368 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
38369-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
38370+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
38371 {
38372 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
38373 }
38374diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
38375index 117ce38..eefd237 100644
38376--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
38377+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
38378@@ -727,9 +727,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
38379
38380 static int
38381 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
38382- int count)
38383+ unsigned int count)
38384 {
38385- int i;
38386+ unsigned int i;
38387 int relocs_total = 0;
38388 int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
38389
38390diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
38391index 3c59584..500f2e9 100644
38392--- a/drivers/gpu/drm/i915/i915_ioc32.c
38393+++ b/drivers/gpu/drm/i915/i915_ioc32.c
38394@@ -181,7 +181,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
38395 (unsigned long)request);
38396 }
38397
38398-static drm_ioctl_compat_t *i915_compat_ioctls[] = {
38399+static drm_ioctl_compat_t i915_compat_ioctls[] = {
38400 [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
38401 [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
38402 [DRM_I915_GETPARAM] = compat_i915_getparam,
38403@@ -202,18 +202,15 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
38404 long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
38405 {
38406 unsigned int nr = DRM_IOCTL_NR(cmd);
38407- drm_ioctl_compat_t *fn = NULL;
38408 int ret;
38409
38410 if (nr < DRM_COMMAND_BASE)
38411 return drm_compat_ioctl(filp, cmd, arg);
38412
38413- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls))
38414- fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
38415-
38416- if (fn != NULL)
38417+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls)) {
38418+ drm_ioctl_compat_t fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
38419 ret = (*fn) (filp, cmd, arg);
38420- else
38421+ } else
38422 ret = drm_ioctl(filp, cmd, arg);
38423
38424 return ret;
38425diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
38426index e5e32869..1678f36 100644
38427--- a/drivers/gpu/drm/i915/i915_irq.c
38428+++ b/drivers/gpu/drm/i915/i915_irq.c
38429@@ -670,7 +670,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
38430 int pipe;
38431 u32 pipe_stats[I915_MAX_PIPES];
38432
38433- atomic_inc(&dev_priv->irq_received);
38434+ atomic_inc_unchecked(&dev_priv->irq_received);
38435
38436 while (true) {
38437 iir = I915_READ(VLV_IIR);
38438@@ -835,7 +835,7 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
38439 irqreturn_t ret = IRQ_NONE;
38440 int i;
38441
38442- atomic_inc(&dev_priv->irq_received);
38443+ atomic_inc_unchecked(&dev_priv->irq_received);
38444
38445 /* disable master interrupt before clearing iir */
38446 de_ier = I915_READ(DEIER);
38447@@ -925,7 +925,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
38448 int ret = IRQ_NONE;
38449 u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
38450
38451- atomic_inc(&dev_priv->irq_received);
38452+ atomic_inc_unchecked(&dev_priv->irq_received);
38453
38454 /* disable master interrupt before clearing iir */
38455 de_ier = I915_READ(DEIER);
38456@@ -2089,7 +2089,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
38457 {
38458 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
38459
38460- atomic_set(&dev_priv->irq_received, 0);
38461+ atomic_set_unchecked(&dev_priv->irq_received, 0);
38462
38463 I915_WRITE(HWSTAM, 0xeffe);
38464
38465@@ -2124,7 +2124,7 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
38466 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
38467 int pipe;
38468
38469- atomic_set(&dev_priv->irq_received, 0);
38470+ atomic_set_unchecked(&dev_priv->irq_received, 0);
38471
38472 /* VLV magic */
38473 I915_WRITE(VLV_IMR, 0);
38474@@ -2411,7 +2411,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
38475 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
38476 int pipe;
38477
38478- atomic_set(&dev_priv->irq_received, 0);
38479+ atomic_set_unchecked(&dev_priv->irq_received, 0);
38480
38481 for_each_pipe(pipe)
38482 I915_WRITE(PIPESTAT(pipe), 0);
38483@@ -2490,7 +2490,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
38484 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
38485 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
38486
38487- atomic_inc(&dev_priv->irq_received);
38488+ atomic_inc_unchecked(&dev_priv->irq_received);
38489
38490 iir = I915_READ16(IIR);
38491 if (iir == 0)
38492@@ -2565,7 +2565,7 @@ static void i915_irq_preinstall(struct drm_device * dev)
38493 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
38494 int pipe;
38495
38496- atomic_set(&dev_priv->irq_received, 0);
38497+ atomic_set_unchecked(&dev_priv->irq_received, 0);
38498
38499 if (I915_HAS_HOTPLUG(dev)) {
38500 I915_WRITE(PORT_HOTPLUG_EN, 0);
38501@@ -2664,7 +2664,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
38502 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
38503 int pipe, ret = IRQ_NONE;
38504
38505- atomic_inc(&dev_priv->irq_received);
38506+ atomic_inc_unchecked(&dev_priv->irq_received);
38507
38508 iir = I915_READ(IIR);
38509 do {
38510@@ -2791,7 +2791,7 @@ static void i965_irq_preinstall(struct drm_device * dev)
38511 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
38512 int pipe;
38513
38514- atomic_set(&dev_priv->irq_received, 0);
38515+ atomic_set_unchecked(&dev_priv->irq_received, 0);
38516
38517 I915_WRITE(PORT_HOTPLUG_EN, 0);
38518 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
38519@@ -2898,7 +2898,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
38520 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
38521 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
38522
38523- atomic_inc(&dev_priv->irq_received);
38524+ atomic_inc_unchecked(&dev_priv->irq_received);
38525
38526 iir = I915_READ(IIR);
38527
38528diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
38529index eea5982..eeef407 100644
38530--- a/drivers/gpu/drm/i915/intel_display.c
38531+++ b/drivers/gpu/drm/i915/intel_display.c
38532@@ -8935,13 +8935,13 @@ struct intel_quirk {
38533 int subsystem_vendor;
38534 int subsystem_device;
38535 void (*hook)(struct drm_device *dev);
38536-};
38537+} __do_const;
38538
38539 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
38540 struct intel_dmi_quirk {
38541 void (*hook)(struct drm_device *dev);
38542 const struct dmi_system_id (*dmi_id_list)[];
38543-};
38544+} __do_const;
38545
38546 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
38547 {
38548@@ -8949,18 +8949,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
38549 return 1;
38550 }
38551
38552-static const struct intel_dmi_quirk intel_dmi_quirks[] = {
38553+static const struct dmi_system_id intel_dmi_quirks_table[] = {
38554 {
38555- .dmi_id_list = &(const struct dmi_system_id[]) {
38556- {
38557- .callback = intel_dmi_reverse_brightness,
38558- .ident = "NCR Corporation",
38559- .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
38560- DMI_MATCH(DMI_PRODUCT_NAME, ""),
38561- },
38562- },
38563- { } /* terminating entry */
38564+ .callback = intel_dmi_reverse_brightness,
38565+ .ident = "NCR Corporation",
38566+ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
38567+ DMI_MATCH(DMI_PRODUCT_NAME, ""),
38568 },
38569+ },
38570+ { } /* terminating entry */
38571+};
38572+
38573+static const struct intel_dmi_quirk intel_dmi_quirks[] = {
38574+ {
38575+ .dmi_id_list = &intel_dmi_quirks_table,
38576 .hook = quirk_invert_brightness,
38577 },
38578 };
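The intel_display change above hoists an anonymous compound-literal DMI table out of the quirk initializer into a named static const array, presumably so the constify plugin can treat the quirk array and its nested table as ordinary read-only file-scope objects. In miniature, with illustrative types:

    /* Sketch: a named static const table instead of a compound literal
     * embedded in the initializer. Same data, now with its own name and
     * .rodata placement. Illustrative types. */
    #include <stdio.h>

    struct dmi_id_sketch { const char *vendor; };

    struct quirk_sketch {
            const struct dmi_id_sketch (*list)[];
    };

    /* after the patch: hoisted, named, const */
    static const struct dmi_id_sketch quirks_table[] = {
            { .vendor = "NCR Corporation" },
            { }                              /* terminating entry */
    };

    static const struct quirk_sketch quirks[] = {
            { .list = &quirks_table },
    };

    int main(void)
    {
            printf("%s\n", (*quirks[0].list)[0].vendor);
            return 0;
    }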
38579diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
38580index 54558a0..2d97005 100644
38581--- a/drivers/gpu/drm/mga/mga_drv.h
38582+++ b/drivers/gpu/drm/mga/mga_drv.h
38583@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
38584 u32 clear_cmd;
38585 u32 maccess;
38586
38587- atomic_t vbl_received; /**< Number of vblanks received. */
38588+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
38589 wait_queue_head_t fence_queue;
38590- atomic_t last_fence_retired;
38591+ atomic_unchecked_t last_fence_retired;
38592 u32 next_fence_to_post;
38593
38594 unsigned int fb_cpp;
38595diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
38596index 709e90d..89a1c0d 100644
38597--- a/drivers/gpu/drm/mga/mga_ioc32.c
38598+++ b/drivers/gpu/drm/mga/mga_ioc32.c
38599@@ -189,7 +189,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
38600 return 0;
38601 }
38602
38603-drm_ioctl_compat_t *mga_compat_ioctls[] = {
38604+drm_ioctl_compat_t mga_compat_ioctls[] = {
38605 [DRM_MGA_INIT] = compat_mga_init,
38606 [DRM_MGA_GETPARAM] = compat_mga_getparam,
38607 [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
38608@@ -207,18 +207,15 @@ drm_ioctl_compat_t *mga_compat_ioctls[] = {
38609 long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
38610 {
38611 unsigned int nr = DRM_IOCTL_NR(cmd);
38612- drm_ioctl_compat_t *fn = NULL;
38613 int ret;
38614
38615 if (nr < DRM_COMMAND_BASE)
38616 return drm_compat_ioctl(filp, cmd, arg);
38617
38618- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
38619- fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
38620-
38621- if (fn != NULL)
38622+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls)) {
38623+ drm_ioctl_compat_t fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
38624 ret = (*fn) (filp, cmd, arg);
38625- else
38626+ } else
38627 ret = drm_ioctl(filp, cmd, arg);
38628
38629 return ret;
38630diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
38631index 598c281..60d590e 100644
38632--- a/drivers/gpu/drm/mga/mga_irq.c
38633+++ b/drivers/gpu/drm/mga/mga_irq.c
38634@@ -43,7 +43,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
38635 if (crtc != 0)
38636 return 0;
38637
38638- return atomic_read(&dev_priv->vbl_received);
38639+ return atomic_read_unchecked(&dev_priv->vbl_received);
38640 }
38641
38642
38643@@ -59,7 +59,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
38644 /* VBLANK interrupt */
38645 if (status & MGA_VLINEPEN) {
38646 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
38647- atomic_inc(&dev_priv->vbl_received);
38648+ atomic_inc_unchecked(&dev_priv->vbl_received);
38649 drm_handle_vblank(dev, 0);
38650 handled = 1;
38651 }
38652@@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
38653 if ((prim_start & ~0x03) != (prim_end & ~0x03))
38654 MGA_WRITE(MGA_PRIMEND, prim_end);
38655
38656- atomic_inc(&dev_priv->last_fence_retired);
38657+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
38658 DRM_WAKEUP(&dev_priv->fence_queue);
38659 handled = 1;
38660 }
38661@@ -129,7 +129,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
38662 * using fences.
38663 */
38664 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
38665- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
38666+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
38667 - *sequence) <= (1 << 23)));
38668
38669 *sequence = cur_fence;
38670diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
38671index 6aa2137..fe8dc55 100644
38672--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
38673+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
38674@@ -965,7 +965,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
38675 struct bit_table {
38676 const char id;
38677 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
38678-};
38679+} __no_const;
38680
38681 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
38682
38683diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
38684index f2b30f8..d0f9a95 100644
38685--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
38686+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
38687@@ -92,7 +92,7 @@ struct nouveau_drm {
38688 struct drm_global_reference mem_global_ref;
38689 struct ttm_bo_global_ref bo_global_ref;
38690 struct ttm_bo_device bdev;
38691- atomic_t validate_sequence;
38692+ atomic_unchecked_t validate_sequence;
38693 int (*move)(struct nouveau_channel *,
38694 struct ttm_buffer_object *,
38695 struct ttm_mem_reg *, struct ttm_mem_reg *);
38696diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
38697index b4b4d0c..b7edc15 100644
38698--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
38699+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
38700@@ -322,7 +322,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
38701 int ret, i;
38702 struct nouveau_bo *res_bo = NULL;
38703
38704- sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
38705+ sequence = atomic_add_return_unchecked(1, &drm->ttm.validate_sequence);
38706 retry:
38707 if (++trycnt > 100000) {
38708 NV_ERROR(cli, "%s failed and gave up.\n", __func__);
38709@@ -359,7 +359,7 @@ retry:
38710 if (ret) {
38711 validate_fini(op, NULL);
38712 if (unlikely(ret == -EAGAIN)) {
38713- sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
38714+ sequence = atomic_add_return_unchecked(1, &drm->ttm.validate_sequence);
38715 ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
38716 sequence);
38717 if (!ret)
38718diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
38719index 08214bc..9208577 100644
38720--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
38721+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
38722@@ -50,7 +50,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
38723 unsigned long arg)
38724 {
38725 unsigned int nr = DRM_IOCTL_NR(cmd);
38726- drm_ioctl_compat_t *fn = NULL;
38727+ drm_ioctl_compat_t fn = NULL;
38728 int ret;
38729
38730 if (nr < DRM_COMMAND_BASE)
38731diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
38732index 25d3495..d81aaf6 100644
38733--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
38734+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
38735@@ -62,7 +62,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
38736 bool can_switch;
38737
38738 spin_lock(&dev->count_lock);
38739- can_switch = (dev->open_count == 0);
38740+ can_switch = (local_read(&dev->open_count) == 0);
38741 spin_unlock(&dev->count_lock);
38742 return can_switch;
38743 }
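
The switcheroo hook reads dev->open_count through local_read() because a companion change elsewhere in this patch turns the field into a local_t, which has no plain-int access. A sketch of the resulting accessor pattern, assuming that companion change:

#include <asm/local.h>

struct drm_device_like {
	local_t open_count;		/* was: int open_count */
};

static bool can_switch_now(struct drm_device_like *dev)
{
	/* local_read() is the sanctioned reader for local_t fields */
	return local_read(&dev->open_count) == 0;
}
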
38744diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
38745index 489cb8c..0b8d0d3 100644
38746--- a/drivers/gpu/drm/qxl/qxl_ttm.c
38747+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
38748@@ -103,7 +103,7 @@ static void qxl_ttm_global_fini(struct qxl_device *qdev)
38749 }
38750 }
38751
38752-static struct vm_operations_struct qxl_ttm_vm_ops;
38753+static vm_operations_struct_no_const qxl_ttm_vm_ops __read_only;
38754 static const struct vm_operations_struct *ttm_vm_ops;
38755
38756 static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
38757@@ -147,8 +147,10 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
38758 return r;
38759 if (unlikely(ttm_vm_ops == NULL)) {
38760 ttm_vm_ops = vma->vm_ops;
38761+ pax_open_kernel();
38762 qxl_ttm_vm_ops = *ttm_vm_ops;
38763 qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
38764+ pax_close_kernel();
38765 }
38766 vma->vm_ops = &qxl_ttm_vm_ops;
38767 return 0;
38768@@ -556,25 +558,23 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data)
38769 static int qxl_ttm_debugfs_init(struct qxl_device *qdev)
38770 {
38771 #if defined(CONFIG_DEBUG_FS)
38772- static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
38773- static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
38774- unsigned i;
38775+ static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES] = {
38776+ {
38777+ .name = "qxl_mem_mm",
38778+ .show = &qxl_mm_dump_table,
38779+ },
38780+ {
38781+ .name = "qxl_surf_mm",
38782+ .show = &qxl_mm_dump_table,
38783+ }
38784+ };
38785
38786- for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
38787- if (i == 0)
38788- sprintf(qxl_mem_types_names[i], "qxl_mem_mm");
38789- else
38790- sprintf(qxl_mem_types_names[i], "qxl_surf_mm");
38791- qxl_mem_types_list[i].name = qxl_mem_types_names[i];
38792- qxl_mem_types_list[i].show = &qxl_mm_dump_table;
38793- qxl_mem_types_list[i].driver_features = 0;
38794- if (i == 0)
38795- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
38796- else
38797- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
38798+ pax_open_kernel();
38799+ *(void **)&qxl_mem_types_list[0].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
38800+ *(void **)&qxl_mem_types_list[1].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
38801+ pax_close_kernel();
38802
38803- }
38804- return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
38805+ return qxl_debugfs_add_files(qdev, qxl_mem_types_list, QXL_DEBUGFS_MEM_TYPES);
38806 #else
38807 return 0;
38808 #endif
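
qxl_ttm_vm_ops becomes __read_only data, so its one-time initialization is bracketed by pax_open_kernel()/pax_close_kernel(). On x86 the pair toggles CR0.WP so the kernel may briefly write through read-only mappings; a sketch of the open half, assuming the KERNEXEC implementation added elsewhere in this patch:

/* Sketch of the x86 variant; preemption stays disabled between the pair. */
static inline unsigned long pax_open_kernel(void)
{
	unsigned long cr0;

	preempt_disable();
	cr0 = read_cr0() ^ X86_CR0_WP;
	BUG_ON(cr0 & X86_CR0_WP);	/* WP must now be clear */
	write_cr0(cr0);
	return cr0 ^ X86_CR0_WP;
}

pax_close_kernel() mirrors this: it sets WP again and re-enables preemption, so the writable window is only as long as the two or three assignments it protects.
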
38809diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
38810index d4660cf..70dbe65 100644
38811--- a/drivers/gpu/drm/r128/r128_cce.c
38812+++ b/drivers/gpu/drm/r128/r128_cce.c
38813@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
38814
38815 /* GH: Simple idle check.
38816 */
38817- atomic_set(&dev_priv->idle_count, 0);
38818+ atomic_set_unchecked(&dev_priv->idle_count, 0);
38819
38820 /* We don't support anything other than bus-mastering ring mode,
38821 * but the ring can be in either AGP or PCI space for the ring
38822diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
38823index 930c71b..499aded 100644
38824--- a/drivers/gpu/drm/r128/r128_drv.h
38825+++ b/drivers/gpu/drm/r128/r128_drv.h
38826@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
38827 int is_pci;
38828 unsigned long cce_buffers_offset;
38829
38830- atomic_t idle_count;
38831+ atomic_unchecked_t idle_count;
38832
38833 int page_flipping;
38834 int current_page;
38835 u32 crtc_offset;
38836 u32 crtc_offset_cntl;
38837
38838- atomic_t vbl_received;
38839+ atomic_unchecked_t vbl_received;
38840
38841 u32 color_fmt;
38842 unsigned int front_offset;
38843diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
38844index a954c54..9cc595c 100644
38845--- a/drivers/gpu/drm/r128/r128_ioc32.c
38846+++ b/drivers/gpu/drm/r128/r128_ioc32.c
38847@@ -177,7 +177,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
38848 return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
38849 }
38850
38851-drm_ioctl_compat_t *r128_compat_ioctls[] = {
38852+drm_ioctl_compat_t r128_compat_ioctls[] = {
38853 [DRM_R128_INIT] = compat_r128_init,
38854 [DRM_R128_DEPTH] = compat_r128_depth,
38855 [DRM_R128_STIPPLE] = compat_r128_stipple,
38856@@ -196,18 +196,15 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = {
38857 long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
38858 {
38859 unsigned int nr = DRM_IOCTL_NR(cmd);
38860- drm_ioctl_compat_t *fn = NULL;
38861 int ret;
38862
38863 if (nr < DRM_COMMAND_BASE)
38864 return drm_compat_ioctl(filp, cmd, arg);
38865
38866- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls))
38867- fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
38868-
38869- if (fn != NULL)
38870+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls)) {
38871+ drm_ioctl_compat_t fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
38872 ret = (*fn) (filp, cmd, arg);
38873- else
38874+ } else
38875 ret = drm_ioctl(filp, cmd, arg);
38876
38877 return ret;
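
Dropping the extra '*' in these tables works because drm_ioctl_compat_t is re-typedef'd to a pointer type elsewhere in this patch, which also lets the tables be constified. The tables are sparse (designated initializers leave NULL holes), so a dispatch sketch under that assumption still needs a NULL check alongside the range check; the names below are illustrative, not from the patch:

typedef int (*compat_fn_t)(struct file *filp, unsigned int cmd,
			   unsigned long arg);

static long dispatch_compat(const compat_fn_t *table, unsigned int n,
			    struct file *filp, unsigned int cmd,
			    unsigned long arg)
{
	unsigned int nr = DRM_IOCTL_NR(cmd);
	compat_fn_t fn;

	if (nr < DRM_COMMAND_BASE)
		return drm_compat_ioctl(filp, cmd, arg);
	if (nr >= DRM_COMMAND_BASE + n)
		return drm_ioctl(filp, cmd, arg);

	fn = table[nr - DRM_COMMAND_BASE];
	return fn ? (*fn)(filp, cmd, arg) : drm_ioctl(filp, cmd, arg);
}
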
38878diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
38879index 2ea4f09..d391371 100644
38880--- a/drivers/gpu/drm/r128/r128_irq.c
38881+++ b/drivers/gpu/drm/r128/r128_irq.c
38882@@ -41,7 +41,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
38883 if (crtc != 0)
38884 return 0;
38885
38886- return atomic_read(&dev_priv->vbl_received);
38887+ return atomic_read_unchecked(&dev_priv->vbl_received);
38888 }
38889
38890 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
38891@@ -55,7 +55,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
38892 /* VBLANK interrupt */
38893 if (status & R128_CRTC_VBLANK_INT) {
38894 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
38895- atomic_inc(&dev_priv->vbl_received);
38896+ atomic_inc_unchecked(&dev_priv->vbl_received);
38897 drm_handle_vblank(dev, 0);
38898 return IRQ_HANDLED;
38899 }
38900diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
38901index 19bb7e6..de7e2a2 100644
38902--- a/drivers/gpu/drm/r128/r128_state.c
38903+++ b/drivers/gpu/drm/r128/r128_state.c
38904@@ -320,10 +320,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
38905
38906 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
38907 {
38908- if (atomic_read(&dev_priv->idle_count) == 0)
38909+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
38910 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
38911 else
38912- atomic_set(&dev_priv->idle_count, 0);
38913+ atomic_set_unchecked(&dev_priv->idle_count, 0);
38914 }
38915
38916 #endif
38917diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
38918index 5a82b6b..9e69c73 100644
38919--- a/drivers/gpu/drm/radeon/mkregtable.c
38920+++ b/drivers/gpu/drm/radeon/mkregtable.c
38921@@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
38922 regex_t mask_rex;
38923 regmatch_t match[4];
38924 char buf[1024];
38925- size_t end;
38926+ long end;
38927 int len;
38928 int done = 0;
38929 int r;
38930 unsigned o;
38931 struct offset *offset;
38932 char last_reg_s[10];
38933- int last_reg;
38934+ unsigned long last_reg;
38935
38936 if (regcomp
38937 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
38938diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
38939index b0dc0b6..a9bfe9c 100644
38940--- a/drivers/gpu/drm/radeon/radeon_device.c
38941+++ b/drivers/gpu/drm/radeon/radeon_device.c
38942@@ -1014,7 +1014,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
38943 bool can_switch;
38944
38945 spin_lock(&dev->count_lock);
38946- can_switch = (dev->open_count == 0);
38947+ can_switch = (local_read(&dev->open_count) == 0);
38948 spin_unlock(&dev->count_lock);
38949 return can_switch;
38950 }
38951diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
38952index b369d42..8dd04eb 100644
38953--- a/drivers/gpu/drm/radeon/radeon_drv.h
38954+++ b/drivers/gpu/drm/radeon/radeon_drv.h
38955@@ -258,7 +258,7 @@ typedef struct drm_radeon_private {
38956
38957 /* SW interrupt */
38958 wait_queue_head_t swi_queue;
38959- atomic_t swi_emitted;
38960+ atomic_unchecked_t swi_emitted;
38961 int vblank_crtc;
38962 uint32_t irq_enable_reg;
38963 uint32_t r500_disp_irq_reg;
38964diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
38965index c180df8..5fd8186 100644
38966--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
38967+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
38968@@ -358,7 +358,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
38969 request = compat_alloc_user_space(sizeof(*request));
38970 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
38971 || __put_user(req32.param, &request->param)
38972- || __put_user((void __user *)(unsigned long)req32.value,
38973+ || __put_user((unsigned long)req32.value,
38974 &request->value))
38975 return -EFAULT;
38976
38977@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
38978 #define compat_radeon_cp_setparam NULL
38979 #endif /* X86_64 || IA64 */
38980
38981-static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
38982+static drm_ioctl_compat_t radeon_compat_ioctls[] = {
38983 [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
38984 [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
38985 [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
38986@@ -393,18 +393,15 @@ static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
38987 long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
38988 {
38989 unsigned int nr = DRM_IOCTL_NR(cmd);
38990- drm_ioctl_compat_t *fn = NULL;
38991 int ret;
38992
38993 if (nr < DRM_COMMAND_BASE)
38994 return drm_compat_ioctl(filp, cmd, arg);
38995
38996- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls))
38997- fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
38998-
38999- if (fn != NULL)
39000+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls)) {
39001+ drm_ioctl_compat_t fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
39002 ret = (*fn) (filp, cmd, arg);
39003- else
39004+ } else
39005 ret = drm_ioctl(filp, cmd, arg);
39006
39007 return ret;
39008diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
39009index 8d68e97..9dcfed8 100644
39010--- a/drivers/gpu/drm/radeon/radeon_irq.c
39011+++ b/drivers/gpu/drm/radeon/radeon_irq.c
39012@@ -226,8 +226,8 @@ static int radeon_emit_irq(struct drm_device * dev)
39013 unsigned int ret;
39014 RING_LOCALS;
39015
39016- atomic_inc(&dev_priv->swi_emitted);
39017- ret = atomic_read(&dev_priv->swi_emitted);
39018+ atomic_inc_unchecked(&dev_priv->swi_emitted);
39019+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
39020
39021 BEGIN_RING(4);
39022 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
39023@@ -353,7 +353,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
39024 drm_radeon_private_t *dev_priv =
39025 (drm_radeon_private_t *) dev->dev_private;
39026
39027- atomic_set(&dev_priv->swi_emitted, 0);
39028+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
39029 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
39030
39031 dev->max_vblank_count = 0x001fffff;
39032diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
39033index 4d20910..6726b6d 100644
39034--- a/drivers/gpu/drm/radeon/radeon_state.c
39035+++ b/drivers/gpu/drm/radeon/radeon_state.c
39036@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
39037 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
39038 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
39039
39040- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
39041+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
39042 sarea_priv->nbox * sizeof(depth_boxes[0])))
39043 return -EFAULT;
39044
39045@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
39046 {
39047 drm_radeon_private_t *dev_priv = dev->dev_private;
39048 drm_radeon_getparam_t *param = data;
39049- int value;
39050+ int value = 0;
39051
39052 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
39053
39054diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
39055index 6c0ce89..57a2529 100644
39056--- a/drivers/gpu/drm/radeon/radeon_ttm.c
39057+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
39058@@ -782,7 +782,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
39059 man->size = size >> PAGE_SHIFT;
39060 }
39061
39062-static struct vm_operations_struct radeon_ttm_vm_ops;
39063+static vm_operations_struct_no_const radeon_ttm_vm_ops __read_only;
39064 static const struct vm_operations_struct *ttm_vm_ops = NULL;
39065
39066 static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
39067@@ -823,8 +823,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
39068 }
39069 if (unlikely(ttm_vm_ops == NULL)) {
39070 ttm_vm_ops = vma->vm_ops;
39071+ pax_open_kernel();
39072 radeon_ttm_vm_ops = *ttm_vm_ops;
39073 radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
39074+ pax_close_kernel();
39075 }
39076 vma->vm_ops = &radeon_ttm_vm_ops;
39077 return 0;
39078@@ -853,38 +855,33 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data)
39079 static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
39080 {
39081 #if defined(CONFIG_DEBUG_FS)
39082- static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2];
39083- static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+2][32];
39084+ static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2] = {
39085+ {
39086+ .name = "radeon_vram_mm",
39087+ .show = &radeon_mm_dump_table,
39088+ },
39089+ {
39090+ .name = "radeon_gtt_mm",
39091+ .show = &radeon_mm_dump_table,
39092+ },
39093+ {
39094+ .name = "ttm_page_pool",
39095+ .show = &ttm_page_alloc_debugfs,
39096+ },
39097+ {
39098+ .name = "ttm_dma_page_pool",
39099+ .show = &ttm_dma_page_alloc_debugfs,
39100+ },
39101+ };
39102 unsigned i;
39103
39104- for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
39105- if (i == 0)
39106- sprintf(radeon_mem_types_names[i], "radeon_vram_mm");
39107- else
39108- sprintf(radeon_mem_types_names[i], "radeon_gtt_mm");
39109- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
39110- radeon_mem_types_list[i].show = &radeon_mm_dump_table;
39111- radeon_mem_types_list[i].driver_features = 0;
39112- if (i == 0)
39113- radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
39114- else
39115- radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
39116-
39117- }
39118- /* Add ttm page pool to debugfs */
39119- sprintf(radeon_mem_types_names[i], "ttm_page_pool");
39120- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
39121- radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
39122- radeon_mem_types_list[i].driver_features = 0;
39123- radeon_mem_types_list[i++].data = NULL;
39124+ pax_open_kernel();
39125+ *(void **)&radeon_mem_types_list[0].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
39126+ *(void **)&radeon_mem_types_list[1].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
39127+ pax_close_kernel();
39128 #ifdef CONFIG_SWIOTLB
39129- if (swiotlb_nr_tbl()) {
39130- sprintf(radeon_mem_types_names[i], "ttm_dma_page_pool");
39131- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
39132- radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs;
39133- radeon_mem_types_list[i].driver_features = 0;
39134- radeon_mem_types_list[i++].data = NULL;
39135- }
39136+ if (swiotlb_nr_tbl())
39137+ i++;
39138 #endif
39139 return radeon_debugfs_add_files(rdev, radeon_mem_types_list, i);
39140
39141diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
39142index 55880d5..9e95342 100644
39143--- a/drivers/gpu/drm/radeon/rs690.c
39144+++ b/drivers/gpu/drm/radeon/rs690.c
39145@@ -327,9 +327,11 @@ static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
39146 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
39147 rdev->pm.sideport_bandwidth.full)
39148 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
39149- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
39150+ read_delay_latency.full = dfixed_const(800 * 1000);
39151 read_delay_latency.full = dfixed_div(read_delay_latency,
39152 rdev->pm.igp_sideport_mclk);
39153+ a.full = dfixed_const(370);
39154+ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
39155 } else {
39156 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
39157 rdev->pm.k8_bandwidth.full)
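
The rs690 hunk is an arithmetic fix rather than a hardening annotation: dfixed_const() shifts its argument into 20.12 fixed point held in a u32, and 370 * 800 * 1000 overflows once shifted. Dividing by the sideport clock first and scaling by 370 afterwards keeps every intermediate in range:

/* 20.12 fixed point in a u32: dfixed_const(x) == x << 12.
 *
 *   370 * 800 * 1000  = 296,000,000
 *   296,000,000 << 12 ~= 1.21e12        -> overflows u32 (max ~4.29e9)
 *
 *   800 * 1000        = 800,000
 *   800,000 << 12     = 3,276,800,000   -> still fits in a u32
 *
 * hence: divide dfixed_const(800 * 1000) by the clock first, then
 *        dfixed_mul() the quotient by dfixed_const(370).
 */
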
39158diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
39159index dbc2def..0a9f710 100644
39160--- a/drivers/gpu/drm/ttm/ttm_memory.c
39161+++ b/drivers/gpu/drm/ttm/ttm_memory.c
39162@@ -264,7 +264,7 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
39163 zone->glob = glob;
39164 glob->zone_kernel = zone;
39165 ret = kobject_init_and_add(
39166- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
39167+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
39168 if (unlikely(ret != 0)) {
39169 kobject_put(&zone->kobj);
39170 return ret;
39171@@ -347,7 +347,7 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
39172 zone->glob = glob;
39173 glob->zone_dma32 = zone;
39174 ret = kobject_init_and_add(
39175- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
39176+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
39177 if (unlikely(ret != 0)) {
39178 kobject_put(&zone->kobj);
39179 return ret;
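
The ttm_memory change closes a format-string hole: zone->name was passed where kobject_init_and_add() expects a printf-style format, so any '%' in a zone name would be interpreted. The rule in two lines:

kobject_init_and_add(kobj, ktype, parent, name);	/* unsafe: name is the format */
kobject_init_and_add(kobj, ktype, parent, "%s", name);	/* safe: name is plain data */
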
39180diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
39181index bd2a3b4..122d9ad 100644
39182--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
39183+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
39184@@ -394,9 +394,9 @@ static int ttm_pool_get_num_unused_pages(void)
39185 static int ttm_pool_mm_shrink(struct shrinker *shrink,
39186 struct shrink_control *sc)
39187 {
39188- static atomic_t start_pool = ATOMIC_INIT(0);
39189+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
39190 unsigned i;
39191- unsigned pool_offset = atomic_add_return(1, &start_pool);
39192+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
39193 struct ttm_page_pool *pool;
39194 int shrink_pages = sc->nr_to_scan;
39195
39196diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
39197index dc0c065..58a0782 100644
39198--- a/drivers/gpu/drm/udl/udl_fb.c
39199+++ b/drivers/gpu/drm/udl/udl_fb.c
39200@@ -367,7 +367,6 @@ static int udl_fb_release(struct fb_info *info, int user)
39201 fb_deferred_io_cleanup(info);
39202 kfree(info->fbdefio);
39203 info->fbdefio = NULL;
39204- info->fbops->fb_mmap = udl_fb_mmap;
39205 }
39206
39207 pr_warn("released /dev/fb%d user=%d count=%d\n",
39208diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
39209index 893a650..6190d3b 100644
39210--- a/drivers/gpu/drm/via/via_drv.h
39211+++ b/drivers/gpu/drm/via/via_drv.h
39212@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
39213 typedef uint32_t maskarray_t[5];
39214
39215 typedef struct drm_via_irq {
39216- atomic_t irq_received;
39217+ atomic_unchecked_t irq_received;
39218 uint32_t pending_mask;
39219 uint32_t enable_mask;
39220 wait_queue_head_t irq_queue;
39221@@ -75,7 +75,7 @@ typedef struct drm_via_private {
39222 struct timeval last_vblank;
39223 int last_vblank_valid;
39224 unsigned usec_per_vblank;
39225- atomic_t vbl_received;
39226+ atomic_unchecked_t vbl_received;
39227 drm_via_state_t hc_state;
39228 char pci_buf[VIA_PCI_BUF_SIZE];
39229 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
39230diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
39231index ac98964..5dbf512 100644
39232--- a/drivers/gpu/drm/via/via_irq.c
39233+++ b/drivers/gpu/drm/via/via_irq.c
39234@@ -101,7 +101,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
39235 if (crtc != 0)
39236 return 0;
39237
39238- return atomic_read(&dev_priv->vbl_received);
39239+ return atomic_read_unchecked(&dev_priv->vbl_received);
39240 }
39241
39242 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
39243@@ -116,8 +116,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
39244
39245 status = VIA_READ(VIA_REG_INTERRUPT);
39246 if (status & VIA_IRQ_VBLANK_PENDING) {
39247- atomic_inc(&dev_priv->vbl_received);
39248- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
39249+ atomic_inc_unchecked(&dev_priv->vbl_received);
39250+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
39251 do_gettimeofday(&cur_vblank);
39252 if (dev_priv->last_vblank_valid) {
39253 dev_priv->usec_per_vblank =
39254@@ -127,7 +127,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
39255 dev_priv->last_vblank = cur_vblank;
39256 dev_priv->last_vblank_valid = 1;
39257 }
39258- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
39259+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
39260 DRM_DEBUG("US per vblank is: %u\n",
39261 dev_priv->usec_per_vblank);
39262 }
39263@@ -137,7 +137,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
39264
39265 for (i = 0; i < dev_priv->num_irqs; ++i) {
39266 if (status & cur_irq->pending_mask) {
39267- atomic_inc(&cur_irq->irq_received);
39268+ atomic_inc_unchecked(&cur_irq->irq_received);
39269 DRM_WAKEUP(&cur_irq->irq_queue);
39270 handled = 1;
39271 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
39272@@ -242,11 +242,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
39273 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
39274 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
39275 masks[irq][4]));
39276- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
39277+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
39278 } else {
39279 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
39280 (((cur_irq_sequence =
39281- atomic_read(&cur_irq->irq_received)) -
39282+ atomic_read_unchecked(&cur_irq->irq_received)) -
39283 *sequence) <= (1 << 23)));
39284 }
39285 *sequence = cur_irq_sequence;
39286@@ -284,7 +284,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
39287 }
39288
39289 for (i = 0; i < dev_priv->num_irqs; ++i) {
39290- atomic_set(&cur_irq->irq_received, 0);
39291+ atomic_set_unchecked(&cur_irq->irq_received, 0);
39292 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
39293 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
39294 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
39295@@ -366,7 +366,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
39296 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
39297 case VIA_IRQ_RELATIVE:
39298 irqwait->request.sequence +=
39299- atomic_read(&cur_irq->irq_received);
39300+ atomic_read_unchecked(&cur_irq->irq_received);
39301 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
39302 case VIA_IRQ_ABSOLUTE:
39303 break;
39304diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
39305index 13aeda7..4a952d1 100644
39306--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
39307+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
39308@@ -290,7 +290,7 @@ struct vmw_private {
39309 * Fencing and IRQs.
39310 */
39311
39312- atomic_t marker_seq;
39313+ atomic_unchecked_t marker_seq;
39314 wait_queue_head_t fence_queue;
39315 wait_queue_head_t fifo_queue;
39316 int fence_queue_waiters; /* Protected by hw_mutex */
39317diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
39318index 3eb1486..0a47ee9 100644
39319--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
39320+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
39321@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
39322 (unsigned int) min,
39323 (unsigned int) fifo->capabilities);
39324
39325- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
39326+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
39327 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
39328 vmw_marker_queue_init(&fifo->marker_queue);
39329 return vmw_fifo_send_fence(dev_priv, &dummy);
39330@@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
39331 if (reserveable)
39332 iowrite32(bytes, fifo_mem +
39333 SVGA_FIFO_RESERVED);
39334- return fifo_mem + (next_cmd >> 2);
39335+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
39336 } else {
39337 need_bounce = true;
39338 }
39339@@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
39340
39341 fm = vmw_fifo_reserve(dev_priv, bytes);
39342 if (unlikely(fm == NULL)) {
39343- *seqno = atomic_read(&dev_priv->marker_seq);
39344+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
39345 ret = -ENOMEM;
39346 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
39347 false, 3*HZ);
39348@@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
39349 }
39350
39351 do {
39352- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
39353+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
39354 } while (*seqno == 0);
39355
39356 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
39357diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
39358index c509d40..3b640c3 100644
39359--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
39360+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
39361@@ -138,7 +138,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
39362 int ret;
39363
39364 num_clips = arg->num_clips;
39365- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
39366+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
39367
39368 if (unlikely(num_clips == 0))
39369 return 0;
39370@@ -222,7 +222,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
39371 int ret;
39372
39373 num_clips = arg->num_clips;
39374- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
39375+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
39376
39377 if (unlikely(num_clips == 0))
39378 return 0;
39379diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
39380index 4640adb..e1384ed 100644
39381--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
39382+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
39383@@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
39384 * emitted. Then the fence is stale and signaled.
39385 */
39386
39387- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
39388+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
39389 > VMW_FENCE_WRAP);
39390
39391 return ret;
39392@@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
39393
39394 if (fifo_idle)
39395 down_read(&fifo_state->rwsem);
39396- signal_seq = atomic_read(&dev_priv->marker_seq);
39397+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
39398 ret = 0;
39399
39400 for (;;) {
39401diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
39402index 8a8725c2..afed796 100644
39403--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
39404+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
39405@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
39406 while (!vmw_lag_lt(queue, us)) {
39407 spin_lock(&queue->lock);
39408 if (list_empty(&queue->head))
39409- seqno = atomic_read(&dev_priv->marker_seq);
39410+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
39411 else {
39412 marker = list_first_entry(&queue->head,
39413 struct vmw_marker, head);
39414diff --git a/drivers/gpu/host1x/drm/dc.c b/drivers/gpu/host1x/drm/dc.c
39415index 8c04943..4370ed9 100644
39416--- a/drivers/gpu/host1x/drm/dc.c
39417+++ b/drivers/gpu/host1x/drm/dc.c
39418@@ -999,7 +999,7 @@ static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor)
39419 }
39420
39421 for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
39422- dc->debugfs_files[i].data = dc;
39423+ *(void **)&dc->debugfs_files[i].data = dc;
39424
39425 err = drm_debugfs_create_files(dc->debugfs_files,
39426 ARRAY_SIZE(debugfs_files),
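
The host1x write goes through a void-pointer cast because the constify plugin leaves struct drm_info_list const-qualified at the type level; the per-device copy in dc->debugfs_files is ordinary writable memory, so no pax_open_kernel() bracket is needed, just a cast past the qualifier. A self-contained sketch of the idiom:

/* Sketch: one const-qualified field in an otherwise writable copy. */
struct info_like {
	const char *name;
	void * const data;	/* stand-in for the constified field */
};

static void set_data(struct info_like *files, unsigned int n, void *priv)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		*(void **)&files[i].data = priv;	/* cast away const; the
							 * object itself is writable */
}
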
39427diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
39428index 402f486..5340852 100644
39429--- a/drivers/hid/hid-core.c
39430+++ b/drivers/hid/hid-core.c
39431@@ -63,6 +63,8 @@ struct hid_report *hid_register_report(struct hid_device *device, unsigned type,
39432 struct hid_report_enum *report_enum = device->report_enum + type;
39433 struct hid_report *report;
39434
39435+ if (id >= HID_MAX_IDS)
39436+ return NULL;
39437 if (report_enum->report_id_hash[id])
39438 return report_enum->report_id_hash[id];
39439
39440@@ -404,8 +406,10 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
39441
39442 case HID_GLOBAL_ITEM_TAG_REPORT_ID:
39443 parser->global.report_id = item_udata(item);
39444- if (parser->global.report_id == 0) {
39445- hid_err(parser->device, "report_id 0 is invalid\n");
39446+ if (parser->global.report_id == 0 ||
39447+ parser->global.report_id >= HID_MAX_IDS) {
39448+ hid_err(parser->device, "report_id %u is invalid\n",
39449+ parser->global.report_id);
39450 return -1;
39451 }
39452 return 0;
39453@@ -575,7 +579,7 @@ static void hid_close_report(struct hid_device *device)
39454 for (i = 0; i < HID_REPORT_TYPES; i++) {
39455 struct hid_report_enum *report_enum = device->report_enum + i;
39456
39457- for (j = 0; j < 256; j++) {
39458+ for (j = 0; j < HID_MAX_IDS; j++) {
39459 struct hid_report *report = report_enum->report_id_hash[j];
39460 if (report)
39461 hid_free_report(report);
39462@@ -755,6 +759,56 @@ int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size)
39463 }
39464 EXPORT_SYMBOL_GPL(hid_parse_report);
39465
39466+static const char * const hid_report_names[] = {
39467+ "HID_INPUT_REPORT",
39468+ "HID_OUTPUT_REPORT",
39469+ "HID_FEATURE_REPORT",
39470+};
39471+/**
39472+ * hid_validate_report - validate existing device report
39473+ *
39474+ * @hid: hid device
39475+ * @type: which report type to examine
39476+ * @id: which report ID to examine (0 for first)
39477+ * @fields: expected number of fields
39478+ * @report_counts: expected number of values per field
39479+ *
39480+ * Validate the report details after parsing.
39481+ */
39482+struct hid_report *hid_validate_report(struct hid_device *hid,
39483+ unsigned int type, unsigned int id,
39484+ unsigned int fields,
39485+ unsigned int report_counts)
39486+{
39487+ struct hid_report *report;
39488+ unsigned int i;
39489+
39490+ if (type > HID_FEATURE_REPORT) {
39491+ hid_err(hid, "invalid HID report %u\n", type);
39492+ return NULL;
39493+ }
39494+
39495+ report = hid->report_enum[type].report_id_hash[id];
39496+ if (!report) {
39497+ hid_err(hid, "missing %s %u\n", hid_report_names[type], id);
39498+ return NULL;
39499+ }
39500+ if (report->maxfield < fields) {
39501+ hid_err(hid, "not enough fields in %s %u\n",
39502+ hid_report_names[type], id);
39503+ return NULL;
39504+ }
39505+ for (i = 0; i < fields; i++) {
39506+ if (report->field[i]->report_count < report_counts) {
39507+ hid_err(hid, "not enough values in %s %u fields\n",
39508+ hid_report_names[type], id);
39509+ return NULL;
39510+ }
39511+ }
39512+ return report;
39513+}
39514+EXPORT_SYMBOL_GPL(hid_validate_report);
39515+
39516 /**
39517 * hid_open_report - open a driver-specific device report
39518 *
39519@@ -1152,7 +1206,12 @@ EXPORT_SYMBOL_GPL(hid_output_report);
39520
39521 int hid_set_field(struct hid_field *field, unsigned offset, __s32 value)
39522 {
39523- unsigned size = field->report_size;
39524+ unsigned size;
39525+
39526+ if (!field)
39527+ return -1;
39528+
39529+ size = field->report_size;
39530
39531 hid_dump_input(field->report->device, field->usage + offset, value);
39532
39533@@ -2275,7 +2334,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
39534
39535 int hid_add_device(struct hid_device *hdev)
39536 {
39537- static atomic_t id = ATOMIC_INIT(0);
39538+ static atomic_unchecked_t id = ATOMIC_INIT(0);
39539 int ret;
39540
39541 if (WARN_ON(hdev->status & HID_STAT_ADDED))
39542@@ -2309,7 +2368,7 @@ int hid_add_device(struct hid_device *hdev)
39543 /* XXX hack, any other cleaner solution after the driver core
39544 * is converted to allow more than 20 bytes as the device name? */
39545 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
39546- hdev->vendor, hdev->product, atomic_inc_return(&id));
39547+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
39548
39549 hid_debug_register(hdev, dev_name(&hdev->dev));
39550 ret = device_add(&hdev->dev);
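
hid_validate_report() lets the driver hunks that follow replace their ad-hoc report checks with one call. Mirroring the lg2ff/lgff conversions below, a typical use asks for the first output report with at least one field of at least seven values:

report = hid_validate_report(hid, HID_OUTPUT_REPORT, 0, 1, 7);
if (!report)
	return -ENODEV;		/* report missing or too small: refuse to bind */
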
39551diff --git a/drivers/hid/hid-lenovo-tpkbd.c b/drivers/hid/hid-lenovo-tpkbd.c
39552index 07837f5..b697ada 100644
39553--- a/drivers/hid/hid-lenovo-tpkbd.c
39554+++ b/drivers/hid/hid-lenovo-tpkbd.c
39555@@ -341,6 +341,11 @@ static int tpkbd_probe_tp(struct hid_device *hdev)
39556 char *name_mute, *name_micmute;
39557 int ret;
39558
39559+ /* Validate required reports. */
39560+ if (!hid_validate_report(hdev, HID_OUTPUT_REPORT, 4, 4, 1) ||
39561+ !hid_validate_report(hdev, HID_OUTPUT_REPORT, 3, 1, 2))
39562+ return -ENODEV;
39563+
39564 if (sysfs_create_group(&hdev->dev.kobj,
39565 &tpkbd_attr_group_pointer)) {
39566 hid_warn(hdev, "Could not create sysfs group\n");
39567diff --git a/drivers/hid/hid-lg2ff.c b/drivers/hid/hid-lg2ff.c
39568index b3cd150..9805197 100644
39569--- a/drivers/hid/hid-lg2ff.c
39570+++ b/drivers/hid/hid-lg2ff.c
39571@@ -64,26 +64,13 @@ int lg2ff_init(struct hid_device *hid)
39572 struct hid_report *report;
39573 struct hid_input *hidinput = list_entry(hid->inputs.next,
39574 struct hid_input, list);
39575- struct list_head *report_list =
39576- &hid->report_enum[HID_OUTPUT_REPORT].report_list;
39577 struct input_dev *dev = hidinput->input;
39578 int error;
39579
39580- if (list_empty(report_list)) {
39581- hid_err(hid, "no output report found\n");
39582+ /* Check that the report looks ok */
39583+ report = hid_validate_report(hid, HID_OUTPUT_REPORT, 0, 1, 7);
39584+ if (!report)
39585 return -ENODEV;
39586- }
39587-
39588- report = list_entry(report_list->next, struct hid_report, list);
39589-
39590- if (report->maxfield < 1) {
39591- hid_err(hid, "output report is empty\n");
39592- return -ENODEV;
39593- }
39594- if (report->field[0]->report_count < 7) {
39595- hid_err(hid, "not enough values in the field\n");
39596- return -ENODEV;
39597- }
39598
39599 lg2ff = kmalloc(sizeof(struct lg2ff_device), GFP_KERNEL);
39600 if (!lg2ff)
39601diff --git a/drivers/hid/hid-lg3ff.c b/drivers/hid/hid-lg3ff.c
39602index e52f181..53ac79b 100644
39603--- a/drivers/hid/hid-lg3ff.c
39604+++ b/drivers/hid/hid-lg3ff.c
39605@@ -66,10 +66,11 @@ static int hid_lg3ff_play(struct input_dev *dev, void *data,
39606 int x, y;
39607
39608 /*
39609- * Maxusage should always be 63 (maximum fields)
39610- * likely a better way to ensure this data is clean
39611+ * Available values in the field should always be 63, but we only use up to
39612+ * 35. Instead, clear the entire area, however big it is.
39613 */
39614- memset(report->field[0]->value, 0, sizeof(__s32)*report->field[0]->maxusage);
39615+ memset(report->field[0]->value, 0,
39616+ sizeof(__s32) * report->field[0]->report_count);
39617
39618 switch (effect->type) {
39619 case FF_CONSTANT:
39620@@ -129,32 +130,14 @@ static const signed short ff3_joystick_ac[] = {
39621 int lg3ff_init(struct hid_device *hid)
39622 {
39623 struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
39624- struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
39625 struct input_dev *dev = hidinput->input;
39626- struct hid_report *report;
39627- struct hid_field *field;
39628 const signed short *ff_bits = ff3_joystick_ac;
39629 int error;
39630 int i;
39631
39632- /* Find the report to use */
39633- if (list_empty(report_list)) {
39634- hid_err(hid, "No output report found\n");
39635- return -1;
39636- }
39637-
39638 /* Check that the report looks ok */
39639- report = list_entry(report_list->next, struct hid_report, list);
39640- if (!report) {
39641- hid_err(hid, "NULL output report\n");
39642- return -1;
39643- }
39644-
39645- field = report->field[0];
39646- if (!field) {
39647- hid_err(hid, "NULL field\n");
39648- return -1;
39649- }
39650+ if (!hid_validate_report(hid, HID_OUTPUT_REPORT, 0, 1, 35))
39651+ return -ENODEV;
39652
39653 /* Assume single fixed device G940 */
39654 for (i = 0; ff_bits[i] >= 0; i++)
39655diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c
39656index 0ddae2a..8b89f0f 100644
39657--- a/drivers/hid/hid-lg4ff.c
39658+++ b/drivers/hid/hid-lg4ff.c
39659@@ -484,34 +484,16 @@ static enum led_brightness lg4ff_led_get_brightness(struct led_classdev *led_cde
39660 int lg4ff_init(struct hid_device *hid)
39661 {
39662 struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
39663- struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
39664 struct input_dev *dev = hidinput->input;
39665- struct hid_report *report;
39666- struct hid_field *field;
39667 struct lg4ff_device_entry *entry;
39668 struct lg_drv_data *drv_data;
39669 struct usb_device_descriptor *udesc;
39670 int error, i, j;
39671 __u16 bcdDevice, rev_maj, rev_min;
39672
39673- /* Find the report to use */
39674- if (list_empty(report_list)) {
39675- hid_err(hid, "No output report found\n");
39676- return -1;
39677- }
39678-
39679 /* Check that the report looks ok */
39680- report = list_entry(report_list->next, struct hid_report, list);
39681- if (!report) {
39682- hid_err(hid, "NULL output report\n");
39683+ if (!hid_validate_report(hid, HID_OUTPUT_REPORT, 0, 1, 7))
39684 return -1;
39685- }
39686-
39687- field = report->field[0];
39688- if (!field) {
39689- hid_err(hid, "NULL field\n");
39690- return -1;
39691- }
39692
39693 /* Check what wheel has been connected */
39694 for (i = 0; i < ARRAY_SIZE(lg4ff_devices); i++) {
39695diff --git a/drivers/hid/hid-lgff.c b/drivers/hid/hid-lgff.c
39696index d7ea8c8..a84fb40 100644
39697--- a/drivers/hid/hid-lgff.c
39698+++ b/drivers/hid/hid-lgff.c
39699@@ -128,27 +128,14 @@ static void hid_lgff_set_autocenter(struct input_dev *dev, u16 magnitude)
39700 int lgff_init(struct hid_device* hid)
39701 {
39702 struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
39703- struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
39704 struct input_dev *dev = hidinput->input;
39705- struct hid_report *report;
39706- struct hid_field *field;
39707 const signed short *ff_bits = ff_joystick;
39708 int error;
39709 int i;
39710
39711- /* Find the report to use */
39712- if (list_empty(report_list)) {
39713- hid_err(hid, "No output report found\n");
39714- return -1;
39715- }
39716-
39717 /* Check that the report looks ok */
39718- report = list_entry(report_list->next, struct hid_report, list);
39719- field = report->field[0];
39720- if (!field) {
39721- hid_err(hid, "NULL field\n");
39722- return -1;
39723- }
39724+ if (!hid_validate_report(hid, HID_OUTPUT_REPORT, 0, 1, 7))
39725+ return -ENODEV;
39726
39727 for (i = 0; i < ARRAY_SIZE(devices); i++) {
39728 if (dev->id.vendor == devices[i].idVendor &&
39729diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
39730index 5207591a..6c9197f 100644
39731--- a/drivers/hid/hid-logitech-dj.c
39732+++ b/drivers/hid/hid-logitech-dj.c
39733@@ -421,7 +421,7 @@ static int logi_dj_recv_send_report(struct dj_receiver_dev *djrcv_dev,
39734 struct hid_report *report;
39735 struct hid_report_enum *output_report_enum;
39736 u8 *data = (u8 *)(&dj_report->device_index);
39737- int i;
39738+ unsigned int i, length;
39739
39740 output_report_enum = &hdev->report_enum[HID_OUTPUT_REPORT];
39741 report = output_report_enum->report_id_hash[REPORT_ID_DJ_SHORT];
39742@@ -431,7 +431,9 @@ static int logi_dj_recv_send_report(struct dj_receiver_dev *djrcv_dev,
39743 return -ENODEV;
39744 }
39745
39746- for (i = 0; i < report->field[0]->report_count; i++)
39747+ length = min_t(size_t, sizeof(*dj_report) - 1,
39748+ report->field[0]->report_count);
39749+ for (i = 0; i < length; i++)
39750 report->field[0]->value[i] = data[i];
39751
39752 hid_hw_request(hdev, report, HID_REQ_SET_REPORT);
39753@@ -738,6 +740,12 @@ static int logi_dj_probe(struct hid_device *hdev,
39754 goto hid_parse_fail;
39755 }
39756
39757+ if (!hid_validate_report(hdev, HID_OUTPUT_REPORT, REPORT_ID_DJ_SHORT,
39758+ 1, 3)) {
39759+ retval = -ENODEV;
39760+ goto hid_parse_fail;
39761+ }
39762+
39763 /* Starts the usb device and connects to upper interfaces hiddev and
39764 * hidraw */
39765 retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
39766diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
39767index d39a5ce..4892dfc 100644
39768--- a/drivers/hid/hid-multitouch.c
39769+++ b/drivers/hid/hid-multitouch.c
39770@@ -330,9 +330,18 @@ static void mt_feature_mapping(struct hid_device *hdev,
39771 break;
39772 }
39773 }
39774+ /* Ignore if value index is out of bounds. */
39775+ if (td->inputmode_index < 0 ||
39776+ td->inputmode_index >= field->report_count) {
39777+ dev_err(&hdev->dev, "HID_DG_INPUTMODE out of range\n");
39778+ td->inputmode = -1;
39779+ }
39780
39781 break;
39782 case HID_DG_CONTACTMAX:
39783+ /* Ignore if value count is out of bounds. */
39784+ if (field->report_count < 1)
39785+ break;
39786 td->maxcontact_report_id = field->report->id;
39787 td->maxcontacts = field->value[0];
39788 if (!td->maxcontacts &&
39789@@ -743,15 +752,21 @@ static void mt_touch_report(struct hid_device *hid, struct hid_report *report)
39790 unsigned count;
39791 int r, n;
39792
39793+ if (report->maxfield == 0)
39794+ return;
39795+
39796 /*
39797 * Includes multi-packet support where subsequent
39798 * packets are sent with zero contactcount.
39799 */
39800- if (td->cc_index >= 0) {
39801- struct hid_field *field = report->field[td->cc_index];
39802- int value = field->value[td->cc_value_index];
39803- if (value)
39804- td->num_expected = value;
39805+ if (td->cc_index >= 0 && td->cc_index < report->maxfield) {
39806+ field = report->field[td->cc_index];
39807+ if (td->cc_value_index >= 0 &&
39808+ td->cc_value_index < field->report_count) {
39809+ int value = field->value[td->cc_value_index];
39810+ if (value)
39811+ td->num_expected = value;
39812+ }
39813 }
39814
39815 for (r = 0; r < report->maxfield; r++) {
39816diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c
39817index ef95102..5482156 100644
39818--- a/drivers/hid/hid-ntrig.c
39819+++ b/drivers/hid/hid-ntrig.c
39820@@ -115,7 +115,8 @@ static inline int ntrig_get_mode(struct hid_device *hdev)
39821 struct hid_report *report = hdev->report_enum[HID_FEATURE_REPORT].
39822 report_id_hash[0x0d];
39823
39824- if (!report)
39825+ if (!report || report->maxfield < 1 ||
39826+ report->field[0]->report_count < 1)
39827 return -EINVAL;
39828
39829 hid_hw_request(hdev, report, HID_REQ_GET_REPORT);
39830diff --git a/drivers/hid/hid-picolcd_core.c b/drivers/hid/hid-picolcd_core.c
39831index b48092d..72bba1e 100644
39832--- a/drivers/hid/hid-picolcd_core.c
39833+++ b/drivers/hid/hid-picolcd_core.c
39834@@ -290,7 +290,7 @@ static ssize_t picolcd_operation_mode_store(struct device *dev,
39835 buf += 10;
39836 cnt -= 10;
39837 }
39838- if (!report)
39839+ if (!report || report->maxfield < 1)
39840 return -EINVAL;
39841
39842 while (cnt > 0 && (buf[cnt-1] == '\n' || buf[cnt-1] == '\r'))
39843diff --git a/drivers/hid/hid-pl.c b/drivers/hid/hid-pl.c
39844index d29112f..2dcd7d9 100644
39845--- a/drivers/hid/hid-pl.c
39846+++ b/drivers/hid/hid-pl.c
39847@@ -132,8 +132,14 @@ static int plff_init(struct hid_device *hid)
39848 strong = &report->field[0]->value[2];
39849 weak = &report->field[0]->value[3];
39850 debug("detected single-field device");
39851- } else if (report->maxfield >= 4 && report->field[0]->maxusage == 1 &&
39852- report->field[0]->usage[0].hid == (HID_UP_LED | 0x43)) {
39853+ } else if (report->field[0]->maxusage == 1 &&
39854+ report->field[0]->usage[0].hid ==
39855+ (HID_UP_LED | 0x43) &&
39856+ report->maxfield >= 4 &&
39857+ report->field[0]->report_count >= 1 &&
39858+ report->field[1]->report_count >= 1 &&
39859+ report->field[2]->report_count >= 1 &&
39860+ report->field[3]->report_count >= 1) {
39861 report->field[0]->value[0] = 0x00;
39862 report->field[1]->value[0] = 0x00;
39863 strong = &report->field[2]->value[0];
39864diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
39865index ca749810..aa34755 100644
39866--- a/drivers/hid/hid-sensor-hub.c
39867+++ b/drivers/hid/hid-sensor-hub.c
39868@@ -221,7 +221,8 @@ int sensor_hub_get_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
39869
39870 mutex_lock(&data->mutex);
39871 report = sensor_hub_report(report_id, hsdev->hdev, HID_FEATURE_REPORT);
39872- if (!report || (field_index >= report->maxfield)) {
39873+ if (!report || (field_index >= report->maxfield) ||
39874+ report->field[field_index]->report_count < 1) {
39875 ret = -EINVAL;
39876 goto done_proc;
39877 }
39878diff --git a/drivers/hid/hid-steelseries.c b/drivers/hid/hid-steelseries.c
39879index d164911..ef42e86 100644
39880--- a/drivers/hid/hid-steelseries.c
39881+++ b/drivers/hid/hid-steelseries.c
39882@@ -249,6 +249,11 @@ static int steelseries_srws1_probe(struct hid_device *hdev,
39883 goto err_free;
39884 }
39885
39886+ if (!hid_validate_report(hdev, HID_OUTPUT_REPORT, 0, 1, 16)) {
39887+ ret = -ENODEV;
39888+ goto err_free;
39889+ }
39890+
39891 ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
39892 if (ret) {
39893 hid_err(hdev, "hw start failed\n");
39894diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
39895index 90124ff..3761764 100644
39896--- a/drivers/hid/hid-wiimote-debug.c
39897+++ b/drivers/hid/hid-wiimote-debug.c
39898@@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
39899 else if (size == 0)
39900 return -EIO;
39901
39902- if (copy_to_user(u, buf, size))
39903+ if (size > sizeof(buf) || copy_to_user(u, buf, size))
39904 return -EFAULT;
39905
39906 *off += size;
39907diff --git a/drivers/hid/hid-zpff.c b/drivers/hid/hid-zpff.c
39908index 6ec28a3..b124991 100644
39909--- a/drivers/hid/hid-zpff.c
39910+++ b/drivers/hid/hid-zpff.c
39911@@ -68,22 +68,12 @@ static int zpff_init(struct hid_device *hid)
39912 struct hid_report *report;
39913 struct hid_input *hidinput = list_entry(hid->inputs.next,
39914 struct hid_input, list);
39915- struct list_head *report_list =
39916- &hid->report_enum[HID_OUTPUT_REPORT].report_list;
39917 struct input_dev *dev = hidinput->input;
39918 int error;
39919
39920- if (list_empty(report_list)) {
39921- hid_err(hid, "no output report found\n");
39922+ report = hid_validate_report(hid, HID_OUTPUT_REPORT, 0, 4, 1);
39923+ if (!report)
39924 return -ENODEV;
39925- }
39926-
39927- report = list_entry(report_list->next, struct hid_report, list);
39928-
39929- if (report->maxfield < 4) {
39930- hid_err(hid, "not enough fields in report\n");
39931- return -ENODEV;
39932- }
39933
39934 zpff = kzalloc(sizeof(struct zpff_device), GFP_KERNEL);
39935 if (!zpff)
39936diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
39937index fc307e0..2b255e8 100644
39938--- a/drivers/hid/uhid.c
39939+++ b/drivers/hid/uhid.c
39940@@ -47,7 +47,7 @@ struct uhid_device {
39941 struct mutex report_lock;
39942 wait_queue_head_t report_wait;
39943 atomic_t report_done;
39944- atomic_t report_id;
39945+ atomic_unchecked_t report_id;
39946 struct uhid_event report_buf;
39947 };
39948
39949@@ -187,7 +187,7 @@ static int uhid_hid_get_raw(struct hid_device *hid, unsigned char rnum,
39950
39951 spin_lock_irqsave(&uhid->qlock, flags);
39952 ev->type = UHID_FEATURE;
39953- ev->u.feature.id = atomic_inc_return(&uhid->report_id);
39954+ ev->u.feature.id = atomic_inc_return_unchecked(&uhid->report_id);
39955 ev->u.feature.rnum = rnum;
39956 ev->u.feature.rtype = report_type;
39957
39958@@ -471,7 +471,7 @@ static int uhid_dev_feature_answer(struct uhid_device *uhid,
39959 spin_lock_irqsave(&uhid->qlock, flags);
39960
39961 /* id for old report; drop it silently */
39962- if (atomic_read(&uhid->report_id) != ev->u.feature_answer.id)
39963+ if (atomic_read_unchecked(&uhid->report_id) != ev->u.feature_answer.id)
39964 goto unlock;
39965 if (atomic_read(&uhid->report_done))
39966 goto unlock;
39967diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
39968index 0b122f8..b1d8160 100644
39969--- a/drivers/hv/channel.c
39970+++ b/drivers/hv/channel.c
39971@@ -394,8 +394,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
39972 int ret = 0;
39973 int t;
39974
39975- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
39976- atomic_inc(&vmbus_connection.next_gpadl_handle);
39977+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
39978+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
39979
39980 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
39981 if (ret)
39982diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
39983index ae49237..380d4c9 100644
39984--- a/drivers/hv/hv.c
39985+++ b/drivers/hv/hv.c
39986@@ -112,7 +112,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
39987 u64 output_address = (output) ? virt_to_phys(output) : 0;
39988 u32 output_address_hi = output_address >> 32;
39989 u32 output_address_lo = output_address & 0xFFFFFFFF;
39990- void *hypercall_page = hv_context.hypercall_page;
39991+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
39992
39993 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
39994 "=a"(hv_status_lo) : "d" (control_hi),
39995diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
39996index 12f2f9e..679603c 100644
39997--- a/drivers/hv/hyperv_vmbus.h
39998+++ b/drivers/hv/hyperv_vmbus.h
39999@@ -591,7 +591,7 @@ enum vmbus_connect_state {
40000 struct vmbus_connection {
40001 enum vmbus_connect_state conn_state;
40002
40003- atomic_t next_gpadl_handle;
40004+ atomic_unchecked_t next_gpadl_handle;
40005
40006 /*
40007 * Represents channel interrupts. Each bit position represents a
40008diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
40009index 4004e54..c2de226 100644
40010--- a/drivers/hv/vmbus_drv.c
40011+++ b/drivers/hv/vmbus_drv.c
40012@@ -668,10 +668,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
40013 {
40014 int ret = 0;
40015
40016- static atomic_t device_num = ATOMIC_INIT(0);
40017+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
40018
40019 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
40020- atomic_inc_return(&device_num));
40021+ atomic_inc_return_unchecked(&device_num));
40022
40023 child_device_obj->device.bus = &hv_bus;
40024 child_device_obj->device.parent = &hv_acpi_dev->dev;
40025diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
40026index 6351aba..dc4aaf4 100644
40027--- a/drivers/hwmon/acpi_power_meter.c
40028+++ b/drivers/hwmon/acpi_power_meter.c
40029@@ -117,7 +117,7 @@ struct sensor_template {
40030 struct device_attribute *devattr,
40031 const char *buf, size_t count);
40032 int index;
40033-};
40034+} __do_const;
40035
40036 /* Averaging interval */
40037 static int update_avg_interval(struct acpi_power_meter_resource *resource)
40038@@ -629,7 +629,7 @@ static int register_attrs(struct acpi_power_meter_resource *resource,
40039 struct sensor_template *attrs)
40040 {
40041 struct device *dev = &resource->acpi_dev->dev;
40042- struct sensor_device_attribute *sensors =
40043+ sensor_device_attribute_no_const *sensors =
40044 &resource->sensors[resource->num_sensors];
40045 int res = 0;
40046
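
The hwmon conversions that follow all lean on the same device: sysfs attribute types are constified tree-wide, and a *_no_const twin typedef is kept for the few drivers that fill attributes in at runtime. A sketch of the pairing, assuming the plugin-provided attributes:

struct sensor_template_like {
	struct device_attribute dev_attr;
	int index;
} __do_const;				/* instances are const by default */

typedef struct sensor_template_like __no_const
	sensor_template_like_no_const;	/* writable twin for runtime setup */
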
40047diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
40048index 62c2e32..8f2859a 100644
40049--- a/drivers/hwmon/applesmc.c
40050+++ b/drivers/hwmon/applesmc.c
40051@@ -1084,7 +1084,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
40052 {
40053 struct applesmc_node_group *grp;
40054 struct applesmc_dev_attr *node;
40055- struct attribute *attr;
40056+ attribute_no_const *attr;
40057 int ret, i;
40058
40059 for (grp = groups; grp->format; grp++) {
40060diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
40061index b25c643..a13460d 100644
40062--- a/drivers/hwmon/asus_atk0110.c
40063+++ b/drivers/hwmon/asus_atk0110.c
40064@@ -152,10 +152,10 @@ MODULE_DEVICE_TABLE(acpi, atk_ids);
40065 struct atk_sensor_data {
40066 struct list_head list;
40067 struct atk_data *data;
40068- struct device_attribute label_attr;
40069- struct device_attribute input_attr;
40070- struct device_attribute limit1_attr;
40071- struct device_attribute limit2_attr;
40072+ device_attribute_no_const label_attr;
40073+ device_attribute_no_const input_attr;
40074+ device_attribute_no_const limit1_attr;
40075+ device_attribute_no_const limit2_attr;
40076 char label_attr_name[ATTR_NAME_SIZE];
40077 char input_attr_name[ATTR_NAME_SIZE];
40078 char limit1_attr_name[ATTR_NAME_SIZE];
40079@@ -275,7 +275,7 @@ static ssize_t atk_name_show(struct device *dev,
40080 static struct device_attribute atk_name_attr =
40081 __ATTR(name, 0444, atk_name_show, NULL);
40082
40083-static void atk_init_attribute(struct device_attribute *attr, char *name,
40084+static void atk_init_attribute(device_attribute_no_const *attr, char *name,
40085 sysfs_show_func show)
40086 {
40087 sysfs_attr_init(&attr->attr);
40088diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
40089index 658ce3a..0d0c2f3 100644
40090--- a/drivers/hwmon/coretemp.c
40091+++ b/drivers/hwmon/coretemp.c
40092@@ -790,7 +790,7 @@ static int __cpuinit coretemp_cpu_callback(struct notifier_block *nfb,
40093 return NOTIFY_OK;
40094 }
40095
40096-static struct notifier_block coretemp_cpu_notifier __refdata = {
40097+static struct notifier_block coretemp_cpu_notifier = {
40098 .notifier_call = coretemp_cpu_callback,
40099 };
40100
40101diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
40102index 1429f6e..ee03d59 100644
40103--- a/drivers/hwmon/ibmaem.c
40104+++ b/drivers/hwmon/ibmaem.c
40105@@ -926,7 +926,7 @@ static int aem_register_sensors(struct aem_data *data,
40106 struct aem_rw_sensor_template *rw)
40107 {
40108 struct device *dev = &data->pdev->dev;
40109- struct sensor_device_attribute *sensors = data->sensors;
40110+ sensor_device_attribute_no_const *sensors = data->sensors;
40111 int err;
40112
40113 /* Set up read-only sensors */
40114diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
40115index 52b77af..aed1ddf 100644
40116--- a/drivers/hwmon/iio_hwmon.c
40117+++ b/drivers/hwmon/iio_hwmon.c
40118@@ -73,7 +73,7 @@ static int iio_hwmon_probe(struct platform_device *pdev)
40119 {
40120 struct device *dev = &pdev->dev;
40121 struct iio_hwmon_state *st;
40122- struct sensor_device_attribute *a;
40123+ sensor_device_attribute_no_const *a;
40124 int ret, i;
40125 int in_i = 1, temp_i = 1, curr_i = 1;
40126 enum iio_chan_type type;
40127diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
40128index 9add6092..ee7ba3f 100644
40129--- a/drivers/hwmon/pmbus/pmbus_core.c
40130+++ b/drivers/hwmon/pmbus/pmbus_core.c
40131@@ -781,7 +781,7 @@ static int pmbus_add_attribute(struct pmbus_data *data, struct attribute *attr)
40132 return 0;
40133 }
40134
40135-static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
40136+static void pmbus_dev_attr_init(device_attribute_no_const *dev_attr,
40137 const char *name,
40138 umode_t mode,
40139 ssize_t (*show)(struct device *dev,
40140@@ -798,7 +798,7 @@ static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
40141 dev_attr->store = store;
40142 }
40143
40144-static void pmbus_attr_init(struct sensor_device_attribute *a,
40145+static void pmbus_attr_init(sensor_device_attribute_no_const *a,
40146 const char *name,
40147 umode_t mode,
40148 ssize_t (*show)(struct device *dev,
40149@@ -820,7 +820,7 @@ static int pmbus_add_boolean(struct pmbus_data *data,
40150 u16 reg, u8 mask)
40151 {
40152 struct pmbus_boolean *boolean;
40153- struct sensor_device_attribute *a;
40154+ sensor_device_attribute_no_const *a;
40155
40156 boolean = devm_kzalloc(data->dev, sizeof(*boolean), GFP_KERNEL);
40157 if (!boolean)
40158@@ -845,7 +845,7 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
40159 bool update, bool readonly)
40160 {
40161 struct pmbus_sensor *sensor;
40162- struct device_attribute *a;
40163+ device_attribute_no_const *a;
40164
40165 sensor = devm_kzalloc(data->dev, sizeof(*sensor), GFP_KERNEL);
40166 if (!sensor)
40167@@ -876,7 +876,7 @@ static int pmbus_add_label(struct pmbus_data *data,
40168 const char *lstring, int index)
40169 {
40170 struct pmbus_label *label;
40171- struct device_attribute *a;
40172+ device_attribute_no_const *a;
40173
40174 label = devm_kzalloc(data->dev, sizeof(*label), GFP_KERNEL);
40175 if (!label)
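
All of the *_no_const conversions in the hwmon hunks above follow one pattern: grsecurity's constify GCC plugin makes every struct composed essentially of function pointers implicitly const, and struct device_attribute / sensor_device_attribute fall in that class. Attributes built at runtime -- here with per-sensor generated names -- must opt back out, which is what the *_no_const typedefs provide. A compilable sketch of the two-type pattern (the typedef is hand-written below; in the real tree the plugin handles it):

#include <stdio.h>
#include <sys/types.h>

struct device_attribute {          /* treated as const by the plugin */
	const char *name;
	ssize_t (*show)(char *buf);
};
typedef struct device_attribute device_attribute_no_const;

static ssize_t show_temp(char *buf) { return (ssize_t)sprintf(buf, "42\n"); }

/* Compile-time-initialized attribute: constifiable, lands in .rodata. */
static const struct device_attribute temp_attr = {
	.name = "temp1_input",
	.show = show_temp,
};

/* Runtime-built attribute (name chosen per probed sensor): must stay
 * writable, hence the _no_const type. */
static device_attribute_no_const dyn_attr;

static void atk_style_init(const char *generated_name)
{
	dyn_attr.name = generated_name;
	dyn_attr.show = temp_attr.show;
}

int main(void)
{
	char buf[16];
	atk_style_init("temp2_input");
	dyn_attr.show(buf);
	printf("%s -> %s", dyn_attr.name, buf);
	return 0;
}
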
40176diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
40177index 2507f90..1645765 100644
40178--- a/drivers/hwmon/sht15.c
40179+++ b/drivers/hwmon/sht15.c
40180@@ -169,7 +169,7 @@ struct sht15_data {
40181 int supply_uv;
40182 bool supply_uv_valid;
40183 struct work_struct update_supply_work;
40184- atomic_t interrupt_handled;
40185+ atomic_unchecked_t interrupt_handled;
40186 };
40187
40188 /**
40189@@ -542,13 +542,13 @@ static int sht15_measurement(struct sht15_data *data,
40190 ret = gpio_direction_input(data->pdata->gpio_data);
40191 if (ret)
40192 return ret;
40193- atomic_set(&data->interrupt_handled, 0);
40194+ atomic_set_unchecked(&data->interrupt_handled, 0);
40195
40196 enable_irq(gpio_to_irq(data->pdata->gpio_data));
40197 if (gpio_get_value(data->pdata->gpio_data) == 0) {
40198 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
40199 /* Only relevant if the interrupt hasn't occurred. */
40200- if (!atomic_read(&data->interrupt_handled))
40201+ if (!atomic_read_unchecked(&data->interrupt_handled))
40202 schedule_work(&data->read_work);
40203 }
40204 ret = wait_event_timeout(data->wait_queue,
40205@@ -820,7 +820,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
40206
40207 /* First disable the interrupt */
40208 disable_irq_nosync(irq);
40209- atomic_inc(&data->interrupt_handled);
40210+ atomic_inc_unchecked(&data->interrupt_handled);
40211 /* Then schedule a reading work struct */
40212 if (data->state != SHT15_READING_NOTHING)
40213 schedule_work(&data->read_work);
40214@@ -842,11 +842,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
40215 * If not, then start the interrupt again - care here as could
40216 * have gone low in meantime so verify it hasn't!
40217 */
40218- atomic_set(&data->interrupt_handled, 0);
40219+ atomic_set_unchecked(&data->interrupt_handled, 0);
40220 enable_irq(gpio_to_irq(data->pdata->gpio_data));
40221 /* If still not occurred or another handler was scheduled */
40222 if (gpio_get_value(data->pdata->gpio_data)
40223- || atomic_read(&data->interrupt_handled))
40224+ || atomic_read_unchecked(&data->interrupt_handled))
40225 return;
40226 }
40227
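
The sht15 conversion shows why atomic_unchecked_t exists at all: under PAX_REFCOUNT every atomic_t operation traps on overflow to stop reference-count wraparound exploits, but interrupt_handled is a counter used as a flag, so wrapping is harmless and the patch keeps the old non-trapping semantics for it. A userspace illustration of the two behaviours, with a compiler builtin standing in for the instrumented kernel atomics:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Checked increment: the PAX_REFCOUNT behaviour, trap on overflow. */
static int checked_inc(int *v)
{
	int next;
	if (__builtin_add_overflow(*v, 1, &next)) {
		fprintf(stderr, "refcount overflow detected\n");
		abort();            /* the kernel kills the offending task */
	}
	return *v = next;
}

/* Unchecked increment: two's-complement wrap, fine for flags and
 * statistics whose value never guards an object's lifetime. */
static int unchecked_inc(int *v)
{
	*v = (int)((unsigned)*v + 1u);
	return *v;
}

int main(void)
{
	int flag = INT_MAX;
	printf("wrapped to %d\n", unchecked_inc(&flag)); /* INT_MIN, harmless */

	int ref = INT_MAX;
	checked_inc(&ref);                               /* aborts here */
	return 0;
}

The rule of thumb visible throughout this patch: anything whose value guards an object's lifetime stays a checked atomic_t; pure statistics, sequence numbers, and flags become _unchecked.
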
40228diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
40229index 76f157b..9c0db1b 100644
40230--- a/drivers/hwmon/via-cputemp.c
40231+++ b/drivers/hwmon/via-cputemp.c
40232@@ -296,7 +296,7 @@ static int __cpuinit via_cputemp_cpu_callback(struct notifier_block *nfb,
40233 return NOTIFY_OK;
40234 }
40235
40236-static struct notifier_block via_cputemp_cpu_notifier __refdata = {
40237+static struct notifier_block via_cputemp_cpu_notifier = {
40238 .notifier_call = via_cputemp_cpu_callback,
40239 };
40240
40241diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
40242index 07f01ac..d79ad3d 100644
40243--- a/drivers/i2c/busses/i2c-amd756-s4882.c
40244+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
40245@@ -43,7 +43,7 @@
40246 extern struct i2c_adapter amd756_smbus;
40247
40248 static struct i2c_adapter *s4882_adapter;
40249-static struct i2c_algorithm *s4882_algo;
40250+static i2c_algorithm_no_const *s4882_algo;
40251
40252 /* Wrapper access functions for multiplexed SMBus */
40253 static DEFINE_MUTEX(amd756_lock);
40254diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
40255index 2ca268d..c6acbdf 100644
40256--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
40257+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
40258@@ -41,7 +41,7 @@
40259 extern struct i2c_adapter *nforce2_smbus;
40260
40261 static struct i2c_adapter *s4985_adapter;
40262-static struct i2c_algorithm *s4985_algo;
40263+static i2c_algorithm_no_const *s4985_algo;
40264
40265 /* Wrapper access functions for multiplexed SMBus */
40266 static DEFINE_MUTEX(nforce2_lock);
40267diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
40268index c3ccdea..5b3dc1a 100644
40269--- a/drivers/i2c/i2c-dev.c
40270+++ b/drivers/i2c/i2c-dev.c
40271@@ -271,7 +271,7 @@ static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client,
40272 break;
40273 }
40274
40275- data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
40276+ data_ptrs[i] = (u8 __force_user *)rdwr_pa[i].buf;
40277 rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len);
40278 if (IS_ERR(rdwr_pa[i].buf)) {
40279 res = PTR_ERR(rdwr_pa[i].buf);
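
__force_user combines sparse's __force and __user annotations: with grsecurity treating kernel and user pointers as distinct address spaces, a cast that deliberately reinterprets a stored kernel-side pointer value as a userspace address has to say so explicitly, or sparse flags it. A sketch of how such markers are conventionally defined and used (definitions paraphrased, not quoted from this tree):

/* Only the 'sparse' static analyser sees these; GCC compiles them away. */
#ifdef __CHECKER__
# define __user   __attribute__((noderef, address_space(1)))
# define __force  __attribute__((force))
#else
# define __user
# define __force
#endif
#define __force_user  __force __user

struct req { void *buf; };             /* driver stores a plain pointer */

static long use_request(struct req *r)
{
	/* Explicit address-space change: this pointer really came from
	 * userspace, and the uaccess helpers will treat it as such. */
	const char __force_user *ubuf = (const char __force_user *)r->buf;
	(void)ubuf;
	return 0;
}

int main(void)
{
	struct req r = { .buf = 0 };
	return (int)use_request(&r);
}
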
40280diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
40281index 2ff6204..218c16e 100644
40282--- a/drivers/ide/ide-cd.c
40283+++ b/drivers/ide/ide-cd.c
40284@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
40285 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
40286 if ((unsigned long)buf & alignment
40287 || blk_rq_bytes(rq) & q->dma_pad_mask
40288- || object_is_on_stack(buf))
40289+ || object_starts_on_stack(buf))
40290 drive->dma = 0;
40291 }
40292 }
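
The ide-cd check exists because a request buffer on the kernel stack must never be handed to DMA; grsecurity renames the helper to object_starts_on_stack to make clear it tests where the object begins. A rough userspace analog of the range test (the kernel uses the task's real stack bounds; the fixed window below is purely illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define STACK_GUESS (64 * 1024)	/* illustrative bound, not a real API */

static bool starts_on_stack(const void *obj)
{
	/* '&obj' is a point inside the current stack frame; test whether
	 * 'obj' begins within a window around it.  The kernel helper
	 * uses the task's real stack limits instead. */
	uintptr_t here = (uintptr_t)&obj;
	uintptr_t p = (uintptr_t)obj;
	return p >= here - STACK_GUESS && p < here + STACK_GUESS;
}

int main(void)
{
	char local[16];
	static char global[16];
	printf("local:  %d\n", starts_on_stack(local));  /* 1 */
	printf("global: %d\n", starts_on_stack(global)); /* expect 0 */
	return 0;
}
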
40293diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
40294index e145931..08bfc59 100644
40295--- a/drivers/iio/industrialio-core.c
40296+++ b/drivers/iio/industrialio-core.c
40297@@ -506,7 +506,7 @@ static ssize_t iio_write_channel_info(struct device *dev,
40298 }
40299
40300 static
40301-int __iio_device_attr_init(struct device_attribute *dev_attr,
40302+int __iio_device_attr_init(device_attribute_no_const *dev_attr,
40303 const char *postfix,
40304 struct iio_chan_spec const *chan,
40305 ssize_t (*readfunc)(struct device *dev,
40306diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
40307index 784b97c..c9ceadf 100644
40308--- a/drivers/infiniband/core/cm.c
40309+++ b/drivers/infiniband/core/cm.c
40310@@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
40311
40312 struct cm_counter_group {
40313 struct kobject obj;
40314- atomic_long_t counter[CM_ATTR_COUNT];
40315+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
40316 };
40317
40318 struct cm_counter_attribute {
40319@@ -1395,7 +1395,7 @@ static void cm_dup_req_handler(struct cm_work *work,
40320 struct ib_mad_send_buf *msg = NULL;
40321 int ret;
40322
40323- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
40324+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
40325 counter[CM_REQ_COUNTER]);
40326
40327 /* Quick state check to discard duplicate REQs. */
40328@@ -1779,7 +1779,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
40329 if (!cm_id_priv)
40330 return;
40331
40332- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
40333+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
40334 counter[CM_REP_COUNTER]);
40335 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
40336 if (ret)
40337@@ -1946,7 +1946,7 @@ static int cm_rtu_handler(struct cm_work *work)
40338 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
40339 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
40340 spin_unlock_irq(&cm_id_priv->lock);
40341- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
40342+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
40343 counter[CM_RTU_COUNTER]);
40344 goto out;
40345 }
40346@@ -2129,7 +2129,7 @@ static int cm_dreq_handler(struct cm_work *work)
40347 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
40348 dreq_msg->local_comm_id);
40349 if (!cm_id_priv) {
40350- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
40351+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
40352 counter[CM_DREQ_COUNTER]);
40353 cm_issue_drep(work->port, work->mad_recv_wc);
40354 return -EINVAL;
40355@@ -2154,7 +2154,7 @@ static int cm_dreq_handler(struct cm_work *work)
40356 case IB_CM_MRA_REP_RCVD:
40357 break;
40358 case IB_CM_TIMEWAIT:
40359- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
40360+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
40361 counter[CM_DREQ_COUNTER]);
40362 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
40363 goto unlock;
40364@@ -2168,7 +2168,7 @@ static int cm_dreq_handler(struct cm_work *work)
40365 cm_free_msg(msg);
40366 goto deref;
40367 case IB_CM_DREQ_RCVD:
40368- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
40369+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
40370 counter[CM_DREQ_COUNTER]);
40371 goto unlock;
40372 default:
40373@@ -2535,7 +2535,7 @@ static int cm_mra_handler(struct cm_work *work)
40374 ib_modify_mad(cm_id_priv->av.port->mad_agent,
40375 cm_id_priv->msg, timeout)) {
40376 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
40377- atomic_long_inc(&work->port->
40378+ atomic_long_inc_unchecked(&work->port->
40379 counter_group[CM_RECV_DUPLICATES].
40380 counter[CM_MRA_COUNTER]);
40381 goto out;
40382@@ -2544,7 +2544,7 @@ static int cm_mra_handler(struct cm_work *work)
40383 break;
40384 case IB_CM_MRA_REQ_RCVD:
40385 case IB_CM_MRA_REP_RCVD:
40386- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
40387+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
40388 counter[CM_MRA_COUNTER]);
40389 /* fall through */
40390 default:
40391@@ -2706,7 +2706,7 @@ static int cm_lap_handler(struct cm_work *work)
40392 case IB_CM_LAP_IDLE:
40393 break;
40394 case IB_CM_MRA_LAP_SENT:
40395- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
40396+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
40397 counter[CM_LAP_COUNTER]);
40398 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
40399 goto unlock;
40400@@ -2722,7 +2722,7 @@ static int cm_lap_handler(struct cm_work *work)
40401 cm_free_msg(msg);
40402 goto deref;
40403 case IB_CM_LAP_RCVD:
40404- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
40405+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
40406 counter[CM_LAP_COUNTER]);
40407 goto unlock;
40408 default:
40409@@ -3006,7 +3006,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
40410 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
40411 if (cur_cm_id_priv) {
40412 spin_unlock_irq(&cm.lock);
40413- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
40414+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
40415 counter[CM_SIDR_REQ_COUNTER]);
40416 goto out; /* Duplicate message. */
40417 }
40418@@ -3218,10 +3218,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
40419 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
40420 msg->retries = 1;
40421
40422- atomic_long_add(1 + msg->retries,
40423+ atomic_long_add_unchecked(1 + msg->retries,
40424 &port->counter_group[CM_XMIT].counter[attr_index]);
40425 if (msg->retries)
40426- atomic_long_add(msg->retries,
40427+ atomic_long_add_unchecked(msg->retries,
40428 &port->counter_group[CM_XMIT_RETRIES].
40429 counter[attr_index]);
40430
40431@@ -3431,7 +3431,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
40432 }
40433
40434 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
40435- atomic_long_inc(&port->counter_group[CM_RECV].
40436+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
40437 counter[attr_id - CM_ATTR_ID_OFFSET]);
40438
40439 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
40440@@ -3636,7 +3636,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
40441 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
40442
40443 return sprintf(buf, "%ld\n",
40444- atomic_long_read(&group->counter[cm_attr->index]));
40445+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
40446 }
40447
40448 static const struct sysfs_ops cm_counter_ops = {
40449diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
40450index 9f5ad7c..588cd84 100644
40451--- a/drivers/infiniband/core/fmr_pool.c
40452+++ b/drivers/infiniband/core/fmr_pool.c
40453@@ -98,8 +98,8 @@ struct ib_fmr_pool {
40454
40455 struct task_struct *thread;
40456
40457- atomic_t req_ser;
40458- atomic_t flush_ser;
40459+ atomic_unchecked_t req_ser;
40460+ atomic_unchecked_t flush_ser;
40461
40462 wait_queue_head_t force_wait;
40463 };
40464@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
40465 struct ib_fmr_pool *pool = pool_ptr;
40466
40467 do {
40468- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
40469+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
40470 ib_fmr_batch_release(pool);
40471
40472- atomic_inc(&pool->flush_ser);
40473+ atomic_inc_unchecked(&pool->flush_ser);
40474 wake_up_interruptible(&pool->force_wait);
40475
40476 if (pool->flush_function)
40477@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
40478 }
40479
40480 set_current_state(TASK_INTERRUPTIBLE);
40481- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
40482+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
40483 !kthread_should_stop())
40484 schedule();
40485 __set_current_state(TASK_RUNNING);
40486@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
40487 pool->dirty_watermark = params->dirty_watermark;
40488 pool->dirty_len = 0;
40489 spin_lock_init(&pool->pool_lock);
40490- atomic_set(&pool->req_ser, 0);
40491- atomic_set(&pool->flush_ser, 0);
40492+ atomic_set_unchecked(&pool->req_ser, 0);
40493+ atomic_set_unchecked(&pool->flush_ser, 0);
40494 init_waitqueue_head(&pool->force_wait);
40495
40496 pool->thread = kthread_run(ib_fmr_cleanup_thread,
40497@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
40498 }
40499 spin_unlock_irq(&pool->pool_lock);
40500
40501- serial = atomic_inc_return(&pool->req_ser);
40502+ serial = atomic_inc_return_unchecked(&pool->req_ser);
40503 wake_up_process(pool->thread);
40504
40505 if (wait_event_interruptible(pool->force_wait,
40506- atomic_read(&pool->flush_ser) - serial >= 0))
40507+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
40508 return -EINTR;
40509
40510 return 0;
40511@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
40512 } else {
40513 list_add_tail(&fmr->list, &pool->dirty_list);
40514 if (++pool->dirty_len >= pool->dirty_watermark) {
40515- atomic_inc(&pool->req_ser);
40516+ atomic_inc_unchecked(&pool->req_ser);
40517 wake_up_process(pool->thread);
40518 }
40519 }
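
The fmr_pool counters are a textbook case for the _unchecked conversion: req_ser and flush_ser are sequence numbers compared via a signed difference, an idiom that depends on wraparound being well defined, so overflow-trapping atomics would fire a false positive exactly when the counters legitimately wrap. A sketch of why the signed-difference comparison survives wraparound:

#include <stdint.h>
#include <stdio.h>

/* "a is before b" in sequence space, valid even across wraparound,
 * as long as the two counters are less than 2^31 apart. */
static int seq_before(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

int main(void)
{
	uint32_t req = 0xfffffffe, flush = 1;	/* flush wrapped past req */
	printf("%d\n", seq_before(flush, req));	/* 0: flush is not behind */
	printf("%d\n", seq_before(req, flush));	/* 1: req is behind flush */
	return 0;
}

The same reasoning covers the per-port CM_RECV/CM_XMIT statistics arrays in cm.c above: they only feed sysfs output, never a lifetime decision.
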
40520diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
40521index 4cb8eb2..146bf60 100644
40522--- a/drivers/infiniband/hw/cxgb4/mem.c
40523+++ b/drivers/infiniband/hw/cxgb4/mem.c
40524@@ -249,7 +249,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
40525 int err;
40526 struct fw_ri_tpte tpt;
40527 u32 stag_idx;
40528- static atomic_t key;
40529+ static atomic_unchecked_t key;
40530
40531 if (c4iw_fatal_error(rdev))
40532 return -EIO;
40533@@ -266,7 +266,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
40534 if (rdev->stats.stag.cur > rdev->stats.stag.max)
40535 rdev->stats.stag.max = rdev->stats.stag.cur;
40536 mutex_unlock(&rdev->stats.lock);
40537- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
40538+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
40539 }
40540 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
40541 __func__, stag_state, type, pdid, stag_idx);
40542diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
40543index 79b3dbc..96e5fcc 100644
40544--- a/drivers/infiniband/hw/ipath/ipath_rc.c
40545+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
40546@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
40547 struct ib_atomic_eth *ateth;
40548 struct ipath_ack_entry *e;
40549 u64 vaddr;
40550- atomic64_t *maddr;
40551+ atomic64_unchecked_t *maddr;
40552 u64 sdata;
40553 u32 rkey;
40554 u8 next;
40555@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
40556 IB_ACCESS_REMOTE_ATOMIC)))
40557 goto nack_acc_unlck;
40558 /* Perform atomic OP and save result. */
40559- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
40560+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
40561 sdata = be64_to_cpu(ateth->swap_data);
40562 e = &qp->s_ack_queue[qp->r_head_ack_queue];
40563 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
40564- (u64) atomic64_add_return(sdata, maddr) - sdata :
40565+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
40566 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
40567 be64_to_cpu(ateth->compare_data),
40568 sdata);
40569diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
40570index 1f95bba..9530f87 100644
40571--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
40572+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
40573@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
40574 unsigned long flags;
40575 struct ib_wc wc;
40576 u64 sdata;
40577- atomic64_t *maddr;
40578+ atomic64_unchecked_t *maddr;
40579 enum ib_wc_status send_status;
40580
40581 /*
40582@@ -382,11 +382,11 @@ again:
40583 IB_ACCESS_REMOTE_ATOMIC)))
40584 goto acc_err;
40585 /* Perform atomic OP and save result. */
40586- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
40587+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
40588 sdata = wqe->wr.wr.atomic.compare_add;
40589 *(u64 *) sqp->s_sge.sge.vaddr =
40590 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
40591- (u64) atomic64_add_return(sdata, maddr) - sdata :
40592+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
40593 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
40594 sdata, wqe->wr.wr.atomic.swap);
40595 goto send_comp;
40596diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
40597index 9d3e5c1..d9afe4a 100644
40598--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
40599+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
40600@@ -772,7 +772,7 @@ static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
40601 mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n");
40602 }
40603
40604-int mthca_QUERY_FW(struct mthca_dev *dev)
40605+int __intentional_overflow(-1) mthca_QUERY_FW(struct mthca_dev *dev)
40606 {
40607 struct mthca_mailbox *mailbox;
40608 u32 *outbox;
40609diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
40610index ed9a989..e0c5871 100644
40611--- a/drivers/infiniband/hw/mthca/mthca_mr.c
40612+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
40613@@ -426,7 +426,7 @@ static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
40614 return key;
40615 }
40616
40617-int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
40618+int __intentional_overflow(-1) mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
40619 u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
40620 {
40621 struct mthca_mailbox *mailbox;
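
__intentional_overflow(-1) is an annotation for grsecurity's size_overflow GCC plugin: it tells the plugin not to instrument a function whose arithmetic wraps or mixes signedness on purpose, avoiding false-positive kills on paths like the mailbox math here. Without the plugin the marker must compile away; a sketch with a hypothetical guard macro and, as payload, the deliberate 8-bit key wrap from the cxgb4 hunk earlier in this section:

#include <stdio.h>

#ifdef SIZE_OVERFLOW_PLUGIN
# define __intentional_overflow(...) \
	__attribute__((intentional_overflow(__VA_ARGS__)))
#else
# define __intentional_overflow(...)
#endif

static unsigned int __intentional_overflow(-1)
next_stag(unsigned int idx, unsigned char *key)
{
	return (idx << 8) | ((*key)++ & 0xff);	/* u8 wrap is intended */
}

int main(void)
{
	unsigned char key = 0xfe;
	for (int i = 0; i < 4; i++)
		printf("stag %#x\n", next_stag(7, &key)); /* key: fe ff 00 01 */
	return 0;
}
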
40622diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
40623index 4291410..d2ab1fb 100644
40624--- a/drivers/infiniband/hw/nes/nes.c
40625+++ b/drivers/infiniband/hw/nes/nes.c
40626@@ -98,7 +98,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
40627 LIST_HEAD(nes_adapter_list);
40628 static LIST_HEAD(nes_dev_list);
40629
40630-atomic_t qps_destroyed;
40631+atomic_unchecked_t qps_destroyed;
40632
40633 static unsigned int ee_flsh_adapter;
40634 static unsigned int sysfs_nonidx_addr;
40635@@ -269,7 +269,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
40636 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
40637 struct nes_adapter *nesadapter = nesdev->nesadapter;
40638
40639- atomic_inc(&qps_destroyed);
40640+ atomic_inc_unchecked(&qps_destroyed);
40641
40642 /* Free the control structures */
40643
40644diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
40645index 33cc589..3bd6538 100644
40646--- a/drivers/infiniband/hw/nes/nes.h
40647+++ b/drivers/infiniband/hw/nes/nes.h
40648@@ -177,17 +177,17 @@ extern unsigned int nes_debug_level;
40649 extern unsigned int wqm_quanta;
40650 extern struct list_head nes_adapter_list;
40651
40652-extern atomic_t cm_connects;
40653-extern atomic_t cm_accepts;
40654-extern atomic_t cm_disconnects;
40655-extern atomic_t cm_closes;
40656-extern atomic_t cm_connecteds;
40657-extern atomic_t cm_connect_reqs;
40658-extern atomic_t cm_rejects;
40659-extern atomic_t mod_qp_timouts;
40660-extern atomic_t qps_created;
40661-extern atomic_t qps_destroyed;
40662-extern atomic_t sw_qps_destroyed;
40663+extern atomic_unchecked_t cm_connects;
40664+extern atomic_unchecked_t cm_accepts;
40665+extern atomic_unchecked_t cm_disconnects;
40666+extern atomic_unchecked_t cm_closes;
40667+extern atomic_unchecked_t cm_connecteds;
40668+extern atomic_unchecked_t cm_connect_reqs;
40669+extern atomic_unchecked_t cm_rejects;
40670+extern atomic_unchecked_t mod_qp_timouts;
40671+extern atomic_unchecked_t qps_created;
40672+extern atomic_unchecked_t qps_destroyed;
40673+extern atomic_unchecked_t sw_qps_destroyed;
40674 extern u32 mh_detected;
40675 extern u32 mh_pauses_sent;
40676 extern u32 cm_packets_sent;
40677@@ -196,16 +196,16 @@ extern u32 cm_packets_created;
40678 extern u32 cm_packets_received;
40679 extern u32 cm_packets_dropped;
40680 extern u32 cm_packets_retrans;
40681-extern atomic_t cm_listens_created;
40682-extern atomic_t cm_listens_destroyed;
40683+extern atomic_unchecked_t cm_listens_created;
40684+extern atomic_unchecked_t cm_listens_destroyed;
40685 extern u32 cm_backlog_drops;
40686-extern atomic_t cm_loopbacks;
40687-extern atomic_t cm_nodes_created;
40688-extern atomic_t cm_nodes_destroyed;
40689-extern atomic_t cm_accel_dropped_pkts;
40690-extern atomic_t cm_resets_recvd;
40691-extern atomic_t pau_qps_created;
40692-extern atomic_t pau_qps_destroyed;
40693+extern atomic_unchecked_t cm_loopbacks;
40694+extern atomic_unchecked_t cm_nodes_created;
40695+extern atomic_unchecked_t cm_nodes_destroyed;
40696+extern atomic_unchecked_t cm_accel_dropped_pkts;
40697+extern atomic_unchecked_t cm_resets_recvd;
40698+extern atomic_unchecked_t pau_qps_created;
40699+extern atomic_unchecked_t pau_qps_destroyed;
40700
40701 extern u32 int_mod_timer_init;
40702 extern u32 int_mod_cq_depth_256;
40703diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
40704index 24b9f1a..00fd004 100644
40705--- a/drivers/infiniband/hw/nes/nes_cm.c
40706+++ b/drivers/infiniband/hw/nes/nes_cm.c
40707@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
40708 u32 cm_packets_retrans;
40709 u32 cm_packets_created;
40710 u32 cm_packets_received;
40711-atomic_t cm_listens_created;
40712-atomic_t cm_listens_destroyed;
40713+atomic_unchecked_t cm_listens_created;
40714+atomic_unchecked_t cm_listens_destroyed;
40715 u32 cm_backlog_drops;
40716-atomic_t cm_loopbacks;
40717-atomic_t cm_nodes_created;
40718-atomic_t cm_nodes_destroyed;
40719-atomic_t cm_accel_dropped_pkts;
40720-atomic_t cm_resets_recvd;
40721+atomic_unchecked_t cm_loopbacks;
40722+atomic_unchecked_t cm_nodes_created;
40723+atomic_unchecked_t cm_nodes_destroyed;
40724+atomic_unchecked_t cm_accel_dropped_pkts;
40725+atomic_unchecked_t cm_resets_recvd;
40726
40727 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
40728 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
40729@@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
40730
40731 static struct nes_cm_core *g_cm_core;
40732
40733-atomic_t cm_connects;
40734-atomic_t cm_accepts;
40735-atomic_t cm_disconnects;
40736-atomic_t cm_closes;
40737-atomic_t cm_connecteds;
40738-atomic_t cm_connect_reqs;
40739-atomic_t cm_rejects;
40740+atomic_unchecked_t cm_connects;
40741+atomic_unchecked_t cm_accepts;
40742+atomic_unchecked_t cm_disconnects;
40743+atomic_unchecked_t cm_closes;
40744+atomic_unchecked_t cm_connecteds;
40745+atomic_unchecked_t cm_connect_reqs;
40746+atomic_unchecked_t cm_rejects;
40747
40748 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
40749 {
40750@@ -1272,7 +1272,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
40751 kfree(listener);
40752 listener = NULL;
40753 ret = 0;
40754- atomic_inc(&cm_listens_destroyed);
40755+ atomic_inc_unchecked(&cm_listens_destroyed);
40756 } else {
40757 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
40758 }
40759@@ -1466,7 +1466,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
40760 cm_node->rem_mac);
40761
40762 add_hte_node(cm_core, cm_node);
40763- atomic_inc(&cm_nodes_created);
40764+ atomic_inc_unchecked(&cm_nodes_created);
40765
40766 return cm_node;
40767 }
40768@@ -1524,7 +1524,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
40769 }
40770
40771 atomic_dec(&cm_core->node_cnt);
40772- atomic_inc(&cm_nodes_destroyed);
40773+ atomic_inc_unchecked(&cm_nodes_destroyed);
40774 nesqp = cm_node->nesqp;
40775 if (nesqp) {
40776 nesqp->cm_node = NULL;
40777@@ -1588,7 +1588,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
40778
40779 static void drop_packet(struct sk_buff *skb)
40780 {
40781- atomic_inc(&cm_accel_dropped_pkts);
40782+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
40783 dev_kfree_skb_any(skb);
40784 }
40785
40786@@ -1651,7 +1651,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
40787 {
40788
40789 int reset = 0; /* whether to send reset in case of err.. */
40790- atomic_inc(&cm_resets_recvd);
40791+ atomic_inc_unchecked(&cm_resets_recvd);
40792 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
40793 " refcnt=%d\n", cm_node, cm_node->state,
40794 atomic_read(&cm_node->ref_count));
40795@@ -2292,7 +2292,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
40796 rem_ref_cm_node(cm_node->cm_core, cm_node);
40797 return NULL;
40798 }
40799- atomic_inc(&cm_loopbacks);
40800+ atomic_inc_unchecked(&cm_loopbacks);
40801 loopbackremotenode->loopbackpartner = cm_node;
40802 loopbackremotenode->tcp_cntxt.rcv_wscale =
40803 NES_CM_DEFAULT_RCV_WND_SCALE;
40804@@ -2567,7 +2567,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
40805 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
40806 else {
40807 rem_ref_cm_node(cm_core, cm_node);
40808- atomic_inc(&cm_accel_dropped_pkts);
40809+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
40810 dev_kfree_skb_any(skb);
40811 }
40812 break;
40813@@ -2875,7 +2875,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
40814
40815 if ((cm_id) && (cm_id->event_handler)) {
40816 if (issue_disconn) {
40817- atomic_inc(&cm_disconnects);
40818+ atomic_inc_unchecked(&cm_disconnects);
40819 cm_event.event = IW_CM_EVENT_DISCONNECT;
40820 cm_event.status = disconn_status;
40821 cm_event.local_addr = cm_id->local_addr;
40822@@ -2897,7 +2897,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
40823 }
40824
40825 if (issue_close) {
40826- atomic_inc(&cm_closes);
40827+ atomic_inc_unchecked(&cm_closes);
40828 nes_disconnect(nesqp, 1);
40829
40830 cm_id->provider_data = nesqp;
40831@@ -3033,7 +3033,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
40832
40833 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
40834 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
40835- atomic_inc(&cm_accepts);
40836+ atomic_inc_unchecked(&cm_accepts);
40837
40838 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
40839 netdev_refcnt_read(nesvnic->netdev));
40840@@ -3228,7 +3228,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
40841 struct nes_cm_core *cm_core;
40842 u8 *start_buff;
40843
40844- atomic_inc(&cm_rejects);
40845+ atomic_inc_unchecked(&cm_rejects);
40846 cm_node = (struct nes_cm_node *)cm_id->provider_data;
40847 loopback = cm_node->loopbackpartner;
40848 cm_core = cm_node->cm_core;
40849@@ -3288,7 +3288,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
40850 ntohl(cm_id->local_addr.sin_addr.s_addr),
40851 ntohs(cm_id->local_addr.sin_port));
40852
40853- atomic_inc(&cm_connects);
40854+ atomic_inc_unchecked(&cm_connects);
40855 nesqp->active_conn = 1;
40856
40857 /* cache the cm_id in the qp */
40858@@ -3398,7 +3398,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
40859 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
40860 return err;
40861 }
40862- atomic_inc(&cm_listens_created);
40863+ atomic_inc_unchecked(&cm_listens_created);
40864 }
40865
40866 cm_id->add_ref(cm_id);
40867@@ -3499,7 +3499,7 @@ static void cm_event_connected(struct nes_cm_event *event)
40868
40869 if (nesqp->destroyed)
40870 return;
40871- atomic_inc(&cm_connecteds);
40872+ atomic_inc_unchecked(&cm_connecteds);
40873 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
40874 " local port 0x%04X. jiffies = %lu.\n",
40875 nesqp->hwqp.qp_id,
40876@@ -3679,7 +3679,7 @@ static void cm_event_reset(struct nes_cm_event *event)
40877
40878 cm_id->add_ref(cm_id);
40879 ret = cm_id->event_handler(cm_id, &cm_event);
40880- atomic_inc(&cm_closes);
40881+ atomic_inc_unchecked(&cm_closes);
40882 cm_event.event = IW_CM_EVENT_CLOSE;
40883 cm_event.status = 0;
40884 cm_event.provider_data = cm_id->provider_data;
40885@@ -3715,7 +3715,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
40886 return;
40887 cm_id = cm_node->cm_id;
40888
40889- atomic_inc(&cm_connect_reqs);
40890+ atomic_inc_unchecked(&cm_connect_reqs);
40891 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
40892 cm_node, cm_id, jiffies);
40893
40894@@ -3755,7 +3755,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
40895 return;
40896 cm_id = cm_node->cm_id;
40897
40898- atomic_inc(&cm_connect_reqs);
40899+ atomic_inc_unchecked(&cm_connect_reqs);
40900 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
40901 cm_node, cm_id, jiffies);
40902
40903diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
40904index 4166452..fc952c3 100644
40905--- a/drivers/infiniband/hw/nes/nes_mgt.c
40906+++ b/drivers/infiniband/hw/nes/nes_mgt.c
40907@@ -40,8 +40,8 @@
40908 #include "nes.h"
40909 #include "nes_mgt.h"
40910
40911-atomic_t pau_qps_created;
40912-atomic_t pau_qps_destroyed;
40913+atomic_unchecked_t pau_qps_created;
40914+atomic_unchecked_t pau_qps_destroyed;
40915
40916 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
40917 {
40918@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
40919 {
40920 struct sk_buff *skb;
40921 unsigned long flags;
40922- atomic_inc(&pau_qps_destroyed);
40923+ atomic_inc_unchecked(&pau_qps_destroyed);
40924
40925 /* Free packets that have not yet been forwarded */
40926 /* Lock is acquired by skb_dequeue when removing the skb */
40927@@ -810,7 +810,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
40928 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
40929 skb_queue_head_init(&nesqp->pau_list);
40930 spin_lock_init(&nesqp->pau_lock);
40931- atomic_inc(&pau_qps_created);
40932+ atomic_inc_unchecked(&pau_qps_created);
40933 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
40934 }
40935
40936diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
40937index 49eb511..a774366 100644
40938--- a/drivers/infiniband/hw/nes/nes_nic.c
40939+++ b/drivers/infiniband/hw/nes/nes_nic.c
40940@@ -1273,39 +1273,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
40941 target_stat_values[++index] = mh_detected;
40942 target_stat_values[++index] = mh_pauses_sent;
40943 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
40944- target_stat_values[++index] = atomic_read(&cm_connects);
40945- target_stat_values[++index] = atomic_read(&cm_accepts);
40946- target_stat_values[++index] = atomic_read(&cm_disconnects);
40947- target_stat_values[++index] = atomic_read(&cm_connecteds);
40948- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
40949- target_stat_values[++index] = atomic_read(&cm_rejects);
40950- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
40951- target_stat_values[++index] = atomic_read(&qps_created);
40952- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
40953- target_stat_values[++index] = atomic_read(&qps_destroyed);
40954- target_stat_values[++index] = atomic_read(&cm_closes);
40955+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
40956+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
40957+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
40958+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
40959+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
40960+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
40961+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
40962+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
40963+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
40964+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
40965+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
40966 target_stat_values[++index] = cm_packets_sent;
40967 target_stat_values[++index] = cm_packets_bounced;
40968 target_stat_values[++index] = cm_packets_created;
40969 target_stat_values[++index] = cm_packets_received;
40970 target_stat_values[++index] = cm_packets_dropped;
40971 target_stat_values[++index] = cm_packets_retrans;
40972- target_stat_values[++index] = atomic_read(&cm_listens_created);
40973- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
40974+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
40975+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
40976 target_stat_values[++index] = cm_backlog_drops;
40977- target_stat_values[++index] = atomic_read(&cm_loopbacks);
40978- target_stat_values[++index] = atomic_read(&cm_nodes_created);
40979- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
40980- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
40981- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
40982+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
40983+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
40984+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
40985+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
40986+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
40987 target_stat_values[++index] = nesadapter->free_4kpbl;
40988 target_stat_values[++index] = nesadapter->free_256pbl;
40989 target_stat_values[++index] = int_mod_timer_init;
40990 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
40991 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
40992 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
40993- target_stat_values[++index] = atomic_read(&pau_qps_created);
40994- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
40995+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
40996+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
40997 }
40998
40999 /**
41000diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
41001index 8f67fe2..8960859 100644
41002--- a/drivers/infiniband/hw/nes/nes_verbs.c
41003+++ b/drivers/infiniband/hw/nes/nes_verbs.c
41004@@ -46,9 +46,9 @@
41005
41006 #include <rdma/ib_umem.h>
41007
41008-atomic_t mod_qp_timouts;
41009-atomic_t qps_created;
41010-atomic_t sw_qps_destroyed;
41011+atomic_unchecked_t mod_qp_timouts;
41012+atomic_unchecked_t qps_created;
41013+atomic_unchecked_t sw_qps_destroyed;
41014
41015 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
41016
41017@@ -1134,7 +1134,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
41018 if (init_attr->create_flags)
41019 return ERR_PTR(-EINVAL);
41020
41021- atomic_inc(&qps_created);
41022+ atomic_inc_unchecked(&qps_created);
41023 switch (init_attr->qp_type) {
41024 case IB_QPT_RC:
41025 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
41026@@ -1465,7 +1465,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
41027 struct iw_cm_event cm_event;
41028 int ret = 0;
41029
41030- atomic_inc(&sw_qps_destroyed);
41031+ atomic_inc_unchecked(&sw_qps_destroyed);
41032 nesqp->destroyed = 1;
41033
41034 /* Blow away the connection if it exists. */
41035diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
41036index 4d11575..3e890e5 100644
41037--- a/drivers/infiniband/hw/qib/qib.h
41038+++ b/drivers/infiniband/hw/qib/qib.h
41039@@ -51,6 +51,7 @@
41040 #include <linux/completion.h>
41041 #include <linux/kref.h>
41042 #include <linux/sched.h>
41043+#include <linux/slab.h>
41044
41045 #include "qib_common.h"
41046 #include "qib_verbs.h"
41047diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
41048index da739d9..da1c7f4 100644
41049--- a/drivers/input/gameport/gameport.c
41050+++ b/drivers/input/gameport/gameport.c
41051@@ -487,14 +487,14 @@ EXPORT_SYMBOL(gameport_set_phys);
41052 */
41053 static void gameport_init_port(struct gameport *gameport)
41054 {
41055- static atomic_t gameport_no = ATOMIC_INIT(0);
41056+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
41057
41058 __module_get(THIS_MODULE);
41059
41060 mutex_init(&gameport->drv_mutex);
41061 device_initialize(&gameport->dev);
41062 dev_set_name(&gameport->dev, "gameport%lu",
41063- (unsigned long)atomic_inc_return(&gameport_no) - 1);
41064+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
41065 gameport->dev.bus = &gameport_bus;
41066 gameport->dev.release = gameport_release_port;
41067 if (gameport->parent)
41068diff --git a/drivers/input/input.c b/drivers/input/input.c
41069index c044699..174d71a 100644
41070--- a/drivers/input/input.c
41071+++ b/drivers/input/input.c
41072@@ -2019,7 +2019,7 @@ static void devm_input_device_unregister(struct device *dev, void *res)
41073 */
41074 int input_register_device(struct input_dev *dev)
41075 {
41076- static atomic_t input_no = ATOMIC_INIT(0);
41077+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
41078 struct input_devres *devres = NULL;
41079 struct input_handler *handler;
41080 unsigned int packet_size;
41081@@ -2074,7 +2074,7 @@ int input_register_device(struct input_dev *dev)
41082 dev->setkeycode = input_default_setkeycode;
41083
41084 dev_set_name(&dev->dev, "input%ld",
41085- (unsigned long) atomic_inc_return(&input_no) - 1);
41086+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
41087
41088 error = device_add(&dev->dev);
41089 if (error)
41090diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
41091index 04c69af..5f92d00 100644
41092--- a/drivers/input/joystick/sidewinder.c
41093+++ b/drivers/input/joystick/sidewinder.c
41094@@ -30,6 +30,7 @@
41095 #include <linux/kernel.h>
41096 #include <linux/module.h>
41097 #include <linux/slab.h>
41098+#include <linux/sched.h>
41099 #include <linux/init.h>
41100 #include <linux/input.h>
41101 #include <linux/gameport.h>
41102diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
41103index fa061d4..4a6957c 100644
41104--- a/drivers/input/joystick/xpad.c
41105+++ b/drivers/input/joystick/xpad.c
41106@@ -735,7 +735,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
41107
41108 static int xpad_led_probe(struct usb_xpad *xpad)
41109 {
41110- static atomic_t led_seq = ATOMIC_INIT(0);
41111+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
41112 long led_no;
41113 struct xpad_led *led;
41114 struct led_classdev *led_cdev;
41115@@ -748,7 +748,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
41116 if (!led)
41117 return -ENOMEM;
41118
41119- led_no = (long)atomic_inc_return(&led_seq) - 1;
41120+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
41121
41122 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
41123 led->xpad = xpad;
41124diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
41125index 2f0b39d..7370f13 100644
41126--- a/drivers/input/mouse/psmouse.h
41127+++ b/drivers/input/mouse/psmouse.h
41128@@ -116,7 +116,7 @@ struct psmouse_attribute {
41129 ssize_t (*set)(struct psmouse *psmouse, void *data,
41130 const char *buf, size_t count);
41131 bool protect;
41132-};
41133+} __do_const;
41134 #define to_psmouse_attr(a) container_of((a), struct psmouse_attribute, dattr)
41135
41136 ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *attr,
41137diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
41138index 4c842c3..590b0bf 100644
41139--- a/drivers/input/mousedev.c
41140+++ b/drivers/input/mousedev.c
41141@@ -738,7 +738,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
41142
41143 spin_unlock_irq(&client->packet_lock);
41144
41145- if (copy_to_user(buffer, data, count))
41146+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
41147 return -EFAULT;
41148
41149 return count;
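
The added 'count > sizeof(data)' test in mousedev_read is defence in depth: count is believed to be clamped earlier, but the guard makes the copy site locally provably bounded, so a logic error elsewhere can no longer turn copy_to_user() into a stack infoleak. A minimal sketch of the pattern (memcpy standing in for copy_to_user):

#include <errno.h>
#include <string.h>

/* Copy at most sizeof(kbuf) bytes out to the caller; reject anything
 * larger instead of trusting earlier clamping. */
static int bounded_copy_out(char *dst, size_t count)
{
	char kbuf[64] = "sensor packet";

	if (count > sizeof(kbuf))
		return -EINVAL;       /* the kernel code returns -EFAULT */
	memcpy(dst, kbuf, count);     /* stands in for copy_to_user() */
	return 0;
}

int main(void)
{
	char out[32];
	return bounded_copy_out(out, sizeof(out));
}

The b1.c and icn.c hunks below apply the mirror-image guard to copy_from_user(), preventing a stack overwrite instead of an infoleak.
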
41150diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
41151index 25fc597..558bf3b3 100644
41152--- a/drivers/input/serio/serio.c
41153+++ b/drivers/input/serio/serio.c
41154@@ -496,7 +496,7 @@ static void serio_release_port(struct device *dev)
41155 */
41156 static void serio_init_port(struct serio *serio)
41157 {
41158- static atomic_t serio_no = ATOMIC_INIT(0);
41159+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
41160
41161 __module_get(THIS_MODULE);
41162
41163@@ -507,7 +507,7 @@ static void serio_init_port(struct serio *serio)
41164 mutex_init(&serio->drv_mutex);
41165 device_initialize(&serio->dev);
41166 dev_set_name(&serio->dev, "serio%ld",
41167- (long)atomic_inc_return(&serio_no) - 1);
41168+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
41169 serio->dev.bus = &serio_bus;
41170 serio->dev.release = serio_release_port;
41171 serio->dev.groups = serio_device_attr_groups;
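
gameport, input, xpad and serio all mint device numbers from a static counter via atomic_inc_return(...) - 1; a wrapped counter can at worst produce a duplicate name, never a corrupted object, so these counters become _unchecked too. The same allocation idiom in C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong dev_no;	/* zero-initialized, like ATOMIC_INIT(0) */

static void init_port(char *name, size_t len)
{
	/* fetch_add returns the pre-increment value, so the first port
	 * is "port0" -- same arithmetic as atomic_inc_return(&n) - 1. */
	unsigned long id = atomic_fetch_add(&dev_no, 1);
	snprintf(name, len, "port%lu", id);
}

int main(void)
{
	char a[16], b[16];
	init_port(a, sizeof(a));
	init_port(b, sizeof(b));
	printf("%s %s\n", a, b);	/* port0 port1 */
	return 0;
}
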
41172diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
41173index d8f98b1..f62a640 100644
41174--- a/drivers/iommu/iommu.c
41175+++ b/drivers/iommu/iommu.c
41176@@ -583,7 +583,7 @@ static struct notifier_block iommu_bus_nb = {
41177 static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
41178 {
41179 bus_register_notifier(bus, &iommu_bus_nb);
41180- bus_for_each_dev(bus, NULL, ops, add_iommu_group);
41181+ bus_for_each_dev(bus, NULL, (void *)ops, add_iommu_group);
41182 }
41183
41184 /**
41185diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
41186index dcfea4e..f4226b2 100644
41187--- a/drivers/iommu/irq_remapping.c
41188+++ b/drivers/iommu/irq_remapping.c
41189@@ -354,7 +354,7 @@ int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
41190 void panic_if_irq_remap(const char *msg)
41191 {
41192 if (irq_remapping_enabled)
41193- panic(msg);
41194+ panic("%s", msg);
41195 }
41196
41197 static void ir_ack_apic_edge(struct irq_data *data)
41198@@ -375,10 +375,12 @@ static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
41199
41200 void irq_remap_modify_chip_defaults(struct irq_chip *chip)
41201 {
41202- chip->irq_print_chip = ir_print_prefix;
41203- chip->irq_ack = ir_ack_apic_edge;
41204- chip->irq_eoi = ir_ack_apic_level;
41205- chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
41206+ pax_open_kernel();
41207+ *(void **)&chip->irq_print_chip = ir_print_prefix;
41208+ *(void **)&chip->irq_ack = ir_ack_apic_edge;
41209+ *(void **)&chip->irq_eoi = ir_ack_apic_level;
41210+ *(void **)&chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
41211+ pax_close_kernel();
41212 }
41213
41214 bool setup_remapped_irq(int irq, struct irq_cfg *cfg, struct irq_chip *chip)
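
Two separate hardenings in irq_remapping.c. First, panic(msg) with a non-literal format string is the classic format-string hazard, hence panic("%s", msg). Second, once struct irq_chip is constified its hooks live in read-only memory, so irq_remap_modify_chip_defaults() must wrap the writes in pax_open_kernel()/pax_close_kernel(), which briefly lift kernel write protection (toggling CR0.WP on x86). A userspace analog of that open/patch/close window using mprotect() (layout and names illustrative):

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

struct chip { void (*ack)(int irq); };

static void my_ack(int irq) { printf("ack %d\n", irq); }

int main(void)
{
	long pg = sysconf(_SC_PAGESIZE);
	/* Page-aligned "kernel" data standing in for constified .rodata. */
	struct chip *c = mmap(NULL, pg, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (c == MAP_FAILED)
		return 1;
	mprotect(c, pg, PROT_READ);              /* normal, locked state */

	mprotect(c, pg, PROT_READ | PROT_WRITE); /* pax_open_kernel()    */
	c->ack = my_ack;                         /* the guarded write    */
	mprotect(c, pg, PROT_READ);              /* pax_close_kernel()   */

	c->ack(5);
	return 0;
}
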
41215diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
41216index 19ceaa6..3625818 100644
41217--- a/drivers/irqchip/irq-gic.c
41218+++ b/drivers/irqchip/irq-gic.c
41219@@ -84,7 +84,7 @@ static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
41220 * Supported arch specific GIC irq extension.
41221 * Default make them NULL.
41222 */
41223-struct irq_chip gic_arch_extn = {
41224+irq_chip_no_const gic_arch_extn = {
41225 .irq_eoi = NULL,
41226 .irq_mask = NULL,
41227 .irq_unmask = NULL,
41228@@ -333,7 +333,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
41229 chained_irq_exit(chip, desc);
41230 }
41231
41232-static struct irq_chip gic_chip = {
41233+static irq_chip_no_const gic_chip __read_only = {
41234 .name = "GIC",
41235 .irq_mask = gic_mask_irq,
41236 .irq_unmask = gic_unmask_irq,
41237diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
41238index ac6f72b..81150f2 100644
41239--- a/drivers/isdn/capi/capi.c
41240+++ b/drivers/isdn/capi/capi.c
41241@@ -81,8 +81,8 @@ struct capiminor {
41242
41243 struct capi20_appl *ap;
41244 u32 ncci;
41245- atomic_t datahandle;
41246- atomic_t msgid;
41247+ atomic_unchecked_t datahandle;
41248+ atomic_unchecked_t msgid;
41249
41250 struct tty_port port;
41251 int ttyinstop;
41252@@ -391,7 +391,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
41253 capimsg_setu16(s, 2, mp->ap->applid);
41254 capimsg_setu8 (s, 4, CAPI_DATA_B3);
41255 capimsg_setu8 (s, 5, CAPI_RESP);
41256- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
41257+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
41258 capimsg_setu32(s, 8, mp->ncci);
41259 capimsg_setu16(s, 12, datahandle);
41260 }
41261@@ -512,14 +512,14 @@ static void handle_minor_send(struct capiminor *mp)
41262 mp->outbytes -= len;
41263 spin_unlock_bh(&mp->outlock);
41264
41265- datahandle = atomic_inc_return(&mp->datahandle);
41266+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
41267 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
41268 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
41269 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
41270 capimsg_setu16(skb->data, 2, mp->ap->applid);
41271 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
41272 capimsg_setu8 (skb->data, 5, CAPI_REQ);
41273- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
41274+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
41275 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
41276 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
41277 capimsg_setu16(skb->data, 16, len); /* Data length */
41278diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
41279index 600c79b..3752bab 100644
41280--- a/drivers/isdn/gigaset/interface.c
41281+++ b/drivers/isdn/gigaset/interface.c
41282@@ -130,9 +130,9 @@ static int if_open(struct tty_struct *tty, struct file *filp)
41283 }
41284 tty->driver_data = cs;
41285
41286- ++cs->port.count;
41287+ atomic_inc(&cs->port.count);
41288
41289- if (cs->port.count == 1) {
41290+ if (atomic_read(&cs->port.count) == 1) {
41291 tty_port_tty_set(&cs->port, tty);
41292 cs->port.low_latency = 1;
41293 }
41294@@ -156,9 +156,9 @@ static void if_close(struct tty_struct *tty, struct file *filp)
41295
41296 if (!cs->connected)
41297 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
41298- else if (!cs->port.count)
41299+ else if (!atomic_read(&cs->port.count))
41300 dev_warn(cs->dev, "%s: device not opened\n", __func__);
41301- else if (!--cs->port.count)
41302+ else if (!atomic_dec_return(&cs->port.count))
41303 tty_port_tty_set(&cs->port, NULL);
41304
41305 mutex_unlock(&cs->mutex);
41306diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
41307index d0a41cb..f0cdb8c 100644
41308--- a/drivers/isdn/gigaset/usb-gigaset.c
41309+++ b/drivers/isdn/gigaset/usb-gigaset.c
41310@@ -547,7 +547,7 @@ static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
41311 gigaset_dbg_buffer(DEBUG_USBREQ, "brkchars", 6, buf);
41312 memcpy(cs->hw.usb->bchars, buf, 6);
41313 return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x19, 0x41,
41314- 0, 0, &buf, 6, 2000);
41315+ 0, 0, buf, 6, 2000);
41316 }
41317
41318 static void gigaset_freebcshw(struct bc_state *bcs)
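
The usb-gigaset change is a plain bug fix: inside gigaset_brkchars() the array parameter buf has already decayed to a pointer, so '&buf' hands usb_control_msg() the address of that pointer variable rather than the six payload bytes. A demonstration of the difference:

#include <stdio.h>

static void send_bytes(const void *data, int len)
{
	const unsigned char *p = data;
	for (int i = 0; i < len; i++)
		printf("%02x ", p[i]);
	printf("\n");
}

static void brkchars(const unsigned char buf[6])
{
	/* 'buf' is really 'const unsigned char *buf' here, so: */
	send_bytes(buf, 6);	/* correct: the six payload bytes    */
	send_bytes(&buf, 6);	/* the old bug: bytes of the pointer */
}

int main(void)
{
	unsigned char chars[6] = { 1, 2, 3, 4, 5, 6 };
	brkchars(chars);
	return 0;
}
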
41319diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
41320index 4d9b195..455075c 100644
41321--- a/drivers/isdn/hardware/avm/b1.c
41322+++ b/drivers/isdn/hardware/avm/b1.c
41323@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
41324 }
41325 if (left) {
41326 if (t4file->user) {
41327- if (copy_from_user(buf, dp, left))
41328+ if (left > sizeof buf || copy_from_user(buf, dp, left))
41329 return -EFAULT;
41330 } else {
41331 memcpy(buf, dp, left);
41332@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
41333 }
41334 if (left) {
41335 if (config->user) {
41336- if (copy_from_user(buf, dp, left))
41337+ if (left > sizeof buf || copy_from_user(buf, dp, left))
41338 return -EFAULT;
41339 } else {
41340 memcpy(buf, dp, left);
41341diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
41342index 9bb12ba..d4262f7 100644
41343--- a/drivers/isdn/i4l/isdn_common.c
41344+++ b/drivers/isdn/i4l/isdn_common.c
41345@@ -1651,6 +1651,8 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
41346 } else
41347 return -EINVAL;
41348 case IIOCDBGVAR:
41349+ if (!capable(CAP_SYS_RAWIO))
41350+ return -EPERM;
41351 if (arg) {
41352 if (copy_to_user(argp, &dev, sizeof(ulong)))
41353 return -EFAULT;
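
IIOCDBGVAR copies the kernel address of the global 'dev' structure out to the caller -- a ready-made ASLR bypass -- so the patch gates it behind CAP_SYS_RAWIO. A userspace stand-in for the gated-debug-interface pattern (geteuid() substituting for the kernel's capable() check):

#include <errno.h>
#include <string.h>
#include <unistd.h>

static int debug_ioctl(void *argp)
{
	/* Userspace stand-in; the kernel check is capable(CAP_SYS_RAWIO). */
	if (geteuid() != 0)
		return -EPERM;

	/* The leaked value is an address inside the running image --
	 * exactly what ASLR tries to hide from unprivileged code. */
	static int dev;
	void *leak = &dev;
	memcpy(argp, &leak, sizeof(leak));	/* copy_to_user() stand-in */
	return 0;
}

int main(void)
{
	void *out;
	return debug_ioctl(&out) ? 1 : 0;
}
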
41354diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
41355index 3c5f249..5fac4d0 100644
41356--- a/drivers/isdn/i4l/isdn_tty.c
41357+++ b/drivers/isdn/i4l/isdn_tty.c
41358@@ -1508,9 +1508,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
41359
41360 #ifdef ISDN_DEBUG_MODEM_OPEN
41361 printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
41362- port->count);
41363+ atomic_read(&port->count));
41364 #endif
41365- port->count++;
41366+ atomic_inc(&port->count);
41367 port->tty = tty;
41368 /*
41369 * Start up serial port
41370@@ -1554,7 +1554,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
41371 #endif
41372 return;
41373 }
41374- if ((tty->count == 1) && (port->count != 1)) {
41375+ if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
41376 /*
41377 * Uh, oh. tty->count is 1, which means that the tty
41378 * structure will be freed. Info->count should always
41379@@ -1563,15 +1563,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
41380 * serial port won't be shutdown.
41381 */
41382 printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
41383- "info->count is %d\n", port->count);
41384- port->count = 1;
41385+ "info->count is %d\n", atomic_read(&port->count));
41386+ atomic_set(&port->count, 1);
41387 }
41388- if (--port->count < 0) {
41389+ if (atomic_dec_return(&port->count) < 0) {
41390 printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
41391- info->line, port->count);
41392- port->count = 0;
41393+ info->line, atomic_read(&port->count));
41394+ atomic_set(&port->count, 0);
41395 }
41396- if (port->count) {
41397+ if (atomic_read(&port->count)) {
41398 #ifdef ISDN_DEBUG_MODEM_OPEN
41399 printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
41400 #endif
41401@@ -1625,7 +1625,7 @@ isdn_tty_hangup(struct tty_struct *tty)
41402 if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
41403 return;
41404 isdn_tty_shutdown(info);
41405- port->count = 0;
41406+ atomic_set(&port->count, 0);
41407 port->flags &= ~ASYNC_NORMAL_ACTIVE;
41408 port->tty = NULL;
41409 wake_up_interruptible(&port->open_wait);
41410@@ -1970,7 +1970,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
41411 for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
41412 modem_info *info = &dev->mdm.info[i];
41413
41414- if (info->port.count == 0)
41415+ if (atomic_read(&info->port.count) == 0)
41416 continue;
41417 if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) && /* SI1 is matching */
41418 (info->emu.mdmreg[REG_SI2] == si2)) { /* SI2 is matching */
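
The gigaset and isdn_tty hunks reflect a tree-wide grsecurity change of tty_port.count from int to atomic_t: open, close and hangup can race on the counter, and a lost increment or decrement either leaks the port or tears it down while still open. A sketch of the close path after the conversion, assuming a simplified port struct:

#include <stdatomic.h>
#include <stdio.h>

struct port { atomic_int count; };

static void port_close(struct port *p)
{
	/* fetch_sub returns the old value; two racing closers can no
	 * longer both observe the same pre-decrement count the way a
	 * plain '--p->count' allowed. */
	if (atomic_fetch_sub(&p->count, 1) - 1 < 0) {
		fprintf(stderr, "bad port count, clamping to 0\n");
		atomic_store(&p->count, 0);
	}
}

int main(void)
{
	struct port p = { .count = 1 };
	port_close(&p);		/* balanced: count reaches 0    */
	port_close(&p);		/* imbalance caught and clamped */
	return 0;
}
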
41419diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
41420index e74df7c..03a03ba 100644
41421--- a/drivers/isdn/icn/icn.c
41422+++ b/drivers/isdn/icn/icn.c
41423@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
41424 if (count > len)
41425 count = len;
41426 if (user) {
41427- if (copy_from_user(msg, buf, count))
41428+ if (count > sizeof msg || copy_from_user(msg, buf, count))
41429 return -EFAULT;
41430 } else
41431 memcpy(msg, buf, count);
41432diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
41433index 6a8405d..0bd1c7e 100644
41434--- a/drivers/leds/leds-clevo-mail.c
41435+++ b/drivers/leds/leds-clevo-mail.c
41436@@ -40,7 +40,7 @@ static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id)
41437 * detected as working, but in reality it is not) as low as
41438 * possible.
41439 */
41440-static struct dmi_system_id __initdata clevo_mail_led_dmi_table[] = {
41441+static const struct dmi_system_id __initconst clevo_mail_led_dmi_table[] = {
41442 {
41443 .callback = clevo_mail_led_dmi_callback,
41444 .ident = "Clevo D410J",
41445diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
41446index 64e204e..c6bf189 100644
41447--- a/drivers/leds/leds-ss4200.c
41448+++ b/drivers/leds/leds-ss4200.c
41449@@ -91,7 +91,7 @@ MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
41450 * detected as working, but in reality it is not) as low as
41451 * possible.
41452 */
41453-static struct dmi_system_id __initdata nas_led_whitelist[] = {
41454+static const struct dmi_system_id __initconst nas_led_whitelist[] = {
41455 {
41456 .callback = ss4200_led_dmi_callback,
41457 .ident = "Intel SS4200-E",
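
Both DMI whitelists are read once during boot, so the patch upgrades them from __initdata to const __initconst: a const object should not be placed in the writable init-data section, and both sections are discarded after init either way. Simplified versions of the section annotations (the real kernel macros carry more decoration):

/* Const init data gets its own read-only section, discarded after
 * boot just like .init.data. */
#define __initdata  __attribute__((section(".init.data")))
#define __initconst __attribute__((section(".init.rodata")))

struct dmi_id { const char *ident; };

/* 'const' + __initdata would drop a read-only object into a writable
 * section; __initconst keeps qualifier and section consistent. */
static const struct dmi_id whitelist[] __initconst = {
	{ .ident = "Intel SS4200-E" },
	{ }				/* sentinel */
};

const char *first_ident(void) { return whitelist[0].ident; }
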
41458diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
41459index 0bf1e4e..b4bf44e 100644
41460--- a/drivers/lguest/core.c
41461+++ b/drivers/lguest/core.c
41462@@ -97,9 +97,17 @@ static __init int map_switcher(void)
41463 * The end address needs +1 because __get_vm_area allocates an
41464 * extra guard page, so we need space for that.
41465 */
41466+
41467+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
41468+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
41469+ VM_ALLOC | VM_KERNEXEC, switcher_addr, switcher_addr
41470+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
41471+#else
41472 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
41473 VM_ALLOC, switcher_addr, switcher_addr
41474 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
41475+#endif
41476+
41477 if (!switcher_vma) {
41478 err = -ENOMEM;
41479 printk("lguest: could not map switcher pages high\n");
41480@@ -124,7 +132,7 @@ static __init int map_switcher(void)
41481 * Now the Switcher is mapped at the right address, we can't fail!
41482 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
41483 */
41484- memcpy(switcher_vma->addr, start_switcher_text,
41485+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
41486 end_switcher_text - start_switcher_text);
41487
41488 printk(KERN_INFO "lguest: mapped switcher at %p\n",
41489diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
41490index 5b9ac32..2ef4f26 100644
41491--- a/drivers/lguest/page_tables.c
41492+++ b/drivers/lguest/page_tables.c
41493@@ -559,7 +559,7 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
41494 /*:*/
41495
41496 #ifdef CONFIG_X86_PAE
41497-static void release_pmd(pmd_t *spmd)
41498+static void __intentional_overflow(-1) release_pmd(pmd_t *spmd)
41499 {
41500 /* If the entry's not present, there's nothing to release. */
41501 if (pmd_flags(*spmd) & _PAGE_PRESENT) {
41502diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
41503index f0a3347..f6608b2 100644
41504--- a/drivers/lguest/x86/core.c
41505+++ b/drivers/lguest/x86/core.c
41506@@ -59,7 +59,7 @@ static struct {
41507 /* Offset from where switcher.S was compiled to where we've copied it */
41508 static unsigned long switcher_offset(void)
41509 {
41510- return switcher_addr - (unsigned long)start_switcher_text;
41511+ return switcher_addr - (unsigned long)ktla_ktva(start_switcher_text);
41512 }
41513
41514 /* This cpu's struct lguest_pages (after the Switcher text page) */
41515@@ -99,7 +99,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
41516 * These copies are pretty cheap, so we do them unconditionally: */
41517 /* Save the current Host top-level page directory.
41518 */
41519+
41520+#ifdef CONFIG_PAX_PER_CPU_PGD
41521+ pages->state.host_cr3 = read_cr3();
41522+#else
41523 pages->state.host_cr3 = __pa(current->mm->pgd);
41524+#endif
41525+
41526 /*
41527 * Set up the Guest's page tables to see this CPU's pages (and no
41528 * other CPU's pages).
41529@@ -475,7 +481,7 @@ void __init lguest_arch_host_init(void)
41530 * compiled-in switcher code and the high-mapped copy we just made.
41531 */
41532 for (i = 0; i < IDT_ENTRIES; i++)
41533- default_idt_entries[i] += switcher_offset();
41534+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
41535
41536 /*
41537 * Set up the Switcher's per-cpu areas.
41538@@ -558,7 +564,7 @@ void __init lguest_arch_host_init(void)
41539 * it will be undisturbed when we switch. To change %cs and jump we
41540 * need this structure to feed to Intel's "lcall" instruction.
41541 */
41542- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
41543+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
41544 lguest_entry.segment = LGUEST_CS;
41545
41546 /*
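Note: the ktla_ktva() wrappers in the two lguest files above (and in kgdbts further below) deal with PaX KERNEXEC on i386, where the kernel runs with a shifted code-segment base: a text symbol's executable address and its data-visible linear alias then differ, and ktla_ktva() converts one to the other before text bytes are read or relocated. Roughly (hedged; the real macro lives in the PaX arch headers and is an identity mapping when KERNEXEC is off):

	/* read text bytes through the data-visible alias of a code symbol */
	memcpy(dst, ktla_ktva(start_switcher_text),
	       end_switcher_text - start_switcher_text);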
41547diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
41548index 40634b0..4f5855e 100644
41549--- a/drivers/lguest/x86/switcher_32.S
41550+++ b/drivers/lguest/x86/switcher_32.S
41551@@ -87,6 +87,7 @@
41552 #include <asm/page.h>
41553 #include <asm/segment.h>
41554 #include <asm/lguest.h>
41555+#include <asm/processor-flags.h>
41556
41557 // We mark the start of the code to copy
41558 // It's placed in .text tho it's never run here
41559@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
41560 // Changes type when we load it: damn Intel!
41561 // For after we switch over our page tables
41562 // That entry will be read-only: we'd crash.
41563+
41564+#ifdef CONFIG_PAX_KERNEXEC
41565+ mov %cr0, %edx
41566+ xor $X86_CR0_WP, %edx
41567+ mov %edx, %cr0
41568+#endif
41569+
41570 movl $(GDT_ENTRY_TSS*8), %edx
41571 ltr %dx
41572
41573@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
41574 // Let's clear it again for our return.
41575 // The GDT descriptor of the Host
41576 // Points to the table after two "size" bytes
41577- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
41578+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
41579 // Clear "used" from type field (byte 5, bit 2)
41580- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
41581+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
41582+
41583+#ifdef CONFIG_PAX_KERNEXEC
41584+ mov %cr0, %eax
41585+ xor $X86_CR0_WP, %eax
41586+ mov %eax, %cr0
41587+#endif
41588
41589 // Once our page table's switched, the Guest is live!
41590 // The Host fades as we run this final step.
41591@@ -295,13 +309,12 @@ deliver_to_host:
41592 // I consulted gcc, and it gave
41593 // These instructions, which I gladly credit:
41594 leal (%edx,%ebx,8), %eax
41595- movzwl (%eax),%edx
41596- movl 4(%eax), %eax
41597- xorw %ax, %ax
41598- orl %eax, %edx
41599+ movl 4(%eax), %edx
41600+ movw (%eax), %dx
41601 // Now the address of the handler's in %edx
41602 // We call it now: its "iret" drops us home.
41603- jmp *%edx
41604+ ljmp $__KERNEL_CS, $1f
41605+1: jmp *%edx
41606
41607 // Every interrupt can come to us here
41608 // But we must truly tell each apart.
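Note: the switcher hunks above briefly clear CR0.WP around the TSS-descriptor accesses: under CONFIG_PAX_KERNEXEC the GDT page is read-only, and both marking the TSS busy (ltr writes the descriptor) and clearing its "used" bit with andb would fault otherwise. The full shape of the bracket, sketched in the same AT&T syntax (the patch uses %edx on entry and %eax on the way out; register choice is incidental):

#ifdef CONFIG_PAX_KERNEXEC
	// flip write-protect off for the one sanctioned write
	mov %cr0, %edx
	xor $X86_CR0_WP, %edx
	mov %edx, %cr0
	// ... touch the otherwise read-only descriptor here ...
	// then flip WP straight back on
	mov %cr0, %edx
	xor $X86_CR0_WP, %edx
	mov %edx, %cr0
#endif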
41609diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
41610index 0003992..854bbce 100644
41611--- a/drivers/md/bcache/closure.h
41612+++ b/drivers/md/bcache/closure.h
41613@@ -622,7 +622,7 @@ static inline void closure_wake_up(struct closure_waitlist *list)
41614 static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
41615 struct workqueue_struct *wq)
41616 {
41617- BUG_ON(object_is_on_stack(cl));
41618+ BUG_ON(object_starts_on_stack(cl));
41619 closure_set_ip(cl);
41620 cl->fn = fn;
41621 cl->wq = wq;
41622diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
41623index 5a2c754..0fa55db 100644
41624--- a/drivers/md/bitmap.c
41625+++ b/drivers/md/bitmap.c
41626@@ -1779,7 +1779,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
41627 chunk_kb ? "KB" : "B");
41628 if (bitmap->storage.file) {
41629 seq_printf(seq, ", file: ");
41630- seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
41631+ seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
41632 }
41633
41634 seq_printf(seq, "\n");
41635diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
41636index 81a79b7..87a0f73 100644
41637--- a/drivers/md/dm-ioctl.c
41638+++ b/drivers/md/dm-ioctl.c
41639@@ -1697,7 +1697,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
41640 cmd == DM_LIST_VERSIONS_CMD)
41641 return 0;
41642
41643- if ((cmd == DM_DEV_CREATE_CMD)) {
41644+ if (cmd == DM_DEV_CREATE_CMD) {
41645 if (!*param->name) {
41646 DMWARN("name not supplied when creating device");
41647 return -EINVAL;
41648diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
41649index 699b5be..eac0a15 100644
41650--- a/drivers/md/dm-raid1.c
41651+++ b/drivers/md/dm-raid1.c
41652@@ -40,7 +40,7 @@ enum dm_raid1_error {
41653
41654 struct mirror {
41655 struct mirror_set *ms;
41656- atomic_t error_count;
41657+ atomic_unchecked_t error_count;
41658 unsigned long error_type;
41659 struct dm_dev *dev;
41660 sector_t offset;
41661@@ -186,7 +186,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
41662 struct mirror *m;
41663
41664 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
41665- if (!atomic_read(&m->error_count))
41666+ if (!atomic_read_unchecked(&m->error_count))
41667 return m;
41668
41669 return NULL;
41670@@ -218,7 +218,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
41671 * simple way to tell if a device has encountered
41672 * errors.
41673 */
41674- atomic_inc(&m->error_count);
41675+ atomic_inc_unchecked(&m->error_count);
41676
41677 if (test_and_set_bit(error_type, &m->error_type))
41678 return;
41679@@ -409,7 +409,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
41680 struct mirror *m = get_default_mirror(ms);
41681
41682 do {
41683- if (likely(!atomic_read(&m->error_count)))
41684+ if (likely(!atomic_read_unchecked(&m->error_count)))
41685 return m;
41686
41687 if (m-- == ms->mirror)
41688@@ -423,7 +423,7 @@ static int default_ok(struct mirror *m)
41689 {
41690 struct mirror *default_mirror = get_default_mirror(m->ms);
41691
41692- return !atomic_read(&default_mirror->error_count);
41693+ return !atomic_read_unchecked(&default_mirror->error_count);
41694 }
41695
41696 static int mirror_available(struct mirror_set *ms, struct bio *bio)
41697@@ -560,7 +560,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
41698 */
41699 if (likely(region_in_sync(ms, region, 1)))
41700 m = choose_mirror(ms, bio->bi_sector);
41701- else if (m && atomic_read(&m->error_count))
41702+ else if (m && atomic_read_unchecked(&m->error_count))
41703 m = NULL;
41704
41705 if (likely(m))
41706@@ -927,7 +927,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
41707 }
41708
41709 ms->mirror[mirror].ms = ms;
41710- atomic_set(&(ms->mirror[mirror].error_count), 0);
41711+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
41712 ms->mirror[mirror].error_type = 0;
41713 ms->mirror[mirror].offset = offset;
41714
41715@@ -1340,7 +1340,7 @@ static void mirror_resume(struct dm_target *ti)
41716 */
41717 static char device_status_char(struct mirror *m)
41718 {
41719- if (!atomic_read(&(m->error_count)))
41720+ if (!atomic_read_unchecked(&(m->error_count)))
41721 return 'A';
41722
41723 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
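Note: dm-raid1's error_count is purely statistical, so the hunks above retype it from atomic_t to atomic_unchecked_t: PaX's REFCOUNT hardening traps on atomic_t overflow, and the *_unchecked() variants mark counters that may wrap harmlessly. The API mirrors the plain one, as this sketch shows (the type and helpers come from the PaX portion of this patch; names below are illustrative):

static atomic_unchecked_t example_error_count = ATOMIC_INIT(0);

static void example_note_error(void)
{
	/* wrapping here is harmless, so opt out of overflow detection */
	atomic_inc_unchecked(&example_error_count);
}

static int example_errors_seen(void)
{
	return atomic_read_unchecked(&example_error_count);
}

The dm-stripe, dm, md, raid1, raid10 and raid5 hunks that follow are the same conversion applied to their event, read-error and corrected-error counters.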
41724diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
41725index d907ca6..cfb8384 100644
41726--- a/drivers/md/dm-stripe.c
41727+++ b/drivers/md/dm-stripe.c
41728@@ -20,7 +20,7 @@ struct stripe {
41729 struct dm_dev *dev;
41730 sector_t physical_start;
41731
41732- atomic_t error_count;
41733+ atomic_unchecked_t error_count;
41734 };
41735
41736 struct stripe_c {
41737@@ -185,7 +185,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
41738 kfree(sc);
41739 return r;
41740 }
41741- atomic_set(&(sc->stripe[i].error_count), 0);
41742+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
41743 }
41744
41745 ti->private = sc;
41746@@ -326,7 +326,7 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
41747 DMEMIT("%d ", sc->stripes);
41748 for (i = 0; i < sc->stripes; i++) {
41749 DMEMIT("%s ", sc->stripe[i].dev->name);
41750- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
41751+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
41752 'D' : 'A';
41753 }
41754 buffer[i] = '\0';
41755@@ -371,8 +371,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
41756 */
41757 for (i = 0; i < sc->stripes; i++)
41758 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
41759- atomic_inc(&(sc->stripe[i].error_count));
41760- if (atomic_read(&(sc->stripe[i].error_count)) <
41761+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
41762+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
41763 DM_IO_ERROR_THRESHOLD)
41764 schedule_work(&sc->trigger_event);
41765 }
41766diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
41767index 1ff252a..ee384c1 100644
41768--- a/drivers/md/dm-table.c
41769+++ b/drivers/md/dm-table.c
41770@@ -389,7 +389,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
41771 if (!dev_size)
41772 return 0;
41773
41774- if ((start >= dev_size) || (start + len > dev_size)) {
41775+ if ((start >= dev_size) || (len > dev_size - start)) {
41776 DMWARN("%s: %s too small for target: "
41777 "start=%llu, len=%llu, dev_size=%llu",
41778 dm_device_name(ti->table->md), bdevname(bdev, b),
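Note: the dm-table hunk replaces "start + len > dev_size" with "len > dev_size - start": with a huge len the addition can wrap around sector_t and slip past the check, while the subtraction form cannot underflow once start >= dev_size has been rejected. As a self-contained sketch (sector_t from <linux/types.h>; the function name is illustrative):

static bool example_range_invalid(sector_t start, sector_t len,
				  sector_t dev_size)
{
	/* the left clause guarantees dev_size - start cannot underflow */
	return start >= dev_size || len > dev_size - start;
}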
41779diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
41780index 60bce43..9b997d0 100644
41781--- a/drivers/md/dm-thin-metadata.c
41782+++ b/drivers/md/dm-thin-metadata.c
41783@@ -397,7 +397,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
41784 {
41785 pmd->info.tm = pmd->tm;
41786 pmd->info.levels = 2;
41787- pmd->info.value_type.context = pmd->data_sm;
41788+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
41789 pmd->info.value_type.size = sizeof(__le64);
41790 pmd->info.value_type.inc = data_block_inc;
41791 pmd->info.value_type.dec = data_block_dec;
41792@@ -416,7 +416,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
41793
41794 pmd->bl_info.tm = pmd->tm;
41795 pmd->bl_info.levels = 1;
41796- pmd->bl_info.value_type.context = pmd->data_sm;
41797+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
41798 pmd->bl_info.value_type.size = sizeof(__le64);
41799 pmd->bl_info.value_type.inc = data_block_inc;
41800 pmd->bl_info.value_type.dec = data_block_dec;
41801diff --git a/drivers/md/dm.c b/drivers/md/dm.c
41802index 33f2010..23fb84c 100644
41803--- a/drivers/md/dm.c
41804+++ b/drivers/md/dm.c
41805@@ -169,9 +169,9 @@ struct mapped_device {
41806 /*
41807 * Event handling.
41808 */
41809- atomic_t event_nr;
41810+ atomic_unchecked_t event_nr;
41811 wait_queue_head_t eventq;
41812- atomic_t uevent_seq;
41813+ atomic_unchecked_t uevent_seq;
41814 struct list_head uevent_list;
41815 spinlock_t uevent_lock; /* Protect access to uevent_list */
41816
41817@@ -1884,8 +1884,8 @@ static struct mapped_device *alloc_dev(int minor)
41818 rwlock_init(&md->map_lock);
41819 atomic_set(&md->holders, 1);
41820 atomic_set(&md->open_count, 0);
41821- atomic_set(&md->event_nr, 0);
41822- atomic_set(&md->uevent_seq, 0);
41823+ atomic_set_unchecked(&md->event_nr, 0);
41824+ atomic_set_unchecked(&md->uevent_seq, 0);
41825 INIT_LIST_HEAD(&md->uevent_list);
41826 spin_lock_init(&md->uevent_lock);
41827
41828@@ -2033,7 +2033,7 @@ static void event_callback(void *context)
41829
41830 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
41831
41832- atomic_inc(&md->event_nr);
41833+ atomic_inc_unchecked(&md->event_nr);
41834 wake_up(&md->eventq);
41835 }
41836
41837@@ -2690,18 +2690,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
41838
41839 uint32_t dm_next_uevent_seq(struct mapped_device *md)
41840 {
41841- return atomic_add_return(1, &md->uevent_seq);
41842+ return atomic_add_return_unchecked(1, &md->uevent_seq);
41843 }
41844
41845 uint32_t dm_get_event_nr(struct mapped_device *md)
41846 {
41847- return atomic_read(&md->event_nr);
41848+ return atomic_read_unchecked(&md->event_nr);
41849 }
41850
41851 int dm_wait_event(struct mapped_device *md, int event_nr)
41852 {
41853 return wait_event_interruptible(md->eventq,
41854- (event_nr != atomic_read(&md->event_nr)));
41855+ (event_nr != atomic_read_unchecked(&md->event_nr)));
41856 }
41857
41858 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
41859diff --git a/drivers/md/md.c b/drivers/md/md.c
41860index 51f0345..c77810e 100644
41861--- a/drivers/md/md.c
41862+++ b/drivers/md/md.c
41863@@ -234,10 +234,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
41864 * start build, activate spare
41865 */
41866 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
41867-static atomic_t md_event_count;
41868+static atomic_unchecked_t md_event_count;
41869 void md_new_event(struct mddev *mddev)
41870 {
41871- atomic_inc(&md_event_count);
41872+ atomic_inc_unchecked(&md_event_count);
41873 wake_up(&md_event_waiters);
41874 }
41875 EXPORT_SYMBOL_GPL(md_new_event);
41876@@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
41877 */
41878 static void md_new_event_inintr(struct mddev *mddev)
41879 {
41880- atomic_inc(&md_event_count);
41881+ atomic_inc_unchecked(&md_event_count);
41882 wake_up(&md_event_waiters);
41883 }
41884
41885@@ -1501,7 +1501,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
41886 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
41887 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
41888 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
41889- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
41890+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
41891
41892 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
41893 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
41894@@ -1745,7 +1745,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
41895 else
41896 sb->resync_offset = cpu_to_le64(0);
41897
41898- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
41899+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
41900
41901 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
41902 sb->size = cpu_to_le64(mddev->dev_sectors);
41903@@ -2750,7 +2750,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
41904 static ssize_t
41905 errors_show(struct md_rdev *rdev, char *page)
41906 {
41907- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
41908+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
41909 }
41910
41911 static ssize_t
41912@@ -2759,7 +2759,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
41913 char *e;
41914 unsigned long n = simple_strtoul(buf, &e, 10);
41915 if (*buf && (*e == 0 || *e == '\n')) {
41916- atomic_set(&rdev->corrected_errors, n);
41917+ atomic_set_unchecked(&rdev->corrected_errors, n);
41918 return len;
41919 }
41920 return -EINVAL;
41921@@ -3207,8 +3207,8 @@ int md_rdev_init(struct md_rdev *rdev)
41922 rdev->sb_loaded = 0;
41923 rdev->bb_page = NULL;
41924 atomic_set(&rdev->nr_pending, 0);
41925- atomic_set(&rdev->read_errors, 0);
41926- atomic_set(&rdev->corrected_errors, 0);
41927+ atomic_set_unchecked(&rdev->read_errors, 0);
41928+ atomic_set_unchecked(&rdev->corrected_errors, 0);
41929
41930 INIT_LIST_HEAD(&rdev->same_set);
41931 init_waitqueue_head(&rdev->blocked_wait);
41932@@ -7009,7 +7009,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
41933
41934 spin_unlock(&pers_lock);
41935 seq_printf(seq, "\n");
41936- seq->poll_event = atomic_read(&md_event_count);
41937+ seq->poll_event = atomic_read_unchecked(&md_event_count);
41938 return 0;
41939 }
41940 if (v == (void*)2) {
41941@@ -7112,7 +7112,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
41942 return error;
41943
41944 seq = file->private_data;
41945- seq->poll_event = atomic_read(&md_event_count);
41946+ seq->poll_event = atomic_read_unchecked(&md_event_count);
41947 return error;
41948 }
41949
41950@@ -7126,7 +7126,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
41951 /* always allow read */
41952 mask = POLLIN | POLLRDNORM;
41953
41954- if (seq->poll_event != atomic_read(&md_event_count))
41955+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
41956 mask |= POLLERR | POLLPRI;
41957 return mask;
41958 }
41959@@ -7170,7 +7170,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
41960 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
41961 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
41962 (int)part_stat_read(&disk->part0, sectors[1]) -
41963- atomic_read(&disk->sync_io);
41964+ atomic_read_unchecked(&disk->sync_io);
41965 /* sync IO will cause sync_io to increase before the disk_stats
41966 * as sync_io is counted when a request starts, and
41967 * disk_stats is counted when it completes.
41968diff --git a/drivers/md/md.h b/drivers/md/md.h
41969index 653f992b6..6af6c40 100644
41970--- a/drivers/md/md.h
41971+++ b/drivers/md/md.h
41972@@ -94,13 +94,13 @@ struct md_rdev {
41973 * only maintained for arrays that
41974 * support hot removal
41975 */
41976- atomic_t read_errors; /* number of consecutive read errors that
41977+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
41978 * we have tried to ignore.
41979 */
41980 struct timespec last_read_error; /* monotonic time since our
41981 * last read error
41982 */
41983- atomic_t corrected_errors; /* number of corrected read errors,
41984+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
41985 * for reporting to userspace and storing
41986 * in superblock.
41987 */
41988@@ -434,7 +434,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
41989
41990 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
41991 {
41992- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
41993+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
41994 }
41995
41996 struct md_personality
41997diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
41998index 3e6d115..ffecdeb 100644
41999--- a/drivers/md/persistent-data/dm-space-map.h
42000+++ b/drivers/md/persistent-data/dm-space-map.h
42001@@ -71,6 +71,7 @@ struct dm_space_map {
42002 dm_sm_threshold_fn fn,
42003 void *context);
42004 };
42005+typedef struct dm_space_map __no_const dm_space_map_no_const;
42006
42007 /*----------------------------------------------------------------*/
42008
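Note: the __no_const typedef above is the escape hatch for PaX's constify plugin: structures consisting of function pointers (like dm_space_map) are force-qualified const at compile time, and dm-thin-metadata's btree info needs to store one in a writable context slot, hence the dm_space_map_no_const casts in the earlier hunks. Sketch of the idiom with illustrative names:

struct example_ops {
	int (*op)(void *ctx);
};	/* constify would make every instance of this const ... */

typedef struct example_ops __no_const example_ops_no_const;

static example_ops_no_const runtime_ops;	/* ... but this copy stays writable */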
42009diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
42010index 6f48244..7d29145 100644
42011--- a/drivers/md/raid1.c
42012+++ b/drivers/md/raid1.c
42013@@ -1822,7 +1822,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
42014 if (r1_sync_page_io(rdev, sect, s,
42015 bio->bi_io_vec[idx].bv_page,
42016 READ) != 0)
42017- atomic_add(s, &rdev->corrected_errors);
42018+ atomic_add_unchecked(s, &rdev->corrected_errors);
42019 }
42020 sectors -= s;
42021 sect += s;
42022@@ -2049,7 +2049,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
42023 test_bit(In_sync, &rdev->flags)) {
42024 if (r1_sync_page_io(rdev, sect, s,
42025 conf->tmppage, READ)) {
42026- atomic_add(s, &rdev->corrected_errors);
42027+ atomic_add_unchecked(s, &rdev->corrected_errors);
42028 printk(KERN_INFO
42029 "md/raid1:%s: read error corrected "
42030 "(%d sectors at %llu on %s)\n",
42031diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
42032index 081bb33..3c4b287 100644
42033--- a/drivers/md/raid10.c
42034+++ b/drivers/md/raid10.c
42035@@ -1940,7 +1940,7 @@ static void end_sync_read(struct bio *bio, int error)
42036 /* The write handler will notice the lack of
42037 * R10BIO_Uptodate and record any errors etc
42038 */
42039- atomic_add(r10_bio->sectors,
42040+ atomic_add_unchecked(r10_bio->sectors,
42041 &conf->mirrors[d].rdev->corrected_errors);
42042
42043 /* for reconstruct, we always reschedule after a read.
42044@@ -2298,7 +2298,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
42045 {
42046 struct timespec cur_time_mon;
42047 unsigned long hours_since_last;
42048- unsigned int read_errors = atomic_read(&rdev->read_errors);
42049+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
42050
42051 ktime_get_ts(&cur_time_mon);
42052
42053@@ -2320,9 +2320,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
42054 * overflowing the shift of read_errors by hours_since_last.
42055 */
42056 if (hours_since_last >= 8 * sizeof(read_errors))
42057- atomic_set(&rdev->read_errors, 0);
42058+ atomic_set_unchecked(&rdev->read_errors, 0);
42059 else
42060- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
42061+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
42062 }
42063
42064 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
42065@@ -2376,8 +2376,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
42066 return;
42067
42068 check_decay_read_errors(mddev, rdev);
42069- atomic_inc(&rdev->read_errors);
42070- if (atomic_read(&rdev->read_errors) > max_read_errors) {
42071+ atomic_inc_unchecked(&rdev->read_errors);
42072+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
42073 char b[BDEVNAME_SIZE];
42074 bdevname(rdev->bdev, b);
42075
42076@@ -2385,7 +2385,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
42077 "md/raid10:%s: %s: Raid device exceeded "
42078 "read_error threshold [cur %d:max %d]\n",
42079 mdname(mddev), b,
42080- atomic_read(&rdev->read_errors), max_read_errors);
42081+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
42082 printk(KERN_NOTICE
42083 "md/raid10:%s: %s: Failing raid device\n",
42084 mdname(mddev), b);
42085@@ -2540,7 +2540,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
42086 sect +
42087 choose_data_offset(r10_bio, rdev)),
42088 bdevname(rdev->bdev, b));
42089- atomic_add(s, &rdev->corrected_errors);
42090+ atomic_add_unchecked(s, &rdev->corrected_errors);
42091 }
42092
42093 rdev_dec_pending(rdev, mddev);
42094diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
42095index a35b846..e295c6d 100644
42096--- a/drivers/md/raid5.c
42097+++ b/drivers/md/raid5.c
42098@@ -1764,21 +1764,21 @@ static void raid5_end_read_request(struct bio * bi, int error)
42099 mdname(conf->mddev), STRIPE_SECTORS,
42100 (unsigned long long)s,
42101 bdevname(rdev->bdev, b));
42102- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
42103+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
42104 clear_bit(R5_ReadError, &sh->dev[i].flags);
42105 clear_bit(R5_ReWrite, &sh->dev[i].flags);
42106 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
42107 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
42108
42109- if (atomic_read(&rdev->read_errors))
42110- atomic_set(&rdev->read_errors, 0);
42111+ if (atomic_read_unchecked(&rdev->read_errors))
42112+ atomic_set_unchecked(&rdev->read_errors, 0);
42113 } else {
42114 const char *bdn = bdevname(rdev->bdev, b);
42115 int retry = 0;
42116 int set_bad = 0;
42117
42118 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
42119- atomic_inc(&rdev->read_errors);
42120+ atomic_inc_unchecked(&rdev->read_errors);
42121 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
42122 printk_ratelimited(
42123 KERN_WARNING
42124@@ -1806,7 +1806,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
42125 mdname(conf->mddev),
42126 (unsigned long long)s,
42127 bdn);
42128- } else if (atomic_read(&rdev->read_errors)
42129+ } else if (atomic_read_unchecked(&rdev->read_errors)
42130 > conf->max_nr_stripes)
42131 printk(KERN_WARNING
42132 "md/raid:%s: Too many read errors, failing device %s.\n",
42133diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
42134index 401ef64..836e563 100644
42135--- a/drivers/media/dvb-core/dvbdev.c
42136+++ b/drivers/media/dvb-core/dvbdev.c
42137@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
42138 const struct dvb_device *template, void *priv, int type)
42139 {
42140 struct dvb_device *dvbdev;
42141- struct file_operations *dvbdevfops;
42142+ file_operations_no_const *dvbdevfops;
42143 struct device *clsdev;
42144 int minor;
42145 int id;
42146diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
42147index 9b6c3bb..baeb5c7 100644
42148--- a/drivers/media/dvb-frontends/dib3000.h
42149+++ b/drivers/media/dvb-frontends/dib3000.h
42150@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
42151 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
42152 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
42153 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
42154-};
42155+} __no_const;
42156
42157 #if IS_ENABLED(CONFIG_DVB_DIB3000MB)
42158 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
42159diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
42160index c7a9be1..683f6f8 100644
42161--- a/drivers/media/pci/cx88/cx88-video.c
42162+++ b/drivers/media/pci/cx88/cx88-video.c
42163@@ -50,9 +50,9 @@ MODULE_VERSION(CX88_VERSION);
42164
42165 /* ------------------------------------------------------------------ */
42166
42167-static unsigned int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
42168-static unsigned int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
42169-static unsigned int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
42170+static int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
42171+static int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
42172+static int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
42173
42174 module_param_array(video_nr, int, NULL, 0444);
42175 module_param_array(vbi_nr, int, NULL, 0444);
42176diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
42177index d338b19..aae4f9e 100644
42178--- a/drivers/media/platform/omap/omap_vout.c
42179+++ b/drivers/media/platform/omap/omap_vout.c
42180@@ -63,7 +63,6 @@ enum omap_vout_channels {
42181 OMAP_VIDEO2,
42182 };
42183
42184-static struct videobuf_queue_ops video_vbq_ops;
42185 /* Variables configurable through module params*/
42186 static u32 video1_numbuffers = 3;
42187 static u32 video2_numbuffers = 3;
42188@@ -1015,6 +1014,12 @@ static int omap_vout_open(struct file *file)
42189 {
42190 struct videobuf_queue *q;
42191 struct omap_vout_device *vout = NULL;
42192+ static struct videobuf_queue_ops video_vbq_ops = {
42193+ .buf_setup = omap_vout_buffer_setup,
42194+ .buf_prepare = omap_vout_buffer_prepare,
42195+ .buf_release = omap_vout_buffer_release,
42196+ .buf_queue = omap_vout_buffer_queue,
42197+ };
42198
42199 vout = video_drvdata(file);
42200 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
42201@@ -1032,10 +1037,6 @@ static int omap_vout_open(struct file *file)
42202 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
42203
42204 q = &vout->vbq;
42205- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
42206- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
42207- video_vbq_ops.buf_release = omap_vout_buffer_release;
42208- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
42209 spin_lock_init(&vout->vbq_lock);
42210
42211 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
42212diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
42213index 04e6490..2df65bf 100644
42214--- a/drivers/media/platform/s5p-tv/mixer.h
42215+++ b/drivers/media/platform/s5p-tv/mixer.h
42216@@ -156,7 +156,7 @@ struct mxr_layer {
42217 /** layer index (unique identifier) */
42218 int idx;
42219 /** callbacks for layer methods */
42220- struct mxr_layer_ops ops;
42221+ struct mxr_layer_ops *ops;
42222 /** format array */
42223 const struct mxr_format **fmt_array;
42224 /** size of format array */
42225diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
42226index b93a21f..2535195 100644
42227--- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
42228+++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
42229@@ -235,7 +235,7 @@ struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
42230 {
42231 struct mxr_layer *layer;
42232 int ret;
42233- struct mxr_layer_ops ops = {
42234+ static struct mxr_layer_ops ops = {
42235 .release = mxr_graph_layer_release,
42236 .buffer_set = mxr_graph_buffer_set,
42237 .stream_set = mxr_graph_stream_set,
42238diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
42239index b713403..53cb5ad 100644
42240--- a/drivers/media/platform/s5p-tv/mixer_reg.c
42241+++ b/drivers/media/platform/s5p-tv/mixer_reg.c
42242@@ -276,7 +276,7 @@ static void mxr_irq_layer_handle(struct mxr_layer *layer)
42243 layer->update_buf = next;
42244 }
42245
42246- layer->ops.buffer_set(layer, layer->update_buf);
42247+ layer->ops->buffer_set(layer, layer->update_buf);
42248
42249 if (done && done != layer->shadow_buf)
42250 vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
42251diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
42252index ef0efdf..8c78eb6 100644
42253--- a/drivers/media/platform/s5p-tv/mixer_video.c
42254+++ b/drivers/media/platform/s5p-tv/mixer_video.c
42255@@ -209,7 +209,7 @@ static void mxr_layer_default_geo(struct mxr_layer *layer)
42256 layer->geo.src.height = layer->geo.src.full_height;
42257
42258 mxr_geometry_dump(mdev, &layer->geo);
42259- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
42260+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
42261 mxr_geometry_dump(mdev, &layer->geo);
42262 }
42263
42264@@ -227,7 +227,7 @@ static void mxr_layer_update_output(struct mxr_layer *layer)
42265 layer->geo.dst.full_width = mbus_fmt.width;
42266 layer->geo.dst.full_height = mbus_fmt.height;
42267 layer->geo.dst.field = mbus_fmt.field;
42268- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
42269+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
42270
42271 mxr_geometry_dump(mdev, &layer->geo);
42272 }
42273@@ -333,7 +333,7 @@ static int mxr_s_fmt(struct file *file, void *priv,
42274 /* set source size to highest accepted value */
42275 geo->src.full_width = max(geo->dst.full_width, pix->width);
42276 geo->src.full_height = max(geo->dst.full_height, pix->height);
42277- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
42278+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
42279 mxr_geometry_dump(mdev, &layer->geo);
42280 /* set cropping to total visible screen */
42281 geo->src.width = pix->width;
42282@@ -341,12 +341,12 @@ static int mxr_s_fmt(struct file *file, void *priv,
42283 geo->src.x_offset = 0;
42284 geo->src.y_offset = 0;
42285 /* assure consistency of geometry */
42286- layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
42287+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
42288 mxr_geometry_dump(mdev, &layer->geo);
42289 /* set full size to lowest possible value */
42290 geo->src.full_width = 0;
42291 geo->src.full_height = 0;
42292- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
42293+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
42294 mxr_geometry_dump(mdev, &layer->geo);
42295
42296 /* returning results */
42297@@ -473,7 +473,7 @@ static int mxr_s_selection(struct file *file, void *fh,
42298 target->width = s->r.width;
42299 target->height = s->r.height;
42300
42301- layer->ops.fix_geometry(layer, stage, s->flags);
42302+ layer->ops->fix_geometry(layer, stage, s->flags);
42303
42304 /* retrieve update selection rectangle */
42305 res.left = target->x_offset;
42306@@ -954,13 +954,13 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
42307 mxr_output_get(mdev);
42308
42309 mxr_layer_update_output(layer);
42310- layer->ops.format_set(layer);
42311+ layer->ops->format_set(layer);
42312 /* enabling layer in hardware */
42313 spin_lock_irqsave(&layer->enq_slock, flags);
42314 layer->state = MXR_LAYER_STREAMING;
42315 spin_unlock_irqrestore(&layer->enq_slock, flags);
42316
42317- layer->ops.stream_set(layer, MXR_ENABLE);
42318+ layer->ops->stream_set(layer, MXR_ENABLE);
42319 mxr_streamer_get(mdev);
42320
42321 return 0;
42322@@ -1030,7 +1030,7 @@ static int stop_streaming(struct vb2_queue *vq)
42323 spin_unlock_irqrestore(&layer->enq_slock, flags);
42324
42325 /* disabling layer in hardware */
42326- layer->ops.stream_set(layer, MXR_DISABLE);
42327+ layer->ops->stream_set(layer, MXR_DISABLE);
42328 /* remove one streamer */
42329 mxr_streamer_put(mdev);
42330 /* allow changes in output configuration */
42331@@ -1069,8 +1069,8 @@ void mxr_base_layer_unregister(struct mxr_layer *layer)
42332
42333 void mxr_layer_release(struct mxr_layer *layer)
42334 {
42335- if (layer->ops.release)
42336- layer->ops.release(layer);
42337+ if (layer->ops->release)
42338+ layer->ops->release(layer);
42339 }
42340
42341 void mxr_base_layer_release(struct mxr_layer *layer)
42342@@ -1096,7 +1096,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
42343
42344 layer->mdev = mdev;
42345 layer->idx = idx;
42346- layer->ops = *ops;
42347+ layer->ops = ops;
42348
42349 spin_lock_init(&layer->enq_slock);
42350 INIT_LIST_HEAD(&layer->enq_list);
42351diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
42352index 3d13a63..da31bf1 100644
42353--- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
42354+++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
42355@@ -206,7 +206,7 @@ struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
42356 {
42357 struct mxr_layer *layer;
42358 int ret;
42359- struct mxr_layer_ops ops = {
42360+ static struct mxr_layer_ops ops = {
42361 .release = mxr_vp_layer_release,
42362 .buffer_set = mxr_vp_buffer_set,
42363 .stream_set = mxr_vp_stream_set,
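Note: across the s5p-tv hunks each layer stops embedding a writable copy of mxr_layer_ops and instead points at a single static table (the local ops initializers above become static), so the function pointers are no longer duplicated into heap objects that a memory-corruption bug could overwrite. Call sites just gain one indirection; a sketch using the driver's own types:

static void example_stream_on(struct mxr_layer *layer)
{
	/* was: layer->ops.stream_set(...) on a per-object writable copy */
	layer->ops->stream_set(layer, MXR_ENABLE);
}

The earlier omap_vout change is the same idea in a different shape: the videobuf_queue_ops table is initialized at compile time inside the one function that needs it, instead of being patched field by field at open() time.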
42364diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
42365index 545c04c..a14bded 100644
42366--- a/drivers/media/radio/radio-cadet.c
42367+++ b/drivers/media/radio/radio-cadet.c
42368@@ -324,6 +324,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
42369 unsigned char readbuf[RDS_BUFFER];
42370 int i = 0;
42371
42372+ if (count > RDS_BUFFER)
42373+ return -EFAULT;
42374 mutex_lock(&dev->lock);
42375 if (dev->rdsstat == 0)
42376 cadet_start_rds(dev);
42377@@ -339,7 +341,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
42378 while (i < count && dev->rdsin != dev->rdsout)
42379 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
42380
42381- if (i && copy_to_user(data, readbuf, i))
42382+ if (i > sizeof(readbuf) || copy_to_user(data, readbuf, i))
42383 i = -EFAULT;
42384 unlock:
42385 mutex_unlock(&dev->lock);
42386diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
42387index 3940bb0..fb3952a 100644
42388--- a/drivers/media/usb/dvb-usb/cxusb.c
42389+++ b/drivers/media/usb/dvb-usb/cxusb.c
42390@@ -1068,7 +1068,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
42391
42392 struct dib0700_adapter_state {
42393 int (*set_param_save) (struct dvb_frontend *);
42394-};
42395+} __no_const;
42396
42397 static int dib7070_set_param_override(struct dvb_frontend *fe)
42398 {
42399diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
42400index 6e237b6..dc25556 100644
42401--- a/drivers/media/usb/dvb-usb/dw2102.c
42402+++ b/drivers/media/usb/dvb-usb/dw2102.c
42403@@ -118,7 +118,7 @@ struct su3000_state {
42404
42405 struct s6x0_state {
42406 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
42407-};
42408+} __no_const;
42409
42410 /* debug */
42411 static int dvb_usb_dw2102_debug;
42412diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
42413index f129551..ecf6514 100644
42414--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
42415+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
42416@@ -326,7 +326,7 @@ struct v4l2_buffer32 {
42417 __u32 reserved;
42418 };
42419
42420-static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
42421+static int get_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
42422 enum v4l2_memory memory)
42423 {
42424 void __user *up_pln;
42425@@ -355,7 +355,7 @@ static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
42426 return 0;
42427 }
42428
42429-static int put_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
42430+static int put_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
42431 enum v4l2_memory memory)
42432 {
42433 if (copy_in_user(up32, up, 2 * sizeof(__u32)) ||
42434@@ -772,7 +772,7 @@ static int put_v4l2_subdev_edid32(struct v4l2_subdev_edid *kp, struct v4l2_subde
42435 put_user(kp->start_block, &up->start_block) ||
42436 put_user(kp->blocks, &up->blocks) ||
42437 put_user(tmp, &up->edid) ||
42438- copy_to_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
42439+ copy_to_user(up->reserved, kp->reserved, sizeof(kp->reserved)))
42440 return -EFAULT;
42441 return 0;
42442 }
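Note: the last v4l2-compat hunk is a plain bugfix rather than a hardening change. copy_to_user() takes (user destination, kernel source, size); the old code passed the kernel buffer as the destination and the user pointer as the source, so the call was both wrong and unsafe. The corrected order, for reference:

	if (copy_to_user(up->reserved, kp->reserved, sizeof(kp->reserved)))
		return -EFAULT;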
42443diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
42444index 7658586..1079260 100644
42445--- a/drivers/media/v4l2-core/v4l2-ioctl.c
42446+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
42447@@ -1995,7 +1995,8 @@ struct v4l2_ioctl_info {
42448 struct file *file, void *fh, void *p);
42449 } u;
42450 void (*debug)(const void *arg, bool write_only);
42451-};
42452+} __do_const;
42453+typedef struct v4l2_ioctl_info __no_const v4l2_ioctl_info_no_const;
42454
42455 /* This control needs a priority check */
42456 #define INFO_FL_PRIO (1 << 0)
42457@@ -2177,7 +2178,7 @@ static long __video_do_ioctl(struct file *file,
42458 struct video_device *vfd = video_devdata(file);
42459 const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
42460 bool write_only = false;
42461- struct v4l2_ioctl_info default_info;
42462+ v4l2_ioctl_info_no_const default_info;
42463 const struct v4l2_ioctl_info *info;
42464 void *fh = file->private_data;
42465 struct v4l2_fh *vfh = NULL;
42466@@ -2251,7 +2252,7 @@ done:
42467 }
42468
42469 static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
42470- void * __user *user_ptr, void ***kernel_ptr)
42471+ void __user **user_ptr, void ***kernel_ptr)
42472 {
42473 int ret = 0;
42474
42475@@ -2267,7 +2268,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
42476 ret = -EINVAL;
42477 break;
42478 }
42479- *user_ptr = (void __user *)buf->m.planes;
42480+ *user_ptr = (void __force_user *)buf->m.planes;
42481 *kernel_ptr = (void *)&buf->m.planes;
42482 *array_size = sizeof(struct v4l2_plane) * buf->length;
42483 ret = 1;
42484@@ -2302,7 +2303,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
42485 ret = -EINVAL;
42486 break;
42487 }
42488- *user_ptr = (void __user *)ctrls->controls;
42489+ *user_ptr = (void __force_user *)ctrls->controls;
42490 *kernel_ptr = (void *)&ctrls->controls;
42491 *array_size = sizeof(struct v4l2_ext_control)
42492 * ctrls->count;
42493diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
42494index 767ff4d..c69d259 100644
42495--- a/drivers/message/fusion/mptbase.c
42496+++ b/drivers/message/fusion/mptbase.c
42497@@ -6755,8 +6755,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
42498 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
42499 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
42500
42501+#ifdef CONFIG_GRKERNSEC_HIDESYM
42502+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
42503+#else
42504 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
42505 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
42506+#endif
42507+
42508 /*
42509 * Rounding UP to nearest 4-kB boundary here...
42510 */
42511@@ -6769,7 +6774,11 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
42512 ioc->facts.GlobalCredits);
42513
42514 seq_printf(m, " Frames @ 0x%p (Dma @ 0x%p)\n",
42515+#ifdef CONFIG_GRKERNSEC_HIDESYM
42516+ NULL, NULL);
42517+#else
42518 (void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma);
42519+#endif
42520 sz = (ioc->reply_sz * ioc->reply_depth) + 128;
42521 seq_printf(m, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n",
42522 ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz);
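Note: both mptbase hunks wrap the /proc output in CONFIG_GRKERNSEC_HIDESYM so kernel and DMA addresses are not handed to unprivileged readers; the format strings stay intact and the pointer arguments are simply replaced. The shape of the pattern, sketched on a hypothetical field:

#ifdef CONFIG_GRKERNSEC_HIDESYM
	seq_printf(m, "  Frames @ 0x%p\n", NULL);	/* hide the address */
#else
	seq_printf(m, "  Frames @ 0x%p\n", (void *)example_buf);
#endif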
42523diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
42524index dd239bd..689c4f7 100644
42525--- a/drivers/message/fusion/mptsas.c
42526+++ b/drivers/message/fusion/mptsas.c
42527@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
42528 return 0;
42529 }
42530
42531+static inline void
42532+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
42533+{
42534+ if (phy_info->port_details) {
42535+ phy_info->port_details->rphy = rphy;
42536+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
42537+ ioc->name, rphy));
42538+ }
42539+
42540+ if (rphy) {
42541+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
42542+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
42543+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
42544+ ioc->name, rphy, rphy->dev.release));
42545+ }
42546+}
42547+
42548 /* no mutex */
42549 static void
42550 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
42551@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
42552 return NULL;
42553 }
42554
42555-static inline void
42556-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
42557-{
42558- if (phy_info->port_details) {
42559- phy_info->port_details->rphy = rphy;
42560- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
42561- ioc->name, rphy));
42562- }
42563-
42564- if (rphy) {
42565- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
42566- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
42567- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
42568- ioc->name, rphy, rphy->dev.release));
42569- }
42570-}
42571-
42572 static inline struct sas_port *
42573 mptsas_get_port(struct mptsas_phyinfo *phy_info)
42574 {
42575diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
42576index 727819c..ad74694 100644
42577--- a/drivers/message/fusion/mptscsih.c
42578+++ b/drivers/message/fusion/mptscsih.c
42579@@ -1271,15 +1271,16 @@ mptscsih_info(struct Scsi_Host *SChost)
42580
42581 h = shost_priv(SChost);
42582
42583- if (h) {
42584- if (h->info_kbuf == NULL)
42585- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
42586- return h->info_kbuf;
42587- h->info_kbuf[0] = '\0';
42588+ if (!h)
42589+ return NULL;
42590
42591- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
42592- h->info_kbuf[size-1] = '\0';
42593- }
42594+ if (h->info_kbuf == NULL)
42595+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
42596+ return h->info_kbuf;
42597+ h->info_kbuf[0] = '\0';
42598+
42599+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
42600+ h->info_kbuf[size-1] = '\0';
42601
42602 return h->info_kbuf;
42603 }
42604diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
42605index b7d87cd..9890039 100644
42606--- a/drivers/message/i2o/i2o_proc.c
42607+++ b/drivers/message/i2o/i2o_proc.c
42608@@ -255,12 +255,6 @@ static char *scsi_devices[] = {
42609 "Array Controller Device"
42610 };
42611
42612-static char *chtostr(char *tmp, u8 *chars, int n)
42613-{
42614- tmp[0] = 0;
42615- return strncat(tmp, (char *)chars, n);
42616-}
42617-
42618 static int i2o_report_query_status(struct seq_file *seq, int block_status,
42619 char *group)
42620 {
42621@@ -790,7 +784,6 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
42622 } *result;
42623
42624 i2o_exec_execute_ddm_table ddm_table;
42625- char tmp[28 + 1];
42626
42627 result = kmalloc(sizeof(*result), GFP_KERNEL);
42628 if (!result)
42629@@ -825,8 +818,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
42630
42631 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
42632 seq_printf(seq, "%-#8x", ddm_table.module_id);
42633- seq_printf(seq, "%-29s",
42634- chtostr(tmp, ddm_table.module_name_version, 28));
42635+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
42636 seq_printf(seq, "%9d ", ddm_table.data_size);
42637 seq_printf(seq, "%8d", ddm_table.code_size);
42638
42639@@ -893,7 +885,6 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
42640
42641 i2o_driver_result_table *result;
42642 i2o_driver_store_table *dst;
42643- char tmp[28 + 1];
42644
42645 result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
42646 if (result == NULL)
42647@@ -928,9 +919,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
42648
42649 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
42650 seq_printf(seq, "%-#8x", dst->module_id);
42651- seq_printf(seq, "%-29s",
42652- chtostr(tmp, dst->module_name_version, 28));
42653- seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8));
42654+ seq_printf(seq, "%-.28s", dst->module_name_version);
42655+ seq_printf(seq, "%-.8s", dst->date);
42656 seq_printf(seq, "%8d ", dst->module_size);
42657 seq_printf(seq, "%8d ", dst->mpb_size);
42658 seq_printf(seq, "0x%04x", dst->module_flags);
42659@@ -1250,7 +1240,6 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
42660 // == (allow) 512d bytes (max)
42661 static u16 *work16 = (u16 *) work32;
42662 int token;
42663- char tmp[16 + 1];
42664
42665 token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
42666
42667@@ -1262,14 +1251,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
42668 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
42669 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
42670 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
42671- seq_printf(seq, "Vendor info : %s\n",
42672- chtostr(tmp, (u8 *) (work32 + 2), 16));
42673- seq_printf(seq, "Product info : %s\n",
42674- chtostr(tmp, (u8 *) (work32 + 6), 16));
42675- seq_printf(seq, "Description : %s\n",
42676- chtostr(tmp, (u8 *) (work32 + 10), 16));
42677- seq_printf(seq, "Product rev. : %s\n",
42678- chtostr(tmp, (u8 *) (work32 + 14), 8));
42679+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
42680+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
42681+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
42682+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
42683
42684 seq_printf(seq, "Serial number : ");
42685 print_serial_number(seq, (u8 *) (work32 + 16),
42686@@ -1306,8 +1291,6 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
42687 u8 pad[256]; // allow up to 256 byte (max) serial number
42688 } result;
42689
42690- char tmp[24 + 1];
42691-
42692 token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
42693
42694 if (token < 0) {
42695@@ -1316,10 +1299,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
42696 }
42697
42698 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
42699- seq_printf(seq, "Module name : %s\n",
42700- chtostr(tmp, result.module_name, 24));
42701- seq_printf(seq, "Module revision : %s\n",
42702- chtostr(tmp, result.module_rev, 8));
42703+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
42704+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
42705
42706 seq_printf(seq, "Serial number : ");
42707 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
42708@@ -1343,8 +1324,6 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
42709 u8 instance_number[4];
42710 } result;
42711
42712- char tmp[64 + 1];
42713-
42714 token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
42715
42716 if (token < 0) {
42717@@ -1352,14 +1331,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
42718 return 0;
42719 }
42720
42721- seq_printf(seq, "Device name : %s\n",
42722- chtostr(tmp, result.device_name, 64));
42723- seq_printf(seq, "Service name : %s\n",
42724- chtostr(tmp, result.service_name, 64));
42725- seq_printf(seq, "Physical name : %s\n",
42726- chtostr(tmp, result.physical_location, 64));
42727- seq_printf(seq, "Instance number : %s\n",
42728- chtostr(tmp, result.instance_number, 4));
42729+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
42730+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
42731+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
42732+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
42733
42734 return 0;
42735 }
42736diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
42737index a8c08f3..155fe3d 100644
42738--- a/drivers/message/i2o/iop.c
42739+++ b/drivers/message/i2o/iop.c
42740@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
42741
42742 spin_lock_irqsave(&c->context_list_lock, flags);
42743
42744- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
42745- atomic_inc(&c->context_list_counter);
42746+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
42747+ atomic_inc_unchecked(&c->context_list_counter);
42748
42749- entry->context = atomic_read(&c->context_list_counter);
42750+ entry->context = atomic_read_unchecked(&c->context_list_counter);
42751
42752 list_add(&entry->list, &c->context_list);
42753
42754@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
42755
42756 #if BITS_PER_LONG == 64
42757 spin_lock_init(&c->context_list_lock);
42758- atomic_set(&c->context_list_counter, 0);
42759+ atomic_set_unchecked(&c->context_list_counter, 0);
42760 INIT_LIST_HEAD(&c->context_list);
42761 #endif
42762
42763diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
42764index 45ece11..8efa218 100644
42765--- a/drivers/mfd/janz-cmodio.c
42766+++ b/drivers/mfd/janz-cmodio.c
42767@@ -13,6 +13,7 @@
42768
42769 #include <linux/kernel.h>
42770 #include <linux/module.h>
42771+#include <linux/slab.h>
42772 #include <linux/init.h>
42773 #include <linux/pci.h>
42774 #include <linux/interrupt.h>
42775diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
42776index a5f9888..1c0ed56 100644
42777--- a/drivers/mfd/twl4030-irq.c
42778+++ b/drivers/mfd/twl4030-irq.c
42779@@ -35,6 +35,7 @@
42780 #include <linux/of.h>
42781 #include <linux/irqdomain.h>
42782 #include <linux/i2c/twl.h>
42783+#include <asm/pgtable.h>
42784
42785 #include "twl-core.h"
42786
42787@@ -728,10 +729,12 @@ int twl4030_init_irq(struct device *dev, int irq_num)
42788 * Install an irq handler for each of the SIH modules;
42789 * clone dummy irq_chip since PIH can't *do* anything
42790 */
42791- twl4030_irq_chip = dummy_irq_chip;
42792- twl4030_irq_chip.name = "twl4030";
42793+ pax_open_kernel();
42794+ memcpy((void *)&twl4030_irq_chip, &dummy_irq_chip, sizeof twl4030_irq_chip);
42795+ *(const char **)&twl4030_irq_chip.name = "twl4030";
42796
42797- twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
42798+ *(void **)&twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
42799+ pax_close_kernel();
42800
42801 for (i = irq_base; i < irq_end; i++) {
42802 irq_set_chip_and_handler(i, &twl4030_irq_chip,
42803diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c
42804index 277a8db..0e0b754 100644
42805--- a/drivers/mfd/twl6030-irq.c
42806+++ b/drivers/mfd/twl6030-irq.c
42807@@ -387,10 +387,12 @@ int twl6030_init_irq(struct device *dev, int irq_num)
42808 * install an irq handler for each of the modules;
42809 * clone dummy irq_chip since PIH can't *do* anything
42810 */
42811- twl6030_irq_chip = dummy_irq_chip;
42812- twl6030_irq_chip.name = "twl6030";
42813- twl6030_irq_chip.irq_set_type = NULL;
42814- twl6030_irq_chip.irq_set_wake = twl6030_irq_set_wake;
42815+ pax_open_kernel();
42816+ memcpy((void *)&twl6030_irq_chip, &dummy_irq_chip, sizeof twl6030_irq_chip);
42817+ *(const char **)&twl6030_irq_chip.name = "twl6030";
42818+ *(void **)&twl6030_irq_chip.irq_set_type = NULL;
42819+ *(void **)&twl6030_irq_chip.irq_set_wake = twl6030_irq_set_wake;
42820+ pax_close_kernel();
42821
42822 for (i = irq_base; i < irq_end; i++) {
42823 irq_set_chip_and_handler(i, &twl6030_irq_chip,
42824diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
42825index f32550a..e3e52a2 100644
42826--- a/drivers/misc/c2port/core.c
42827+++ b/drivers/misc/c2port/core.c
42828@@ -920,7 +920,9 @@ struct c2port_device *c2port_device_register(char *name,
42829 mutex_init(&c2dev->mutex);
42830
42831 /* Create binary file */
42832- c2port_bin_attrs.size = ops->blocks_num * ops->block_size;
42833+ pax_open_kernel();
42834+ *(size_t *)&c2port_bin_attrs.size = ops->blocks_num * ops->block_size;
42835+ pax_close_kernel();
42836 ret = device_create_bin_file(c2dev->dev, &c2port_bin_attrs);
42837 if (unlikely(ret))
42838 goto error_device_create_bin_file;
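Note: twl4030-irq, twl6030-irq and c2port all patch fields of objects that constify/KERNEXEC place in read-only memory (an irq_chip template, a bin_attribute size), so the writes are bracketed with pax_open_kernel()/pax_close_kernel() and go through casts that strip the const qualification the plugin imposes. A condensed sketch of the bracket (dummy_irq_chip is the real kernel symbol; everything named "example" is illustrative):

static struct irq_chip example_chip;	/* normally made read-only by constify */

static void example_clone_chip(void)
{
	pax_open_kernel();	/* lift write protection around the update */
	memcpy((void *)&example_chip, &dummy_irq_chip, sizeof example_chip);
	*(const char **)&example_chip.name = "example";
	pax_close_kernel();	/* restore it */
}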
42839diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
42840index 36f5d52..32311c3 100644
42841--- a/drivers/misc/kgdbts.c
42842+++ b/drivers/misc/kgdbts.c
42843@@ -834,7 +834,7 @@ static void run_plant_and_detach_test(int is_early)
42844 char before[BREAK_INSTR_SIZE];
42845 char after[BREAK_INSTR_SIZE];
42846
42847- probe_kernel_read(before, (char *)kgdbts_break_test,
42848+ probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test),
42849 BREAK_INSTR_SIZE);
42850 init_simple_test();
42851 ts.tst = plant_and_detach_test;
42852@@ -842,7 +842,7 @@ static void run_plant_and_detach_test(int is_early)
42853 /* Activate test with initial breakpoint */
42854 if (!is_early)
42855 kgdb_breakpoint();
42856- probe_kernel_read(after, (char *)kgdbts_break_test,
42857+ probe_kernel_read(after, ktla_ktva((char *)kgdbts_break_test),
42858 BREAK_INSTR_SIZE);
42859 if (memcmp(before, after, BREAK_INSTR_SIZE)) {
42860 printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
42861diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
42862index 4cd4a3d..b48cbc7 100644
42863--- a/drivers/misc/lis3lv02d/lis3lv02d.c
42864+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
42865@@ -498,7 +498,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
42866 * the lid is closed. This leads to interrupts as soon as a little move
42867 * is done.
42868 */
42869- atomic_inc(&lis3->count);
42870+ atomic_inc_unchecked(&lis3->count);
42871
42872 wake_up_interruptible(&lis3->misc_wait);
42873 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
42874@@ -584,7 +584,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
42875 if (lis3->pm_dev)
42876 pm_runtime_get_sync(lis3->pm_dev);
42877
42878- atomic_set(&lis3->count, 0);
42879+ atomic_set_unchecked(&lis3->count, 0);
42880 return 0;
42881 }
42882
42883@@ -616,7 +616,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
42884 add_wait_queue(&lis3->misc_wait, &wait);
42885 while (true) {
42886 set_current_state(TASK_INTERRUPTIBLE);
42887- data = atomic_xchg(&lis3->count, 0);
42888+ data = atomic_xchg_unchecked(&lis3->count, 0);
42889 if (data)
42890 break;
42891
42892@@ -657,7 +657,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
42893 struct lis3lv02d, miscdev);
42894
42895 poll_wait(file, &lis3->misc_wait, wait);
42896- if (atomic_read(&lis3->count))
42897+ if (atomic_read_unchecked(&lis3->count))
42898 return POLLIN | POLLRDNORM;
42899 return 0;
42900 }
42901diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
42902index c439c82..1f20f57 100644
42903--- a/drivers/misc/lis3lv02d/lis3lv02d.h
42904+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
42905@@ -297,7 +297,7 @@ struct lis3lv02d {
42906 struct input_polled_dev *idev; /* input device */
42907 struct platform_device *pdev; /* platform device */
42908 struct regulator_bulk_data regulators[2];
42909- atomic_t count; /* interrupt count after last read */
42910+ atomic_unchecked_t count; /* interrupt count after last read */
42911 union axis_conversion ac; /* hw -> logical axis */
42912 int mapped_btns[3];
42913
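[note] The lis3lv02d conversion above, and the much larger sgi-gru one that follows, swap atomic_t for atomic_unchecked_t on pure statistics. Under the patch's REFCOUNT hardening, ordinary atomic_t arithmetic traps on overflow to stop reference-count exploits; counters whose wrap-around is harmless opt out via the _unchecked variants. A user-space model of the distinction (the real feature instruments the atomics with asm, not a branch; the unsigned counter below is a concession to defined C behaviour):

  #include <limits.h>
  #include <stdio.h>
  #include <stdlib.h>

  typedef struct { int counter; } atomic_t;
  typedef struct { unsigned int counter; } atomic_unchecked_t;

  /* Checked increment: refuse to wrap (models PAX_REFCOUNT). */
  static void atomic_inc(atomic_t *v)
  {
  	if (v->counter == INT_MAX) {
  		fprintf(stderr, "refcount overflow detected\n");
  		abort();
  	}
  	v->counter++;
  }

  /* Unchecked increment: wrap-around is fine for statistics. */
  static void atomic_inc_unchecked(atomic_unchecked_t *v)
  {
  	v->counter++;
  }

  int main(void)
  {
  	atomic_t ref = { 0 };
  	atomic_unchecked_t stat = { UINT_MAX };

  	atomic_inc(&ref);            /* guarded */
  	atomic_inc_unchecked(&stat); /* wraps to 0, by design */
  	return 0;
  }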
42914diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
42915index 2f30bad..c4c13d0 100644
42916--- a/drivers/misc/sgi-gru/gruhandles.c
42917+++ b/drivers/misc/sgi-gru/gruhandles.c
42918@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
42919 unsigned long nsec;
42920
42921 nsec = CLKS2NSEC(clks);
42922- atomic_long_inc(&mcs_op_statistics[op].count);
42923- atomic_long_add(nsec, &mcs_op_statistics[op].total);
42924+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
42925+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
42926 if (mcs_op_statistics[op].max < nsec)
42927 mcs_op_statistics[op].max = nsec;
42928 }
42929diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
42930index 797d796..ae8f01e 100644
42931--- a/drivers/misc/sgi-gru/gruprocfs.c
42932+++ b/drivers/misc/sgi-gru/gruprocfs.c
42933@@ -32,9 +32,9 @@
42934
42935 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
42936
42937-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
42938+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
42939 {
42940- unsigned long val = atomic_long_read(v);
42941+ unsigned long val = atomic_long_read_unchecked(v);
42942
42943 seq_printf(s, "%16lu %s\n", val, id);
42944 }
42945@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
42946
42947 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
42948 for (op = 0; op < mcsop_last; op++) {
42949- count = atomic_long_read(&mcs_op_statistics[op].count);
42950- total = atomic_long_read(&mcs_op_statistics[op].total);
42951+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
42952+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
42953 max = mcs_op_statistics[op].max;
42954 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
42955 count ? total / count : 0, max);
42956diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
42957index 5c3ce24..4915ccb 100644
42958--- a/drivers/misc/sgi-gru/grutables.h
42959+++ b/drivers/misc/sgi-gru/grutables.h
42960@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
42961 * GRU statistics.
42962 */
42963 struct gru_stats_s {
42964- atomic_long_t vdata_alloc;
42965- atomic_long_t vdata_free;
42966- atomic_long_t gts_alloc;
42967- atomic_long_t gts_free;
42968- atomic_long_t gms_alloc;
42969- atomic_long_t gms_free;
42970- atomic_long_t gts_double_allocate;
42971- atomic_long_t assign_context;
42972- atomic_long_t assign_context_failed;
42973- atomic_long_t free_context;
42974- atomic_long_t load_user_context;
42975- atomic_long_t load_kernel_context;
42976- atomic_long_t lock_kernel_context;
42977- atomic_long_t unlock_kernel_context;
42978- atomic_long_t steal_user_context;
42979- atomic_long_t steal_kernel_context;
42980- atomic_long_t steal_context_failed;
42981- atomic_long_t nopfn;
42982- atomic_long_t asid_new;
42983- atomic_long_t asid_next;
42984- atomic_long_t asid_wrap;
42985- atomic_long_t asid_reuse;
42986- atomic_long_t intr;
42987- atomic_long_t intr_cbr;
42988- atomic_long_t intr_tfh;
42989- atomic_long_t intr_spurious;
42990- atomic_long_t intr_mm_lock_failed;
42991- atomic_long_t call_os;
42992- atomic_long_t call_os_wait_queue;
42993- atomic_long_t user_flush_tlb;
42994- atomic_long_t user_unload_context;
42995- atomic_long_t user_exception;
42996- atomic_long_t set_context_option;
42997- atomic_long_t check_context_retarget_intr;
42998- atomic_long_t check_context_unload;
42999- atomic_long_t tlb_dropin;
43000- atomic_long_t tlb_preload_page;
43001- atomic_long_t tlb_dropin_fail_no_asid;
43002- atomic_long_t tlb_dropin_fail_upm;
43003- atomic_long_t tlb_dropin_fail_invalid;
43004- atomic_long_t tlb_dropin_fail_range_active;
43005- atomic_long_t tlb_dropin_fail_idle;
43006- atomic_long_t tlb_dropin_fail_fmm;
43007- atomic_long_t tlb_dropin_fail_no_exception;
43008- atomic_long_t tfh_stale_on_fault;
43009- atomic_long_t mmu_invalidate_range;
43010- atomic_long_t mmu_invalidate_page;
43011- atomic_long_t flush_tlb;
43012- atomic_long_t flush_tlb_gru;
43013- atomic_long_t flush_tlb_gru_tgh;
43014- atomic_long_t flush_tlb_gru_zero_asid;
43015+ atomic_long_unchecked_t vdata_alloc;
43016+ atomic_long_unchecked_t vdata_free;
43017+ atomic_long_unchecked_t gts_alloc;
43018+ atomic_long_unchecked_t gts_free;
43019+ atomic_long_unchecked_t gms_alloc;
43020+ atomic_long_unchecked_t gms_free;
43021+ atomic_long_unchecked_t gts_double_allocate;
43022+ atomic_long_unchecked_t assign_context;
43023+ atomic_long_unchecked_t assign_context_failed;
43024+ atomic_long_unchecked_t free_context;
43025+ atomic_long_unchecked_t load_user_context;
43026+ atomic_long_unchecked_t load_kernel_context;
43027+ atomic_long_unchecked_t lock_kernel_context;
43028+ atomic_long_unchecked_t unlock_kernel_context;
43029+ atomic_long_unchecked_t steal_user_context;
43030+ atomic_long_unchecked_t steal_kernel_context;
43031+ atomic_long_unchecked_t steal_context_failed;
43032+ atomic_long_unchecked_t nopfn;
43033+ atomic_long_unchecked_t asid_new;
43034+ atomic_long_unchecked_t asid_next;
43035+ atomic_long_unchecked_t asid_wrap;
43036+ atomic_long_unchecked_t asid_reuse;
43037+ atomic_long_unchecked_t intr;
43038+ atomic_long_unchecked_t intr_cbr;
43039+ atomic_long_unchecked_t intr_tfh;
43040+ atomic_long_unchecked_t intr_spurious;
43041+ atomic_long_unchecked_t intr_mm_lock_failed;
43042+ atomic_long_unchecked_t call_os;
43043+ atomic_long_unchecked_t call_os_wait_queue;
43044+ atomic_long_unchecked_t user_flush_tlb;
43045+ atomic_long_unchecked_t user_unload_context;
43046+ atomic_long_unchecked_t user_exception;
43047+ atomic_long_unchecked_t set_context_option;
43048+ atomic_long_unchecked_t check_context_retarget_intr;
43049+ atomic_long_unchecked_t check_context_unload;
43050+ atomic_long_unchecked_t tlb_dropin;
43051+ atomic_long_unchecked_t tlb_preload_page;
43052+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
43053+ atomic_long_unchecked_t tlb_dropin_fail_upm;
43054+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
43055+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
43056+ atomic_long_unchecked_t tlb_dropin_fail_idle;
43057+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
43058+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
43059+ atomic_long_unchecked_t tfh_stale_on_fault;
43060+ atomic_long_unchecked_t mmu_invalidate_range;
43061+ atomic_long_unchecked_t mmu_invalidate_page;
43062+ atomic_long_unchecked_t flush_tlb;
43063+ atomic_long_unchecked_t flush_tlb_gru;
43064+ atomic_long_unchecked_t flush_tlb_gru_tgh;
43065+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
43066
43067- atomic_long_t copy_gpa;
43068- atomic_long_t read_gpa;
43069+ atomic_long_unchecked_t copy_gpa;
43070+ atomic_long_unchecked_t read_gpa;
43071
43072- atomic_long_t mesq_receive;
43073- atomic_long_t mesq_receive_none;
43074- atomic_long_t mesq_send;
43075- atomic_long_t mesq_send_failed;
43076- atomic_long_t mesq_noop;
43077- atomic_long_t mesq_send_unexpected_error;
43078- atomic_long_t mesq_send_lb_overflow;
43079- atomic_long_t mesq_send_qlimit_reached;
43080- atomic_long_t mesq_send_amo_nacked;
43081- atomic_long_t mesq_send_put_nacked;
43082- atomic_long_t mesq_page_overflow;
43083- atomic_long_t mesq_qf_locked;
43084- atomic_long_t mesq_qf_noop_not_full;
43085- atomic_long_t mesq_qf_switch_head_failed;
43086- atomic_long_t mesq_qf_unexpected_error;
43087- atomic_long_t mesq_noop_unexpected_error;
43088- atomic_long_t mesq_noop_lb_overflow;
43089- atomic_long_t mesq_noop_qlimit_reached;
43090- atomic_long_t mesq_noop_amo_nacked;
43091- atomic_long_t mesq_noop_put_nacked;
43092- atomic_long_t mesq_noop_page_overflow;
43093+ atomic_long_unchecked_t mesq_receive;
43094+ atomic_long_unchecked_t mesq_receive_none;
43095+ atomic_long_unchecked_t mesq_send;
43096+ atomic_long_unchecked_t mesq_send_failed;
43097+ atomic_long_unchecked_t mesq_noop;
43098+ atomic_long_unchecked_t mesq_send_unexpected_error;
43099+ atomic_long_unchecked_t mesq_send_lb_overflow;
43100+ atomic_long_unchecked_t mesq_send_qlimit_reached;
43101+ atomic_long_unchecked_t mesq_send_amo_nacked;
43102+ atomic_long_unchecked_t mesq_send_put_nacked;
43103+ atomic_long_unchecked_t mesq_page_overflow;
43104+ atomic_long_unchecked_t mesq_qf_locked;
43105+ atomic_long_unchecked_t mesq_qf_noop_not_full;
43106+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
43107+ atomic_long_unchecked_t mesq_qf_unexpected_error;
43108+ atomic_long_unchecked_t mesq_noop_unexpected_error;
43109+ atomic_long_unchecked_t mesq_noop_lb_overflow;
43110+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
43111+ atomic_long_unchecked_t mesq_noop_amo_nacked;
43112+ atomic_long_unchecked_t mesq_noop_put_nacked;
43113+ atomic_long_unchecked_t mesq_noop_page_overflow;
43114
43115 };
43116
43117@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
43118 tghop_invalidate, mcsop_last};
43119
43120 struct mcs_op_statistic {
43121- atomic_long_t count;
43122- atomic_long_t total;
43123+ atomic_long_unchecked_t count;
43124+ atomic_long_unchecked_t total;
43125 unsigned long max;
43126 };
43127
43128@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
43129
43130 #define STAT(id) do { \
43131 if (gru_options & OPT_STATS) \
43132- atomic_long_inc(&gru_stats.id); \
43133+ atomic_long_inc_unchecked(&gru_stats.id); \
43134 } while (0)
43135
43136 #ifdef CONFIG_SGI_GRU_DEBUG
43137diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
43138index c862cd4..0d176fe 100644
43139--- a/drivers/misc/sgi-xp/xp.h
43140+++ b/drivers/misc/sgi-xp/xp.h
43141@@ -288,7 +288,7 @@ struct xpc_interface {
43142 xpc_notify_func, void *);
43143 void (*received) (short, int, void *);
43144 enum xp_retval (*partid_to_nasids) (short, void *);
43145-};
43146+} __no_const;
43147
43148 extern struct xpc_interface xpc_interface;
43149
43150diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
43151index b94d5f7..7f494c5 100644
43152--- a/drivers/misc/sgi-xp/xpc.h
43153+++ b/drivers/misc/sgi-xp/xpc.h
43154@@ -835,6 +835,7 @@ struct xpc_arch_operations {
43155 void (*received_payload) (struct xpc_channel *, void *);
43156 void (*notify_senders_of_disconnect) (struct xpc_channel *);
43157 };
43158+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
43159
43160 /* struct xpc_partition act_state values (for XPC HB) */
43161
43162@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
43163 /* found in xpc_main.c */
43164 extern struct device *xpc_part;
43165 extern struct device *xpc_chan;
43166-extern struct xpc_arch_operations xpc_arch_ops;
43167+extern xpc_arch_operations_no_const xpc_arch_ops;
43168 extern int xpc_disengage_timelimit;
43169 extern int xpc_disengage_timedout;
43170 extern int xpc_activate_IRQ_rcvd;
43171diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
43172index d971817..33bdca5 100644
43173--- a/drivers/misc/sgi-xp/xpc_main.c
43174+++ b/drivers/misc/sgi-xp/xpc_main.c
43175@@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = {
43176 .notifier_call = xpc_system_die,
43177 };
43178
43179-struct xpc_arch_operations xpc_arch_ops;
43180+xpc_arch_operations_no_const xpc_arch_ops;
43181
43182 /*
43183 * Timer function to enforce the timelimit on the partition disengage.
43184@@ -1210,7 +1210,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
43185
43186 if (((die_args->trapnr == X86_TRAP_MF) ||
43187 (die_args->trapnr == X86_TRAP_XF)) &&
43188- !user_mode_vm(die_args->regs))
43189+ !user_mode(die_args->regs))
43190 xpc_die_deactivate();
43191
43192 break;
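[note] The three sgi-xp hunks above belong together: xpc_interface and xpc_arch_ops are tables of function pointers that XPC legitimately repopulates at runtime, so they must be exempted from the patch's constify plugin, which would otherwise move every all-function-pointer struct into read-only memory. Roughly how the annotation works; the exact definition is an assumption paraphrased from the patch's compiler.h changes:

  /* With the constify gcc plugin, structs made only of function
   * pointers become implicitly const; __no_const exempts a type
   * that is written after init. Without the plugin it is empty. */
  #ifdef CONSTIFY_PLUGIN
  #define __no_const __attribute__((no_const))
  #else
  #define __no_const
  #endif

  /* The typedef seen in xpc.h then names a writable alias: */
  struct ops_table { void (*fn)(void); };
  typedef struct ops_table __no_const ops_table_no_const;

The user_mode_vm() to user_mode() change in the same hunk is a separate, x86-specific adjustment that travels with this patch.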
43193diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
43194index 49f04bc..65660c2 100644
43195--- a/drivers/mmc/core/mmc_ops.c
43196+++ b/drivers/mmc/core/mmc_ops.c
43197@@ -247,7 +247,7 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
43198 void *data_buf;
43199 int is_on_stack;
43200
43201- is_on_stack = object_is_on_stack(buf);
43202+ is_on_stack = object_starts_on_stack(buf);
43203 if (is_on_stack) {
43204 /*
43205 * dma onto stack is unsafe/nonportable, but callers to this
43206diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
43207index 0b74189..818358f 100644
43208--- a/drivers/mmc/host/dw_mmc.h
43209+++ b/drivers/mmc/host/dw_mmc.h
43210@@ -202,5 +202,5 @@ struct dw_mci_drv_data {
43211 void (*prepare_command)(struct dw_mci *host, u32 *cmdr);
43212 void (*set_ios)(struct dw_mci *host, struct mmc_ios *ios);
43213 int (*parse_dt)(struct dw_mci *host);
43214-};
43215+} __do_const;
43216 #endif /* _DW_MMC_H_ */
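[note] dw_mci_drv_data goes the other way: it is a callback table only ever defined statically, so the patch pins it down with __do_const, asking the hardening to treat every instance as read-only data. A sketch mirroring the one above; the attribute spelling is again an assumption:

  /* Force instances of this type into read-only data even where the
   * constify plugin would not have constified them on its own. */
  #ifdef CONSTIFY_PLUGIN
  #define __do_const __attribute__((do_const))
  #else
  #define __do_const
  #endif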
43217diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
43218index c6f6246..60760a8 100644
43219--- a/drivers/mmc/host/sdhci-s3c.c
43220+++ b/drivers/mmc/host/sdhci-s3c.c
43221@@ -664,9 +664,11 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
43222 * we can use overriding functions instead of default.
43223 */
43224 if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) {
43225- sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
43226- sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
43227- sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
43228+ pax_open_kernel();
43229+ *(void **)&sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
43230+ *(void **)&sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
43231+ *(void **)&sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
43232+ pax_close_kernel();
43233 }
43234
43235 /* It supports additional host capabilities if needed */
43236diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
43237index 0c8bb6b..6f35deb 100644
43238--- a/drivers/mtd/nand/denali.c
43239+++ b/drivers/mtd/nand/denali.c
43240@@ -24,6 +24,7 @@
43241 #include <linux/slab.h>
43242 #include <linux/mtd/mtd.h>
43243 #include <linux/module.h>
43244+#include <linux/slab.h>
43245
43246 #include "denali.h"
43247
43248diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
43249index 51b9d6a..52af9a7 100644
43250--- a/drivers/mtd/nftlmount.c
43251+++ b/drivers/mtd/nftlmount.c
43252@@ -24,6 +24,7 @@
43253 #include <asm/errno.h>
43254 #include <linux/delay.h>
43255 #include <linux/slab.h>
43256+#include <linux/sched.h>
43257 #include <linux/mtd/mtd.h>
43258 #include <linux/mtd/nand.h>
43259 #include <linux/mtd/nftl.h>
43260diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
43261index f9d5615..99dd95f 100644
43262--- a/drivers/mtd/sm_ftl.c
43263+++ b/drivers/mtd/sm_ftl.c
43264@@ -56,7 +56,7 @@ ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
43265 #define SM_CIS_VENDOR_OFFSET 0x59
43266 struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
43267 {
43268- struct attribute_group *attr_group;
43269+ attribute_group_no_const *attr_group;
43270 struct attribute **attributes;
43271 struct sm_sysfs_attribute *vendor_attribute;
43272
43273diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
43274index f975696..4597e21 100644
43275--- a/drivers/net/bonding/bond_main.c
43276+++ b/drivers/net/bonding/bond_main.c
43277@@ -4870,7 +4870,7 @@ static unsigned int bond_get_num_tx_queues(void)
43278 return tx_queues;
43279 }
43280
43281-static struct rtnl_link_ops bond_link_ops __read_mostly = {
43282+static struct rtnl_link_ops bond_link_ops = {
43283 .kind = "bond",
43284 .priv_size = sizeof(struct bonding),
43285 .setup = bond_setup,
43286@@ -4995,8 +4995,8 @@ static void __exit bonding_exit(void)
43287
43288 bond_destroy_debugfs();
43289
43290- rtnl_link_unregister(&bond_link_ops);
43291 unregister_pernet_subsys(&bond_net_ops);
43292+ rtnl_link_unregister(&bond_link_ops);
43293
43294 #ifdef CONFIG_NET_POLL_CONTROLLER
43295 /*
43296diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
43297index e1d2643..7f4133b 100644
43298--- a/drivers/net/ethernet/8390/ax88796.c
43299+++ b/drivers/net/ethernet/8390/ax88796.c
43300@@ -872,9 +872,11 @@ static int ax_probe(struct platform_device *pdev)
43301 if (ax->plat->reg_offsets)
43302 ei_local->reg_offset = ax->plat->reg_offsets;
43303 else {
43304+ resource_size_t _mem_size = mem_size;
43305+ do_div(_mem_size, 0x18);
43306 ei_local->reg_offset = ax->reg_offsets;
43307 for (ret = 0; ret < 0x18; ret++)
43308- ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
43309+ ax->reg_offsets[ret] = _mem_size * ret;
43310 }
43311
43312 if (!request_mem_region(mem->start, mem_size, pdev->name)) {
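[note] The ax88796 hunk avoids a plain / on mem_size, a resource_size_t that is 64 bits wide when CONFIG_PHYS_ADDR_T_64BIT is set. On 32-bit targets gcc lowers 64-bit division to a libgcc helper (__udivdi3) that the kernel does not link, so the kernel convention is do_div(n, base): divide the 64-bit lvalue in place, return the remainder. A user-space model of that contract:

  /* Model of the kernel's do_div(); in the kernel it is an
   * arch-provided macro with the same in-place semantics. */
  #include <stdint.h>
  #include <stdio.h>

  #define do_div(n, base) ({              \
  	uint32_t __rem = (n) % (base);  \
  	(n) /= (base);                  \
  	__rem;                          \
  })

  int main(void)
  {
  	uint64_t mem_size = 0x1000;     /* hypothetical region size */
  	uint64_t step = mem_size;

  	do_div(step, 0x18);             /* step = mem_size / 0x18 */
  	for (int i = 0; i < 0x18; i++)
  		printf("offset[%d] = %llu\n", i,
  		       (unsigned long long)(step * i));
  	return 0;
  }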
43313diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
43314index 151675d..0139a9d 100644
43315--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
43316+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
43317@@ -1112,7 +1112,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
43318 static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
43319 {
43320 /* RX_MODE controlling object */
43321- bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
43322+ bnx2x_init_rx_mode_obj(bp);
43323
43324 /* multicast configuration controlling object */
43325 bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
43326diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
43327index ce1a916..10b52b0 100644
43328--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
43329+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
43330@@ -960,6 +960,9 @@ static int bnx2x_set_dump(struct net_device *dev, struct ethtool_dump *val)
43331 struct bnx2x *bp = netdev_priv(dev);
43332
43333 /* Use the ethtool_dump "flag" field as the dump preset index */
43334+ if (val->flag < 1 || val->flag > DUMP_MAX_PRESETS)
43335+ return -EINVAL;
43336+
43337 bp->dump_preset_idx = val->flag;
43338 return 0;
43339 }
43340@@ -986,8 +989,6 @@ static int bnx2x_get_dump_data(struct net_device *dev,
43341 struct bnx2x *bp = netdev_priv(dev);
43342 struct dump_header dump_hdr = {0};
43343
43344- memset(p, 0, dump->len);
43345-
43346 /* Disable parity attentions as long as following dump may
43347 * cause false alarms by reading never written registers. We
43348 * will re-enable parity attentions right after the dump.
43349diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
43350index b4c9dea..2a9927f 100644
43351--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
43352+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
43353@@ -11497,6 +11497,8 @@ static int bnx2x_init_bp(struct bnx2x *bp)
43354 bp->min_msix_vec_cnt = 2;
43355 BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d", bp->min_msix_vec_cnt);
43356
43357+ bp->dump_preset_idx = 1;
43358+
43359 return rc;
43360 }
43361
43362diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
43363index 32a9609..0b1c53a 100644
43364--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
43365+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
43366@@ -2387,15 +2387,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp,
43367 return rc;
43368 }
43369
43370-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
43371- struct bnx2x_rx_mode_obj *o)
43372+void bnx2x_init_rx_mode_obj(struct bnx2x *bp)
43373 {
43374 if (CHIP_IS_E1x(bp)) {
43375- o->wait_comp = bnx2x_empty_rx_mode_wait;
43376- o->config_rx_mode = bnx2x_set_rx_mode_e1x;
43377+ bp->rx_mode_obj.wait_comp = bnx2x_empty_rx_mode_wait;
43378+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x;
43379 } else {
43380- o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
43381- o->config_rx_mode = bnx2x_set_rx_mode_e2;
43382+ bp->rx_mode_obj.wait_comp = bnx2x_wait_rx_mode_comp_e2;
43383+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2;
43384 }
43385 }
43386
43387diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
43388index 43c00bc..dd1d03d 100644
43389--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
43390+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
43391@@ -1321,8 +1321,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp,
43392
43393 /********************* RX MODE ****************/
43394
43395-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
43396- struct bnx2x_rx_mode_obj *o);
43397+void bnx2x_init_rx_mode_obj(struct bnx2x *bp);
43398
43399 /**
43400 * bnx2x_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters.
43401diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
43402index ff6e30e..87e8452 100644
43403--- a/drivers/net/ethernet/broadcom/tg3.h
43404+++ b/drivers/net/ethernet/broadcom/tg3.h
43405@@ -147,6 +147,7 @@
43406 #define CHIPREV_ID_5750_A0 0x4000
43407 #define CHIPREV_ID_5750_A1 0x4001
43408 #define CHIPREV_ID_5750_A3 0x4003
43409+#define CHIPREV_ID_5750_C1 0x4201
43410 #define CHIPREV_ID_5750_C2 0x4202
43411 #define CHIPREV_ID_5752_A0_HW 0x5000
43412 #define CHIPREV_ID_5752_A0 0x6000
43413diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
43414index 71497e8..b650951 100644
43415--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
43416+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
43417@@ -3037,7 +3037,9 @@ static void t3_io_resume(struct pci_dev *pdev)
43418 CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
43419 t3_read_reg(adapter, A_PCIE_PEX_ERR));
43420
43421+ rtnl_lock();
43422 t3_resume_ports(adapter);
43423+ rtnl_unlock();
43424 }
43425
43426 static const struct pci_error_handlers t3_err_handler = {
43427diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
43428index 8cffcdf..aadf043 100644
43429--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
43430+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
43431@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
43432 */
43433 struct l2t_skb_cb {
43434 arp_failure_handler_func arp_failure_handler;
43435-};
43436+} __no_const;
43437
43438 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
43439
43440diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
43441index 4c83003..2a2a5b9 100644
43442--- a/drivers/net/ethernet/dec/tulip/de4x5.c
43443+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
43444@@ -5388,7 +5388,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
43445 for (i=0; i<ETH_ALEN; i++) {
43446 tmp.addr[i] = dev->dev_addr[i];
43447 }
43448- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
43449+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
43450 break;
43451
43452 case DE4X5_SET_HWADDR: /* Set the hardware address */
43453@@ -5428,7 +5428,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
43454 spin_lock_irqsave(&lp->lock, flags);
43455 memcpy(&statbuf, &lp->pktStats, ioc->len);
43456 spin_unlock_irqrestore(&lp->lock, flags);
43457- if (copy_to_user(ioc->data, &statbuf, ioc->len))
43458+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
43459 return -EFAULT;
43460 break;
43461 }
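[note] Both DE4X5 ioctl fixes above follow one rule: ioc->len comes from userspace, and the original code passed it straight to copy_to_user() against a fixed-size stack object, letting a caller read neighbouring stack memory. The patched shape, as a hypothetical wrapper (copy_to_user() is the real kernel API; the helper is illustrative):

  #include <linux/uaccess.h>

  /* Clamp a user-supplied length to the size of the kernel object
   * before copying out, so an oversized ioc->len cannot leak
   * adjacent stack memory. */
  static int copy_bounded_to_user(void __user *dst, const void *src,
  				size_t user_len, size_t obj_len)
  {
  	if (user_len > obj_len)
  		return -EFAULT;
  	return copy_to_user(dst, src, user_len) ? -EFAULT : 0;
  }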
43462diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
43463index 6e43426..1bd8365 100644
43464--- a/drivers/net/ethernet/emulex/benet/be_main.c
43465+++ b/drivers/net/ethernet/emulex/benet/be_main.c
43466@@ -469,7 +469,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
43467
43468 if (wrapped)
43469 newacc += 65536;
43470- ACCESS_ONCE(*acc) = newacc;
43471+ ACCESS_ONCE_RW(*acc) = newacc;
43472 }
43473
43474 void populate_erx_stats(struct be_adapter *adapter,
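[note] ACCESS_ONCE_RW, which recurs through the rest of this patch (ixgbe, sfc, and the ath9k descriptor writers below), pairs with the patch's redefinition of ACCESS_ONCE itself: reads funnel through a pointer-to-const view, so once a structure has been constified an accidental write through ACCESS_ONCE() fails to compile, and intentional writers switch to the _RW spelling. Approximately, as an assumption paraphrasing the patch's compiler.h:

  #define ACCESS_ONCE(x)    (*(const volatile typeof(x) *)&(x)) /* read-only view  */
  #define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))       /* sanctioned write */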
43475diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
43476index 21b85fb..b49e5fc 100644
43477--- a/drivers/net/ethernet/faraday/ftgmac100.c
43478+++ b/drivers/net/ethernet/faraday/ftgmac100.c
43479@@ -31,6 +31,8 @@
43480 #include <linux/netdevice.h>
43481 #include <linux/phy.h>
43482 #include <linux/platform_device.h>
43483+#include <linux/interrupt.h>
43484+#include <linux/irqreturn.h>
43485 #include <net/ip.h>
43486
43487 #include "ftgmac100.h"
43488diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
43489index a6eda8d..935d273 100644
43490--- a/drivers/net/ethernet/faraday/ftmac100.c
43491+++ b/drivers/net/ethernet/faraday/ftmac100.c
43492@@ -31,6 +31,8 @@
43493 #include <linux/module.h>
43494 #include <linux/netdevice.h>
43495 #include <linux/platform_device.h>
43496+#include <linux/interrupt.h>
43497+#include <linux/irqreturn.h>
43498
43499 #include "ftmac100.h"
43500
43501diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
43502index 331987d..3be1135 100644
43503--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
43504+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
43505@@ -776,7 +776,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
43506 }
43507
43508 /* update the base incval used to calculate frequency adjustment */
43509- ACCESS_ONCE(adapter->base_incval) = incval;
43510+ ACCESS_ONCE_RW(adapter->base_incval) = incval;
43511 smp_mb();
43512
43513 /* need lock to prevent incorrect read while modifying cyclecounter */
43514diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
43515index fbe5363..266b4e3 100644
43516--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
43517+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
43518@@ -3461,7 +3461,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
43519 struct __vxge_hw_fifo *fifo;
43520 struct vxge_hw_fifo_config *config;
43521 u32 txdl_size, txdl_per_memblock;
43522- struct vxge_hw_mempool_cbs fifo_mp_callback;
43523+ static struct vxge_hw_mempool_cbs fifo_mp_callback = {
43524+ .item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
43525+ };
43526+
43527 struct __vxge_hw_virtualpath *vpath;
43528
43529 if ((vp == NULL) || (attr == NULL)) {
43530@@ -3544,8 +3547,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
43531 goto exit;
43532 }
43533
43534- fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
43535-
43536 fifo->mempool =
43537 __vxge_hw_mempool_create(vpath->hldev,
43538 fifo->config->memblock_size,
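[note] The vxge change converts a callback struct that used to be assembled on the stack into a static, designator-initialized object. A stack-resident table of function pointers can never be write-protected, and its address escapes into long-lived state (the mempool); a static instance is built once and can live in read-only memory. The shape, with hypothetical names:

  struct mempool_cbs {
  	void (*item_func_alloc)(void *item);
  };

  static void item_alloc(void *item) { (void)item; }

  /* 'const' here stands in for what the constify machinery infers. */
  static const struct mempool_cbs fifo_cbs = {
  	.item_func_alloc = item_alloc,
  };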
43539diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
43540index 5e7fb1d..f8d1810 100644
43541--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
43542+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
43543@@ -1948,7 +1948,9 @@ int qlcnic_83xx_config_default_opmode(struct qlcnic_adapter *adapter)
43544 op_mode = QLC_83XX_DEFAULT_OPMODE;
43545
43546 if (op_mode == QLC_83XX_DEFAULT_OPMODE) {
43547- adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
43548+ pax_open_kernel();
43549+ *(void **)&adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
43550+ pax_close_kernel();
43551 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
43552 } else {
43553 return -EIO;
43554diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
43555index b0c3de9..fc5857e 100644
43556--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
43557+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
43558@@ -200,15 +200,21 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
43559 if (priv_level == QLCNIC_NON_PRIV_FUNC) {
43560 ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
43561 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
43562- nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
43563+ pax_open_kernel();
43564+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
43565+ pax_close_kernel();
43566 } else if (priv_level == QLCNIC_PRIV_FUNC) {
43567 ahw->op_mode = QLCNIC_PRIV_FUNC;
43568 ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry;
43569- nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
43570+ pax_open_kernel();
43571+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
43572+ pax_close_kernel();
43573 } else if (priv_level == QLCNIC_MGMT_FUNC) {
43574 ahw->op_mode = QLCNIC_MGMT_FUNC;
43575 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
43576- nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
43577+ pax_open_kernel();
43578+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
43579+ pax_close_kernel();
43580 } else {
43581 return -EIO;
43582 }
43583diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
43584index 6acf82b..14b097e 100644
43585--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
43586+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
43587@@ -206,10 +206,10 @@ int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *adapter)
43588 if (err) {
43589 dev_info(&adapter->pdev->dev,
43590 "Failed to set driver version in firmware\n");
43591- return -EIO;
43592+ err = -EIO;
43593 }
43594-
43595- return 0;
43596+ qlcnic_free_mbx_args(&cmd);
43597+ return err;
43598 }
43599
43600 int
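[note] Independent of the hardening, the qlcnic_fw_cmd_set_drv_version() hunk above is a plain bug fix: the early return -EIO skipped qlcnic_free_mbx_args(), leaking the mailbox arguments, and the success path returned a hard-coded 0. The patched control flow records the error and falls through to one shared cleanup, sketched here with hypothetical stand-ins for the mailbox helpers:

  /* Single-exit cleanup: note the failure, but always release. */
  struct args { int dummy; };

  static int alloc_args(struct args *a) { a->dummy = 0; return 0; }
  static int issue_cmd(struct args *a)  { (void)a; return -1; }
  static void free_args(struct args *a) { (void)a; }

  static int do_cmd(void)
  {
  	struct args cmd;
  	int err = alloc_args(&cmd);

  	if (err)
  		return err;

  	if (issue_cmd(&cmd))
  		err = -5;       /* -EIO: record it, don't return yet */

  	free_args(&cmd);        /* runs on success and failure alike */
  	return err;
  }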
43601diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
43602index d3f8797..82a03d3 100644
43603--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
43604+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
43605@@ -262,7 +262,7 @@ void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
43606
43607 mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
43608 mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
43609- memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN);
43610+ memcpy(mac_req->mac_addr, uaddr, ETH_ALEN);
43611
43612 vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
43613 vlan_req->vlan_id = cpu_to_le16(vlan_id);
43614diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
43615index 887aebe..9095ff9 100644
43616--- a/drivers/net/ethernet/realtek/8139cp.c
43617+++ b/drivers/net/ethernet/realtek/8139cp.c
43618@@ -524,6 +524,7 @@ rx_status_loop:
43619 PCI_DMA_FROMDEVICE);
43620 if (dma_mapping_error(&cp->pdev->dev, new_mapping)) {
43621 dev->stats.rx_dropped++;
43622+ kfree_skb(new_skb);
43623 goto rx_next;
43624 }
43625
43626diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
43627index 393f961..d343034 100644
43628--- a/drivers/net/ethernet/realtek/r8169.c
43629+++ b/drivers/net/ethernet/realtek/r8169.c
43630@@ -753,22 +753,22 @@ struct rtl8169_private {
43631 struct mdio_ops {
43632 void (*write)(struct rtl8169_private *, int, int);
43633 int (*read)(struct rtl8169_private *, int);
43634- } mdio_ops;
43635+ } __no_const mdio_ops;
43636
43637 struct pll_power_ops {
43638 void (*down)(struct rtl8169_private *);
43639 void (*up)(struct rtl8169_private *);
43640- } pll_power_ops;
43641+ } __no_const pll_power_ops;
43642
43643 struct jumbo_ops {
43644 void (*enable)(struct rtl8169_private *);
43645 void (*disable)(struct rtl8169_private *);
43646- } jumbo_ops;
43647+ } __no_const jumbo_ops;
43648
43649 struct csi_ops {
43650 void (*write)(struct rtl8169_private *, int, int);
43651 u32 (*read)(struct rtl8169_private *, int);
43652- } csi_ops;
43653+ } __no_const csi_ops;
43654
43655 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
43656 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
43657diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
43658index 9a95abf..36df7f9 100644
43659--- a/drivers/net/ethernet/sfc/ptp.c
43660+++ b/drivers/net/ethernet/sfc/ptp.c
43661@@ -535,7 +535,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
43662 (u32)((u64)ptp->start.dma_addr >> 32));
43663
43664 /* Clear flag that signals MC ready */
43665- ACCESS_ONCE(*start) = 0;
43666+ ACCESS_ONCE_RW(*start) = 0;
43667 efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
43668 MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
43669
43670diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
43671index 50617c5..b13724c 100644
43672--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
43673+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
43674@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
43675
43676 writel(value, ioaddr + MMC_CNTRL);
43677
43678- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
43679- MMC_CNTRL, value);
43680+// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
43681+// MMC_CNTRL, value);
43682 }
43683
43684 /* To mask all all interrupts.*/
43685diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
43686index e6fe0d8..2b7d752 100644
43687--- a/drivers/net/hyperv/hyperv_net.h
43688+++ b/drivers/net/hyperv/hyperv_net.h
43689@@ -101,7 +101,7 @@ struct rndis_device {
43690
43691 enum rndis_device_state state;
43692 bool link_state;
43693- atomic_t new_req_id;
43694+ atomic_unchecked_t new_req_id;
43695
43696 spinlock_t request_lock;
43697 struct list_head req_list;
43698diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
43699index 0775f0a..d4fb316 100644
43700--- a/drivers/net/hyperv/rndis_filter.c
43701+++ b/drivers/net/hyperv/rndis_filter.c
43702@@ -104,7 +104,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
43703 * template
43704 */
43705 set = &rndis_msg->msg.set_req;
43706- set->req_id = atomic_inc_return(&dev->new_req_id);
43707+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
43708
43709 /* Add to the request list */
43710 spin_lock_irqsave(&dev->request_lock, flags);
43711@@ -752,7 +752,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
43712
43713 /* Setup the rndis set */
43714 halt = &request->request_msg.msg.halt_req;
43715- halt->req_id = atomic_inc_return(&dev->new_req_id);
43716+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
43717
43718 /* Ignore return since this msg is optional. */
43719 rndis_filter_send_request(dev, request);
43720diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c
43721index bf0d55e..82bcfbd1 100644
43722--- a/drivers/net/ieee802154/fakehard.c
43723+++ b/drivers/net/ieee802154/fakehard.c
43724@@ -364,7 +364,7 @@ static int ieee802154fake_probe(struct platform_device *pdev)
43725 phy->transmit_power = 0xbf;
43726
43727 dev->netdev_ops = &fake_ops;
43728- dev->ml_priv = &fake_mlme;
43729+ dev->ml_priv = (void *)&fake_mlme;
43730
43731 priv = netdev_priv(dev);
43732 priv->phy = phy;
43733diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
43734index 6e91931..2b0ebe7 100644
43735--- a/drivers/net/macvlan.c
43736+++ b/drivers/net/macvlan.c
43737@@ -905,13 +905,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
43738 int macvlan_link_register(struct rtnl_link_ops *ops)
43739 {
43740 /* common fields */
43741- ops->priv_size = sizeof(struct macvlan_dev);
43742- ops->validate = macvlan_validate;
43743- ops->maxtype = IFLA_MACVLAN_MAX;
43744- ops->policy = macvlan_policy;
43745- ops->changelink = macvlan_changelink;
43746- ops->get_size = macvlan_get_size;
43747- ops->fill_info = macvlan_fill_info;
43748+ pax_open_kernel();
43749+ *(size_t *)&ops->priv_size = sizeof(struct macvlan_dev);
43750+ *(void **)&ops->validate = macvlan_validate;
43751+ *(int *)&ops->maxtype = IFLA_MACVLAN_MAX;
43752+ *(const void **)&ops->policy = macvlan_policy;
43753+ *(void **)&ops->changelink = macvlan_changelink;
43754+ *(void **)&ops->get_size = macvlan_get_size;
43755+ *(void **)&ops->fill_info = macvlan_fill_info;
43756+ pax_close_kernel();
43757
43758 return rtnl_link_register(ops);
43759 };
43760@@ -967,7 +969,7 @@ static int macvlan_device_event(struct notifier_block *unused,
43761 return NOTIFY_DONE;
43762 }
43763
43764-static struct notifier_block macvlan_notifier_block __read_mostly = {
43765+static struct notifier_block macvlan_notifier_block = {
43766 .notifier_call = macvlan_device_event,
43767 };
43768
43769diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
43770index 523d6b2..5e16aa1 100644
43771--- a/drivers/net/macvtap.c
43772+++ b/drivers/net/macvtap.c
43773@@ -1110,7 +1110,7 @@ static int macvtap_device_event(struct notifier_block *unused,
43774 return NOTIFY_DONE;
43775 }
43776
43777-static struct notifier_block macvtap_notifier_block __read_mostly = {
43778+static struct notifier_block macvtap_notifier_block = {
43779 .notifier_call = macvtap_device_event,
43780 };
43781
43782diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c
43783index daec9b0..6428fcb 100644
43784--- a/drivers/net/phy/mdio-bitbang.c
43785+++ b/drivers/net/phy/mdio-bitbang.c
43786@@ -234,6 +234,7 @@ void free_mdio_bitbang(struct mii_bus *bus)
43787 struct mdiobb_ctrl *ctrl = bus->priv;
43788
43789 module_put(ctrl->ops->owner);
43790+ mdiobus_unregister(bus);
43791 mdiobus_free(bus);
43792 }
43793 EXPORT_SYMBOL(free_mdio_bitbang);
43794diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
43795index 72ff14b..11d442d 100644
43796--- a/drivers/net/ppp/ppp_generic.c
43797+++ b/drivers/net/ppp/ppp_generic.c
43798@@ -999,7 +999,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
43799 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
43800 struct ppp_stats stats;
43801 struct ppp_comp_stats cstats;
43802- char *vers;
43803
43804 switch (cmd) {
43805 case SIOCGPPPSTATS:
43806@@ -1021,8 +1020,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
43807 break;
43808
43809 case SIOCGPPPVER:
43810- vers = PPP_VERSION;
43811- if (copy_to_user(addr, vers, strlen(vers) + 1))
43812+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
43813 break;
43814 err = 0;
43815 break;
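[note] The SIOCGPPPVER fix trades a runtime strlen() on a temporary pointer for sizeof on the string literal itself: applied to an array, sizeof is a compile-time constant and already counts the terminating NUL, which is exactly what the user copy needs. A quick demonstration; the PPP_VERSION value is assumed from ppp_generic.c for illustration:

  #include <stdio.h>
  #include <string.h>

  #define PPP_VERSION "2.4.2"

  int main(void)
  {
  	/* sizeof counts the NUL; strlen does not. */
  	printf("sizeof = %zu, strlen = %zu\n",
  	       sizeof(PPP_VERSION), strlen(PPP_VERSION));
  	return 0;   /* prints: sizeof = 6, strlen = 5 */
  }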
43816diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
43817index 1252d9c..80e660b 100644
43818--- a/drivers/net/slip/slhc.c
43819+++ b/drivers/net/slip/slhc.c
43820@@ -488,7 +488,7 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
43821 register struct tcphdr *thp;
43822 register struct iphdr *ip;
43823 register struct cstate *cs;
43824- int len, hdrlen;
43825+ long len, hdrlen;
43826 unsigned char *cp = icp;
43827
43828 /* We've got a compressed packet; read the change byte */
43829diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
43830index b305105..8ead6df 100644
43831--- a/drivers/net/team/team.c
43832+++ b/drivers/net/team/team.c
43833@@ -2682,7 +2682,7 @@ static int team_device_event(struct notifier_block *unused,
43834 return NOTIFY_DONE;
43835 }
43836
43837-static struct notifier_block team_notifier_block __read_mostly = {
43838+static struct notifier_block team_notifier_block = {
43839 .notifier_call = team_device_event,
43840 };
43841
43842diff --git a/drivers/net/tun.c b/drivers/net/tun.c
43843index 2491eb2..1a453eb 100644
43844--- a/drivers/net/tun.c
43845+++ b/drivers/net/tun.c
43846@@ -1076,8 +1076,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
43847 u32 rxhash;
43848
43849 if (!(tun->flags & TUN_NO_PI)) {
43850- if ((len -= sizeof(pi)) > total_len)
43851+ if (len < sizeof(pi))
43852 return -EINVAL;
43853+ len -= sizeof(pi);
43854
43855 if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi)))
43856 return -EFAULT;
43857@@ -1085,8 +1086,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
43858 }
43859
43860 if (tun->flags & TUN_VNET_HDR) {
43861- if ((len -= tun->vnet_hdr_sz) > total_len)
43862+ if (len < tun->vnet_hdr_sz)
43863 return -EINVAL;
43864+ len -= tun->vnet_hdr_sz;
43865
43866 if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso)))
43867 return -EFAULT;
43868@@ -1869,7 +1871,7 @@ unlock:
43869 }
43870
43871 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
43872- unsigned long arg, int ifreq_len)
43873+ unsigned long arg, size_t ifreq_len)
43874 {
43875 struct tun_file *tfile = file->private_data;
43876 struct tun_struct *tun;
43877@@ -1881,6 +1883,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
43878 int vnet_hdr_sz;
43879 int ret;
43880
43881+ if (ifreq_len > sizeof ifr)
43882+ return -EFAULT;
43883+
43884 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
43885 if (copy_from_user(&ifr, argp, ifreq_len))
43886 return -EFAULT;
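[note] The three tun hunks close two length-handling holes. In tun_get_user(), (len -= sizeof(pi)) > total_len subtracts before checking, and since len is unsigned a too-short packet wraps around to a huge value; the fix tests the bound first and subtracts second. In __tun_chr_ioctl(), ifreq_len becomes a size_t and is rejected when it exceeds sizeof(ifr), so an oversized length can no longer overflow the on-stack ifreq. The underflow in miniature:

  #include <stdio.h>

  int main(void)
  {
  	size_t len = 2, hdr = 4;

  	/* Wrong order: (len -= hdr) wraps toward SIZE_MAX, so a
  	 * "> total" check waves the short packet through. Test
  	 * the bound first, subtract second. */
  	if (len < hdr)
  		puts("reject: packet shorter than header");
  	else
  		len -= hdr;
  	return 0;
  }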
43887diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
43888index cba1d46..f703766 100644
43889--- a/drivers/net/usb/hso.c
43890+++ b/drivers/net/usb/hso.c
43891@@ -71,7 +71,7 @@
43892 #include <asm/byteorder.h>
43893 #include <linux/serial_core.h>
43894 #include <linux/serial.h>
43895-
43896+#include <asm/local.h>
43897
43898 #define MOD_AUTHOR "Option Wireless"
43899 #define MOD_DESCRIPTION "USB High Speed Option driver"
43900@@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
43901 struct urb *urb;
43902
43903 urb = serial->rx_urb[0];
43904- if (serial->port.count > 0) {
43905+ if (atomic_read(&serial->port.count) > 0) {
43906 count = put_rxbuf_data(urb, serial);
43907 if (count == -1)
43908 return;
43909@@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
43910 DUMP1(urb->transfer_buffer, urb->actual_length);
43911
43912 /* Anyone listening? */
43913- if (serial->port.count == 0)
43914+ if (atomic_read(&serial->port.count) == 0)
43915 return;
43916
43917 if (status == 0) {
43918@@ -1298,8 +1298,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
43919 tty_port_tty_set(&serial->port, tty);
43920
43921 /* check for port already opened, if not set the termios */
43922- serial->port.count++;
43923- if (serial->port.count == 1) {
43924+ if (atomic_inc_return(&serial->port.count) == 1) {
43925 serial->rx_state = RX_IDLE;
43926 /* Force default termio settings */
43927 _hso_serial_set_termios(tty, NULL);
43928@@ -1311,7 +1310,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
43929 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
43930 if (result) {
43931 hso_stop_serial_device(serial->parent);
43932- serial->port.count--;
43933+ atomic_dec(&serial->port.count);
43934 kref_put(&serial->parent->ref, hso_serial_ref_free);
43935 }
43936 } else {
43937@@ -1348,10 +1347,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
43938
43939 /* reset the rts and dtr */
43940 /* do the actual close */
43941- serial->port.count--;
43942+ atomic_dec(&serial->port.count);
43943
43944- if (serial->port.count <= 0) {
43945- serial->port.count = 0;
43946+ if (atomic_read(&serial->port.count) <= 0) {
43947+ atomic_set(&serial->port.count, 0);
43948 tty_port_tty_set(&serial->port, NULL);
43949 if (!usb_gone)
43950 hso_stop_serial_device(serial->parent);
43951@@ -1427,7 +1426,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
43952
43953 /* the actual setup */
43954 spin_lock_irqsave(&serial->serial_lock, flags);
43955- if (serial->port.count)
43956+ if (atomic_read(&serial->port.count))
43957 _hso_serial_set_termios(tty, old);
43958 else
43959 tty->termios = *old;
43960@@ -1886,7 +1885,7 @@ static void intr_callback(struct urb *urb)
43961 D1("Pending read interrupt on port %d\n", i);
43962 spin_lock(&serial->serial_lock);
43963 if (serial->rx_state == RX_IDLE &&
43964- serial->port.count > 0) {
43965+ atomic_read(&serial->port.count) > 0) {
43966 /* Setup and send a ctrl req read on
43967 * port i */
43968 if (!serial->rx_urb_filled[0]) {
43969@@ -3057,7 +3056,7 @@ static int hso_resume(struct usb_interface *iface)
43970 /* Start all serial ports */
43971 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
43972 if (serial_table[i] && (serial_table[i]->interface == iface)) {
43973- if (dev2ser(serial_table[i])->port.count) {
43974+ if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
43975 result =
43976 hso_start_serial_device(serial_table[i], GFP_NOIO);
43977 hso_kick_transmit(dev2ser(serial_table[i]));
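[note] The long run of hso changes turns port.count into an atomic. Open and close used to read-modify-write a plain int, and the "first opener" test (count++ followed by count == 1) was two separate steps; atomic_inc_return() collapses update and test into one indivisible operation. The same idea in C11 atomics, as a user-space model:

  #include <stdatomic.h>
  #include <stdio.h>

  static atomic_int port_count;

  static void port_open(void)
  {
  	/* fetch_add returns the old value: old 0 means first opener. */
  	if (atomic_fetch_add(&port_count, 1) == 0)
  		puts("first open: start the device");
  }

  static void port_close(void)
  {
  	if (atomic_fetch_sub(&port_count, 1) == 1)
  		puts("last close: stop the device");
  }

  int main(void)
  {
  	port_open();
  	port_open();
  	port_close();
  	port_close();
  	return 0;
  }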
43978diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
43979index 57325f3..36b181f 100644
43980--- a/drivers/net/vxlan.c
43981+++ b/drivers/net/vxlan.c
43982@@ -1579,7 +1579,7 @@ nla_put_failure:
43983 return -EMSGSIZE;
43984 }
43985
43986-static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
43987+static struct rtnl_link_ops vxlan_link_ops = {
43988 .kind = "vxlan",
43989 .maxtype = IFLA_VXLAN_MAX,
43990 .policy = vxlan_policy,
43991diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
43992index 34c8a33..3261fdc 100644
43993--- a/drivers/net/wireless/at76c50x-usb.c
43994+++ b/drivers/net/wireless/at76c50x-usb.c
43995@@ -353,7 +353,7 @@ static int at76_dfu_get_state(struct usb_device *udev, u8 *state)
43996 }
43997
43998 /* Convert timeout from the DFU status to jiffies */
43999-static inline unsigned long at76_get_timeout(struct dfu_status *s)
44000+static inline unsigned long __intentional_overflow(-1) at76_get_timeout(struct dfu_status *s)
44001 {
44002 return msecs_to_jiffies((s->poll_timeout[2] << 16)
44003 | (s->poll_timeout[1] << 8)
44004diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
44005index 8d78253..bebbb68 100644
44006--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
44007+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
44008@@ -184,8 +184,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
44009 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
44010 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
44011
44012- ACCESS_ONCE(ads->ds_link) = i->link;
44013- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
44014+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
44015+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
44016
44017 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
44018 ctl6 = SM(i->keytype, AR_EncrType);
44019@@ -199,26 +199,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
44020
44021 if ((i->is_first || i->is_last) &&
44022 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
44023- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
44024+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
44025 | set11nTries(i->rates, 1)
44026 | set11nTries(i->rates, 2)
44027 | set11nTries(i->rates, 3)
44028 | (i->dur_update ? AR_DurUpdateEna : 0)
44029 | SM(0, AR_BurstDur);
44030
44031- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
44032+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
44033 | set11nRate(i->rates, 1)
44034 | set11nRate(i->rates, 2)
44035 | set11nRate(i->rates, 3);
44036 } else {
44037- ACCESS_ONCE(ads->ds_ctl2) = 0;
44038- ACCESS_ONCE(ads->ds_ctl3) = 0;
44039+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
44040+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
44041 }
44042
44043 if (!i->is_first) {
44044- ACCESS_ONCE(ads->ds_ctl0) = 0;
44045- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
44046- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
44047+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
44048+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
44049+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
44050 return;
44051 }
44052
44053@@ -243,7 +243,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
44054 break;
44055 }
44056
44057- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
44058+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
44059 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
44060 | SM(i->txpower, AR_XmitPower)
44061 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
44062@@ -253,19 +253,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
44063 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
44064 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
44065
44066- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
44067- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
44068+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
44069+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
44070
44071 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
44072 return;
44073
44074- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
44075+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
44076 | set11nPktDurRTSCTS(i->rates, 1);
44077
44078- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
44079+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
44080 | set11nPktDurRTSCTS(i->rates, 3);
44081
44082- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
44083+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
44084 | set11nRateFlags(i->rates, 1)
44085 | set11nRateFlags(i->rates, 2)
44086 | set11nRateFlags(i->rates, 3)
44087diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
44088index 301bf72..3f5654f 100644
44089--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
44090+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
44091@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
44092 (i->qcu << AR_TxQcuNum_S) | desc_len;
44093
44094 checksum += val;
44095- ACCESS_ONCE(ads->info) = val;
44096+ ACCESS_ONCE_RW(ads->info) = val;
44097
44098 checksum += i->link;
44099- ACCESS_ONCE(ads->link) = i->link;
44100+ ACCESS_ONCE_RW(ads->link) = i->link;
44101
44102 checksum += i->buf_addr[0];
44103- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
44104+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
44105 checksum += i->buf_addr[1];
44106- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
44107+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
44108 checksum += i->buf_addr[2];
44109- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
44110+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
44111 checksum += i->buf_addr[3];
44112- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
44113+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
44114
44115 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
44116- ACCESS_ONCE(ads->ctl3) = val;
44117+ ACCESS_ONCE_RW(ads->ctl3) = val;
44118 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
44119- ACCESS_ONCE(ads->ctl5) = val;
44120+ ACCESS_ONCE_RW(ads->ctl5) = val;
44121 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
44122- ACCESS_ONCE(ads->ctl7) = val;
44123+ ACCESS_ONCE_RW(ads->ctl7) = val;
44124 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
44125- ACCESS_ONCE(ads->ctl9) = val;
44126+ ACCESS_ONCE_RW(ads->ctl9) = val;
44127
44128 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
44129- ACCESS_ONCE(ads->ctl10) = checksum;
44130+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
44131
44132 if (i->is_first || i->is_last) {
44133- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
44134+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
44135 | set11nTries(i->rates, 1)
44136 | set11nTries(i->rates, 2)
44137 | set11nTries(i->rates, 3)
44138 | (i->dur_update ? AR_DurUpdateEna : 0)
44139 | SM(0, AR_BurstDur);
44140
44141- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
44142+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
44143 | set11nRate(i->rates, 1)
44144 | set11nRate(i->rates, 2)
44145 | set11nRate(i->rates, 3);
44146 } else {
44147- ACCESS_ONCE(ads->ctl13) = 0;
44148- ACCESS_ONCE(ads->ctl14) = 0;
44149+ ACCESS_ONCE_RW(ads->ctl13) = 0;
44150+ ACCESS_ONCE_RW(ads->ctl14) = 0;
44151 }
44152
44153 ads->ctl20 = 0;
44154@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
44155
44156 ctl17 = SM(i->keytype, AR_EncrType);
44157 if (!i->is_first) {
44158- ACCESS_ONCE(ads->ctl11) = 0;
44159- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
44160- ACCESS_ONCE(ads->ctl15) = 0;
44161- ACCESS_ONCE(ads->ctl16) = 0;
44162- ACCESS_ONCE(ads->ctl17) = ctl17;
44163- ACCESS_ONCE(ads->ctl18) = 0;
44164- ACCESS_ONCE(ads->ctl19) = 0;
44165+ ACCESS_ONCE_RW(ads->ctl11) = 0;
44166+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
44167+ ACCESS_ONCE_RW(ads->ctl15) = 0;
44168+ ACCESS_ONCE_RW(ads->ctl16) = 0;
44169+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
44170+ ACCESS_ONCE_RW(ads->ctl18) = 0;
44171+ ACCESS_ONCE_RW(ads->ctl19) = 0;
44172 return;
44173 }
44174
44175- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
44176+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
44177 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
44178 | SM(i->txpower, AR_XmitPower)
44179 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
44180@@ -135,22 +135,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
44181 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
44182 ctl12 |= SM(val, AR_PAPRDChainMask);
44183
44184- ACCESS_ONCE(ads->ctl12) = ctl12;
44185- ACCESS_ONCE(ads->ctl17) = ctl17;
44186+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
44187+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
44188
44189- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
44190+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
44191 | set11nPktDurRTSCTS(i->rates, 1);
44192
44193- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
44194+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
44195 | set11nPktDurRTSCTS(i->rates, 3);
44196
44197- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
44198+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
44199 | set11nRateFlags(i->rates, 1)
44200 | set11nRateFlags(i->rates, 2)
44201 | set11nRateFlags(i->rates, 3)
44202 | SM(i->rtscts_rate, AR_RTSCTSRate);
44203
44204- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
44205+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
44206 }
44207
44208 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
44209diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
44210index ae30343..a117806 100644
44211--- a/drivers/net/wireless/ath/ath9k/hw.h
44212+++ b/drivers/net/wireless/ath/ath9k/hw.h
44213@@ -652,7 +652,7 @@ struct ath_hw_private_ops {
44214
44215 /* ANI */
44216 void (*ani_cache_ini_regs)(struct ath_hw *ah);
44217-};
44218+} __no_const;
44219
44220 /**
44221 * struct ath_spec_scan - parameters for Atheros spectral scan
44222@@ -721,7 +721,7 @@ struct ath_hw_ops {
44223 struct ath_spec_scan *param);
44224 void (*spectral_scan_trigger)(struct ath_hw *ah);
44225 void (*spectral_scan_wait)(struct ath_hw *ah);
44226-};
44227+} __no_const;
44228
44229 struct ath_nf_limits {
44230 s16 max;
44231diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
44232index ac07473..e509030 100644
44233--- a/drivers/net/wireless/hostap/hostap_ioctl.c
44234+++ b/drivers/net/wireless/hostap/hostap_ioctl.c
44235@@ -523,9 +523,9 @@ static int prism2_ioctl_giwaplist(struct net_device *dev,
44236
44237 data->length = prism2_ap_get_sta_qual(local, addr, qual, IW_MAX_AP, 1);
44238
44239- memcpy(extra, &addr, sizeof(struct sockaddr) * data->length);
44240+ memcpy(extra, addr, sizeof(struct sockaddr) * data->length);
44241 data->flags = 1; /* has quality information */
44242- memcpy(extra + sizeof(struct sockaddr) * data->length, &qual,
44243+ memcpy(extra + sizeof(struct sockaddr) * data->length, qual,
44244 sizeof(struct iw_quality) * data->length);
44245
44246 kfree(addr);
44247diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
44248index b37a582..680835d 100644
44249--- a/drivers/net/wireless/iwlegacy/3945-mac.c
44250+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
44251@@ -3639,7 +3639,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
44252 */
44253 if (il3945_mod_params.disable_hw_scan) {
44254 D_INFO("Disabling hw_scan\n");
44255- il3945_mac_ops.hw_scan = NULL;
44256+ pax_open_kernel();
44257+ *(void **)&il3945_mac_ops.hw_scan = NULL;
44258+ pax_close_kernel();
44259 }
44260
44261 D_INFO("*** LOAD DRIVER ***\n");
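
il3945_mac_ops is now const-ified, so NULLing .hw_scan requires briefly re-enabling writes to read-only kernel data. pax_open_kernel()/pax_close_kernel() bracket exactly that; on x86 the usual implementation toggles CR0.WP with preemption disabled, so only the current CPU can write protected pages inside the window. A kernel-side sketch of the x86 flavour, matching the pattern this patch relies on (specifics such as the BUG_ON checks are assumptions, not a definitive implementation):

/* Kernel-side sketch (x86); not a standalone program. */
static inline unsigned long native_pax_open_kernel(void)
{
	unsigned long cr0;

	preempt_disable();
	barrier();
	cr0 = read_cr0() ^ X86_CR0_WP;	/* clear WP: ro pages become writable */
	BUG_ON(cr0 & X86_CR0_WP);	/* sanity: WP must have been set */
	write_cr0(cr0);
	return cr0 ^ X86_CR0_WP;
}

static inline unsigned long native_pax_close_kernel(void)
{
	unsigned long cr0;

	cr0 = read_cr0() ^ X86_CR0_WP;	/* set WP again */
	BUG_ON(!(cr0 & X86_CR0_WP));
	write_cr0(cr0);
	barrier();
	preempt_enable_no_resched();
	return cr0 ^ X86_CR0_WP;
}

The same open/write/close bracket recurs throughout the rest of this patch (mac80211_hwsim, wl1251, wl12xx, wl18xx, msi-laptop, the regulator drivers, and so on).
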
44262diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
44263index d532948..e0d8bb1 100644
44264--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
44265+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
44266@@ -203,7 +203,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
44267 {
44268 struct iwl_priv *priv = file->private_data;
44269 char buf[64];
44270- int buf_size;
44271+ size_t buf_size;
44272 u32 offset, len;
44273
44274 memset(buf, 0, sizeof(buf));
44275@@ -473,7 +473,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
44276 struct iwl_priv *priv = file->private_data;
44277
44278 char buf[8];
44279- int buf_size;
44280+ size_t buf_size;
44281 u32 reset_flag;
44282
44283 memset(buf, 0, sizeof(buf));
44284@@ -554,7 +554,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
44285 {
44286 struct iwl_priv *priv = file->private_data;
44287 char buf[8];
44288- int buf_size;
44289+ size_t buf_size;
44290 int ht40;
44291
44292 memset(buf, 0, sizeof(buf));
44293@@ -606,7 +606,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
44294 {
44295 struct iwl_priv *priv = file->private_data;
44296 char buf[8];
44297- int buf_size;
44298+ size_t buf_size;
44299 int value;
44300
44301 memset(buf, 0, sizeof(buf));
44302@@ -698,10 +698,10 @@ DEBUGFS_READ_FILE_OPS(temperature);
44303 DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
44304 DEBUGFS_READ_FILE_OPS(current_sleep_command);
44305
44306-static const char *fmt_value = " %-30s %10u\n";
44307-static const char *fmt_hex = " %-30s 0x%02X\n";
44308-static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
44309-static const char *fmt_header =
44310+static const char fmt_value[] = " %-30s %10u\n";
44311+static const char fmt_hex[] = " %-30s 0x%02X\n";
44312+static const char fmt_table[] = " %-30s %10u %10u %10u %10u\n";
44313+static const char fmt_header[] =
44314 "%-32s current cumulative delta max\n";
44315
44316 static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
44317@@ -1871,7 +1871,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
44318 {
44319 struct iwl_priv *priv = file->private_data;
44320 char buf[8];
44321- int buf_size;
44322+ size_t buf_size;
44323 int clear;
44324
44325 memset(buf, 0, sizeof(buf));
44326@@ -1916,7 +1916,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
44327 {
44328 struct iwl_priv *priv = file->private_data;
44329 char buf[8];
44330- int buf_size;
44331+ size_t buf_size;
44332 int trace;
44333
44334 memset(buf, 0, sizeof(buf));
44335@@ -1987,7 +1987,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
44336 {
44337 struct iwl_priv *priv = file->private_data;
44338 char buf[8];
44339- int buf_size;
44340+ size_t buf_size;
44341 int missed;
44342
44343 memset(buf, 0, sizeof(buf));
44344@@ -2028,7 +2028,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
44345
44346 struct iwl_priv *priv = file->private_data;
44347 char buf[8];
44348- int buf_size;
44349+ size_t buf_size;
44350 int plcp;
44351
44352 memset(buf, 0, sizeof(buf));
44353@@ -2088,7 +2088,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
44354
44355 struct iwl_priv *priv = file->private_data;
44356 char buf[8];
44357- int buf_size;
44358+ size_t buf_size;
44359 int flush;
44360
44361 memset(buf, 0, sizeof(buf));
44362@@ -2178,7 +2178,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
44363
44364 struct iwl_priv *priv = file->private_data;
44365 char buf[8];
44366- int buf_size;
44367+ size_t buf_size;
44368 int rts;
44369
44370 if (!priv->cfg->ht_params)
44371@@ -2220,7 +2220,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
44372 {
44373 struct iwl_priv *priv = file->private_data;
44374 char buf[8];
44375- int buf_size;
44376+ size_t buf_size;
44377
44378 memset(buf, 0, sizeof(buf));
44379 buf_size = min(count, sizeof(buf) - 1);
44380@@ -2254,7 +2254,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
44381 struct iwl_priv *priv = file->private_data;
44382 u32 event_log_flag;
44383 char buf[8];
44384- int buf_size;
44385+ size_t buf_size;
44386
44387 /* check that the interface is up */
44388 if (!iwl_is_ready(priv))
44389@@ -2308,7 +2308,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
44390 struct iwl_priv *priv = file->private_data;
44391 char buf[8];
44392 u32 calib_disabled;
44393- int buf_size;
44394+ size_t buf_size;
44395
44396 memset(buf, 0, sizeof(buf));
44397 buf_size = min(count, sizeof(buf) - 1);
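
Every debugfs write handler in this file stores min(count, sizeof(buf) - 1) into buf_size, and count is a size_t: keeping the result in a signed int mixes signedness and silently truncates on 64-bit, so the variable is widened to size_t (the pcie/trans.c hunks below make the same change). The fmt_* strings are also converted from pointers to arrays, so both the bytes and the reference to them are read-only. A small userspace demonstration of why the narrow signed copy is the wrong type (the length value is hypothetical):

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t count  = (size_t)1 << 33;	/* hypothetical huge length (LP64) */
	int    narrow = (int)count;		/* truncates to 0 on LP64 */
	size_t wide   = count;			/* preserves the value */

	printf("int copy:    %d\n", narrow);	/* 0: later bound checks lie */
	printf("size_t copy: %zu\n", wide);	/* 8589934592 */
	return 0;
}
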
44398diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
44399index 50ba0a4..29424e7 100644
44400--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
44401+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
44402@@ -1329,7 +1329,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
44403 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
44404
44405 char buf[8];
44406- int buf_size;
44407+ size_t buf_size;
44408 u32 reset_flag;
44409
44410 memset(buf, 0, sizeof(buf));
44411@@ -1350,7 +1350,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
44412 {
44413 struct iwl_trans *trans = file->private_data;
44414 char buf[8];
44415- int buf_size;
44416+ size_t buf_size;
44417 int csr;
44418
44419 memset(buf, 0, sizeof(buf));
44420diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
44421index cb34c78..9fec0dc 100644
44422--- a/drivers/net/wireless/mac80211_hwsim.c
44423+++ b/drivers/net/wireless/mac80211_hwsim.c
44424@@ -2195,25 +2195,19 @@ static int __init init_mac80211_hwsim(void)
44425
44426 if (channels > 1) {
44427 hwsim_if_comb.num_different_channels = channels;
44428- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
44429- mac80211_hwsim_ops.cancel_hw_scan =
44430- mac80211_hwsim_cancel_hw_scan;
44431- mac80211_hwsim_ops.sw_scan_start = NULL;
44432- mac80211_hwsim_ops.sw_scan_complete = NULL;
44433- mac80211_hwsim_ops.remain_on_channel =
44434- mac80211_hwsim_roc;
44435- mac80211_hwsim_ops.cancel_remain_on_channel =
44436- mac80211_hwsim_croc;
44437- mac80211_hwsim_ops.add_chanctx =
44438- mac80211_hwsim_add_chanctx;
44439- mac80211_hwsim_ops.remove_chanctx =
44440- mac80211_hwsim_remove_chanctx;
44441- mac80211_hwsim_ops.change_chanctx =
44442- mac80211_hwsim_change_chanctx;
44443- mac80211_hwsim_ops.assign_vif_chanctx =
44444- mac80211_hwsim_assign_vif_chanctx;
44445- mac80211_hwsim_ops.unassign_vif_chanctx =
44446- mac80211_hwsim_unassign_vif_chanctx;
44447+ pax_open_kernel();
44448+ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
44449+ *(void **)&mac80211_hwsim_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
44450+ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
44451+ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
44452+ *(void **)&mac80211_hwsim_ops.remain_on_channel = mac80211_hwsim_roc;
44453+ *(void **)&mac80211_hwsim_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
44454+ *(void **)&mac80211_hwsim_ops.add_chanctx = mac80211_hwsim_add_chanctx;
44455+ *(void **)&mac80211_hwsim_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
44456+ *(void **)&mac80211_hwsim_ops.change_chanctx = mac80211_hwsim_change_chanctx;
44457+ *(void **)&mac80211_hwsim_ops.assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx;
44458+ *(void **)&mac80211_hwsim_ops.unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx;
44459+ pax_close_kernel();
44460 }
44461
44462 spin_lock_init(&hwsim_radio_lock);
44463diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
44464index 8169a85..7fa3b47 100644
44465--- a/drivers/net/wireless/rndis_wlan.c
44466+++ b/drivers/net/wireless/rndis_wlan.c
44467@@ -1238,7 +1238,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
44468
44469 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
44470
44471- if (rts_threshold < 0 || rts_threshold > 2347)
44472+ if (rts_threshold > 2347)
44473 rts_threshold = 2347;
44474
44475 tmp = cpu_to_le32(rts_threshold);
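
rts_threshold is a u32, so rts_threshold < 0 can never be true; the dead comparison is dropped, leaving only the meaningful upper-bound clamp (the *irq and *dma checks in drivers/pnp/resource.c below get the identical treatment). A tiny demonstration:

#include <stdio.h>

typedef unsigned int u32;

int main(void)
{
	u32 rts = (u32)-5;	/* a "negative" input wraps to a huge value */

	if (rts < 0)		/* always false for an unsigned type */
		puts("never printed");
	if (rts > 2347)		/* the wrap is caught by the upper bound */
		rts = 2347;
	printf("%u\n", rts);	/* 2347 */
	return 0;
}
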
44476diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
44477index 7510723..5ba37f5 100644
44478--- a/drivers/net/wireless/rt2x00/rt2x00.h
44479+++ b/drivers/net/wireless/rt2x00/rt2x00.h
44480@@ -386,7 +386,7 @@ struct rt2x00_intf {
44481 * for hardware which doesn't support hardware
44482 * sequence counting.
44483 */
44484- atomic_t seqno;
44485+ atomic_unchecked_t seqno;
44486 };
44487
44488 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
44489diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
44490index d955741..8730748 100644
44491--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
44492+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
44493@@ -252,9 +252,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
44494 * sequence counter given by mac80211.
44495 */
44496 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
44497- seqno = atomic_add_return(0x10, &intf->seqno);
44498+ seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
44499 else
44500- seqno = atomic_read(&intf->seqno);
44501+ seqno = atomic_read_unchecked(&intf->seqno);
44502
44503 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
44504 hdr->seq_ctrl |= cpu_to_le16(seqno);
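
PaX's REFCOUNT hardening makes atomic_t trap on overflow rather than wrap, which defeats reference-count-overflow exploits but also breaks anything that wraps by design. Counters like the 802.11 sequence number here, the lost-sample statistics in the oprofile diffs, and the libfc exchange stats further down are therefore migrated to atomic_unchecked_t, whose operations stay uninstrumented. A sketch of the unchecked type (the real definitions are per-architecture; the __sync builtin stands in for the arch-specific op):

/* Kernel-side sketch; mirrors the plain atomic_t layout. */
typedef struct {
	int counter;
} atomic_unchecked_t;

static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
	return (*(volatile const int *)&v->counter);
}

static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
{
	v->counter = i;
}

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	__sync_fetch_and_add(&v->counter, 1);	/* no overflow trap */
}
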
44505diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
44506index e2b3d9c..67a5184 100644
44507--- a/drivers/net/wireless/ti/wl1251/sdio.c
44508+++ b/drivers/net/wireless/ti/wl1251/sdio.c
44509@@ -271,13 +271,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
44510
44511 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
44512
44513- wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
44514- wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
44515+ pax_open_kernel();
44516+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
44517+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
44518+ pax_close_kernel();
44519
44520 wl1251_info("using dedicated interrupt line");
44521 } else {
44522- wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
44523- wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
44524+ pax_open_kernel();
44525+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
44526+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
44527+ pax_close_kernel();
44528
44529 wl1251_info("using SDIO interrupt");
44530 }
44531diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
44532index 1c627da..69f7d17 100644
44533--- a/drivers/net/wireless/ti/wl12xx/main.c
44534+++ b/drivers/net/wireless/ti/wl12xx/main.c
44535@@ -656,7 +656,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
44536 sizeof(wl->conf.mem));
44537
44538 /* read data preparation is only needed by wl127x */
44539- wl->ops->prepare_read = wl127x_prepare_read;
44540+ pax_open_kernel();
44541+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
44542+ pax_close_kernel();
44543
44544 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
44545 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
44546@@ -681,7 +683,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
44547 sizeof(wl->conf.mem));
44548
44549 /* read data preparation is only needed by wl127x */
44550- wl->ops->prepare_read = wl127x_prepare_read;
44551+ pax_open_kernel();
44552+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
44553+ pax_close_kernel();
44554
44555 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
44556 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
44557diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
44558index 9fa692d..b31fee0 100644
44559--- a/drivers/net/wireless/ti/wl18xx/main.c
44560+++ b/drivers/net/wireless/ti/wl18xx/main.c
44561@@ -1687,8 +1687,10 @@ static int wl18xx_setup(struct wl1271 *wl)
44562 }
44563
44564 if (!checksum_param) {
44565- wl18xx_ops.set_rx_csum = NULL;
44566- wl18xx_ops.init_vif = NULL;
44567+ pax_open_kernel();
44568+ *(void **)&wl18xx_ops.set_rx_csum = NULL;
44569+ *(void **)&wl18xx_ops.init_vif = NULL;
44570+ pax_close_kernel();
44571 }
44572
44573 /* Enable 11a Band only if we have 5G antennas */
44574diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
44575index 7ef0b4a..ff65c28 100644
44576--- a/drivers/net/wireless/zd1211rw/zd_usb.c
44577+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
44578@@ -386,7 +386,7 @@ static inline void handle_regs_int(struct urb *urb)
44579 {
44580 struct zd_usb *usb = urb->context;
44581 struct zd_usb_interrupt *intr = &usb->intr;
44582- int len;
44583+ unsigned int len;
44584 u16 int_num;
44585
44586 ZD_ASSERT(in_interrupt());
44587diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
44588index d93b2b6..ae50401 100644
44589--- a/drivers/oprofile/buffer_sync.c
44590+++ b/drivers/oprofile/buffer_sync.c
44591@@ -332,7 +332,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
44592 if (cookie == NO_COOKIE)
44593 offset = pc;
44594 if (cookie == INVALID_COOKIE) {
44595- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
44596+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
44597 offset = pc;
44598 }
44599 if (cookie != last_cookie) {
44600@@ -376,14 +376,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
44601 /* add userspace sample */
44602
44603 if (!mm) {
44604- atomic_inc(&oprofile_stats.sample_lost_no_mm);
44605+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
44606 return 0;
44607 }
44608
44609 cookie = lookup_dcookie(mm, s->eip, &offset);
44610
44611 if (cookie == INVALID_COOKIE) {
44612- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
44613+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
44614 return 0;
44615 }
44616
44617@@ -552,7 +552,7 @@ void sync_buffer(int cpu)
44618 /* ignore backtraces if failed to add a sample */
44619 if (state == sb_bt_start) {
44620 state = sb_bt_ignore;
44621- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
44622+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
44623 }
44624 }
44625 release_mm(mm);
44626diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
44627index c0cc4e7..44d4e54 100644
44628--- a/drivers/oprofile/event_buffer.c
44629+++ b/drivers/oprofile/event_buffer.c
44630@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
44631 }
44632
44633 if (buffer_pos == buffer_size) {
44634- atomic_inc(&oprofile_stats.event_lost_overflow);
44635+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
44636 return;
44637 }
44638
44639diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
44640index ed2c3ec..deda85a 100644
44641--- a/drivers/oprofile/oprof.c
44642+++ b/drivers/oprofile/oprof.c
44643@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
44644 if (oprofile_ops.switch_events())
44645 return;
44646
44647- atomic_inc(&oprofile_stats.multiplex_counter);
44648+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
44649 start_switch_worker();
44650 }
44651
44652diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
44653index 84a208d..d61b0a1 100644
44654--- a/drivers/oprofile/oprofile_files.c
44655+++ b/drivers/oprofile/oprofile_files.c
44656@@ -27,7 +27,7 @@ unsigned long oprofile_time_slice;
44657
44658 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
44659
44660-static ssize_t timeout_read(struct file *file, char __user *buf,
44661+static ssize_t __intentional_overflow(-1) timeout_read(struct file *file, char __user *buf,
44662 size_t count, loff_t *offset)
44663 {
44664 return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
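
timeout_read() gains __intentional_overflow(-1), an annotation consumed by the size_overflow GCC plugin telling it not to flag this function; the -1 appears to designate the return value as intentionally unchecked, though the exact argument semantics here are an assumption. With the plugin absent the macro should expand to nothing, roughly:

/* Sketch of the annotation plumbing; attribute semantics assumed. */
#ifdef SIZE_OVERFLOW_PLUGIN
#define __intentional_overflow(...) \
	__attribute__((intentional_overflow(__VA_ARGS__)))
#else
#define __intentional_overflow(...)
#endif
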
44665diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
44666index 917d28e..d62d981 100644
44667--- a/drivers/oprofile/oprofile_stats.c
44668+++ b/drivers/oprofile/oprofile_stats.c
44669@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
44670 cpu_buf->sample_invalid_eip = 0;
44671 }
44672
44673- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
44674- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
44675- atomic_set(&oprofile_stats.event_lost_overflow, 0);
44676- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
44677- atomic_set(&oprofile_stats.multiplex_counter, 0);
44678+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
44679+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
44680+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
44681+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
44682+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
44683 }
44684
44685
44686diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
44687index 38b6fc0..b5cbfce 100644
44688--- a/drivers/oprofile/oprofile_stats.h
44689+++ b/drivers/oprofile/oprofile_stats.h
44690@@ -13,11 +13,11 @@
44691 #include <linux/atomic.h>
44692
44693 struct oprofile_stat_struct {
44694- atomic_t sample_lost_no_mm;
44695- atomic_t sample_lost_no_mapping;
44696- atomic_t bt_lost_no_mapping;
44697- atomic_t event_lost_overflow;
44698- atomic_t multiplex_counter;
44699+ atomic_unchecked_t sample_lost_no_mm;
44700+ atomic_unchecked_t sample_lost_no_mapping;
44701+ atomic_unchecked_t bt_lost_no_mapping;
44702+ atomic_unchecked_t event_lost_overflow;
44703+ atomic_unchecked_t multiplex_counter;
44704 };
44705
44706 extern struct oprofile_stat_struct oprofile_stats;
44707diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
44708index 7c12d9c..558bf3bb 100644
44709--- a/drivers/oprofile/oprofilefs.c
44710+++ b/drivers/oprofile/oprofilefs.c
44711@@ -190,7 +190,7 @@ static const struct file_operations atomic_ro_fops = {
44712
44713
44714 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
44715- char const *name, atomic_t *val)
44716+ char const *name, atomic_unchecked_t *val)
44717 {
44718 return __oprofilefs_create_file(sb, root, name,
44719 &atomic_ro_fops, 0444, val);
44720diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
44721index 93404f7..4a313d8 100644
44722--- a/drivers/oprofile/timer_int.c
44723+++ b/drivers/oprofile/timer_int.c
44724@@ -93,7 +93,7 @@ static int __cpuinit oprofile_cpu_notify(struct notifier_block *self,
44725 return NOTIFY_OK;
44726 }
44727
44728-static struct notifier_block __refdata oprofile_cpu_notifier = {
44729+static struct notifier_block oprofile_cpu_notifier = {
44730 .notifier_call = oprofile_cpu_notify,
44731 };
44732
44733diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
44734index 92ed045..62d39bd7 100644
44735--- a/drivers/parport/procfs.c
44736+++ b/drivers/parport/procfs.c
44737@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
44738
44739 *ppos += len;
44740
44741- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
44742+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
44743 }
44744
44745 #ifdef CONFIG_PARPORT_1284
44746@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
44747
44748 *ppos += len;
44749
44750- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
44751+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
44752 }
44753 #endif /* IEEE1284.3 support. */
44754
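
Both procfs handlers format into a fixed stack buffer and then copy len bytes out; the added len > sizeof buffer guard makes the copy fail closed if len were ever computed too large, instead of leaking adjacent stack memory. The hardened idiom in isolation (kernel-side sketch; the helper name is illustrative):

/* Refuse oversized lengths before touching userspace. */
static int copy_out(char __user *result, const char *buffer,
		    size_t buf_sz, size_t len)
{
	if (len > buf_sz || copy_to_user(result, buffer, len))
		return -EFAULT;
	return 0;
}
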
44755diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
44756index c35e8ad..fc33beb 100644
44757--- a/drivers/pci/hotplug/acpiphp_ibm.c
44758+++ b/drivers/pci/hotplug/acpiphp_ibm.c
44759@@ -464,7 +464,9 @@ static int __init ibm_acpiphp_init(void)
44760 goto init_cleanup;
44761 }
44762
44763- ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
44764+ pax_open_kernel();
44765+ *(size_t *)&ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
44766+ pax_close_kernel();
44767 retval = sysfs_create_bin_file(sysdir, &ibm_apci_table_attr);
44768
44769 return retval;
44770diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
44771index a6a71c4..c91097b 100644
44772--- a/drivers/pci/hotplug/cpcihp_generic.c
44773+++ b/drivers/pci/hotplug/cpcihp_generic.c
44774@@ -73,7 +73,6 @@ static u16 port;
44775 static unsigned int enum_bit;
44776 static u8 enum_mask;
44777
44778-static struct cpci_hp_controller_ops generic_hpc_ops;
44779 static struct cpci_hp_controller generic_hpc;
44780
44781 static int __init validate_parameters(void)
44782@@ -139,6 +138,10 @@ static int query_enum(void)
44783 return ((value & enum_mask) == enum_mask);
44784 }
44785
44786+static struct cpci_hp_controller_ops generic_hpc_ops = {
44787+ .query_enum = query_enum,
44788+};
44789+
44790 static int __init cpcihp_generic_init(void)
44791 {
44792 int status;
44793@@ -165,7 +168,6 @@ static int __init cpcihp_generic_init(void)
44794 pci_dev_put(dev);
44795
44796 memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller));
44797- generic_hpc_ops.query_enum = query_enum;
44798 generic_hpc.ops = &generic_hpc_ops;
44799
44800 status = cpci_hp_register_controller(&generic_hpc);
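
Instead of filling in generic_hpc_ops.query_enum at init time, the ops table is defined with a designated initializer. A structure that is never written after link time can be placed in read-only memory by the constify plugin; the zt5550 driver below gets the same conversion for its fixed callback, while its conditional IRQ callbacks keep the pax_open_kernel() bracket. In miniature:

struct hpc_ops {
	int (*query_enum)(void);
};

static int query_enum(void) { return 1; }

/* before: zeroed table, filled in at init time, must stay writable */
static struct hpc_ops generic_rw;

/* after: designated initializer, eligible for read-only placement */
static struct hpc_ops generic_ro = {
	.query_enum = query_enum,
};
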
44801diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
44802index 449b4bb..257e2e8 100644
44803--- a/drivers/pci/hotplug/cpcihp_zt5550.c
44804+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
44805@@ -59,7 +59,6 @@
44806 /* local variables */
44807 static bool debug;
44808 static bool poll;
44809-static struct cpci_hp_controller_ops zt5550_hpc_ops;
44810 static struct cpci_hp_controller zt5550_hpc;
44811
44812 /* Primary cPCI bus bridge device */
44813@@ -205,6 +204,10 @@ static int zt5550_hc_disable_irq(void)
44814 return 0;
44815 }
44816
44817+static struct cpci_hp_controller_ops zt5550_hpc_ops = {
44818+ .query_enum = zt5550_hc_query_enum,
44819+};
44820+
44821 static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
44822 {
44823 int status;
44824@@ -216,16 +219,17 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
44825 dbg("returned from zt5550_hc_config");
44826
44827 memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
44828- zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
44829 zt5550_hpc.ops = &zt5550_hpc_ops;
44830 if(!poll) {
44831 zt5550_hpc.irq = hc_dev->irq;
44832 zt5550_hpc.irq_flags = IRQF_SHARED;
44833 zt5550_hpc.dev_id = hc_dev;
44834
44835- zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
44836- zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
44837- zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
44838+ pax_open_kernel();
44839+ *(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
44840+ *(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
44841+ *(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
44842+ pax_close_kernel();
44843 } else {
44844 info("using ENUM# polling mode");
44845 }
44846diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
44847index 76ba8a1..20ca857 100644
44848--- a/drivers/pci/hotplug/cpqphp_nvram.c
44849+++ b/drivers/pci/hotplug/cpqphp_nvram.c
44850@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
44851
44852 void compaq_nvram_init (void __iomem *rom_start)
44853 {
44854+
44855+#ifndef CONFIG_PAX_KERNEXEC
44856 if (rom_start) {
44857 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
44858 }
44859+#endif
44860+
44861 dbg("int15 entry = %p\n", compaq_int15_entry_point);
44862
44863 /* initialize our int15 lock */
44864diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
44865index ec20f74..c1d961e 100644
44866--- a/drivers/pci/hotplug/pci_hotplug_core.c
44867+++ b/drivers/pci/hotplug/pci_hotplug_core.c
44868@@ -441,8 +441,10 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
44869 return -EINVAL;
44870 }
44871
44872- slot->ops->owner = owner;
44873- slot->ops->mod_name = mod_name;
44874+ pax_open_kernel();
44875+ *(struct module **)&slot->ops->owner = owner;
44876+ *(const char **)&slot->ops->mod_name = mod_name;
44877+ pax_close_kernel();
44878
44879 mutex_lock(&pci_hp_mutex);
44880 /*
44881diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
44882index 7d72c5e..edce02c 100644
44883--- a/drivers/pci/hotplug/pciehp_core.c
44884+++ b/drivers/pci/hotplug/pciehp_core.c
44885@@ -91,7 +91,7 @@ static int init_slot(struct controller *ctrl)
44886 struct slot *slot = ctrl->slot;
44887 struct hotplug_slot *hotplug = NULL;
44888 struct hotplug_slot_info *info = NULL;
44889- struct hotplug_slot_ops *ops = NULL;
44890+ hotplug_slot_ops_no_const *ops = NULL;
44891 char name[SLOT_NAME_SIZE];
44892 int retval = -ENOMEM;
44893
44894diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
44895index 5b4a9d9..cd5ac1f 100644
44896--- a/drivers/pci/pci-sysfs.c
44897+++ b/drivers/pci/pci-sysfs.c
44898@@ -1071,7 +1071,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
44899 {
44900 /* allocate attribute structure, piggyback attribute name */
44901 int name_len = write_combine ? 13 : 10;
44902- struct bin_attribute *res_attr;
44903+ bin_attribute_no_const *res_attr;
44904 int retval;
44905
44906 res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
44907@@ -1256,7 +1256,7 @@ static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_stor
44908 static int pci_create_capabilities_sysfs(struct pci_dev *dev)
44909 {
44910 int retval;
44911- struct bin_attribute *attr;
44912+ bin_attribute_no_const *attr;
44913
44914 /* If the device has VPD, try to expose it in sysfs. */
44915 if (dev->vpd) {
44916@@ -1303,7 +1303,7 @@ int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
44917 {
44918 int retval;
44919 int rom_size = 0;
44920- struct bin_attribute *attr;
44921+ bin_attribute_no_const *attr;
44922
44923 if (!sysfs_initialized)
44924 return -EACCES;
44925diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
44926index d1182c4..2a138ec 100644
44927--- a/drivers/pci/pci.h
44928+++ b/drivers/pci/pci.h
44929@@ -92,7 +92,7 @@ struct pci_vpd_ops {
44930 struct pci_vpd {
44931 unsigned int len;
44932 const struct pci_vpd_ops *ops;
44933- struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
44934+ bin_attribute_no_const *attr; /* descriptor for sysfs VPD entry */
44935 };
44936
44937 int pci_vpd_pci22_init(struct pci_dev *dev);
44938diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
44939index d320df6..ca9a8f6 100644
44940--- a/drivers/pci/pcie/aspm.c
44941+++ b/drivers/pci/pcie/aspm.c
44942@@ -27,9 +27,9 @@
44943 #define MODULE_PARAM_PREFIX "pcie_aspm."
44944
44945 /* Note: those are not register definitions */
44946-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
44947-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
44948-#define ASPM_STATE_L1 (4) /* L1 state */
44949+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
44950+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
44951+#define ASPM_STATE_L1 (4U) /* L1 state */
44952 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
44953 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
44954
44955diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
44956index ea37072..10e58e56 100644
44957--- a/drivers/pci/probe.c
44958+++ b/drivers/pci/probe.c
44959@@ -173,7 +173,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
44960 struct pci_bus_region region;
44961 bool bar_too_big = false, bar_disabled = false;
44962
44963- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
44964+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
44965
44966 /* No printks while decoding is disabled! */
44967 if (!dev->mmio_always_on) {
44968diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
44969index 0812608..b04018c4 100644
44970--- a/drivers/pci/proc.c
44971+++ b/drivers/pci/proc.c
44972@@ -453,7 +453,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
44973 static int __init pci_proc_init(void)
44974 {
44975 struct pci_dev *dev = NULL;
44976+
44977+#ifdef CONFIG_GRKERNSEC_PROC_ADD
44978+#ifdef CONFIG_GRKERNSEC_PROC_USER
44979+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
44980+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
44981+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
44982+#endif
44983+#else
44984 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
44985+#endif
44986 proc_create("devices", 0, proc_bus_pci_dir,
44987 &proc_bus_pci_dev_operations);
44988 proc_initialized = 1;
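
Under GRKERNSEC_PROC, the /proc/bus/pci directory is created with restricted permissions so unprivileged users cannot enumerate PCI devices: owner-only for GRKERNSEC_PROC_USER, owner plus the configured group for GRKERNSEC_PROC_USERGROUP. proc_mkdir_mode() is the stock procfs helper taking an explicit umode_t. Kernel-side usage sketch:

#include <linux/proc_fs.h>
#include <linux/stat.h>

static struct proc_dir_entry *pci_dir;

static void make_dirs(void)
{
	/* root-only directory: mode 0500 */
	pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);

	/* the default proc_mkdir("bus/pci", NULL) would be 0555 */
}
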
44989diff --git a/drivers/platform/x86/chromeos_laptop.c b/drivers/platform/x86/chromeos_laptop.c
44990index 3e5b4497..dcdfb70 100644
44991--- a/drivers/platform/x86/chromeos_laptop.c
44992+++ b/drivers/platform/x86/chromeos_laptop.c
44993@@ -301,7 +301,7 @@ static int __init setup_tsl2563_als(const struct dmi_system_id *id)
44994 return 0;
44995 }
44996
44997-static struct dmi_system_id __initdata chromeos_laptop_dmi_table[] = {
44998+static struct dmi_system_id __initconst chromeos_laptop_dmi_table[] = {
44999 {
45000 .ident = "Samsung Series 5 550 - Touchpad",
45001 .matches = {
45002diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
45003index 6b22938..bc9700e 100644
45004--- a/drivers/platform/x86/msi-laptop.c
45005+++ b/drivers/platform/x86/msi-laptop.c
45006@@ -1000,12 +1000,14 @@ static int __init load_scm_model_init(struct platform_device *sdev)
45007
45008 if (!quirks->ec_read_only) {
45009 /* allow userland write sysfs file */
45010- dev_attr_bluetooth.store = store_bluetooth;
45011- dev_attr_wlan.store = store_wlan;
45012- dev_attr_threeg.store = store_threeg;
45013- dev_attr_bluetooth.attr.mode |= S_IWUSR;
45014- dev_attr_wlan.attr.mode |= S_IWUSR;
45015- dev_attr_threeg.attr.mode |= S_IWUSR;
45016+ pax_open_kernel();
45017+ *(void **)&dev_attr_bluetooth.store = store_bluetooth;
45018+ *(void **)&dev_attr_wlan.store = store_wlan;
45019+ *(void **)&dev_attr_threeg.store = store_threeg;
45020+ *(umode_t *)&dev_attr_bluetooth.attr.mode |= S_IWUSR;
45021+ *(umode_t *)&dev_attr_wlan.attr.mode |= S_IWUSR;
45022+ *(umode_t *)&dev_attr_threeg.attr.mode |= S_IWUSR;
45023+ pax_close_kernel();
45024 }
45025
45026 /* disable hardware control by fn key */
45027diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
45028index 2ac045f..39c443d 100644
45029--- a/drivers/platform/x86/sony-laptop.c
45030+++ b/drivers/platform/x86/sony-laptop.c
45031@@ -2483,7 +2483,7 @@ static void sony_nc_gfx_switch_cleanup(struct platform_device *pd)
45032 }
45033
45034 /* High speed charging function */
45035-static struct device_attribute *hsc_handle;
45036+static device_attribute_no_const *hsc_handle;
45037
45038 static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
45039 struct device_attribute *attr,
45040diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
45041index 54d31c0..3f896d3 100644
45042--- a/drivers/platform/x86/thinkpad_acpi.c
45043+++ b/drivers/platform/x86/thinkpad_acpi.c
45044@@ -2093,7 +2093,7 @@ static int hotkey_mask_get(void)
45045 return 0;
45046 }
45047
45048-void static hotkey_mask_warn_incomplete_mask(void)
45049+static void hotkey_mask_warn_incomplete_mask(void)
45050 {
45051 /* log only what the user can fix... */
45052 const u32 wantedmask = hotkey_driver_mask &
45053@@ -2324,11 +2324,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
45054 }
45055 }
45056
45057-static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
45058- struct tp_nvram_state *newn,
45059- const u32 event_mask)
45060-{
45061-
45062 #define TPACPI_COMPARE_KEY(__scancode, __member) \
45063 do { \
45064 if ((event_mask & (1 << __scancode)) && \
45065@@ -2342,36 +2337,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
45066 tpacpi_hotkey_send_key(__scancode); \
45067 } while (0)
45068
45069- void issue_volchange(const unsigned int oldvol,
45070- const unsigned int newvol)
45071- {
45072- unsigned int i = oldvol;
45073+static void issue_volchange(const unsigned int oldvol,
45074+ const unsigned int newvol,
45075+ const u32 event_mask)
45076+{
45077+ unsigned int i = oldvol;
45078
45079- while (i > newvol) {
45080- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
45081- i--;
45082- }
45083- while (i < newvol) {
45084- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
45085- i++;
45086- }
45087+ while (i > newvol) {
45088+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
45089+ i--;
45090 }
45091+ while (i < newvol) {
45092+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
45093+ i++;
45094+ }
45095+}
45096
45097- void issue_brightnesschange(const unsigned int oldbrt,
45098- const unsigned int newbrt)
45099- {
45100- unsigned int i = oldbrt;
45101+static void issue_brightnesschange(const unsigned int oldbrt,
45102+ const unsigned int newbrt,
45103+ const u32 event_mask)
45104+{
45105+ unsigned int i = oldbrt;
45106
45107- while (i > newbrt) {
45108- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
45109- i--;
45110- }
45111- while (i < newbrt) {
45112- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
45113- i++;
45114- }
45115+ while (i > newbrt) {
45116+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
45117+ i--;
45118+ }
45119+ while (i < newbrt) {
45120+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
45121+ i++;
45122 }
45123+}
45124
45125+static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
45126+ struct tp_nvram_state *newn,
45127+ const u32 event_mask)
45128+{
45129 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
45130 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
45131 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
45132@@ -2405,7 +2406,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
45133 oldn->volume_level != newn->volume_level) {
45134 /* recently muted, or repeated mute keypress, or
45135 * multiple presses ending in mute */
45136- issue_volchange(oldn->volume_level, newn->volume_level);
45137+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
45138 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
45139 }
45140 } else {
45141@@ -2415,7 +2416,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
45142 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
45143 }
45144 if (oldn->volume_level != newn->volume_level) {
45145- issue_volchange(oldn->volume_level, newn->volume_level);
45146+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
45147 } else if (oldn->volume_toggle != newn->volume_toggle) {
45148 /* repeated vol up/down keypress at end of scale ? */
45149 if (newn->volume_level == 0)
45150@@ -2428,7 +2429,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
45151 /* handle brightness */
45152 if (oldn->brightness_level != newn->brightness_level) {
45153 issue_brightnesschange(oldn->brightness_level,
45154- newn->brightness_level);
45155+ newn->brightness_level,
45156+ event_mask);
45157 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
45158 /* repeated key presses that didn't change state */
45159 if (newn->brightness_level == 0)
45160@@ -2437,10 +2439,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
45161 && !tp_features.bright_unkfw)
45162 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
45163 }
45164+}
45165
45166 #undef TPACPI_COMPARE_KEY
45167 #undef TPACPI_MAY_SEND_KEY
45168-}
45169
45170 /*
45171 * Polling driver
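
issue_volchange() and issue_brightnesschange() were GCC nested functions capturing event_mask from the enclosing hotkey_compare_and_issue_event(). A nested function whose address escapes forces GCC to build a trampoline on the stack, which needs an executable stack and conflicts with PaX's non-executable-memory guarantees, so the patch set removes nested functions wholesale: the helpers are hoisted to file scope and the captured state becomes an explicit parameter. Schematically:

#include <stdio.h>

/* before: nested function (GCC extension) capturing event_mask */
void handler_before(unsigned int event_mask)
{
	void issue(unsigned int oldv, unsigned int newv)
	{
		if (event_mask)		/* reads the enclosing frame */
			printf("%u -> %u\n", oldv, newv);
	}
	issue(0, 1);
}

/* after: file-scope static; the captured state is now a parameter */
static void issue_flat(unsigned int oldv, unsigned int newv,
		       unsigned int event_mask)
{
	if (event_mask)
		printf("%u -> %u\n", oldv, newv);
}

void handler_after(unsigned int event_mask)
{
	issue_flat(0, 1, event_mask);
}
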
45172diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
45173index 769d265..a3a05ca 100644
45174--- a/drivers/pnp/pnpbios/bioscalls.c
45175+++ b/drivers/pnp/pnpbios/bioscalls.c
45176@@ -58,7 +58,7 @@ do { \
45177 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
45178 } while(0)
45179
45180-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
45181+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
45182 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
45183
45184 /*
45185@@ -95,7 +95,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
45186
45187 cpu = get_cpu();
45188 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
45189+
45190+ pax_open_kernel();
45191 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
45192+ pax_close_kernel();
45193
45194 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
45195 spin_lock_irqsave(&pnp_bios_lock, flags);
45196@@ -133,7 +136,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
45197 :"memory");
45198 spin_unlock_irqrestore(&pnp_bios_lock, flags);
45199
45200+ pax_open_kernel();
45201 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
45202+ pax_close_kernel();
45203+
45204 put_cpu();
45205
45206 /* If we get here and this is set then the PnP BIOS faulted on us. */
45207@@ -467,7 +473,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
45208 return status;
45209 }
45210
45211-void pnpbios_calls_init(union pnp_bios_install_struct *header)
45212+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
45213 {
45214 int i;
45215
45216@@ -475,6 +481,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
45217 pnp_bios_callpoint.offset = header->fields.pm16offset;
45218 pnp_bios_callpoint.segment = PNP_CS16;
45219
45220+ pax_open_kernel();
45221+
45222 for_each_possible_cpu(i) {
45223 struct desc_struct *gdt = get_cpu_gdt_table(i);
45224 if (!gdt)
45225@@ -486,4 +494,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
45226 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
45227 (unsigned long)__va(header->fields.pm16dseg));
45228 }
45229+
45230+ pax_close_kernel();
45231 }
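
Two related hardening moves here. bad_bios_desc becomes const and its descriptor type goes from 0x4092 to 0x4093: bit 0 of the type byte is the accessed flag, which the CPU otherwise sets with a locked write the first time the segment is loaded, so pre-setting it means the descriptor is never written by hardware and can live in read-only memory under KERNEXEC. The remaining writes into the live per-CPU GDT are then bracketed with pax_open_kernel()/pax_close_kernel(). Low-byte breakdown (per the usual x86 descriptor encoding; treat as a reference sketch):

/* Data-segment type byte used above:
 *   0x92 = present | DPL0 | code/data | writable data
 *   0x93 = 0x92 | accessed  ->  the CPU never needs to write it
 */
#define SEG_PRESENT	0x80
#define SEG_DPL0	0x00
#define SEG_CODEDATA	0x10
#define SEG_DATA_RW	0x02
#define SEG_ACCESSED	0x01
#define BAD_BIOS_TYPE	(SEG_PRESENT | SEG_DPL0 | SEG_CODEDATA | \
			 SEG_DATA_RW | SEG_ACCESSED)	/* == 0x93 */
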
45232diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
45233index 3e6db1c..1fbbdae 100644
45234--- a/drivers/pnp/resource.c
45235+++ b/drivers/pnp/resource.c
45236@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
45237 return 1;
45238
45239 /* check if the resource is valid */
45240- if (*irq < 0 || *irq > 15)
45241+ if (*irq > 15)
45242 return 0;
45243
45244 /* check if the resource is reserved */
45245@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
45246 return 1;
45247
45248 /* check if the resource is valid */
45249- if (*dma < 0 || *dma == 4 || *dma > 7)
45250+ if (*dma == 4 || *dma > 7)
45251 return 0;
45252
45253 /* check if the resource is reserved */
45254diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
45255index 0c52e2a..3421ab7 100644
45256--- a/drivers/power/pda_power.c
45257+++ b/drivers/power/pda_power.c
45258@@ -37,7 +37,11 @@ static int polling;
45259
45260 #if IS_ENABLED(CONFIG_USB_PHY)
45261 static struct usb_phy *transceiver;
45262-static struct notifier_block otg_nb;
45263+static int otg_handle_notification(struct notifier_block *nb,
45264+ unsigned long event, void *unused);
45265+static struct notifier_block otg_nb = {
45266+ .notifier_call = otg_handle_notification
45267+};
45268 #endif
45269
45270 static struct regulator *ac_draw;
45271@@ -369,7 +373,6 @@ static int pda_power_probe(struct platform_device *pdev)
45272
45273 #if IS_ENABLED(CONFIG_USB_PHY)
45274 if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) {
45275- otg_nb.notifier_call = otg_handle_notification;
45276 ret = usb_register_notifier(transceiver, &otg_nb);
45277 if (ret) {
45278 dev_err(dev, "failure to register otg notifier\n");
45279diff --git a/drivers/power/power_supply.h b/drivers/power/power_supply.h
45280index cc439fd..8fa30df 100644
45281--- a/drivers/power/power_supply.h
45282+++ b/drivers/power/power_supply.h
45283@@ -16,12 +16,12 @@ struct power_supply;
45284
45285 #ifdef CONFIG_SYSFS
45286
45287-extern void power_supply_init_attrs(struct device_type *dev_type);
45288+extern void power_supply_init_attrs(void);
45289 extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env);
45290
45291 #else
45292
45293-static inline void power_supply_init_attrs(struct device_type *dev_type) {}
45294+static inline void power_supply_init_attrs(void) {}
45295 #define power_supply_uevent NULL
45296
45297 #endif /* CONFIG_SYSFS */
45298diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
45299index 1c517c3..ffa2f17 100644
45300--- a/drivers/power/power_supply_core.c
45301+++ b/drivers/power/power_supply_core.c
45302@@ -24,7 +24,10 @@
45303 struct class *power_supply_class;
45304 EXPORT_SYMBOL_GPL(power_supply_class);
45305
45306-static struct device_type power_supply_dev_type;
45307+extern const struct attribute_group *power_supply_attr_groups[];
45308+static struct device_type power_supply_dev_type = {
45309+ .groups = power_supply_attr_groups,
45310+};
45311
45312 static bool __power_supply_is_supplied_by(struct power_supply *supplier,
45313 struct power_supply *supply)
45314@@ -554,7 +557,7 @@ static int __init power_supply_class_init(void)
45315 return PTR_ERR(power_supply_class);
45316
45317 power_supply_class->dev_uevent = power_supply_uevent;
45318- power_supply_init_attrs(&power_supply_dev_type);
45319+ power_supply_init_attrs();
45320
45321 return 0;
45322 }
45323diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
45324index 29178f7..c65f324 100644
45325--- a/drivers/power/power_supply_sysfs.c
45326+++ b/drivers/power/power_supply_sysfs.c
45327@@ -230,17 +230,15 @@ static struct attribute_group power_supply_attr_group = {
45328 .is_visible = power_supply_attr_is_visible,
45329 };
45330
45331-static const struct attribute_group *power_supply_attr_groups[] = {
45332+const struct attribute_group *power_supply_attr_groups[] = {
45333 &power_supply_attr_group,
45334 NULL,
45335 };
45336
45337-void power_supply_init_attrs(struct device_type *dev_type)
45338+void power_supply_init_attrs(void)
45339 {
45340 int i;
45341
45342- dev_type->groups = power_supply_attr_groups;
45343-
45344 for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++)
45345 __power_supply_attrs[i] = &power_supply_attrs[i].attr;
45346 }
45347diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
45348index d428ef9..fdc0357 100644
45349--- a/drivers/regulator/max8660.c
45350+++ b/drivers/regulator/max8660.c
45351@@ -333,8 +333,10 @@ static int max8660_probe(struct i2c_client *client,
45352 max8660->shadow_regs[MAX8660_OVER1] = 5;
45353 } else {
45354 /* Otherwise devices can be toggled via software */
45355- max8660_dcdc_ops.enable = max8660_dcdc_enable;
45356- max8660_dcdc_ops.disable = max8660_dcdc_disable;
45357+ pax_open_kernel();
45358+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
45359+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
45360+ pax_close_kernel();
45361 }
45362
45363 /*
45364diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
45365index adb1414..c13e0ce 100644
45366--- a/drivers/regulator/max8973-regulator.c
45367+++ b/drivers/regulator/max8973-regulator.c
45368@@ -401,9 +401,11 @@ static int max8973_probe(struct i2c_client *client,
45369 if (!pdata->enable_ext_control) {
45370 max->desc.enable_reg = MAX8973_VOUT;
45371 max->desc.enable_mask = MAX8973_VOUT_ENABLE;
45372- max8973_dcdc_ops.enable = regulator_enable_regmap;
45373- max8973_dcdc_ops.disable = regulator_disable_regmap;
45374- max8973_dcdc_ops.is_enabled = regulator_is_enabled_regmap;
45375+ pax_open_kernel();
45376+ *(void **)&max8973_dcdc_ops.enable = regulator_enable_regmap;
45377+ *(void **)&max8973_dcdc_ops.disable = regulator_disable_regmap;
45378+ *(void **)&max8973_dcdc_ops.is_enabled = regulator_is_enabled_regmap;
45379+ pax_close_kernel();
45380 }
45381
45382 max->enable_external_control = pdata->enable_ext_control;
45383diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
45384index b716283..3cc4349 100644
45385--- a/drivers/regulator/mc13892-regulator.c
45386+++ b/drivers/regulator/mc13892-regulator.c
45387@@ -582,10 +582,12 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
45388 }
45389 mc13xxx_unlock(mc13892);
45390
45391- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
45392+ pax_open_kernel();
45393+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
45394 = mc13892_vcam_set_mode;
45395- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
45396+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
45397 = mc13892_vcam_get_mode;
45398+ pax_close_kernel();
45399
45400 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
45401 ARRAY_SIZE(mc13892_regulators));
45402diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
45403index f1cb706..4c7832a 100644
45404--- a/drivers/rtc/rtc-cmos.c
45405+++ b/drivers/rtc/rtc-cmos.c
45406@@ -724,7 +724,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
45407 hpet_rtc_timer_init();
45408
45409 /* export at least the first block of NVRAM */
45410- nvram.size = address_space - NVRAM_OFFSET;
45411+ pax_open_kernel();
45412+ *(size_t *)&nvram.size = address_space - NVRAM_OFFSET;
45413+ pax_close_kernel();
45414 retval = sysfs_create_bin_file(&dev->kobj, &nvram);
45415 if (retval < 0) {
45416 dev_dbg(dev, "can't create nvram file? %d\n", retval);
45417diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
45418index d049393..bb20be0 100644
45419--- a/drivers/rtc/rtc-dev.c
45420+++ b/drivers/rtc/rtc-dev.c
45421@@ -16,6 +16,7 @@
45422 #include <linux/module.h>
45423 #include <linux/rtc.h>
45424 #include <linux/sched.h>
45425+#include <linux/grsecurity.h>
45426 #include "rtc-core.h"
45427
45428 static dev_t rtc_devt;
45429@@ -347,6 +348,8 @@ static long rtc_dev_ioctl(struct file *file,
45430 if (copy_from_user(&tm, uarg, sizeof(tm)))
45431 return -EFAULT;
45432
45433+ gr_log_timechange();
45434+
45435 return rtc_set_time(rtc, &tm);
45436
45437 case RTC_PIE_ON:
45438diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
45439index b53992a..776df84 100644
45440--- a/drivers/rtc/rtc-ds1307.c
45441+++ b/drivers/rtc/rtc-ds1307.c
45442@@ -107,7 +107,7 @@ struct ds1307 {
45443 u8 offset; /* register's offset */
45444 u8 regs[11];
45445 u16 nvram_offset;
45446- struct bin_attribute *nvram;
45447+ bin_attribute_no_const *nvram;
45448 enum ds_type type;
45449 unsigned long flags;
45450 #define HAS_NVRAM 0 /* bit 0 == sysfs file active */
45451diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
45452index 130f29a..6179d03 100644
45453--- a/drivers/rtc/rtc-m48t59.c
45454+++ b/drivers/rtc/rtc-m48t59.c
45455@@ -482,7 +482,9 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
45456 goto out;
45457 }
45458
45459- m48t59_nvram_attr.size = pdata->offset;
45460+ pax_open_kernel();
45461+ *(size_t *)&m48t59_nvram_attr.size = pdata->offset;
45462+ pax_close_kernel();
45463
45464 ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
45465 if (ret) {
45466diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
45467index e693af6..2e525b6 100644
45468--- a/drivers/scsi/bfa/bfa_fcpim.h
45469+++ b/drivers/scsi/bfa/bfa_fcpim.h
45470@@ -36,7 +36,7 @@ struct bfa_iotag_s {
45471
45472 struct bfa_itn_s {
45473 bfa_isr_func_t isr;
45474-};
45475+} __no_const;
45476
45477 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
45478 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
45479diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
45480index 23a90e7..9cf04ee 100644
45481--- a/drivers/scsi/bfa/bfa_ioc.h
45482+++ b/drivers/scsi/bfa/bfa_ioc.h
45483@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
45484 bfa_ioc_disable_cbfn_t disable_cbfn;
45485 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
45486 bfa_ioc_reset_cbfn_t reset_cbfn;
45487-};
45488+} __no_const;
45489
45490 /*
45491 * IOC event notification mechanism.
45492@@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
45493 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
45494 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
45495 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
45496-};
45497+} __no_const;
45498
45499 /*
45500 * Queue element to wait for room in request queue. FIFO order is
45501diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
45502index df0c3c7..b00e1d0 100644
45503--- a/drivers/scsi/hosts.c
45504+++ b/drivers/scsi/hosts.c
45505@@ -42,7 +42,7 @@
45506 #include "scsi_logging.h"
45507
45508
45509-static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
45510+static atomic_unchecked_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
45511
45512
45513 static void scsi_host_cls_release(struct device *dev)
45514@@ -361,7 +361,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
45515 * subtract one because we increment first then return, but we need to
45516 * know what the next host number was before increment
45517 */
45518- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
45519+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
45520 shost->dma_channel = 0xff;
45521
45522 /* These three are default values which can be overridden */
45523diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
45524index 7f4f790..b75b92a 100644
45525--- a/drivers/scsi/hpsa.c
45526+++ b/drivers/scsi/hpsa.c
45527@@ -554,7 +554,7 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
45528 unsigned long flags;
45529
45530 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
45531- return h->access.command_completed(h, q);
45532+ return h->access->command_completed(h, q);
45533
45534 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
45535 a = rq->head[rq->current_entry];
45536@@ -3422,7 +3422,7 @@ static void start_io(struct ctlr_info *h)
45537 while (!list_empty(&h->reqQ)) {
45538 c = list_entry(h->reqQ.next, struct CommandList, list);
45539 /* can't do anything if fifo is full */
45540- if ((h->access.fifo_full(h))) {
45541+ if ((h->access->fifo_full(h))) {
45542 dev_warn(&h->pdev->dev, "fifo full\n");
45543 break;
45544 }
45545@@ -3444,7 +3444,7 @@ static void start_io(struct ctlr_info *h)
45546
45547 /* Tell the controller execute command */
45548 spin_unlock_irqrestore(&h->lock, flags);
45549- h->access.submit_command(h, c);
45550+ h->access->submit_command(h, c);
45551 spin_lock_irqsave(&h->lock, flags);
45552 }
45553 spin_unlock_irqrestore(&h->lock, flags);
45554@@ -3452,17 +3452,17 @@ static void start_io(struct ctlr_info *h)
45555
45556 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
45557 {
45558- return h->access.command_completed(h, q);
45559+ return h->access->command_completed(h, q);
45560 }
45561
45562 static inline bool interrupt_pending(struct ctlr_info *h)
45563 {
45564- return h->access.intr_pending(h);
45565+ return h->access->intr_pending(h);
45566 }
45567
45568 static inline long interrupt_not_for_us(struct ctlr_info *h)
45569 {
45570- return (h->access.intr_pending(h) == 0) ||
45571+ return (h->access->intr_pending(h) == 0) ||
45572 (h->interrupts_enabled == 0);
45573 }
45574
45575@@ -4364,7 +4364,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
45576 if (prod_index < 0)
45577 return -ENODEV;
45578 h->product_name = products[prod_index].product_name;
45579- h->access = *(products[prod_index].access);
45580+ h->access = products[prod_index].access;
45581
45582 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
45583 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
45584@@ -4646,7 +4646,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
45585
45586 assert_spin_locked(&lockup_detector_lock);
45587 remove_ctlr_from_lockup_detector_list(h);
45588- h->access.set_intr_mask(h, HPSA_INTR_OFF);
45589+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
45590 spin_lock_irqsave(&h->lock, flags);
45591 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
45592 spin_unlock_irqrestore(&h->lock, flags);
45593@@ -4823,7 +4823,7 @@ reinit_after_soft_reset:
45594 }
45595
45596 /* make sure the board interrupts are off */
45597- h->access.set_intr_mask(h, HPSA_INTR_OFF);
45598+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
45599
45600 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
45601 goto clean2;
45602@@ -4857,7 +4857,7 @@ reinit_after_soft_reset:
45603 * fake ones to scoop up any residual completions.
45604 */
45605 spin_lock_irqsave(&h->lock, flags);
45606- h->access.set_intr_mask(h, HPSA_INTR_OFF);
45607+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
45608 spin_unlock_irqrestore(&h->lock, flags);
45609 free_irqs(h);
45610 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
45611@@ -4876,9 +4876,9 @@ reinit_after_soft_reset:
45612 dev_info(&h->pdev->dev, "Board READY.\n");
45613 dev_info(&h->pdev->dev,
45614 "Waiting for stale completions to drain.\n");
45615- h->access.set_intr_mask(h, HPSA_INTR_ON);
45616+ h->access->set_intr_mask(h, HPSA_INTR_ON);
45617 msleep(10000);
45618- h->access.set_intr_mask(h, HPSA_INTR_OFF);
45619+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
45620
45621 rc = controller_reset_failed(h->cfgtable);
45622 if (rc)
45623@@ -4899,7 +4899,7 @@ reinit_after_soft_reset:
45624 }
45625
45626 /* Turn the interrupts on so we can service requests */
45627- h->access.set_intr_mask(h, HPSA_INTR_ON);
45628+ h->access->set_intr_mask(h, HPSA_INTR_ON);
45629
45630 hpsa_hba_inquiry(h);
45631 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
45632@@ -4954,7 +4954,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
45633 * To write all data in the battery backed cache to disks
45634 */
45635 hpsa_flush_cache(h);
45636- h->access.set_intr_mask(h, HPSA_INTR_OFF);
45637+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
45638 hpsa_free_irqs_and_disable_msix(h);
45639 }
45640
45641@@ -5122,7 +5122,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)
45642 return;
45643 }
45644 /* Change the access methods to the performant access methods */
45645- h->access = SA5_performant_access;
45646+ h->access = &SA5_performant_access;
45647 h->transMethod = CFGTBL_Trans_Performant;
45648 }
45649
45650diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
45651index 9816479..c5d4e97 100644
45652--- a/drivers/scsi/hpsa.h
45653+++ b/drivers/scsi/hpsa.h
45654@@ -79,7 +79,7 @@ struct ctlr_info {
45655 unsigned int msix_vector;
45656 unsigned int msi_vector;
45657 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
45658- struct access_method access;
45659+ struct access_method *access;
45660
45661 /* queue and queue Info */
45662 struct list_head reqQ;
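
struct ctlr_info used to embed a writable by-value copy of the controller's access_method table, leaving indirectly-called function pointers inside a heap object. The patch stores a pointer to the shared table instead, so every h->access.fn(...) in hpsa.c above becomes h->access->fn(...) and the tables themselves can eventually be made const. Schematically:

struct access_method {
	void (*submit_command)(void *h, void *c);
};

/* before: by-value copy, writable function pointers per controller */
struct ctlr_before { struct access_method access; };

/* after: pointer to a shared, potentially read-only table */
struct ctlr_after  { struct access_method *access; };
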
45663diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
45664index 8b928c6..9c76300 100644
45665--- a/drivers/scsi/libfc/fc_exch.c
45666+++ b/drivers/scsi/libfc/fc_exch.c
45667@@ -100,12 +100,12 @@ struct fc_exch_mgr {
45668 u16 pool_max_index;
45669
45670 struct {
45671- atomic_t no_free_exch;
45672- atomic_t no_free_exch_xid;
45673- atomic_t xid_not_found;
45674- atomic_t xid_busy;
45675- atomic_t seq_not_found;
45676- atomic_t non_bls_resp;
45677+ atomic_unchecked_t no_free_exch;
45678+ atomic_unchecked_t no_free_exch_xid;
45679+ atomic_unchecked_t xid_not_found;
45680+ atomic_unchecked_t xid_busy;
45681+ atomic_unchecked_t seq_not_found;
45682+ atomic_unchecked_t non_bls_resp;
45683 } stats;
45684 };
45685
45686@@ -736,7 +736,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
45687 /* allocate memory for exchange */
45688 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
45689 if (!ep) {
45690- atomic_inc(&mp->stats.no_free_exch);
45691+ atomic_inc_unchecked(&mp->stats.no_free_exch);
45692 goto out;
45693 }
45694 memset(ep, 0, sizeof(*ep));
45695@@ -797,7 +797,7 @@ out:
45696 return ep;
45697 err:
45698 spin_unlock_bh(&pool->lock);
45699- atomic_inc(&mp->stats.no_free_exch_xid);
45700+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
45701 mempool_free(ep, mp->ep_pool);
45702 return NULL;
45703 }
45704@@ -940,7 +940,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
45705 xid = ntohs(fh->fh_ox_id); /* we originated exch */
45706 ep = fc_exch_find(mp, xid);
45707 if (!ep) {
45708- atomic_inc(&mp->stats.xid_not_found);
45709+ atomic_inc_unchecked(&mp->stats.xid_not_found);
45710 reject = FC_RJT_OX_ID;
45711 goto out;
45712 }
45713@@ -970,7 +970,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
45714 ep = fc_exch_find(mp, xid);
45715 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
45716 if (ep) {
45717- atomic_inc(&mp->stats.xid_busy);
45718+ atomic_inc_unchecked(&mp->stats.xid_busy);
45719 reject = FC_RJT_RX_ID;
45720 goto rel;
45721 }
45722@@ -981,7 +981,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
45723 }
45724 xid = ep->xid; /* get our XID */
45725 } else if (!ep) {
45726- atomic_inc(&mp->stats.xid_not_found);
45727+ atomic_inc_unchecked(&mp->stats.xid_not_found);
45728 reject = FC_RJT_RX_ID; /* XID not found */
45729 goto out;
45730 }
45731@@ -998,7 +998,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
45732 } else {
45733 sp = &ep->seq;
45734 if (sp->id != fh->fh_seq_id) {
45735- atomic_inc(&mp->stats.seq_not_found);
45736+ atomic_inc_unchecked(&mp->stats.seq_not_found);
45737 if (f_ctl & FC_FC_END_SEQ) {
45738 /*
45739 * Update sequence_id based on incoming last
45740@@ -1448,22 +1448,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
45741
45742 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
45743 if (!ep) {
45744- atomic_inc(&mp->stats.xid_not_found);
45745+ atomic_inc_unchecked(&mp->stats.xid_not_found);
45746 goto out;
45747 }
45748 if (ep->esb_stat & ESB_ST_COMPLETE) {
45749- atomic_inc(&mp->stats.xid_not_found);
45750+ atomic_inc_unchecked(&mp->stats.xid_not_found);
45751 goto rel;
45752 }
45753 if (ep->rxid == FC_XID_UNKNOWN)
45754 ep->rxid = ntohs(fh->fh_rx_id);
45755 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
45756- atomic_inc(&mp->stats.xid_not_found);
45757+ atomic_inc_unchecked(&mp->stats.xid_not_found);
45758 goto rel;
45759 }
45760 if (ep->did != ntoh24(fh->fh_s_id) &&
45761 ep->did != FC_FID_FLOGI) {
45762- atomic_inc(&mp->stats.xid_not_found);
45763+ atomic_inc_unchecked(&mp->stats.xid_not_found);
45764 goto rel;
45765 }
45766 sof = fr_sof(fp);
45767@@ -1472,7 +1472,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
45768 sp->ssb_stat |= SSB_ST_RESP;
45769 sp->id = fh->fh_seq_id;
45770 } else if (sp->id != fh->fh_seq_id) {
45771- atomic_inc(&mp->stats.seq_not_found);
45772+ atomic_inc_unchecked(&mp->stats.seq_not_found);
45773 goto rel;
45774 }
45775
45776@@ -1536,9 +1536,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
45777 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
45778
45779 if (!sp)
45780- atomic_inc(&mp->stats.xid_not_found);
45781+ atomic_inc_unchecked(&mp->stats.xid_not_found);
45782 else
45783- atomic_inc(&mp->stats.non_bls_resp);
45784+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
45785
45786 fc_frame_free(fp);
45787 }
45788@@ -2185,13 +2185,13 @@ void fc_exch_update_stats(struct fc_lport *lport)
45789
45790 list_for_each_entry(ema, &lport->ema_list, ema_list) {
45791 mp = ema->mp;
45792- st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
45793+ st->fc_no_free_exch += atomic_read_unchecked(&mp->stats.no_free_exch);
45794 st->fc_no_free_exch_xid +=
45795- atomic_read(&mp->stats.no_free_exch_xid);
45796- st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
45797- st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
45798- st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
45799- st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
45800+ atomic_read_unchecked(&mp->stats.no_free_exch_xid);
45801+ st->fc_xid_not_found += atomic_read_unchecked(&mp->stats.xid_not_found);
45802+ st->fc_xid_busy += atomic_read_unchecked(&mp->stats.xid_busy);
45803+ st->fc_seq_not_found += atomic_read_unchecked(&mp->stats.seq_not_found);
45804+ st->fc_non_bls_resp += atomic_read_unchecked(&mp->stats.non_bls_resp);
45805 }
45806 }
45807 EXPORT_SYMBOL(fc_exch_update_stats);
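
The stats conversions above, and the many like them in the files that follow, rely on grsecurity's unchecked-atomic API: with PAX_REFCOUNT enabled, plain atomic_t operations trap on overflow to catch reference-count bugs, so counters whose wraparound is harmless (pure statistics such as these per-exchange-manager fields) are moved to a parallel type that skips the check. A sketch of the usual definitions, which vary slightly by kernel version:

#ifdef CONFIG_PAX_REFCOUNT
typedef struct {
	int counter;
} atomic_unchecked_t;			/* operations on this type do not trap */
#else
typedef atomic_t atomic_unchecked_t;	/* degrades to the plain atomic API */
#define atomic_inc_unchecked(v)			atomic_inc(v)
#define atomic_read_unchecked(v)		atomic_read(v)
#define atomic_set_unchecked(v, i)		atomic_set(v, i)
#define atomic_add_return_unchecked(i, v)	atomic_add_return(i, v)
#endif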
45808diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
45809index 161c98e..6d563b3 100644
45810--- a/drivers/scsi/libsas/sas_ata.c
45811+++ b/drivers/scsi/libsas/sas_ata.c
45812@@ -554,7 +554,7 @@ static struct ata_port_operations sas_sata_ops = {
45813 .postreset = ata_std_postreset,
45814 .error_handler = ata_std_error_handler,
45815 .post_internal_cmd = sas_ata_post_internal,
45816- .qc_defer = ata_std_qc_defer,
45817+ .qc_defer = ata_std_qc_defer,
45818 .qc_prep = ata_noop_qc_prep,
45819 .qc_issue = sas_ata_qc_issue,
45820 .qc_fill_rtf = sas_ata_qc_fill_rtf,
45821diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
45822index bcc56ca..6f4174a 100644
45823--- a/drivers/scsi/lpfc/lpfc.h
45824+++ b/drivers/scsi/lpfc/lpfc.h
45825@@ -431,7 +431,7 @@ struct lpfc_vport {
45826 struct dentry *debug_nodelist;
45827 struct dentry *vport_debugfs_root;
45828 struct lpfc_debugfs_trc *disc_trc;
45829- atomic_t disc_trc_cnt;
45830+ atomic_unchecked_t disc_trc_cnt;
45831 #endif
45832 uint8_t stat_data_enabled;
45833 uint8_t stat_data_blocked;
45834@@ -865,8 +865,8 @@ struct lpfc_hba {
45835 struct timer_list fabric_block_timer;
45836 unsigned long bit_flags;
45837 #define FABRIC_COMANDS_BLOCKED 0
45838- atomic_t num_rsrc_err;
45839- atomic_t num_cmd_success;
45840+ atomic_unchecked_t num_rsrc_err;
45841+ atomic_unchecked_t num_cmd_success;
45842 unsigned long last_rsrc_error_time;
45843 unsigned long last_ramp_down_time;
45844 unsigned long last_ramp_up_time;
45845@@ -902,7 +902,7 @@ struct lpfc_hba {
45846
45847 struct dentry *debug_slow_ring_trc;
45848 struct lpfc_debugfs_trc *slow_ring_trc;
45849- atomic_t slow_ring_trc_cnt;
45850+ atomic_unchecked_t slow_ring_trc_cnt;
45851 /* iDiag debugfs sub-directory */
45852 struct dentry *idiag_root;
45853 struct dentry *idiag_pci_cfg;
45854diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
45855index f525ecb..32549a4 100644
45856--- a/drivers/scsi/lpfc/lpfc_debugfs.c
45857+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
45858@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
45859
45860 #include <linux/debugfs.h>
45861
45862-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
45863+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
45864 static unsigned long lpfc_debugfs_start_time = 0L;
45865
45866 /* iDiag */
45867@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
45868 lpfc_debugfs_enable = 0;
45869
45870 len = 0;
45871- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
45872+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
45873 (lpfc_debugfs_max_disc_trc - 1);
45874 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
45875 dtp = vport->disc_trc + i;
45876@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
45877 lpfc_debugfs_enable = 0;
45878
45879 len = 0;
45880- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
45881+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
45882 (lpfc_debugfs_max_slow_ring_trc - 1);
45883 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
45884 dtp = phba->slow_ring_trc + i;
45885@@ -646,14 +646,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
45886 !vport || !vport->disc_trc)
45887 return;
45888
45889- index = atomic_inc_return(&vport->disc_trc_cnt) &
45890+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
45891 (lpfc_debugfs_max_disc_trc - 1);
45892 dtp = vport->disc_trc + index;
45893 dtp->fmt = fmt;
45894 dtp->data1 = data1;
45895 dtp->data2 = data2;
45896 dtp->data3 = data3;
45897- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
45898+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
45899 dtp->jif = jiffies;
45900 #endif
45901 return;
45902@@ -684,14 +684,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
45903 !phba || !phba->slow_ring_trc)
45904 return;
45905
45906- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
45907+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
45908 (lpfc_debugfs_max_slow_ring_trc - 1);
45909 dtp = phba->slow_ring_trc + index;
45910 dtp->fmt = fmt;
45911 dtp->data1 = data1;
45912 dtp->data2 = data2;
45913 dtp->data3 = data3;
45914- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
45915+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
45916 dtp->jif = jiffies;
45917 #endif
45918 return;
45919@@ -4182,7 +4182,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
45920 "slow_ring buffer\n");
45921 goto debug_failed;
45922 }
45923- atomic_set(&phba->slow_ring_trc_cnt, 0);
45924+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
45925 memset(phba->slow_ring_trc, 0,
45926 (sizeof(struct lpfc_debugfs_trc) *
45927 lpfc_debugfs_max_slow_ring_trc));
45928@@ -4228,7 +4228,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
45929 "buffer\n");
45930 goto debug_failed;
45931 }
45932- atomic_set(&vport->disc_trc_cnt, 0);
45933+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
45934
45935 snprintf(name, sizeof(name), "discovery_trace");
45936 vport->debug_disc_trc =
45937diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
45938index cb465b2..2e7b25f 100644
45939--- a/drivers/scsi/lpfc/lpfc_init.c
45940+++ b/drivers/scsi/lpfc/lpfc_init.c
45941@@ -10950,8 +10950,10 @@ lpfc_init(void)
45942 "misc_register returned with status %d", error);
45943
45944 if (lpfc_enable_npiv) {
45945- lpfc_transport_functions.vport_create = lpfc_vport_create;
45946- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
45947+ pax_open_kernel();
45948+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
45949+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
45950+ pax_close_kernel();
45951 }
45952 lpfc_transport_template =
45953 fc_attach_transport(&lpfc_transport_functions);
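
The pax_open_kernel()/pax_close_kernel() pair bracketing the vport_create/vport_delete stores is the standard pattern for the rare legitimate runtime update of a constified ops structure: write protection is lifted, the store goes through a cast that strips the plugin-added const, and protection is restored. A sketch, assuming x86-style CR0.WP toggling (the wrapper function name is hypothetical):

static void lpfc_enable_npiv_ops(void)
{
	pax_open_kernel();	/* e.g. clears CR0.WP so .rodata is writable */
	*(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
	*(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
	pax_close_kernel();	/* restore write protection */
}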
45954diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
45955index 8523b278e..ce1d812 100644
45956--- a/drivers/scsi/lpfc/lpfc_scsi.c
45957+++ b/drivers/scsi/lpfc/lpfc_scsi.c
45958@@ -331,7 +331,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
45959 uint32_t evt_posted;
45960
45961 spin_lock_irqsave(&phba->hbalock, flags);
45962- atomic_inc(&phba->num_rsrc_err);
45963+ atomic_inc_unchecked(&phba->num_rsrc_err);
45964 phba->last_rsrc_error_time = jiffies;
45965
45966 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
45967@@ -372,7 +372,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
45968 unsigned long flags;
45969 struct lpfc_hba *phba = vport->phba;
45970 uint32_t evt_posted;
45971- atomic_inc(&phba->num_cmd_success);
45972+ atomic_inc_unchecked(&phba->num_cmd_success);
45973
45974 if (vport->cfg_lun_queue_depth <= queue_depth)
45975 return;
45976@@ -416,8 +416,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
45977 unsigned long num_rsrc_err, num_cmd_success;
45978 int i;
45979
45980- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
45981- num_cmd_success = atomic_read(&phba->num_cmd_success);
45982+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
45983+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
45984
45985 /*
45986 * The error and success command counters are global per
45987@@ -445,8 +445,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
45988 }
45989 }
45990 lpfc_destroy_vport_work_array(phba, vports);
45991- atomic_set(&phba->num_rsrc_err, 0);
45992- atomic_set(&phba->num_cmd_success, 0);
45993+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
45994+ atomic_set_unchecked(&phba->num_cmd_success, 0);
45995 }
45996
45997 /**
45998@@ -480,8 +480,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
45999 }
46000 }
46001 lpfc_destroy_vport_work_array(phba, vports);
46002- atomic_set(&phba->num_rsrc_err, 0);
46003- atomic_set(&phba->num_cmd_success, 0);
46004+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
46005+ atomic_set_unchecked(&phba->num_cmd_success, 0);
46006 }
46007
46008 /**
46009diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
46010index 8e1b737..50ff510 100644
46011--- a/drivers/scsi/pmcraid.c
46012+++ b/drivers/scsi/pmcraid.c
46013@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
46014 res->scsi_dev = scsi_dev;
46015 scsi_dev->hostdata = res;
46016 res->change_detected = 0;
46017- atomic_set(&res->read_failures, 0);
46018- atomic_set(&res->write_failures, 0);
46019+ atomic_set_unchecked(&res->read_failures, 0);
46020+ atomic_set_unchecked(&res->write_failures, 0);
46021 rc = 0;
46022 }
46023 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
46024@@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
46025
46026 /* If this was a SCSI read/write command keep count of errors */
46027 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
46028- atomic_inc(&res->read_failures);
46029+ atomic_inc_unchecked(&res->read_failures);
46030 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
46031- atomic_inc(&res->write_failures);
46032+ atomic_inc_unchecked(&res->write_failures);
46033
46034 if (!RES_IS_GSCSI(res->cfg_entry) &&
46035 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
46036@@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
46037 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
46038 * hrrq_id assigned here in queuecommand
46039 */
46040- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
46041+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
46042 pinstance->num_hrrq;
46043 cmd->cmd_done = pmcraid_io_done;
46044
46045@@ -3846,7 +3846,7 @@ static long pmcraid_ioctl_passthrough(
46046 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
46047 * hrrq_id assigned here in queuecommand
46048 */
46049- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
46050+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
46051 pinstance->num_hrrq;
46052
46053 if (request_size) {
46054@@ -4483,7 +4483,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
46055
46056 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
46057 /* add resources only after host is added into system */
46058- if (!atomic_read(&pinstance->expose_resources))
46059+ if (!atomic_read_unchecked(&pinstance->expose_resources))
46060 return;
46061
46062 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
46063@@ -5310,8 +5310,8 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
46064 init_waitqueue_head(&pinstance->reset_wait_q);
46065
46066 atomic_set(&pinstance->outstanding_cmds, 0);
46067- atomic_set(&pinstance->last_message_id, 0);
46068- atomic_set(&pinstance->expose_resources, 0);
46069+ atomic_set_unchecked(&pinstance->last_message_id, 0);
46070+ atomic_set_unchecked(&pinstance->expose_resources, 0);
46071
46072 INIT_LIST_HEAD(&pinstance->free_res_q);
46073 INIT_LIST_HEAD(&pinstance->used_res_q);
46074@@ -6024,7 +6024,7 @@ static int pmcraid_probe(struct pci_dev *pdev,
46075 /* Schedule worker thread to handle CCN and take care of adding and
46076 * removing devices to OS
46077 */
46078- atomic_set(&pinstance->expose_resources, 1);
46079+ atomic_set_unchecked(&pinstance->expose_resources, 1);
46080 schedule_work(&pinstance->worker_q);
46081 return rc;
46082
46083diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
46084index e1d150f..6c6df44 100644
46085--- a/drivers/scsi/pmcraid.h
46086+++ b/drivers/scsi/pmcraid.h
46087@@ -748,7 +748,7 @@ struct pmcraid_instance {
46088 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
46089
46090 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
46091- atomic_t last_message_id;
46092+ atomic_unchecked_t last_message_id;
46093
46094 /* configuration table */
46095 struct pmcraid_config_table *cfg_table;
46096@@ -777,7 +777,7 @@ struct pmcraid_instance {
46097 atomic_t outstanding_cmds;
46098
46099 /* should add/delete resources to mid-layer now ?*/
46100- atomic_t expose_resources;
46101+ atomic_unchecked_t expose_resources;
46102
46103
46104
46105@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
46106 struct pmcraid_config_table_entry_ext cfg_entry_ext;
46107 };
46108 struct scsi_device *scsi_dev; /* Link scsi_device structure */
46109- atomic_t read_failures; /* count of failed READ commands */
46110- atomic_t write_failures; /* count of failed WRITE commands */
46111+ atomic_unchecked_t read_failures; /* count of failed READ commands */
46112+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
46113
46114 /* To indicate add/delete/modify during CCN */
46115 u8 change_detected;
46116diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
46117index bf60c63..74d4dce 100644
46118--- a/drivers/scsi/qla2xxx/qla_attr.c
46119+++ b/drivers/scsi/qla2xxx/qla_attr.c
46120@@ -2001,7 +2001,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
46121 return 0;
46122 }
46123
46124-struct fc_function_template qla2xxx_transport_functions = {
46125+fc_function_template_no_const qla2xxx_transport_functions = {
46126
46127 .show_host_node_name = 1,
46128 .show_host_port_name = 1,
46129@@ -2048,7 +2048,7 @@ struct fc_function_template qla2xxx_transport_functions = {
46130 .bsg_timeout = qla24xx_bsg_timeout,
46131 };
46132
46133-struct fc_function_template qla2xxx_transport_vport_functions = {
46134+fc_function_template_no_const qla2xxx_transport_vport_functions = {
46135
46136 .show_host_node_name = 1,
46137 .show_host_port_name = 1,
46138diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
46139index 026bfde..90c4018 100644
46140--- a/drivers/scsi/qla2xxx/qla_gbl.h
46141+++ b/drivers/scsi/qla2xxx/qla_gbl.h
46142@@ -528,8 +528,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *);
46143 struct device_attribute;
46144 extern struct device_attribute *qla2x00_host_attrs[];
46145 struct fc_function_template;
46146-extern struct fc_function_template qla2xxx_transport_functions;
46147-extern struct fc_function_template qla2xxx_transport_vport_functions;
46148+extern fc_function_template_no_const qla2xxx_transport_functions;
46149+extern fc_function_template_no_const qla2xxx_transport_vport_functions;
46150 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
46151 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
46152 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
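
fc_function_template_no_const comes from the companion typedef convention: since the constify plugin turns function-pointer-only structs into const types, a __no_const typedef opts out the few instances that genuinely are written at runtime, as these templates are. Roughly:

/* in the transport header, alongside the struct definition: */
typedef struct fc_function_template __no_const fc_function_template_no_const;

/* mutable instances use the typedef; every other instance stays const: */
fc_function_template_no_const qla2xxx_transport_functions = {
	.show_host_node_name = 1,
	/* ... */
};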
46153diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
46154index ad72c1d..afc9a98 100644
46155--- a/drivers/scsi/qla2xxx/qla_os.c
46156+++ b/drivers/scsi/qla2xxx/qla_os.c
46157@@ -1571,8 +1571,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
46158 !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
46159 /* Ok, a 64bit DMA mask is applicable. */
46160 ha->flags.enable_64bit_addressing = 1;
46161- ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
46162- ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
46163+ pax_open_kernel();
46164+ *(void **)&ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
46165+ *(void **)&ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
46166+ pax_close_kernel();
46167 return;
46168 }
46169 }
46170diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
46171index ddf16a8..80f4dd0 100644
46172--- a/drivers/scsi/qla4xxx/ql4_def.h
46173+++ b/drivers/scsi/qla4xxx/ql4_def.h
46174@@ -291,7 +291,7 @@ struct ddb_entry {
46175 * (4000 only) */
46176 atomic_t relogin_timer; /* Max Time to wait for
46177 * relogin to complete */
46178- atomic_t relogin_retry_count; /* Num of times relogin has been
46179+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
46180 * retried */
46181 uint32_t default_time2wait; /* Default Min time between
46182 * relogins (+aens) */
46183diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
46184index 4d231c1..2892c37 100644
46185--- a/drivers/scsi/qla4xxx/ql4_os.c
46186+++ b/drivers/scsi/qla4xxx/ql4_os.c
46187@@ -2971,12 +2971,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
46188 */
46189 if (!iscsi_is_session_online(cls_sess)) {
46190 /* Reset retry relogin timer */
46191- atomic_inc(&ddb_entry->relogin_retry_count);
46192+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
46193 DEBUG2(ql4_printk(KERN_INFO, ha,
46194 "%s: index[%d] relogin timed out-retrying"
46195 " relogin (%d), retry (%d)\n", __func__,
46196 ddb_entry->fw_ddb_index,
46197- atomic_read(&ddb_entry->relogin_retry_count),
46198+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
46199 ddb_entry->default_time2wait + 4));
46200 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
46201 atomic_set(&ddb_entry->retry_relogin_timer,
46202@@ -5081,7 +5081,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
46203
46204 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
46205 atomic_set(&ddb_entry->relogin_timer, 0);
46206- atomic_set(&ddb_entry->relogin_retry_count, 0);
46207+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
46208 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
46209 ddb_entry->default_relogin_timeout =
46210 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
46211diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
46212index eaa808e..95f8841 100644
46213--- a/drivers/scsi/scsi.c
46214+++ b/drivers/scsi/scsi.c
46215@@ -661,7 +661,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
46216 unsigned long timeout;
46217 int rtn = 0;
46218
46219- atomic_inc(&cmd->device->iorequest_cnt);
46220+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
46221
46222 /* check if the device is still usable */
46223 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
46224diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
46225index 86d5220..f22c51a 100644
46226--- a/drivers/scsi/scsi_lib.c
46227+++ b/drivers/scsi/scsi_lib.c
46228@@ -1458,7 +1458,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
46229 shost = sdev->host;
46230 scsi_init_cmd_errh(cmd);
46231 cmd->result = DID_NO_CONNECT << 16;
46232- atomic_inc(&cmd->device->iorequest_cnt);
46233+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
46234
46235 /*
46236 * SCSI request completion path will do scsi_device_unbusy(),
46237@@ -1484,9 +1484,9 @@ static void scsi_softirq_done(struct request *rq)
46238
46239 INIT_LIST_HEAD(&cmd->eh_entry);
46240
46241- atomic_inc(&cmd->device->iodone_cnt);
46242+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
46243 if (cmd->result)
46244- atomic_inc(&cmd->device->ioerr_cnt);
46245+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
46246
46247 disposition = scsi_decide_disposition(cmd);
46248 if (disposition != SUCCESS &&
46249diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
46250index 931a7d9..0c2a754 100644
46251--- a/drivers/scsi/scsi_sysfs.c
46252+++ b/drivers/scsi/scsi_sysfs.c
46253@@ -658,7 +658,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
46254 char *buf) \
46255 { \
46256 struct scsi_device *sdev = to_scsi_device(dev); \
46257- unsigned long long count = atomic_read(&sdev->field); \
46258+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
46259 return snprintf(buf, 20, "0x%llx\n", count); \
46260 } \
46261 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
46262diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
46263index 84a1fdf..693b0d6 100644
46264--- a/drivers/scsi/scsi_tgt_lib.c
46265+++ b/drivers/scsi/scsi_tgt_lib.c
46266@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
46267 int err;
46268
46269 dprintk("%lx %u\n", uaddr, len);
46270- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
46271+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
46272 if (err) {
46273 /*
46274 * TODO: need to fixup sg_tablesize, max_segment_size,
46275diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
46276index e106c27..11a380e 100644
46277--- a/drivers/scsi/scsi_transport_fc.c
46278+++ b/drivers/scsi/scsi_transport_fc.c
46279@@ -497,7 +497,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
46280 * Netlink Infrastructure
46281 */
46282
46283-static atomic_t fc_event_seq;
46284+static atomic_unchecked_t fc_event_seq;
46285
46286 /**
46287 * fc_get_event_number - Obtain the next sequential FC event number
46288@@ -510,7 +510,7 @@ static atomic_t fc_event_seq;
46289 u32
46290 fc_get_event_number(void)
46291 {
46292- return atomic_add_return(1, &fc_event_seq);
46293+ return atomic_add_return_unchecked(1, &fc_event_seq);
46294 }
46295 EXPORT_SYMBOL(fc_get_event_number);
46296
46297@@ -654,7 +654,7 @@ static __init int fc_transport_init(void)
46298 {
46299 int error;
46300
46301- atomic_set(&fc_event_seq, 0);
46302+ atomic_set_unchecked(&fc_event_seq, 0);
46303
46304 error = transport_class_register(&fc_host_class);
46305 if (error)
46306@@ -844,7 +844,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
46307 char *cp;
46308
46309 *val = simple_strtoul(buf, &cp, 0);
46310- if ((*cp && (*cp != '\n')) || (*val < 0))
46311+ if (*cp && (*cp != '\n'))
46312 return -EINVAL;
46313 /*
46314 * Check for overflow; dev_loss_tmo is u32
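
The dev_loss hunk deletes a tautology rather than loosening validation: *val is an unsigned long, so (*val < 0) can never be true and only draws compiler warnings. A sketch of the parse-and-validate shape (the u32 range check is paraphrased from the comment above, not copied from the source):

static int parse_dev_loss(const char *buf, unsigned long *val)
{
	char *cp;

	*val = simple_strtoul(buf, &cp, 0);
	if (*cp && *cp != '\n')		/* trailing junk after the number */
		return -EINVAL;
	if (*val > UINT_MAX)		/* dev_loss_tmo is a u32 */
		return -EINVAL;
	return 0;
}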
46315diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
46316index 133926b..903000d 100644
46317--- a/drivers/scsi/scsi_transport_iscsi.c
46318+++ b/drivers/scsi/scsi_transport_iscsi.c
46319@@ -80,7 +80,7 @@ struct iscsi_internal {
46320 struct transport_container session_cont;
46321 };
46322
46323-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
46324+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
46325 static struct workqueue_struct *iscsi_eh_timer_workq;
46326
46327 static DEFINE_IDA(iscsi_sess_ida);
46328@@ -1738,7 +1738,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
46329 int err;
46330
46331 ihost = shost->shost_data;
46332- session->sid = atomic_add_return(1, &iscsi_session_nr);
46333+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
46334
46335 if (target_id == ISCSI_MAX_TARGET) {
46336 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
46337@@ -3944,7 +3944,7 @@ static __init int iscsi_transport_init(void)
46338 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
46339 ISCSI_TRANSPORT_VERSION);
46340
46341- atomic_set(&iscsi_session_nr, 0);
46342+ atomic_set_unchecked(&iscsi_session_nr, 0);
46343
46344 err = class_register(&iscsi_transport_class);
46345 if (err)
46346diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
46347index f379c7f..e8fc69c 100644
46348--- a/drivers/scsi/scsi_transport_srp.c
46349+++ b/drivers/scsi/scsi_transport_srp.c
46350@@ -33,7 +33,7 @@
46351 #include "scsi_transport_srp_internal.h"
46352
46353 struct srp_host_attrs {
46354- atomic_t next_port_id;
46355+ atomic_unchecked_t next_port_id;
46356 };
46357 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
46358
46359@@ -61,7 +61,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
46360 struct Scsi_Host *shost = dev_to_shost(dev);
46361 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
46362
46363- atomic_set(&srp_host->next_port_id, 0);
46364+ atomic_set_unchecked(&srp_host->next_port_id, 0);
46365 return 0;
46366 }
46367
46368@@ -210,7 +210,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
46369 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
46370 rport->roles = ids->roles;
46371
46372- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
46373+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
46374 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
46375
46376 transport_setup_device(&rport->dev);
46377diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
46378index 610417e..1544fa9 100644
46379--- a/drivers/scsi/sd.c
46380+++ b/drivers/scsi/sd.c
46381@@ -2928,7 +2928,7 @@ static int sd_probe(struct device *dev)
46382 sdkp->disk = gd;
46383 sdkp->index = index;
46384 atomic_set(&sdkp->openers, 0);
46385- atomic_set(&sdkp->device->ioerr_cnt, 0);
46386+ atomic_set_unchecked(&sdkp->device->ioerr_cnt, 0);
46387
46388 if (!sdp->request_queue->rq_timeout) {
46389 if (sdp->type != TYPE_MOD)
46390diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
46391index df5e961..df6b97f 100644
46392--- a/drivers/scsi/sg.c
46393+++ b/drivers/scsi/sg.c
46394@@ -1102,7 +1102,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
46395 sdp->disk->disk_name,
46396 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
46397 NULL,
46398- (char *)arg);
46399+ (char __user *)arg);
46400 case BLKTRACESTART:
46401 return blk_trace_startstop(sdp->device->request_queue, 1);
46402 case BLKTRACESTOP:
46403diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
46404index 32b7bb1..2f1c4bd 100644
46405--- a/drivers/spi/spi.c
46406+++ b/drivers/spi/spi.c
46407@@ -1631,7 +1631,7 @@ int spi_bus_unlock(struct spi_master *master)
46408 EXPORT_SYMBOL_GPL(spi_bus_unlock);
46409
46410 /* portable code must never pass more than 32 bytes */
46411-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
46412+#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
46413
46414 static u8 *buf;
46415
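
The 32 -> 32UL change exists because the kernel's max() insists both operands have the same type; a sketch of the type-check trick, assuming SMP_CACHE_BYTES evaluates as unsigned long in the configurations this patch targets:

#define max(x, y) ({				\
	typeof(x) _max1 = (x);			\
	typeof(y) _max2 = (y);			\
	(void) (&_max1 == &_max2);		/* warns if the types differ */ \
	_max1 > _max2 ? _max1 : _max2; })

#define SPI_BUFSIZ	max(32UL, SMP_CACHE_BYTES)	/* both operands unsigned long */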
46416diff --git a/drivers/staging/media/solo6x10/solo6x10-core.c b/drivers/staging/media/solo6x10/solo6x10-core.c
46417index 3675020..e80d92c 100644
46418--- a/drivers/staging/media/solo6x10/solo6x10-core.c
46419+++ b/drivers/staging/media/solo6x10/solo6x10-core.c
46420@@ -434,7 +434,7 @@ static void solo_device_release(struct device *dev)
46421
46422 static int solo_sysfs_init(struct solo_dev *solo_dev)
46423 {
46424- struct bin_attribute *sdram_attr = &solo_dev->sdram_attr;
46425+ bin_attribute_no_const *sdram_attr = &solo_dev->sdram_attr;
46426 struct device *dev = &solo_dev->dev;
46427 const char *driver;
46428 int i;
46429diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
46430index 34afc16..ffe44dd 100644
46431--- a/drivers/staging/octeon/ethernet-rx.c
46432+++ b/drivers/staging/octeon/ethernet-rx.c
46433@@ -421,11 +421,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
46434 /* Increment RX stats for virtual ports */
46435 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
46436 #ifdef CONFIG_64BIT
46437- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
46438- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
46439+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
46440+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
46441 #else
46442- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
46443- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
46444+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
46445+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
46446 #endif
46447 }
46448 netif_receive_skb(skb);
46449@@ -437,9 +437,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
46450 dev->name);
46451 */
46452 #ifdef CONFIG_64BIT
46453- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
46454+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
46455 #else
46456- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
46457+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
46458 #endif
46459 dev_kfree_skb_irq(skb);
46460 }
46461diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
46462index c3a90e7..023619a 100644
46463--- a/drivers/staging/octeon/ethernet.c
46464+++ b/drivers/staging/octeon/ethernet.c
46465@@ -252,11 +252,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
46466 * since the RX tasklet also increments it.
46467 */
46468 #ifdef CONFIG_64BIT
46469- atomic64_add(rx_status.dropped_packets,
46470- (atomic64_t *)&priv->stats.rx_dropped);
46471+ atomic64_add_unchecked(rx_status.dropped_packets,
46472+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
46473 #else
46474- atomic_add(rx_status.dropped_packets,
46475- (atomic_t *)&priv->stats.rx_dropped);
46476+ atomic_add_unchecked(rx_status.dropped_packets,
46477+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
46478 #endif
46479 }
46480
46481diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
46482index dc23395..cf7e9b1 100644
46483--- a/drivers/staging/rtl8712/rtl871x_io.h
46484+++ b/drivers/staging/rtl8712/rtl871x_io.h
46485@@ -108,7 +108,7 @@ struct _io_ops {
46486 u8 *pmem);
46487 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
46488 u8 *pmem);
46489-};
46490+} __no_const;
46491
46492 struct io_req {
46493 struct list_head list;
46494diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
46495index 1f5088b..0e59820 100644
46496--- a/drivers/staging/sbe-2t3e3/netdev.c
46497+++ b/drivers/staging/sbe-2t3e3/netdev.c
46498@@ -51,7 +51,7 @@ static int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
46499 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
46500
46501 if (rlen)
46502- if (copy_to_user(data, &resp, rlen))
46503+ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
46504 return -EFAULT;
46505
46506 return 0;
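
The 2t3e3 hunk adds a missing bound: rlen comes back from t3e3_if_config(), and if it could ever exceed sizeof(resp), the copy_to_user() would leak adjacent stack memory to userspace. A sketch of the hardened pattern, with hypothetical names:

static int copy_resp_to_user(void __user *data, const void *resp,
			     size_t resp_size, size_t rlen)
{
	if (rlen > resp_size)	/* reject device-supplied lengths beyond the buffer */
		return -EFAULT;
	if (rlen && copy_to_user(data, resp, rlen))
		return -EFAULT;
	return 0;
}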
46507diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
46508index a863a98..d272795 100644
46509--- a/drivers/staging/usbip/vhci.h
46510+++ b/drivers/staging/usbip/vhci.h
46511@@ -83,7 +83,7 @@ struct vhci_hcd {
46512 unsigned resuming:1;
46513 unsigned long re_timeout;
46514
46515- atomic_t seqnum;
46516+ atomic_unchecked_t seqnum;
46517
46518 /*
46519 * NOTE:
46520diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
46521index d7974cb..d78076b 100644
46522--- a/drivers/staging/usbip/vhci_hcd.c
46523+++ b/drivers/staging/usbip/vhci_hcd.c
46524@@ -441,7 +441,7 @@ static void vhci_tx_urb(struct urb *urb)
46525
46526 spin_lock(&vdev->priv_lock);
46527
46528- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
46529+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
46530 if (priv->seqnum == 0xffff)
46531 dev_info(&urb->dev->dev, "seqnum max\n");
46532
46533@@ -687,7 +687,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
46534 return -ENOMEM;
46535 }
46536
46537- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
46538+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
46539 if (unlink->seqnum == 0xffff)
46540 pr_info("seqnum max\n");
46541
46542@@ -891,7 +891,7 @@ static int vhci_start(struct usb_hcd *hcd)
46543 vdev->rhport = rhport;
46544 }
46545
46546- atomic_set(&vhci->seqnum, 0);
46547+ atomic_set_unchecked(&vhci->seqnum, 0);
46548 spin_lock_init(&vhci->lock);
46549
46550 hcd->power_budget = 0; /* no limit */
46551diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
46552index d07fcb5..358e1e1 100644
46553--- a/drivers/staging/usbip/vhci_rx.c
46554+++ b/drivers/staging/usbip/vhci_rx.c
46555@@ -80,7 +80,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
46556 if (!urb) {
46557 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
46558 pr_info("max seqnum %d\n",
46559- atomic_read(&the_controller->seqnum));
46560+ atomic_read_unchecked(&the_controller->seqnum));
46561 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
46562 return;
46563 }
46564diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
46565index 8417c2f..ef5ebd6 100644
46566--- a/drivers/staging/vt6655/hostap.c
46567+++ b/drivers/staging/vt6655/hostap.c
46568@@ -69,14 +69,13 @@ static int msglevel = MSG_LEVEL_INFO;
46569 *
46570 */
46571
46572+static net_device_ops_no_const apdev_netdev_ops;
46573+
46574 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
46575 {
46576 PSDevice apdev_priv;
46577 struct net_device *dev = pDevice->dev;
46578 int ret;
46579- const struct net_device_ops apdev_netdev_ops = {
46580- .ndo_start_xmit = pDevice->tx_80211,
46581- };
46582
46583 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
46584
46585@@ -88,6 +87,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
46586 *apdev_priv = *pDevice;
46587 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
46588
46589+ /* only half broken now */
46590+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
46591 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
46592
46593 pDevice->apdev->type = ARPHRD_IEEE80211;
46594diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
46595index c699a30..b90a5fd 100644
46596--- a/drivers/staging/vt6656/hostap.c
46597+++ b/drivers/staging/vt6656/hostap.c
46598@@ -60,14 +60,13 @@ static int msglevel =MSG_LEVEL_INFO;
46599 *
46600 */
46601
46602+static net_device_ops_no_const apdev_netdev_ops;
46603+
46604 static int hostap_enable_hostapd(struct vnt_private *pDevice, int rtnl_locked)
46605 {
46606 struct vnt_private *apdev_priv;
46607 struct net_device *dev = pDevice->dev;
46608 int ret;
46609- const struct net_device_ops apdev_netdev_ops = {
46610- .ndo_start_xmit = pDevice->tx_80211,
46611- };
46612
46613 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
46614
46615@@ -79,6 +78,8 @@ static int hostap_enable_hostapd(struct vnt_private *pDevice, int rtnl_locked)
46616 *apdev_priv = *pDevice;
46617 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
46618
46619+ /* only half broken now */
46620+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
46621 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
46622
46623 pDevice->apdev->type = ARPHRD_IEEE80211;
46624diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
46625index d128ce2..fc1f9a1 100644
46626--- a/drivers/staging/zcache/tmem.h
46627+++ b/drivers/staging/zcache/tmem.h
46628@@ -225,7 +225,7 @@ struct tmem_pamops {
46629 bool (*is_remote)(void *);
46630 int (*replace_in_obj)(void *, struct tmem_obj *);
46631 #endif
46632-};
46633+} __no_const;
46634 extern void tmem_register_pamops(struct tmem_pamops *m);
46635
46636 /* memory allocation methods provided by the host implementation */
46637@@ -234,7 +234,7 @@ struct tmem_hostops {
46638 void (*obj_free)(struct tmem_obj *, struct tmem_pool *);
46639 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
46640 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
46641-};
46642+} __no_const;
46643 extern void tmem_register_hostops(struct tmem_hostops *m);
46644
46645 /* core tmem accessor functions */
46646diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
46647index 4630481..c26782a 100644
46648--- a/drivers/target/target_core_device.c
46649+++ b/drivers/target/target_core_device.c
46650@@ -1400,7 +1400,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
46651 spin_lock_init(&dev->se_port_lock);
46652 spin_lock_init(&dev->se_tmr_lock);
46653 spin_lock_init(&dev->qf_cmd_lock);
46654- atomic_set(&dev->dev_ordered_id, 0);
46655+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
46656 INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
46657 spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
46658 INIT_LIST_HEAD(&dev->t10_pr.registration_list);
46659diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
46660index 21e3158..43c6004 100644
46661--- a/drivers/target/target_core_transport.c
46662+++ b/drivers/target/target_core_transport.c
46663@@ -1080,7 +1080,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
46664 * Used to determine when ORDERED commands should go from
46665 * Dormant to Active status.
46666 */
46667- cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
46668+ cmd->se_ordered_id = atomic_inc_return_unchecked(&dev->dev_ordered_id);
46669 smp_mb__after_atomic_inc();
46670 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
46671 cmd->se_ordered_id, cmd->sam_task_attr,
46672diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
46673index 33f83fe..d80f8e1 100644
46674--- a/drivers/tty/cyclades.c
46675+++ b/drivers/tty/cyclades.c
46676@@ -1570,10 +1570,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
46677 printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
46678 info->port.count);
46679 #endif
46680- info->port.count++;
46681+ atomic_inc(&info->port.count);
46682 #ifdef CY_DEBUG_COUNT
46683 printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
46684- current->pid, info->port.count);
46685+ current->pid, atomic_read(&info->port.count));
46686 #endif
46687
46688 /*
46689@@ -3972,7 +3972,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
46690 for (j = 0; j < cy_card[i].nports; j++) {
46691 info = &cy_card[i].ports[j];
46692
46693- if (info->port.count) {
46694+ if (atomic_read(&info->port.count)) {
46695 /* XXX is the ldisc num worth this? */
46696 struct tty_struct *tty;
46697 struct tty_ldisc *ld;
46698diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
46699index eb255e8..f637a57 100644
46700--- a/drivers/tty/hvc/hvc_console.c
46701+++ b/drivers/tty/hvc/hvc_console.c
46702@@ -338,7 +338,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
46703
46704 spin_lock_irqsave(&hp->port.lock, flags);
46705 /* Check and then increment for fast path open. */
46706- if (hp->port.count++ > 0) {
46707+ if (atomic_inc_return(&hp->port.count) > 1) {
46708 spin_unlock_irqrestore(&hp->port.lock, flags);
46709 hvc_kick();
46710 return 0;
46711@@ -388,7 +388,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
46712
46713 spin_lock_irqsave(&hp->port.lock, flags);
46714
46715- if (--hp->port.count == 0) {
46716+ if (atomic_dec_return(&hp->port.count) == 0) {
46717 spin_unlock_irqrestore(&hp->port.lock, flags);
46718 /* We are done with the tty pointer now. */
46719 tty_port_tty_set(&hp->port, NULL);
46720@@ -406,9 +406,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
46721 */
46722 tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
46723 } else {
46724- if (hp->port.count < 0)
46725+ if (atomic_read(&hp->port.count) < 0)
46726 printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
46727- hp->vtermno, hp->port.count);
46728+ hp->vtermno, atomic_read(&hp->port.count));
46729 spin_unlock_irqrestore(&hp->port.lock, flags);
46730 }
46731 }
46732@@ -438,12 +438,12 @@ static void hvc_hangup(struct tty_struct *tty)
46733 * open->hangup case this can be called after the final close so prevent
46734 * that from happening for now.
46735 */
46736- if (hp->port.count <= 0) {
46737+ if (atomic_read(&hp->port.count) <= 0) {
46738 spin_unlock_irqrestore(&hp->port.lock, flags);
46739 return;
46740 }
46741
46742- hp->port.count = 0;
46743+ atomic_set(&hp->port.count, 0);
46744 spin_unlock_irqrestore(&hp->port.lock, flags);
46745 tty_port_tty_set(&hp->port, NULL);
46746
46747@@ -491,7 +491,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
46748 return -EPIPE;
46749
46750 /* FIXME what's this (unprotected) check for? */
46751- if (hp->port.count <= 0)
46752+ if (atomic_read(&hp->port.count) <= 0)
46753 return -EIO;
46754
46755 spin_lock_irqsave(&hp->lock, flags);
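
The tty hunks here and below translate non-atomic open-count arithmetic into atomic primitives with matching semantics. The correspondences used throughout, plus a sketch assuming the patched struct tty_port where count is an atomic_t:

/*  port->count++ > 0    ->  atomic_inc_return(&port->count) > 1
 *  --port->count == 0   ->  atomic_dec_return(&port->count) == 0
 *  port->count = 0      ->  atomic_set(&port->count, 0)
 *  port->count < 0      ->  atomic_read(&port->count) < 0
 */
static int example_fastpath_open(struct tty_port *port)
{
	/* atomic_inc_return() yields the new value; old > 0 iff new > 1,
	 * so behaviour matches the pre-increment test exactly */
	if (atomic_inc_return(&port->count) > 1)
		return 0;	/* port already open elsewhere */
	return 1;		/* first opener: perform one-time setup */
}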
46756diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
46757index 81e939e..95ead10 100644
46758--- a/drivers/tty/hvc/hvcs.c
46759+++ b/drivers/tty/hvc/hvcs.c
46760@@ -83,6 +83,7 @@
46761 #include <asm/hvcserver.h>
46762 #include <asm/uaccess.h>
46763 #include <asm/vio.h>
46764+#include <asm/local.h>
46765
46766 /*
46767 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
46768@@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
46769
46770 spin_lock_irqsave(&hvcsd->lock, flags);
46771
46772- if (hvcsd->port.count > 0) {
46773+ if (atomic_read(&hvcsd->port.count) > 0) {
46774 spin_unlock_irqrestore(&hvcsd->lock, flags);
46775 printk(KERN_INFO "HVCS: vterm state unchanged. "
46776 "The hvcs device node is still in use.\n");
46777@@ -1127,7 +1128,7 @@ static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty)
46778 }
46779 }
46780
46781- hvcsd->port.count = 0;
46782+ atomic_set(&hvcsd->port.count, 0);
46783 hvcsd->port.tty = tty;
46784 tty->driver_data = hvcsd;
46785
46786@@ -1180,7 +1181,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
46787 unsigned long flags;
46788
46789 spin_lock_irqsave(&hvcsd->lock, flags);
46790- hvcsd->port.count++;
46791+ atomic_inc(&hvcsd->port.count);
46792 hvcsd->todo_mask |= HVCS_SCHED_READ;
46793 spin_unlock_irqrestore(&hvcsd->lock, flags);
46794
46795@@ -1216,7 +1217,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
46796 hvcsd = tty->driver_data;
46797
46798 spin_lock_irqsave(&hvcsd->lock, flags);
46799- if (--hvcsd->port.count == 0) {
46800+ if (atomic_dec_and_test(&hvcsd->port.count)) {
46801
46802 vio_disable_interrupts(hvcsd->vdev);
46803
46804@@ -1241,10 +1242,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
46805
46806 free_irq(irq, hvcsd);
46807 return;
46808- } else if (hvcsd->port.count < 0) {
46809+ } else if (atomic_read(&hvcsd->port.count) < 0) {
46810 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
46811 " is missmanaged.\n",
46812- hvcsd->vdev->unit_address, hvcsd->port.count);
46813+ hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
46814 }
46815
46816 spin_unlock_irqrestore(&hvcsd->lock, flags);
46817@@ -1266,7 +1267,7 @@ static void hvcs_hangup(struct tty_struct * tty)
46818
46819 spin_lock_irqsave(&hvcsd->lock, flags);
46820 /* Preserve this so that we know how many kref refs to put */
46821- temp_open_count = hvcsd->port.count;
46822+ temp_open_count = atomic_read(&hvcsd->port.count);
46823
46824 /*
46825 * Don't kref put inside the spinlock because the destruction
46826@@ -1281,7 +1282,7 @@ static void hvcs_hangup(struct tty_struct * tty)
46827 tty->driver_data = NULL;
46828 hvcsd->port.tty = NULL;
46829
46830- hvcsd->port.count = 0;
46831+ atomic_set(&hvcsd->port.count, 0);
46832
46833 /* This will drop any buffered data on the floor which is OK in a hangup
46834 * scenario. */
46835@@ -1352,7 +1353,7 @@ static int hvcs_write(struct tty_struct *tty,
46836 * the middle of a write operation? This is a crummy place to do this
46837 * but we want to keep it all in the spinlock.
46838 */
46839- if (hvcsd->port.count <= 0) {
46840+ if (atomic_read(&hvcsd->port.count) <= 0) {
46841 spin_unlock_irqrestore(&hvcsd->lock, flags);
46842 return -ENODEV;
46843 }
46844@@ -1426,7 +1427,7 @@ static int hvcs_write_room(struct tty_struct *tty)
46845 {
46846 struct hvcs_struct *hvcsd = tty->driver_data;
46847
46848- if (!hvcsd || hvcsd->port.count <= 0)
46849+ if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
46850 return 0;
46851
46852 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
46853diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
46854index 8fd72ff..34a0bed 100644
46855--- a/drivers/tty/ipwireless/tty.c
46856+++ b/drivers/tty/ipwireless/tty.c
46857@@ -29,6 +29,7 @@
46858 #include <linux/tty_driver.h>
46859 #include <linux/tty_flip.h>
46860 #include <linux/uaccess.h>
46861+#include <asm/local.h>
46862
46863 #include "tty.h"
46864 #include "network.h"
46865@@ -99,10 +100,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
46866 mutex_unlock(&tty->ipw_tty_mutex);
46867 return -ENODEV;
46868 }
46869- if (tty->port.count == 0)
46870+ if (atomic_read(&tty->port.count) == 0)
46871 tty->tx_bytes_queued = 0;
46872
46873- tty->port.count++;
46874+ atomic_inc(&tty->port.count);
46875
46876 tty->port.tty = linux_tty;
46877 linux_tty->driver_data = tty;
46878@@ -118,9 +119,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
46879
46880 static void do_ipw_close(struct ipw_tty *tty)
46881 {
46882- tty->port.count--;
46883-
46884- if (tty->port.count == 0) {
46885+ if (atomic_dec_return(&tty->port.count) == 0) {
46886 struct tty_struct *linux_tty = tty->port.tty;
46887
46888 if (linux_tty != NULL) {
46889@@ -141,7 +140,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
46890 return;
46891
46892 mutex_lock(&tty->ipw_tty_mutex);
46893- if (tty->port.count == 0) {
46894+ if (atomic_read(&tty->port.count) == 0) {
46895 mutex_unlock(&tty->ipw_tty_mutex);
46896 return;
46897 }
46898@@ -164,7 +163,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
46899
46900 mutex_lock(&tty->ipw_tty_mutex);
46901
46902- if (!tty->port.count) {
46903+ if (!atomic_read(&tty->port.count)) {
46904 mutex_unlock(&tty->ipw_tty_mutex);
46905 return;
46906 }
46907@@ -206,7 +205,7 @@ static int ipw_write(struct tty_struct *linux_tty,
46908 return -ENODEV;
46909
46910 mutex_lock(&tty->ipw_tty_mutex);
46911- if (!tty->port.count) {
46912+ if (!atomic_read(&tty->port.count)) {
46913 mutex_unlock(&tty->ipw_tty_mutex);
46914 return -EINVAL;
46915 }
46916@@ -246,7 +245,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
46917 if (!tty)
46918 return -ENODEV;
46919
46920- if (!tty->port.count)
46921+ if (!atomic_read(&tty->port.count))
46922 return -EINVAL;
46923
46924 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
46925@@ -288,7 +287,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
46926 if (!tty)
46927 return 0;
46928
46929- if (!tty->port.count)
46930+ if (!atomic_read(&tty->port.count))
46931 return 0;
46932
46933 return tty->tx_bytes_queued;
46934@@ -369,7 +368,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
46935 if (!tty)
46936 return -ENODEV;
46937
46938- if (!tty->port.count)
46939+ if (!atomic_read(&tty->port.count))
46940 return -EINVAL;
46941
46942 return get_control_lines(tty);
46943@@ -385,7 +384,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
46944 if (!tty)
46945 return -ENODEV;
46946
46947- if (!tty->port.count)
46948+ if (!atomic_read(&tty->port.count))
46949 return -EINVAL;
46950
46951 return set_control_lines(tty, set, clear);
46952@@ -399,7 +398,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
46953 if (!tty)
46954 return -ENODEV;
46955
46956- if (!tty->port.count)
46957+ if (!atomic_read(&tty->port.count))
46958 return -EINVAL;
46959
46960 /* FIXME: Exactly how is the tty object locked here .. */
46961@@ -555,7 +554,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
46962 * are gone */
46963 mutex_lock(&ttyj->ipw_tty_mutex);
46964 }
46965- while (ttyj->port.count)
46966+ while (atomic_read(&ttyj->port.count))
46967 do_ipw_close(ttyj);
46968 ipwireless_disassociate_network_ttys(network,
46969 ttyj->channel_idx);
46970diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
46971index 1deaca4..c8582d4 100644
46972--- a/drivers/tty/moxa.c
46973+++ b/drivers/tty/moxa.c
46974@@ -1189,7 +1189,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
46975 }
46976
46977 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
46978- ch->port.count++;
46979+ atomic_inc(&ch->port.count);
46980 tty->driver_data = ch;
46981 tty_port_tty_set(&ch->port, tty);
46982 mutex_lock(&ch->port.mutex);
46983diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
46984index 6422390..49003ac8 100644
46985--- a/drivers/tty/n_gsm.c
46986+++ b/drivers/tty/n_gsm.c
46987@@ -1632,7 +1632,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
46988 spin_lock_init(&dlci->lock);
46989 mutex_init(&dlci->mutex);
46990 dlci->fifo = &dlci->_fifo;
46991- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
46992+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
46993 kfree(dlci);
46994 return NULL;
46995 }
46996@@ -2932,7 +2932,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
46997 struct gsm_dlci *dlci = tty->driver_data;
46998 struct tty_port *port = &dlci->port;
46999
47000- port->count++;
47001+ atomic_inc(&port->count);
47002 dlci_get(dlci);
47003 dlci_get(dlci->gsm->dlci[0]);
47004 mux_get(dlci->gsm);
47005diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
47006index 6c7fe90..9241dab 100644
47007--- a/drivers/tty/n_tty.c
47008+++ b/drivers/tty/n_tty.c
47009@@ -2203,6 +2203,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
47010 {
47011 *ops = tty_ldisc_N_TTY;
47012 ops->owner = NULL;
47013- ops->refcount = ops->flags = 0;
47014+ atomic_set(&ops->refcount, 0);
47015+ ops->flags = 0;
47016 }
47017 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
47018diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
47019index abfd990..5ab5da9 100644
47020--- a/drivers/tty/pty.c
47021+++ b/drivers/tty/pty.c
47022@@ -796,8 +796,10 @@ static void __init unix98_pty_init(void)
47023 panic("Couldn't register Unix98 pts driver");
47024
47025 /* Now create the /dev/ptmx special device */
47026+ pax_open_kernel();
47027 tty_default_fops(&ptmx_fops);
47028- ptmx_fops.open = ptmx_open;
47029+ *(void **)&ptmx_fops.open = ptmx_open;
47030+ pax_close_kernel();
47031
47032 cdev_init(&ptmx_cdev, &ptmx_fops);
47033 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
47034diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
47035index 354564e..fe50d9a 100644
47036--- a/drivers/tty/rocket.c
47037+++ b/drivers/tty/rocket.c
47038@@ -914,7 +914,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
47039 tty->driver_data = info;
47040 tty_port_tty_set(port, tty);
47041
47042- if (port->count++ == 0) {
47043+ if (atomic_inc_return(&port->count) == 1) {
47044 atomic_inc(&rp_num_ports_open);
47045
47046 #ifdef ROCKET_DEBUG_OPEN
47047@@ -923,7 +923,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
47048 #endif
47049 }
47050 #ifdef ROCKET_DEBUG_OPEN
47051- printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
47052+ printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic_read(&info->port.count));
47053 #endif
47054
47055 /*
47056@@ -1515,7 +1515,7 @@ static void rp_hangup(struct tty_struct *tty)
47057 spin_unlock_irqrestore(&info->port.lock, flags);
47058 return;
47059 }
47060- if (info->port.count)
47061+ if (atomic_read(&info->port.count))
47062 atomic_dec(&rp_num_ports_open);
47063 clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
47064 spin_unlock_irqrestore(&info->port.lock, flags);
47065diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
47066index 1002054..dd644a8 100644
47067--- a/drivers/tty/serial/kgdboc.c
47068+++ b/drivers/tty/serial/kgdboc.c
47069@@ -24,8 +24,9 @@
47070 #define MAX_CONFIG_LEN 40
47071
47072 static struct kgdb_io kgdboc_io_ops;
47073+static struct kgdb_io kgdboc_io_ops_console;
47074
47075-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
47076+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
47077 static int configured = -1;
47078
47079 static char config[MAX_CONFIG_LEN];
47080@@ -151,6 +152,8 @@ static void cleanup_kgdboc(void)
47081 kgdboc_unregister_kbd();
47082 if (configured == 1)
47083 kgdb_unregister_io_module(&kgdboc_io_ops);
47084+ else if (configured == 2)
47085+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
47086 }
47087
47088 static int configure_kgdboc(void)
47089@@ -160,13 +163,13 @@ static int configure_kgdboc(void)
47090 int err;
47091 char *cptr = config;
47092 struct console *cons;
47093+ int is_console = 0;
47094
47095 err = kgdboc_option_setup(config);
47096 if (err || !strlen(config) || isspace(config[0]))
47097 goto noconfig;
47098
47099 err = -ENODEV;
47100- kgdboc_io_ops.is_console = 0;
47101 kgdb_tty_driver = NULL;
47102
47103 kgdboc_use_kms = 0;
47104@@ -187,7 +190,7 @@ static int configure_kgdboc(void)
47105 int idx;
47106 if (cons->device && cons->device(cons, &idx) == p &&
47107 idx == tty_line) {
47108- kgdboc_io_ops.is_console = 1;
47109+ is_console = 1;
47110 break;
47111 }
47112 cons = cons->next;
47113@@ -197,7 +200,13 @@ static int configure_kgdboc(void)
47114 kgdb_tty_line = tty_line;
47115
47116 do_register:
47117- err = kgdb_register_io_module(&kgdboc_io_ops);
47118+ if (is_console) {
47119+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
47120+ configured = 2;
47121+ } else {
47122+ err = kgdb_register_io_module(&kgdboc_io_ops);
47123+ configured = 1;
47124+ }
47125 if (err)
47126 goto noconfig;
47127
47128@@ -205,8 +214,6 @@ do_register:
47129 if (err)
47130 goto nmi_con_failed;
47131
47132- configured = 1;
47133-
47134 return 0;
47135
47136 nmi_con_failed:
47137@@ -223,7 +230,7 @@ noconfig:
47138 static int __init init_kgdboc(void)
47139 {
47140 /* Already configured? */
47141- if (configured == 1)
47142+ if (configured >= 1)
47143 return 0;
47144
47145 return configure_kgdboc();
47146@@ -272,7 +279,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
47147 if (config[len - 1] == '\n')
47148 config[len - 1] = '\0';
47149
47150- if (configured == 1)
47151+ if (configured >= 1)
47152 cleanup_kgdboc();
47153
47154 /* Go and configure with the new params. */
47155@@ -312,6 +319,15 @@ static struct kgdb_io kgdboc_io_ops = {
47156 .post_exception = kgdboc_post_exp_handler,
47157 };
47158
47159+static struct kgdb_io kgdboc_io_ops_console = {
47160+ .name = "kgdboc",
47161+ .read_char = kgdboc_get_char,
47162+ .write_char = kgdboc_put_char,
47163+ .pre_exception = kgdboc_pre_exp_handler,
47164+ .post_exception = kgdboc_post_exp_handler,
47165+ .is_console = 1
47166+};
47167+
47168 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
47169 /* This is only available if kgdboc is a built in for early debugging */
47170 static int __init kgdboc_early_init(char *opt)
47171diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
47172index 0c8a9fa..234a95f 100644
47173--- a/drivers/tty/serial/samsung.c
47174+++ b/drivers/tty/serial/samsung.c
47175@@ -453,11 +453,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
47176 }
47177 }
47178
47179+static int s3c64xx_serial_startup(struct uart_port *port);
47180 static int s3c24xx_serial_startup(struct uart_port *port)
47181 {
47182 struct s3c24xx_uart_port *ourport = to_ourport(port);
47183 int ret;
47184
47185+ /* Startup sequence is different for s3c64xx and higher SoC's */
47186+ if (s3c24xx_serial_has_interrupt_mask(port))
47187+ return s3c64xx_serial_startup(port);
47188+
47189 dbg("s3c24xx_serial_startup: port=%p (%08lx,%p)\n",
47190 port->mapbase, port->membase);
47191
47192@@ -1124,10 +1129,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
47193 /* setup info for port */
47194 port->dev = &platdev->dev;
47195
47196- /* Startup sequence is different for s3c64xx and higher SoC's */
47197- if (s3c24xx_serial_has_interrupt_mask(port))
47198- s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
47199-
47200 port->uartclk = 1;
47201
47202 if (cfg->uart_flags & UPF_CONS_FLOW) {
47203diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
47204index f87dbfd..42ad4b1 100644
47205--- a/drivers/tty/serial/serial_core.c
47206+++ b/drivers/tty/serial/serial_core.c
47207@@ -1454,7 +1454,7 @@ static void uart_hangup(struct tty_struct *tty)
47208 uart_flush_buffer(tty);
47209 uart_shutdown(tty, state);
47210 spin_lock_irqsave(&port->lock, flags);
47211- port->count = 0;
47212+ atomic_set(&port->count, 0);
47213 clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
47214 spin_unlock_irqrestore(&port->lock, flags);
47215 tty_port_tty_set(port, NULL);
47216@@ -1550,7 +1550,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
47217 goto end;
47218 }
47219
47220- port->count++;
47221+ atomic_inc(&port->count);
47222 if (!state->uart_port || state->uart_port->flags & UPF_DEAD) {
47223 retval = -ENXIO;
47224 goto err_dec_count;
47225@@ -1578,7 +1578,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
47226 /*
47227 * Make sure the device is in D0 state.
47228 */
47229- if (port->count == 1)
47230+ if (atomic_read(&port->count) == 1)
47231 uart_change_pm(state, UART_PM_STATE_ON);
47232
47233 /*
47234@@ -1596,7 +1596,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
47235 end:
47236 return retval;
47237 err_dec_count:
47238- port->count--;
47239+ atomic_dec(&port->count);
47240 mutex_unlock(&port->mutex);
47241 goto end;
47242 }
47243diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
47244index 8eaf1ab..85c030d 100644
47245--- a/drivers/tty/synclink.c
47246+++ b/drivers/tty/synclink.c
47247@@ -3090,7 +3090,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
47248
47249 if (debug_level >= DEBUG_LEVEL_INFO)
47250 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
47251- __FILE__,__LINE__, info->device_name, info->port.count);
47252+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
47253
47254 if (tty_port_close_start(&info->port, tty, filp) == 0)
47255 goto cleanup;
47256@@ -3108,7 +3108,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
47257 cleanup:
47258 if (debug_level >= DEBUG_LEVEL_INFO)
47259 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
47260- tty->driver->name, info->port.count);
47261+ tty->driver->name, atomic_read(&info->port.count));
47262
47263 } /* end of mgsl_close() */
47264
47265@@ -3207,8 +3207,8 @@ static void mgsl_hangup(struct tty_struct *tty)
47266
47267 mgsl_flush_buffer(tty);
47268 shutdown(info);
47269-
47270- info->port.count = 0;
47271+
47272+ atomic_set(&info->port.count, 0);
47273 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
47274 info->port.tty = NULL;
47275
47276@@ -3297,12 +3297,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
47277
47278 if (debug_level >= DEBUG_LEVEL_INFO)
47279 printk("%s(%d):block_til_ready before block on %s count=%d\n",
47280- __FILE__,__LINE__, tty->driver->name, port->count );
47281+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
47282
47283 spin_lock_irqsave(&info->irq_spinlock, flags);
47284 if (!tty_hung_up_p(filp)) {
47285 extra_count = true;
47286- port->count--;
47287+ atomic_dec(&port->count);
47288 }
47289 spin_unlock_irqrestore(&info->irq_spinlock, flags);
47290 port->blocked_open++;
47291@@ -3331,7 +3331,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
47292
47293 if (debug_level >= DEBUG_LEVEL_INFO)
47294 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
47295- __FILE__,__LINE__, tty->driver->name, port->count );
47296+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
47297
47298 tty_unlock(tty);
47299 schedule();
47300@@ -3343,12 +3343,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
47301
47302 /* FIXME: Racy on hangup during close wait */
47303 if (extra_count)
47304- port->count++;
47305+ atomic_inc(&port->count);
47306 port->blocked_open--;
47307
47308 if (debug_level >= DEBUG_LEVEL_INFO)
47309 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
47310- __FILE__,__LINE__, tty->driver->name, port->count );
47311+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
47312
47313 if (!retval)
47314 port->flags |= ASYNC_NORMAL_ACTIVE;
47315@@ -3400,7 +3400,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
47316
47317 if (debug_level >= DEBUG_LEVEL_INFO)
47318 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
47319- __FILE__,__LINE__,tty->driver->name, info->port.count);
47320+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
47321
47322 /* If port is closing, signal caller to try again */
47323 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
47324@@ -3419,10 +3419,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
47325 spin_unlock_irqrestore(&info->netlock, flags);
47326 goto cleanup;
47327 }
47328- info->port.count++;
47329+ atomic_inc(&info->port.count);
47330 spin_unlock_irqrestore(&info->netlock, flags);
47331
47332- if (info->port.count == 1) {
47333+ if (atomic_read(&info->port.count) == 1) {
47334 /* 1st open on this device, init hardware */
47335 retval = startup(info);
47336 if (retval < 0)
47337@@ -3446,8 +3446,8 @@ cleanup:
47338 if (retval) {
47339 if (tty->count == 1)
47340 info->port.tty = NULL; /* tty layer will release tty struct */
47341- if(info->port.count)
47342- info->port.count--;
47343+ if (atomic_read(&info->port.count))
47344+ atomic_dec(&info->port.count);
47345 }
47346
47347 return retval;
47348@@ -7665,7 +7665,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
47349 unsigned short new_crctype;
47350
47351 /* return error if TTY interface open */
47352- if (info->port.count)
47353+ if (atomic_read(&info->port.count))
47354 return -EBUSY;
47355
47356 switch (encoding)
47357@@ -7760,7 +7760,7 @@ static int hdlcdev_open(struct net_device *dev)
47358
47359 /* arbitrate between network and tty opens */
47360 spin_lock_irqsave(&info->netlock, flags);
47361- if (info->port.count != 0 || info->netcount != 0) {
47362+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
47363 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
47364 spin_unlock_irqrestore(&info->netlock, flags);
47365 return -EBUSY;
47366@@ -7846,7 +7846,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
47367 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
47368
47369 /* return error if TTY interface open */
47370- if (info->port.count)
47371+ if (atomic_read(&info->port.count))
47372 return -EBUSY;
47373
47374 if (cmd != SIOCWANDEV)
47375diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
47376index 1abf946..1ee34fc 100644
47377--- a/drivers/tty/synclink_gt.c
47378+++ b/drivers/tty/synclink_gt.c
47379@@ -670,7 +670,7 @@ static int open(struct tty_struct *tty, struct file *filp)
47380 tty->driver_data = info;
47381 info->port.tty = tty;
47382
47383- DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
47384+ DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
47385
47386 /* If port is closing, signal caller to try again */
47387 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
47388@@ -691,10 +691,10 @@ static int open(struct tty_struct *tty, struct file *filp)
47389 mutex_unlock(&info->port.mutex);
47390 goto cleanup;
47391 }
47392- info->port.count++;
47393+ atomic_inc(&info->port.count);
47394 spin_unlock_irqrestore(&info->netlock, flags);
47395
47396- if (info->port.count == 1) {
47397+ if (atomic_read(&info->port.count) == 1) {
47398 /* 1st open on this device, init hardware */
47399 retval = startup(info);
47400 if (retval < 0) {
47401@@ -715,8 +715,8 @@ cleanup:
47402 if (retval) {
47403 if (tty->count == 1)
47404 info->port.tty = NULL; /* tty layer will release tty struct */
47405- if(info->port.count)
47406- info->port.count--;
47407+ if(atomic_read(&info->port.count))
47408+ atomic_dec(&info->port.count);
47409 }
47410
47411 DBGINFO(("%s open rc=%d\n", info->device_name, retval));
47412@@ -729,7 +729,7 @@ static void close(struct tty_struct *tty, struct file *filp)
47413
47414 if (sanity_check(info, tty->name, "close"))
47415 return;
47416- DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
47417+ DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
47418
47419 if (tty_port_close_start(&info->port, tty, filp) == 0)
47420 goto cleanup;
47421@@ -746,7 +746,7 @@ static void close(struct tty_struct *tty, struct file *filp)
47422 tty_port_close_end(&info->port, tty);
47423 info->port.tty = NULL;
47424 cleanup:
47425- DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
47426+ DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
47427 }
47428
47429 static void hangup(struct tty_struct *tty)
47430@@ -764,7 +764,7 @@ static void hangup(struct tty_struct *tty)
47431 shutdown(info);
47432
47433 spin_lock_irqsave(&info->port.lock, flags);
47434- info->port.count = 0;
47435+ atomic_set(&info->port.count, 0);
47436 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
47437 info->port.tty = NULL;
47438 spin_unlock_irqrestore(&info->port.lock, flags);
47439@@ -1449,7 +1449,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
47440 unsigned short new_crctype;
47441
47442 /* return error if TTY interface open */
47443- if (info->port.count)
47444+ if (atomic_read(&info->port.count))
47445 return -EBUSY;
47446
47447 DBGINFO(("%s hdlcdev_attach\n", info->device_name));
47448@@ -1544,7 +1544,7 @@ static int hdlcdev_open(struct net_device *dev)
47449
47450 /* arbitrate between network and tty opens */
47451 spin_lock_irqsave(&info->netlock, flags);
47452- if (info->port.count != 0 || info->netcount != 0) {
47453+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
47454 DBGINFO(("%s hdlc_open busy\n", dev->name));
47455 spin_unlock_irqrestore(&info->netlock, flags);
47456 return -EBUSY;
47457@@ -1629,7 +1629,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
47458 DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
47459
47460 /* return error if TTY interface open */
47461- if (info->port.count)
47462+ if (atomic_read(&info->port.count))
47463 return -EBUSY;
47464
47465 if (cmd != SIOCWANDEV)
47466@@ -2413,7 +2413,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
47467 if (port == NULL)
47468 continue;
47469 spin_lock(&port->lock);
47470- if ((port->port.count || port->netcount) &&
47471+ if ((atomic_read(&port->port.count) || port->netcount) &&
47472 port->pending_bh && !port->bh_running &&
47473 !port->bh_requested) {
47474 DBGISR(("%s bh queued\n", port->device_name));
47475@@ -3302,7 +3302,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
47476 spin_lock_irqsave(&info->lock, flags);
47477 if (!tty_hung_up_p(filp)) {
47478 extra_count = true;
47479- port->count--;
47480+ atomic_dec(&port->count);
47481 }
47482 spin_unlock_irqrestore(&info->lock, flags);
47483 port->blocked_open++;
47484@@ -3339,7 +3339,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
47485 remove_wait_queue(&port->open_wait, &wait);
47486
47487 if (extra_count)
47488- port->count++;
47489+ atomic_inc(&port->count);
47490 port->blocked_open--;
47491
47492 if (!retval)
47493diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
47494index ff17138..e38b41e 100644
47495--- a/drivers/tty/synclinkmp.c
47496+++ b/drivers/tty/synclinkmp.c
47497@@ -750,7 +750,7 @@ static int open(struct tty_struct *tty, struct file *filp)
47498
47499 if (debug_level >= DEBUG_LEVEL_INFO)
47500 printk("%s(%d):%s open(), old ref count = %d\n",
47501- __FILE__,__LINE__,tty->driver->name, info->port.count);
47502+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
47503
47504 /* If port is closing, signal caller to try again */
47505 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
47506@@ -769,10 +769,10 @@ static int open(struct tty_struct *tty, struct file *filp)
47507 spin_unlock_irqrestore(&info->netlock, flags);
47508 goto cleanup;
47509 }
47510- info->port.count++;
47511+ atomic_inc(&info->port.count);
47512 spin_unlock_irqrestore(&info->netlock, flags);
47513
47514- if (info->port.count == 1) {
47515+ if (atomic_read(&info->port.count) == 1) {
47516 /* 1st open on this device, init hardware */
47517 retval = startup(info);
47518 if (retval < 0)
47519@@ -796,8 +796,8 @@ cleanup:
47520 if (retval) {
47521 if (tty->count == 1)
47522 info->port.tty = NULL; /* tty layer will release tty struct */
47523- if(info->port.count)
47524- info->port.count--;
47525+ if(atomic_read(&info->port.count))
47526+ atomic_dec(&info->port.count);
47527 }
47528
47529 return retval;
47530@@ -815,7 +815,7 @@ static void close(struct tty_struct *tty, struct file *filp)
47531
47532 if (debug_level >= DEBUG_LEVEL_INFO)
47533 printk("%s(%d):%s close() entry, count=%d\n",
47534- __FILE__,__LINE__, info->device_name, info->port.count);
47535+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
47536
47537 if (tty_port_close_start(&info->port, tty, filp) == 0)
47538 goto cleanup;
47539@@ -834,7 +834,7 @@ static void close(struct tty_struct *tty, struct file *filp)
47540 cleanup:
47541 if (debug_level >= DEBUG_LEVEL_INFO)
47542 printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
47543- tty->driver->name, info->port.count);
47544+ tty->driver->name, atomic_read(&info->port.count));
47545 }
47546
47547 /* Called by tty_hangup() when a hangup is signaled.
47548@@ -857,7 +857,7 @@ static void hangup(struct tty_struct *tty)
47549 shutdown(info);
47550
47551 spin_lock_irqsave(&info->port.lock, flags);
47552- info->port.count = 0;
47553+ atomic_set(&info->port.count, 0);
47554 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
47555 info->port.tty = NULL;
47556 spin_unlock_irqrestore(&info->port.lock, flags);
47557@@ -1565,7 +1565,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
47558 unsigned short new_crctype;
47559
47560 /* return error if TTY interface open */
47561- if (info->port.count)
47562+ if (atomic_read(&info->port.count))
47563 return -EBUSY;
47564
47565 switch (encoding)
47566@@ -1660,7 +1660,7 @@ static int hdlcdev_open(struct net_device *dev)
47567
47568 /* arbitrate between network and tty opens */
47569 spin_lock_irqsave(&info->netlock, flags);
47570- if (info->port.count != 0 || info->netcount != 0) {
47571+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
47572 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
47573 spin_unlock_irqrestore(&info->netlock, flags);
47574 return -EBUSY;
47575@@ -1746,7 +1746,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
47576 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
47577
47578 /* return error if TTY interface open */
47579- if (info->port.count)
47580+ if (atomic_read(&info->port.count))
47581 return -EBUSY;
47582
47583 if (cmd != SIOCWANDEV)
47584@@ -2620,7 +2620,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
47585 * do not request bottom half processing if the
47586 * device is not open in a normal mode.
47587 */
47588- if ( port && (port->port.count || port->netcount) &&
47589+ if ( port && (atomic_read(&port->port.count) || port->netcount) &&
47590 port->pending_bh && !port->bh_running &&
47591 !port->bh_requested ) {
47592 if ( debug_level >= DEBUG_LEVEL_ISR )
47593@@ -3318,12 +3318,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
47594
47595 if (debug_level >= DEBUG_LEVEL_INFO)
47596 printk("%s(%d):%s block_til_ready() before block, count=%d\n",
47597- __FILE__,__LINE__, tty->driver->name, port->count );
47598+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
47599
47600 spin_lock_irqsave(&info->lock, flags);
47601 if (!tty_hung_up_p(filp)) {
47602 extra_count = true;
47603- port->count--;
47604+ atomic_dec(&port->count);
47605 }
47606 spin_unlock_irqrestore(&info->lock, flags);
47607 port->blocked_open++;
47608@@ -3352,7 +3352,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
47609
47610 if (debug_level >= DEBUG_LEVEL_INFO)
47611 printk("%s(%d):%s block_til_ready() count=%d\n",
47612- __FILE__,__LINE__, tty->driver->name, port->count );
47613+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
47614
47615 tty_unlock(tty);
47616 schedule();
47617@@ -3363,12 +3363,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
47618 remove_wait_queue(&port->open_wait, &wait);
47619
47620 if (extra_count)
47621- port->count++;
47622+ atomic_inc(&port->count);
47623 port->blocked_open--;
47624
47625 if (debug_level >= DEBUG_LEVEL_INFO)
47626 printk("%s(%d):%s block_til_ready() after, count=%d\n",
47627- __FILE__,__LINE__, tty->driver->name, port->count );
47628+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
47629
47630 if (!retval)
47631 port->flags |= ASYNC_NORMAL_ACTIVE;
47632diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
47633index b51c154..17d55d1 100644
47634--- a/drivers/tty/sysrq.c
47635+++ b/drivers/tty/sysrq.c
47636@@ -1022,7 +1022,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
47637 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
47638 size_t count, loff_t *ppos)
47639 {
47640- if (count) {
47641+ if (count && capable(CAP_SYS_ADMIN)) {
47642 char c;
47643
47644 if (get_user(c, buf))
47645diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
47646index 4476682..d77e748 100644
47647--- a/drivers/tty/tty_io.c
47648+++ b/drivers/tty/tty_io.c
47649@@ -3466,7 +3466,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
47650
47651 void tty_default_fops(struct file_operations *fops)
47652 {
47653- *fops = tty_fops;
47654+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
47655 }
47656
47657 /*
47658diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
47659index 1afe192..73d2c20 100644
47660--- a/drivers/tty/tty_ldisc.c
47661+++ b/drivers/tty/tty_ldisc.c
47662@@ -66,7 +66,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
47663 raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
47664 tty_ldiscs[disc] = new_ldisc;
47665 new_ldisc->num = disc;
47666- new_ldisc->refcount = 0;
47667+ atomic_set(&new_ldisc->refcount, 0);
47668 raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
47669
47670 return ret;
47671@@ -94,7 +94,7 @@ int tty_unregister_ldisc(int disc)
47672 return -EINVAL;
47673
47674 raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
47675- if (tty_ldiscs[disc]->refcount)
47676+ if (atomic_read(&tty_ldiscs[disc]->refcount))
47677 ret = -EBUSY;
47678 else
47679 tty_ldiscs[disc] = NULL;
47680@@ -115,7 +115,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
47681 if (ldops) {
47682 ret = ERR_PTR(-EAGAIN);
47683 if (try_module_get(ldops->owner)) {
47684- ldops->refcount++;
47685+ atomic_inc(&ldops->refcount);
47686 ret = ldops;
47687 }
47688 }
47689@@ -128,7 +128,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
47690 unsigned long flags;
47691
47692 raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
47693- ldops->refcount--;
47694+ atomic_dec(&ldops->refcount);
47695 module_put(ldops->owner);
47696 raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
47697 }
47698@@ -196,7 +196,7 @@ static inline void tty_ldisc_put(struct tty_ldisc *ld)
47699 /* unreleased reader reference(s) will cause this WARN */
47700 WARN_ON(!atomic_dec_and_test(&ld->users));
47701
47702- ld->ops->refcount--;
47703+ atomic_dec(&ld->ops->refcount);
47704 module_put(ld->ops->owner);
47705 kfree(ld);
47706 raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
47707diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
47708index f597e88..b7f68ed 100644
47709--- a/drivers/tty/tty_port.c
47710+++ b/drivers/tty/tty_port.c
47711@@ -232,7 +232,7 @@ void tty_port_hangup(struct tty_port *port)
47712 unsigned long flags;
47713
47714 spin_lock_irqsave(&port->lock, flags);
47715- port->count = 0;
47716+ atomic_set(&port->count, 0);
47717 port->flags &= ~ASYNC_NORMAL_ACTIVE;
47718 tty = port->tty;
47719 if (tty)
47720@@ -390,7 +390,7 @@ int tty_port_block_til_ready(struct tty_port *port,
47721 /* The port lock protects the port counts */
47722 spin_lock_irqsave(&port->lock, flags);
47723 if (!tty_hung_up_p(filp))
47724- port->count--;
47725+ atomic_dec(&port->count);
47726 port->blocked_open++;
47727 spin_unlock_irqrestore(&port->lock, flags);
47728
47729@@ -432,7 +432,7 @@ int tty_port_block_til_ready(struct tty_port *port,
47730 we must not mess that up further */
47731 spin_lock_irqsave(&port->lock, flags);
47732 if (!tty_hung_up_p(filp))
47733- port->count++;
47734+ atomic_inc(&port->count);
47735 port->blocked_open--;
47736 if (retval == 0)
47737 port->flags |= ASYNC_NORMAL_ACTIVE;
47738@@ -466,19 +466,19 @@ int tty_port_close_start(struct tty_port *port,
47739 return 0;
47740 }
47741
47742- if (tty->count == 1 && port->count != 1) {
47743+ if (tty->count == 1 && atomic_read(&port->count) != 1) {
47744 printk(KERN_WARNING
47745 "tty_port_close_start: tty->count = 1 port count = %d.\n",
47746- port->count);
47747- port->count = 1;
47748+ atomic_read(&port->count));
47749+ atomic_set(&port->count, 1);
47750 }
47751- if (--port->count < 0) {
47752+ if (atomic_dec_return(&port->count) < 0) {
47753 printk(KERN_WARNING "tty_port_close_start: count = %d\n",
47754- port->count);
47755- port->count = 0;
47756+ atomic_read(&port->count));
47757+ atomic_set(&port->count, 0);
47758 }
47759
47760- if (port->count) {
47761+ if (atomic_read(&port->count)) {
47762 spin_unlock_irqrestore(&port->lock, flags);
47763 if (port->ops->drop)
47764 port->ops->drop(port);
47765@@ -564,7 +564,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
47766 {
47767 spin_lock_irq(&port->lock);
47768 if (!tty_hung_up_p(filp))
47769- ++port->count;
47770+ atomic_inc(&port->count);
47771 spin_unlock_irq(&port->lock);
47772 tty_port_tty_set(port, tty);
47773
47774diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
47775index a9af1b9a..1e08e7f 100644
47776--- a/drivers/tty/vt/keyboard.c
47777+++ b/drivers/tty/vt/keyboard.c
47778@@ -647,6 +647,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
47779 kbd->kbdmode == VC_OFF) &&
47780 value != KVAL(K_SAK))
47781 return; /* SAK is allowed even in raw mode */
47782+
47783+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
47784+ {
47785+ void *func = fn_handler[value];
47786+ if (func == fn_show_state || func == fn_show_ptregs ||
47787+ func == fn_show_mem)
47788+ return;
47789+ }
47790+#endif
47791+
47792 fn_handler[value](vc);
47793 }
47794
47795@@ -1795,9 +1805,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
47796 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
47797 return -EFAULT;
47798
47799- if (!capable(CAP_SYS_TTY_CONFIG))
47800- perm = 0;
47801-
47802 switch (cmd) {
47803 case KDGKBENT:
47804 /* Ensure another thread doesn't free it under us */
47805@@ -1812,6 +1819,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
47806 spin_unlock_irqrestore(&kbd_event_lock, flags);
47807 return put_user(val, &user_kbe->kb_value);
47808 case KDSKBENT:
47809+ if (!capable(CAP_SYS_TTY_CONFIG))
47810+ perm = 0;
47811+
47812 if (!perm)
47813 return -EPERM;
47814 if (!i && v == K_NOSUCHMAP) {
47815@@ -1902,9 +1912,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
47816 int i, j, k;
47817 int ret;
47818
47819- if (!capable(CAP_SYS_TTY_CONFIG))
47820- perm = 0;
47821-
47822 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
47823 if (!kbs) {
47824 ret = -ENOMEM;
47825@@ -1938,6 +1945,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
47826 kfree(kbs);
47827 return ((p && *p) ? -EOVERFLOW : 0);
47828 case KDSKBSENT:
47829+ if (!capable(CAP_SYS_TTY_CONFIG))
47830+ perm = 0;
47831+
47832 if (!perm) {
47833 ret = -EPERM;
47834 goto reterr;
47835diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
47836index b645c47..a55c182 100644
47837--- a/drivers/uio/uio.c
47838+++ b/drivers/uio/uio.c
47839@@ -25,6 +25,7 @@
47840 #include <linux/kobject.h>
47841 #include <linux/cdev.h>
47842 #include <linux/uio_driver.h>
47843+#include <asm/local.h>
47844
47845 #define UIO_MAX_DEVICES (1U << MINORBITS)
47846
47847@@ -32,10 +33,10 @@ struct uio_device {
47848 struct module *owner;
47849 struct device *dev;
47850 int minor;
47851- atomic_t event;
47852+ atomic_unchecked_t event;
47853 struct fasync_struct *async_queue;
47854 wait_queue_head_t wait;
47855- int vma_count;
47856+ local_t vma_count;
47857 struct uio_info *info;
47858 struct kobject *map_dir;
47859 struct kobject *portio_dir;
47860@@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
47861 struct device_attribute *attr, char *buf)
47862 {
47863 struct uio_device *idev = dev_get_drvdata(dev);
47864- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
47865+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
47866 }
47867
47868 static struct device_attribute uio_class_attributes[] = {
47869@@ -398,7 +399,7 @@ void uio_event_notify(struct uio_info *info)
47870 {
47871 struct uio_device *idev = info->uio_dev;
47872
47873- atomic_inc(&idev->event);
47874+ atomic_inc_unchecked(&idev->event);
47875 wake_up_interruptible(&idev->wait);
47876 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
47877 }
47878@@ -451,7 +452,7 @@ static int uio_open(struct inode *inode, struct file *filep)
47879 }
47880
47881 listener->dev = idev;
47882- listener->event_count = atomic_read(&idev->event);
47883+ listener->event_count = atomic_read_unchecked(&idev->event);
47884 filep->private_data = listener;
47885
47886 if (idev->info->open) {
47887@@ -502,7 +503,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
47888 return -EIO;
47889
47890 poll_wait(filep, &idev->wait, wait);
47891- if (listener->event_count != atomic_read(&idev->event))
47892+ if (listener->event_count != atomic_read_unchecked(&idev->event))
47893 return POLLIN | POLLRDNORM;
47894 return 0;
47895 }
47896@@ -527,7 +528,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
47897 do {
47898 set_current_state(TASK_INTERRUPTIBLE);
47899
47900- event_count = atomic_read(&idev->event);
47901+ event_count = atomic_read_unchecked(&idev->event);
47902 if (event_count != listener->event_count) {
47903 if (copy_to_user(buf, &event_count, count))
47904 retval = -EFAULT;
47905@@ -596,13 +597,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
47906 static void uio_vma_open(struct vm_area_struct *vma)
47907 {
47908 struct uio_device *idev = vma->vm_private_data;
47909- idev->vma_count++;
47910+ local_inc(&idev->vma_count);
47911 }
47912
47913 static void uio_vma_close(struct vm_area_struct *vma)
47914 {
47915 struct uio_device *idev = vma->vm_private_data;
47916- idev->vma_count--;
47917+ local_dec(&idev->vma_count);
47918 }
47919
47920 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
47921@@ -809,7 +810,7 @@ int __uio_register_device(struct module *owner,
47922 idev->owner = owner;
47923 idev->info = info;
47924 init_waitqueue_head(&idev->wait);
47925- atomic_set(&idev->event, 0);
47926+ atomic_set_unchecked(&idev->event, 0);
47927
47928 ret = uio_get_minor(idev);
47929 if (ret)
47930diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
47931index 8a7eb77..c00402f 100644
47932--- a/drivers/usb/atm/cxacru.c
47933+++ b/drivers/usb/atm/cxacru.c
47934@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
47935 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
47936 if (ret < 2)
47937 return -EINVAL;
47938- if (index < 0 || index > 0x7f)
47939+ if (index > 0x7f)
47940 return -EINVAL;
47941 pos += tmp;
47942
47943diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
47944index d3527dd..26effa2 100644
47945--- a/drivers/usb/atm/usbatm.c
47946+++ b/drivers/usb/atm/usbatm.c
47947@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
47948 if (printk_ratelimit())
47949 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
47950 __func__, vpi, vci);
47951- atomic_inc(&vcc->stats->rx_err);
47952+ atomic_inc_unchecked(&vcc->stats->rx_err);
47953 return;
47954 }
47955
47956@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
47957 if (length > ATM_MAX_AAL5_PDU) {
47958 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
47959 __func__, length, vcc);
47960- atomic_inc(&vcc->stats->rx_err);
47961+ atomic_inc_unchecked(&vcc->stats->rx_err);
47962 goto out;
47963 }
47964
47965@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
47966 if (sarb->len < pdu_length) {
47967 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
47968 __func__, pdu_length, sarb->len, vcc);
47969- atomic_inc(&vcc->stats->rx_err);
47970+ atomic_inc_unchecked(&vcc->stats->rx_err);
47971 goto out;
47972 }
47973
47974 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
47975 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
47976 __func__, vcc);
47977- atomic_inc(&vcc->stats->rx_err);
47978+ atomic_inc_unchecked(&vcc->stats->rx_err);
47979 goto out;
47980 }
47981
47982@@ -389,7 +389,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
47983 if (printk_ratelimit())
47984 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
47985 __func__, length);
47986- atomic_inc(&vcc->stats->rx_drop);
47987+ atomic_inc_unchecked(&vcc->stats->rx_drop);
47988 goto out;
47989 }
47990
47991@@ -417,7 +417,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
47992
47993 vcc->push(vcc, skb);
47994
47995- atomic_inc(&vcc->stats->rx);
47996+ atomic_inc_unchecked(&vcc->stats->rx);
47997 out:
47998 skb_trim(sarb, 0);
47999 }
48000@@ -623,7 +623,7 @@ static void usbatm_tx_process(unsigned long data)
48001 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
48002
48003 usbatm_pop(vcc, skb);
48004- atomic_inc(&vcc->stats->tx);
48005+ atomic_inc_unchecked(&vcc->stats->tx);
48006
48007 skb = skb_dequeue(&instance->sndqueue);
48008 }
48009@@ -779,11 +779,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
48010 if (!left--)
48011 return sprintf(page,
48012 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
48013- atomic_read(&atm_dev->stats.aal5.tx),
48014- atomic_read(&atm_dev->stats.aal5.tx_err),
48015- atomic_read(&atm_dev->stats.aal5.rx),
48016- atomic_read(&atm_dev->stats.aal5.rx_err),
48017- atomic_read(&atm_dev->stats.aal5.rx_drop));
48018+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
48019+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
48020+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
48021+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
48022+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
48023
48024 if (!left--) {
48025 if (instance->disconnected)
48026diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
48027index 2a3bbdf..91d72cf 100644
48028--- a/drivers/usb/core/devices.c
48029+++ b/drivers/usb/core/devices.c
48030@@ -126,7 +126,7 @@ static const char format_endpt[] =
48031 * time it gets called.
48032 */
48033 static struct device_connect_event {
48034- atomic_t count;
48035+ atomic_unchecked_t count;
48036 wait_queue_head_t wait;
48037 } device_event = {
48038 .count = ATOMIC_INIT(1),
48039@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
48040
48041 void usbfs_conn_disc_event(void)
48042 {
48043- atomic_add(2, &device_event.count);
48044+ atomic_add_unchecked(2, &device_event.count);
48045 wake_up(&device_event.wait);
48046 }
48047
48048@@ -652,7 +652,7 @@ static unsigned int usb_device_poll(struct file *file,
48049
48050 poll_wait(file, &device_event.wait, wait);
48051
48052- event_count = atomic_read(&device_event.count);
48053+ event_count = atomic_read_unchecked(&device_event.count);
48054 if (file->f_version != event_count) {
48055 file->f_version = event_count;
48056 return POLLIN | POLLRDNORM;
48057diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
48058index d53547d..6a22d02 100644
48059--- a/drivers/usb/core/hcd.c
48060+++ b/drivers/usb/core/hcd.c
48061@@ -1526,7 +1526,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
48062 */
48063 usb_get_urb(urb);
48064 atomic_inc(&urb->use_count);
48065- atomic_inc(&urb->dev->urbnum);
48066+ atomic_inc_unchecked(&urb->dev->urbnum);
48067 usbmon_urb_submit(&hcd->self, urb);
48068
48069 /* NOTE requirements on root-hub callers (usbfs and the hub
48070@@ -1553,7 +1553,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
48071 urb->hcpriv = NULL;
48072 INIT_LIST_HEAD(&urb->urb_list);
48073 atomic_dec(&urb->use_count);
48074- atomic_dec(&urb->dev->urbnum);
48075+ atomic_dec_unchecked(&urb->dev->urbnum);
48076 if (atomic_read(&urb->reject))
48077 wake_up(&usb_kill_urb_queue);
48078 usb_put_urb(urb);
48079diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
48080index da2905a..834a569 100644
48081--- a/drivers/usb/core/hub.c
48082+++ b/drivers/usb/core/hub.c
48083@@ -27,6 +27,7 @@
48084 #include <linux/freezer.h>
48085 #include <linux/random.h>
48086 #include <linux/pm_qos.h>
48087+#include <linux/grsecurity.h>
48088
48089 #include <asm/uaccess.h>
48090 #include <asm/byteorder.h>
48091@@ -4424,6 +4425,10 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
48092 goto done;
48093 return;
48094 }
48095+
48096+ if (gr_handle_new_usb())
48097+ goto done;
48098+
48099 if (hub_is_superspeed(hub->hdev))
48100 unit_load = 150;
48101 else
48102diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
48103index 444d30e..f15c850 100644
48104--- a/drivers/usb/core/message.c
48105+++ b/drivers/usb/core/message.c
48106@@ -129,7 +129,7 @@ static int usb_internal_control_msg(struct usb_device *usb_dev,
48107 * method can wait for it to complete. Since you don't have a handle on the
48108 * URB used, you can't cancel the request.
48109 */
48110-int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
48111+int __intentional_overflow(-1) usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
48112 __u8 requesttype, __u16 value, __u16 index, void *data,
48113 __u16 size, int timeout)
48114 {
48115diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
48116index aa38db4..0a08682 100644
48117--- a/drivers/usb/core/sysfs.c
48118+++ b/drivers/usb/core/sysfs.c
48119@@ -239,7 +239,7 @@ show_urbnum(struct device *dev, struct device_attribute *attr, char *buf)
48120 struct usb_device *udev;
48121
48122 udev = to_usb_device(dev);
48123- return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
48124+ return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum));
48125 }
48126 static DEVICE_ATTR(urbnum, S_IRUGO, show_urbnum, NULL);
48127
48128diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
48129index b10da72..43aa0b2 100644
48130--- a/drivers/usb/core/usb.c
48131+++ b/drivers/usb/core/usb.c
48132@@ -389,7 +389,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
48133 set_dev_node(&dev->dev, dev_to_node(bus->controller));
48134 dev->state = USB_STATE_ATTACHED;
48135 dev->lpm_disable_count = 1;
48136- atomic_set(&dev->urbnum, 0);
48137+ atomic_set_unchecked(&dev->urbnum, 0);
48138
48139 INIT_LIST_HEAD(&dev->ep0.urb_list);
48140 dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
48141diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
48142index f77083f..f3e2e34 100644
48143--- a/drivers/usb/dwc3/gadget.c
48144+++ b/drivers/usb/dwc3/gadget.c
48145@@ -550,8 +550,6 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
48146 if (!usb_endpoint_xfer_isoc(desc))
48147 return 0;
48148
48149- memset(&trb_link, 0, sizeof(trb_link));
48150-
48151 /* Link TRB for ISOC. The HWO bit is never reset */
48152 trb_st_hw = &dep->trb_pool[0];
48153
48154diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
48155index 5e29dde..eca992f 100644
48156--- a/drivers/usb/early/ehci-dbgp.c
48157+++ b/drivers/usb/early/ehci-dbgp.c
48158@@ -98,7 +98,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
48159
48160 #ifdef CONFIG_KGDB
48161 static struct kgdb_io kgdbdbgp_io_ops;
48162-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
48163+static struct kgdb_io kgdbdbgp_io_ops_console;
48164+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
48165 #else
48166 #define dbgp_kgdb_mode (0)
48167 #endif
48168@@ -1047,6 +1048,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
48169 .write_char = kgdbdbgp_write_char,
48170 };
48171
48172+static struct kgdb_io kgdbdbgp_io_ops_console = {
48173+ .name = "kgdbdbgp",
48174+ .read_char = kgdbdbgp_read_char,
48175+ .write_char = kgdbdbgp_write_char,
48176+ .is_console = 1
48177+};
48178+
48179 static int kgdbdbgp_wait_time;
48180
48181 static int __init kgdbdbgp_parse_config(char *str)
48182@@ -1062,8 +1070,10 @@ static int __init kgdbdbgp_parse_config(char *str)
48183 ptr++;
48184 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
48185 }
48186- kgdb_register_io_module(&kgdbdbgp_io_ops);
48187- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
48188+ if (early_dbgp_console.index != -1)
48189+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
48190+ else
48191+ kgdb_register_io_module(&kgdbdbgp_io_ops);
48192
48193 return 0;
48194 }
48195diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
48196index b369292..9f3ba40 100644
48197--- a/drivers/usb/gadget/u_serial.c
48198+++ b/drivers/usb/gadget/u_serial.c
48199@@ -733,9 +733,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
48200 spin_lock_irq(&port->port_lock);
48201
48202 /* already open? Great. */
48203- if (port->port.count) {
48204+ if (atomic_read(&port->port.count)) {
48205 status = 0;
48206- port->port.count++;
48207+ atomic_inc(&port->port.count);
48208
48209 /* currently opening/closing? wait ... */
48210 } else if (port->openclose) {
48211@@ -794,7 +794,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
48212 tty->driver_data = port;
48213 port->port.tty = tty;
48214
48215- port->port.count = 1;
48216+ atomic_set(&port->port.count, 1);
48217 port->openclose = false;
48218
48219 /* if connected, start the I/O stream */
48220@@ -836,11 +836,11 @@ static void gs_close(struct tty_struct *tty, struct file *file)
48221
48222 spin_lock_irq(&port->port_lock);
48223
48224- if (port->port.count != 1) {
48225- if (port->port.count == 0)
48226+ if (atomic_read(&port->port.count) != 1) {
48227+ if (atomic_read(&port->port.count) == 0)
48228 WARN_ON(1);
48229 else
48230- --port->port.count;
48231+ atomic_dec(&port->port.count);
48232 goto exit;
48233 }
48234
48235@@ -850,7 +850,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
48236 * and sleep if necessary
48237 */
48238 port->openclose = true;
48239- port->port.count = 0;
48240+ atomic_set(&port->port.count, 0);
48241
48242 gser = port->port_usb;
48243 if (gser && gser->disconnect)
48244@@ -1066,7 +1066,7 @@ static int gs_closed(struct gs_port *port)
48245 int cond;
48246
48247 spin_lock_irq(&port->port_lock);
48248- cond = (port->port.count == 0) && !port->openclose;
48249+ cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
48250 spin_unlock_irq(&port->port_lock);
48251 return cond;
48252 }
48253@@ -1209,7 +1209,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
48254 /* if it's already open, start I/O ... and notify the serial
48255 * protocol about open/close status (connect/disconnect).
48256 */
48257- if (port->port.count) {
48258+ if (atomic_read(&port->port.count)) {
48259 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
48260 gs_start_io(port);
48261 if (gser->connect)
48262@@ -1256,7 +1256,7 @@ void gserial_disconnect(struct gserial *gser)
48263
48264 port->port_usb = NULL;
48265 gser->ioport = NULL;
48266- if (port->port.count > 0 || port->openclose) {
48267+ if (atomic_read(&port->port.count) > 0 || port->openclose) {
48268 wake_up_interruptible(&port->drain_wait);
48269 if (port->port.tty)
48270 tty_hangup(port->port.tty);
48271@@ -1272,7 +1272,7 @@ void gserial_disconnect(struct gserial *gser)
48272
48273 /* finally, free any unused/unusable I/O buffers */
48274 spin_lock_irqsave(&port->port_lock, flags);
48275- if (port->port.count == 0 && !port->openclose)
48276+ if (atomic_read(&port->port.count) == 0 && !port->openclose)
48277 gs_buf_free(&port->port_write_buf);
48278 gs_free_requests(gser->out, &port->read_pool, NULL);
48279 gs_free_requests(gser->out, &port->read_queue, NULL);
48280diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
48281index 5f3bcd3..bfca43f 100644
48282--- a/drivers/usb/serial/console.c
48283+++ b/drivers/usb/serial/console.c
48284@@ -124,7 +124,7 @@ static int usb_console_setup(struct console *co, char *options)
48285
48286 info->port = port;
48287
48288- ++port->port.count;
48289+ atomic_inc(&port->port.count);
48290 if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
48291 if (serial->type->set_termios) {
48292 /*
48293@@ -174,7 +174,7 @@ static int usb_console_setup(struct console *co, char *options)
48294 }
48295 /* Now that any required fake tty operations are completed restore
48296 * the tty port count */
48297- --port->port.count;
48298+ atomic_dec(&port->port.count);
48299 /* The console is special in terms of closing the device so
48300 * indicate this port is now acting as a system console. */
48301 port->port.console = 1;
48302@@ -187,7 +187,7 @@ static int usb_console_setup(struct console *co, char *options)
48303 free_tty:
48304 kfree(tty);
48305 reset_open_count:
48306- port->port.count = 0;
48307+ atomic_set(&port->port.count, 0);
48308 usb_autopm_put_interface(serial->interface);
48309 error_get_interface:
48310 usb_serial_put(serial);
48311diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
48312index 75f70f0..d467e1a 100644
48313--- a/drivers/usb/storage/usb.h
48314+++ b/drivers/usb/storage/usb.h
48315@@ -63,7 +63,7 @@ struct us_unusual_dev {
48316 __u8 useProtocol;
48317 __u8 useTransport;
48318 int (*initFunction)(struct us_data *);
48319-};
48320+} __do_const;
48321
48322
48323 /* Dynamic bitflag definitions (us->dflags): used in set_bit() etc. */
48324diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
48325index d6bea3e..60b250e 100644
48326--- a/drivers/usb/wusbcore/wa-hc.h
48327+++ b/drivers/usb/wusbcore/wa-hc.h
48328@@ -192,7 +192,7 @@ struct wahc {
48329 struct list_head xfer_delayed_list;
48330 spinlock_t xfer_list_lock;
48331 struct work_struct xfer_work;
48332- atomic_t xfer_id_count;
48333+ atomic_unchecked_t xfer_id_count;
48334 };
48335
48336
48337@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
48338 INIT_LIST_HEAD(&wa->xfer_delayed_list);
48339 spin_lock_init(&wa->xfer_list_lock);
48340 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
48341- atomic_set(&wa->xfer_id_count, 1);
48342+ atomic_set_unchecked(&wa->xfer_id_count, 1);
48343 }
48344
48345 /**
48346diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
48347index 028fc83..65bb105 100644
48348--- a/drivers/usb/wusbcore/wa-xfer.c
48349+++ b/drivers/usb/wusbcore/wa-xfer.c
48350@@ -296,7 +296,7 @@ out:
48351 */
48352 static void wa_xfer_id_init(struct wa_xfer *xfer)
48353 {
48354- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
48355+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
48356 }
48357
48358 /*
48359diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
48360index 5174eba..86e764a 100644
48361--- a/drivers/vhost/vringh.c
48362+++ b/drivers/vhost/vringh.c
48363@@ -800,7 +800,7 @@ static inline int getu16_kern(u16 *val, const u16 *p)
48364
48365 static inline int putu16_kern(u16 *p, u16 val)
48366 {
48367- ACCESS_ONCE(*p) = val;
48368+ ACCESS_ONCE_RW(*p) = val;
48369 return 0;
48370 }
48371
48372diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
48373index 8c55011..eed4ae1a 100644
48374--- a/drivers/video/aty/aty128fb.c
48375+++ b/drivers/video/aty/aty128fb.c
48376@@ -149,7 +149,7 @@ enum {
48377 };
48378
48379 /* Must match above enum */
48380-static char * const r128_family[] = {
48381+static const char * const r128_family[] = {
48382 "AGP",
48383 "PCI",
48384 "PRO AGP",
48385diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
48386index 4f27fdc..d3537e6 100644
48387--- a/drivers/video/aty/atyfb_base.c
48388+++ b/drivers/video/aty/atyfb_base.c
48389@@ -1325,10 +1325,14 @@ static int atyfb_set_par(struct fb_info *info)
48390 par->accel_flags = var->accel_flags; /* hack */
48391
48392 if (var->accel_flags) {
48393- info->fbops->fb_sync = atyfb_sync;
48394+ pax_open_kernel();
48395+ *(void **)&info->fbops->fb_sync = atyfb_sync;
48396+ pax_close_kernel();
48397 info->flags &= ~FBINFO_HWACCEL_DISABLED;
48398 } else {
48399- info->fbops->fb_sync = NULL;
48400+ pax_open_kernel();
48401+ *(void **)&info->fbops->fb_sync = NULL;
48402+ pax_close_kernel();
48403 info->flags |= FBINFO_HWACCEL_DISABLED;
48404 }
48405
48406diff --git a/drivers/video/aty/mach64_cursor.c b/drivers/video/aty/mach64_cursor.c
48407index 95ec042..e6affdd 100644
48408--- a/drivers/video/aty/mach64_cursor.c
48409+++ b/drivers/video/aty/mach64_cursor.c
48410@@ -7,6 +7,7 @@
48411 #include <linux/string.h>
48412
48413 #include <asm/io.h>
48414+#include <asm/pgtable.h>
48415
48416 #ifdef __sparc__
48417 #include <asm/fbio.h>
48418@@ -208,7 +209,9 @@ int aty_init_cursor(struct fb_info *info)
48419 info->sprite.buf_align = 16; /* and 64 lines tall. */
48420 info->sprite.flags = FB_PIXMAP_IO;
48421
48422- info->fbops->fb_cursor = atyfb_cursor;
48423+ pax_open_kernel();
48424+ *(void **)&info->fbops->fb_cursor = atyfb_cursor;
48425+ pax_close_kernel();
48426
48427 return 0;
48428 }
48429diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
48430index c74e7aa..e3c2790 100644
48431--- a/drivers/video/backlight/backlight.c
48432+++ b/drivers/video/backlight/backlight.c
48433@@ -304,7 +304,7 @@ struct backlight_device *backlight_device_register(const char *name,
48434 new_bd->dev.class = backlight_class;
48435 new_bd->dev.parent = parent;
48436 new_bd->dev.release = bl_device_release;
48437- dev_set_name(&new_bd->dev, name);
48438+ dev_set_name(&new_bd->dev, "%s", name);
48439 dev_set_drvdata(&new_bd->dev, devdata);
48440
48441 /* Set default properties */
48442diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
48443index bca6ccc..252107e 100644
48444--- a/drivers/video/backlight/kb3886_bl.c
48445+++ b/drivers/video/backlight/kb3886_bl.c
48446@@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo;
48447 static unsigned long kb3886bl_flags;
48448 #define KB3886BL_SUSPENDED 0x01
48449
48450-static struct dmi_system_id __initdata kb3886bl_device_table[] = {
48451+static const struct dmi_system_id __initconst kb3886bl_device_table[] = {
48452 {
48453 .ident = "Sahara Touch-iT",
48454 .matches = {
48455diff --git a/drivers/video/backlight/lcd.c b/drivers/video/backlight/lcd.c
48456index 34fb6bd..3649fd9 100644
48457--- a/drivers/video/backlight/lcd.c
48458+++ b/drivers/video/backlight/lcd.c
48459@@ -219,7 +219,7 @@ struct lcd_device *lcd_device_register(const char *name, struct device *parent,
48460 new_ld->dev.class = lcd_class;
48461 new_ld->dev.parent = parent;
48462 new_ld->dev.release = lcd_device_release;
48463- dev_set_name(&new_ld->dev, name);
48464+ dev_set_name(&new_ld->dev, "%s", name);
48465 dev_set_drvdata(&new_ld->dev, devdata);
48466
48467 rc = device_register(&new_ld->dev);
48468diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
48469index 900aa4e..6d49418 100644
48470--- a/drivers/video/fb_defio.c
48471+++ b/drivers/video/fb_defio.c
48472@@ -206,7 +206,9 @@ void fb_deferred_io_init(struct fb_info *info)
48473
48474 BUG_ON(!fbdefio);
48475 mutex_init(&fbdefio->lock);
48476- info->fbops->fb_mmap = fb_deferred_io_mmap;
48477+ pax_open_kernel();
48478+ *(void **)&info->fbops->fb_mmap = fb_deferred_io_mmap;
48479+ pax_close_kernel();
48480 INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
48481 INIT_LIST_HEAD(&fbdefio->pagelist);
48482 if (fbdefio->delay == 0) /* set a default of 1 s */
48483@@ -237,7 +239,7 @@ void fb_deferred_io_cleanup(struct fb_info *info)
48484 page->mapping = NULL;
48485 }
48486
48487- info->fbops->fb_mmap = NULL;
48488+ *(void **)&info->fbops->fb_mmap = NULL;
48489 mutex_destroy(&fbdefio->lock);
48490 }
48491 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
48492diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
48493index 5c3960d..15cf8fc 100644
48494--- a/drivers/video/fbcmap.c
48495+++ b/drivers/video/fbcmap.c
48496@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
48497 rc = -ENODEV;
48498 goto out;
48499 }
48500- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
48501- !info->fbops->fb_setcmap)) {
48502+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
48503 rc = -EINVAL;
48504 goto out1;
48505 }
48506diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
48507index 098bfc6..796841d 100644
48508--- a/drivers/video/fbmem.c
48509+++ b/drivers/video/fbmem.c
48510@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
48511 image->dx += image->width + 8;
48512 }
48513 } else if (rotate == FB_ROTATE_UD) {
48514- for (x = 0; x < num && image->dx >= 0; x++) {
48515+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
48516 info->fbops->fb_imageblit(info, image);
48517 image->dx -= image->width + 8;
48518 }
48519@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
48520 image->dy += image->height + 8;
48521 }
48522 } else if (rotate == FB_ROTATE_CCW) {
48523- for (x = 0; x < num && image->dy >= 0; x++) {
48524+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
48525 info->fbops->fb_imageblit(info, image);
48526 image->dy -= image->height + 8;
48527 }
48528@@ -1166,7 +1166,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
48529 return -EFAULT;
48530 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
48531 return -EINVAL;
48532- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
48533+ if (con2fb.framebuffer >= FB_MAX)
48534 return -EINVAL;
48535 if (!registered_fb[con2fb.framebuffer])
48536 request_module("fb%d", con2fb.framebuffer);
48537diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
48538index 7672d2e..b56437f 100644
48539--- a/drivers/video/i810/i810_accel.c
48540+++ b/drivers/video/i810/i810_accel.c
48541@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
48542 }
48543 }
48544 printk("ringbuffer lockup!!!\n");
48545+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
48546 i810_report_error(mmio);
48547 par->dev_flags |= LOCKUP;
48548 info->pixmap.scan_align = 1;
48549diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
48550index 3c14e43..eafa544 100644
48551--- a/drivers/video/logo/logo_linux_clut224.ppm
48552+++ b/drivers/video/logo/logo_linux_clut224.ppm
48553@@ -1,1604 +1,1123 @@
48554 P3
48555-# Standard 224-color Linux logo
48556 80 80
48557 255
[raw image data elided: this hunk deletes the 1,604 lines of RGB triples making up the standard 80x80 clut224 Linux logo and adds 1,123 lines of replacement-logo triples; the added rows (runs of "4 4 4 ...") continue below]
50174+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50175+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50176+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50177+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50178+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50179+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50180+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50181+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50182+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50183+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50184+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50185+4 4 4 4 4 4
50186+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50187+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50188+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50189+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50190+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50191+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50192+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50193+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50194+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50195+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50196+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50197+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50198+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50199+4 4 4 4 4 4
50200+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50201+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50202+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50203+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50204+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50205+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50206+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50207+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50208+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50209+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50210+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50211+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50212+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50213+4 4 4 4 4 4
50214+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50215+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50216+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50217+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50218+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50219+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50220+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50221+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50222+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50223+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50224+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50225+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50226+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50227+4 4 4 4 4 4
50228+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50229+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50230+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50231+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50232+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50233+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50234+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50235+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50236+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50237+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50238+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50239+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50240+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50241+4 4 4 4 4 4
50242+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50243+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50244+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50245+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50246+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
50247+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
50248+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50249+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50250+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50251+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
50252+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
50253+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
50254+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50255+4 4 4 4 4 4
50256+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50257+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50258+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50259+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50260+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
50261+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
50262+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50263+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50264+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50265+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
50266+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
50267+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
50268+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50269+4 4 4 4 4 4
50270+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50271+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50272+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50273+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50274+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
50275+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
50276+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
50277+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50278+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50279+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
50280+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
50281+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
50282+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
50283+4 4 4 4 4 4
50284+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50285+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50286+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50287+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
50288+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
50289+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
50290+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
50291+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50292+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
50293+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
50294+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
50295+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
50296+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
50297+4 4 4 4 4 4
50298+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50299+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50300+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50301+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
50302+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
50303+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
50304+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
50305+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
50306+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
50307+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
50308+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
50309+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
50310+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
50311+4 4 4 4 4 4
50312+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50313+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50314+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
50315+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
50316+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
50317+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
50318+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
50319+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
50320+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
50321+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
50322+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
50323+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
50324+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
50325+4 4 4 4 4 4
50326+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50327+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50328+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
50329+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
50330+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
50331+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
50332+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
50333+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
50334+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
50335+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
50336+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
50337+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
50338+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
50339+4 4 4 4 4 4
50340+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50341+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50342+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
50343+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
50344+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
50345+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
50346+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
50347+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
50348+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
50349+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
50350+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
50351+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
50352+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
50353+4 4 4 4 4 4
50354+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50355+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50356+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
50357+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
50358+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
50359+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
50360+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
50361+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
50362+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
50363+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
50364+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
50365+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
50366+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
50367+4 4 4 4 4 4
50368+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50369+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50370+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
50371+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
50372+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
50373+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
50374+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
50375+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
50376+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
50377+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
50378+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
50379+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
50380+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
50381+4 4 4 4 4 4
50382+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50383+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
50384+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
50385+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
50386+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
50387+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
50388+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
50389+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
50390+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
50391+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
50392+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
50393+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
50394+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
50395+4 4 4 4 4 4
50396+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50397+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
50398+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
50399+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
50400+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
50401+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
50402+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
50403+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
50404+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
50405+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
50406+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
50407+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
50408+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
50409+0 0 0 4 4 4
50410+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
50411+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
50412+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
50413+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
50414+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
50415+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
50416+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
50417+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
50418+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
50419+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
50420+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
50421+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
50422+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
50423+2 0 0 0 0 0
50424+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
50425+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
50426+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
50427+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
50428+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
50429+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
50430+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
50431+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
50432+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
50433+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
50434+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
50435+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
50436+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
50437+37 38 37 0 0 0
50438+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
50439+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
50440+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
50441+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
50442+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
50443+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
50444+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
50445+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
50446+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
50447+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
50448+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
50449+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
50450+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
50451+85 115 134 4 0 0
50452+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
50453+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
50454+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
50455+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
50456+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
50457+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
50458+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
50459+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
50460+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
50461+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
50462+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
50463+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
50464+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
50465+60 73 81 4 0 0
50466+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
50467+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
50468+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
50469+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
50470+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
50471+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
50472+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
50473+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
50474+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
50475+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
50476+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
50477+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
50478+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
50479+16 19 21 4 0 0
50480+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
50481+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
50482+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
50483+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
50484+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
50485+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
50486+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
50487+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
50488+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
50489+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
50490+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
50491+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
50492+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
50493+4 0 0 4 3 3
50494+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
50495+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
50496+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
50497+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
50498+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
50499+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
50500+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
50501+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
50502+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
50503+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
50504+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
50505+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
50506+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
50507+3 2 2 4 4 4
50508+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
50509+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
50510+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
50511+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
50512+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
50513+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
50514+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
50515+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
50516+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
50517+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
50518+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
50519+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
50520+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
50521+4 4 4 4 4 4
50522+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
50523+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
50524+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
50525+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
50526+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
50527+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
50528+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
50529+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
50530+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
50531+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
50532+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
50533+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
50534+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
50535+4 4 4 4 4 4
50536+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
50537+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
50538+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
50539+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
50540+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
50541+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
50542+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
50543+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
50544+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
50545+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
50546+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
50547+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
50548+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
50549+5 5 5 5 5 5
50550+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
50551+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
50552+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
50553+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
50554+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
50555+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
50556+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
50557+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
50558+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
50559+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
50560+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
50561+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
50562+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
50563+5 5 5 4 4 4
50564+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
50565+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
50566+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
50567+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
50568+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
50569+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
50570+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
50571+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
50572+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
50573+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
50574+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
50575+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
50576+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50577+4 4 4 4 4 4
50578+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
50579+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
50580+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
50581+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
50582+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
50583+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
50584+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
50585+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
50586+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
50587+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
50588+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
50589+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
50590+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50591+4 4 4 4 4 4
50592+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
50593+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
50594+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
50595+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
50596+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
50597+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
50598+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
50599+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
50600+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
50601+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
50602+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
50603+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50604+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50605+4 4 4 4 4 4
50606+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
50607+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
50608+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
50609+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
50610+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
50611+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
50612+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
50613+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
50614+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
50615+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
50616+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
50617+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50618+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50619+4 4 4 4 4 4
50620+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
50621+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
50622+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
50623+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
50624+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
50625+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
50626+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
50627+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
50628+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
50629+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
50630+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50631+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50632+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50633+4 4 4 4 4 4
50634+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
50635+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
50636+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
50637+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
50638+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
50639+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
50640+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
50641+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
50642+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
50643+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
50644+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
50645+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50646+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50647+4 4 4 4 4 4
50648+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
50649+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
50650+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
50651+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
50652+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
50653+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
50654+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
50655+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
50656+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
50657+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
50658+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
50659+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50660+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50661+4 4 4 4 4 4
50662+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
50663+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
50664+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
50665+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
50666+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
50667+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
50668+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
50669+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
50670+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
50671+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
50672+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50673+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50674+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50675+4 4 4 4 4 4
50676+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
50677+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
50678+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
50679+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
50680+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
50681+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
50682+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
50683+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
50684+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
50685+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
50686+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50687+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50688+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50689+4 4 4 4 4 4
50690+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
50691+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
50692+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
50693+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
50694+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
50695+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
50696+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
50697+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
50698+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
50699+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
50700+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50701+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50702+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50703+4 4 4 4 4 4
50704+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
50705+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
50706+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
50707+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
50708+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
50709+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
50710+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
50711+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
50712+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
50713+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
50714+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50715+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50716+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50717+4 4 4 4 4 4
50718+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
50719+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
50720+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
50721+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
50722+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
50723+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
50724+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
50725+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
50726+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
50727+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
50728+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50729+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50730+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50731+4 4 4 4 4 4
50732+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
50733+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
50734+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
50735+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
50736+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
50737+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
50738+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
50739+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
50740+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
50741+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
50742+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50743+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50744+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50745+4 4 4 4 4 4
50746+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
50747+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
50748+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
50749+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
50750+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
50751+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
50752+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
50753+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
50754+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
50755+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
50756+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50757+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50758+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50759+4 4 4 4 4 4
50760+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
50761+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
50762+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
50763+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
50764+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
50765+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
50766+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
50767+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
50768+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
50769+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
50770+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50771+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50772+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50773+4 4 4 4 4 4
50774+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
50775+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
50776+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
50777+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
50778+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
50779+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
50780+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
50781+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
50782+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
50783+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
50784+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50785+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50786+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50787+4 4 4 4 4 4
50788+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
50789+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
50790+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
50791+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
50792+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
50793+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
50794+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
50795+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
50796+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
50797+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
50798+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50799+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50800+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50801+4 4 4 4 4 4
50802+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
50803+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
50804+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
50805+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
50806+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
50807+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
50808+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
50809+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
50810+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
50811+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
50812+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50813+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50814+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50815+4 4 4 4 4 4
50816+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
50817+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
50818+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
50819+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
50820+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
50821+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
50822+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
50823+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
50824+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
50825+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
50826+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50827+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50828+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50829+4 4 4 4 4 4
50830+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
50831+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
50832+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
50833+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
50834+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
50835+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
50836+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
50837+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
50838+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
50839+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
50840+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50841+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50842+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50843+4 4 4 4 4 4
50844+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
50845+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
50846+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
50847+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
50848+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
50849+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
50850+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
50851+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
50852+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
50853+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
50854+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50855+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50856+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50857+4 4 4 4 4 4
50858+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
50859+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
50860+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
50861+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
50862+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
50863+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
50864+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
50865+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
50866+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
50867+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
50868+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50869+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50870+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50871+4 4 4 4 4 4
50872+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
50873+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
50874+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
50875+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
50876+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
50877+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
50878+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
50879+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
50880+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
50881+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
50882+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50883+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50884+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50885+4 4 4 4 4 4
50886+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
50887+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
50888+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
50889+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
50890+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
50891+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
50892+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
50893+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
50894+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
50895+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
50896+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50897+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50898+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50899+4 4 4 4 4 4
50900+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
50901+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
50902+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
50903+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
50904+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
50905+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
50906+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
50907+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
50908+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
50909+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
50910+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50911+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50912+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50913+4 4 4 4 4 4
50914+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
50915+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
50916+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
50917+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
50918+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
50919+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
50920+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
50921+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
50922+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
50923+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
50924+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
50925+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50926+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50927+4 4 4 4 4 4
50928+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
50929+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
50930+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
50931+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
50932+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
50933+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
50934+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
50935+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
50936+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
50937+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
50938+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50939+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50940+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50941+4 4 4 4 4 4
50942+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
50943+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
50944+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
50945+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
50946+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
50947+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
50948+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50949+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
50950+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
50951+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
50952+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
50953+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50954+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50955+4 4 4 4 4 4
50956+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
50957+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
50958+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
50959+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
50960+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
50961+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
50962+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
50963+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
50964+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
50965+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
50966+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50967+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50968+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50969+4 4 4 4 4 4
50970+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
50971+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
50972+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
50973+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
50974+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
50975+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
50976+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
50977+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
50978+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
50979+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
50980+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50981+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50982+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50983+4 4 4 4 4 4
50984+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
50985+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
50986+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
50987+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
50988+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
50989+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
50990+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
50991+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
50992+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
50993+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
50994+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50995+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50996+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
50997+4 4 4 4 4 4
50998+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
50999+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
51000+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
51001+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
51002+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
51003+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
51004+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
51005+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
51006+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
51007+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
51008+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51009+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51010+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51011+4 4 4 4 4 4
51012+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
51013+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
51014+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
51015+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
51016+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
51017+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
51018+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
51019+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
51020+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
51021+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
51022+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51023+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51024+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51025+4 4 4 4 4 4
51026+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
51027+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
51028+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
51029+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
51030+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
51031+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
51032+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
51033+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
51034+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
51035+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51036+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51037+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51038+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51039+4 4 4 4 4 4
51040+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
51041+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
51042+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
51043+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
51044+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
51045+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
51046+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
51047+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
51048+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
51049+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51050+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51051+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51052+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51053+4 4 4 4 4 4
51054+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
51055+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
51056+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
51057+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
51058+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
51059+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
51060+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
51061+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
51062+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51063+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51064+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51065+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51066+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51067+4 4 4 4 4 4
51068+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
51069+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
51070+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
51071+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
51072+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
51073+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
51074+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
51075+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
51076+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51077+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51078+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51079+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51080+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51081+4 4 4 4 4 4
51082+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
51083+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
51084+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
51085+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
51086+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
51087+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
51088+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
51089+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
51090+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51091+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51092+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51093+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51094+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51095+4 4 4 4 4 4
51096+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
51097+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
51098+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
51099+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
51100+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
51101+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
51102+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
51103+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
51104+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51105+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
[... rows of "+R G B" pixel triples omitted: raw PPM image data added by the patch, apparently the replacement boot-logo bitmap shipped under drivers/video/logo ...]
51278diff --git a/drivers/video/mb862xx/mb862xxfb_accel.c b/drivers/video/mb862xx/mb862xxfb_accel.c
51279index fe92eed..106e085 100644
51280--- a/drivers/video/mb862xx/mb862xxfb_accel.c
51281+++ b/drivers/video/mb862xx/mb862xxfb_accel.c
51282@@ -312,14 +312,18 @@ void mb862xxfb_init_accel(struct fb_info *info, int xres)
51283 struct mb862xxfb_par *par = info->par;
51284
51285 if (info->var.bits_per_pixel == 32) {
51286- info->fbops->fb_fillrect = cfb_fillrect;
51287- info->fbops->fb_copyarea = cfb_copyarea;
51288- info->fbops->fb_imageblit = cfb_imageblit;
51289+ pax_open_kernel();
51290+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
51291+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
51292+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
51293+ pax_close_kernel();
51294 } else {
51295 outreg(disp, GC_L0EM, 3);
51296- info->fbops->fb_fillrect = mb86290fb_fillrect;
51297- info->fbops->fb_copyarea = mb86290fb_copyarea;
51298- info->fbops->fb_imageblit = mb86290fb_imageblit;
51299+ pax_open_kernel();
51300+ *(void **)&info->fbops->fb_fillrect = mb86290fb_fillrect;
51301+ *(void **)&info->fbops->fb_copyarea = mb86290fb_copyarea;
51302+ *(void **)&info->fbops->fb_imageblit = mb86290fb_imageblit;
51303+ pax_close_kernel();
51304 }
51305 outreg(draw, GDC_REG_DRAW_BASE, 0);
51306 outreg(draw, GDC_REG_MODE_MISC, 0x8000);
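The hunk above is the first of many instances of a pattern this patch applies across the framebuffer drivers: the constify plugin turns function-pointer tables such as fb_ops into read-only data, so any runtime update must briefly reopen the kernel for writing. A minimal sketch of the idiom, assuming a KERNEXEC/constify build (install_accel_ops and my_fillrect are hypothetical names, not from the patch):

	/* Sketch only, assuming CONFIG_PAX_KERNEXEC plus the constify
	 * plugin. fbops points into memory the plugin made read-only; a
	 * plain store would fault, so writes are bracketed by
	 * pax_open_kernel()/pax_close_kernel(), which temporarily lift
	 * kernel write protection (on x86 typically via CR0.WP). */
	static void install_accel_ops(struct fb_info *info)	/* hypothetical */
	{
		pax_open_kernel();
		/* the *(void **)& cast strips the const qualifier that
		 * the plugin added to the function-pointer member */
		*(void **)&info->fbops->fb_fillrect = my_fillrect;
		pax_close_kernel();
	}

The nvidia.c, s1d13xxxfb.c, smscufx.c and udlfb.c hunks that follow are all mechanical applications of this same transform.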
51307diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
51308index ff22871..b129bed 100644
51309--- a/drivers/video/nvidia/nvidia.c
51310+++ b/drivers/video/nvidia/nvidia.c
51311@@ -669,19 +669,23 @@ static int nvidiafb_set_par(struct fb_info *info)
51312 info->fix.line_length = (info->var.xres_virtual *
51313 info->var.bits_per_pixel) >> 3;
51314 if (info->var.accel_flags) {
51315- info->fbops->fb_imageblit = nvidiafb_imageblit;
51316- info->fbops->fb_fillrect = nvidiafb_fillrect;
51317- info->fbops->fb_copyarea = nvidiafb_copyarea;
51318- info->fbops->fb_sync = nvidiafb_sync;
51319+ pax_open_kernel();
51320+ *(void **)&info->fbops->fb_imageblit = nvidiafb_imageblit;
51321+ *(void **)&info->fbops->fb_fillrect = nvidiafb_fillrect;
51322+ *(void **)&info->fbops->fb_copyarea = nvidiafb_copyarea;
51323+ *(void **)&info->fbops->fb_sync = nvidiafb_sync;
51324+ pax_close_kernel();
51325 info->pixmap.scan_align = 4;
51326 info->flags &= ~FBINFO_HWACCEL_DISABLED;
51327 info->flags |= FBINFO_READS_FAST;
51328 NVResetGraphics(info);
51329 } else {
51330- info->fbops->fb_imageblit = cfb_imageblit;
51331- info->fbops->fb_fillrect = cfb_fillrect;
51332- info->fbops->fb_copyarea = cfb_copyarea;
51333- info->fbops->fb_sync = NULL;
51334+ pax_open_kernel();
51335+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
51336+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
51337+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
51338+ *(void **)&info->fbops->fb_sync = NULL;
51339+ pax_close_kernel();
51340 info->pixmap.scan_align = 1;
51341 info->flags |= FBINFO_HWACCEL_DISABLED;
51342 info->flags &= ~FBINFO_READS_FAST;
51343@@ -1173,8 +1177,11 @@ static int nvidia_set_fbinfo(struct fb_info *info)
51344 info->pixmap.size = 8 * 1024;
51345 info->pixmap.flags = FB_PIXMAP_SYSTEM;
51346
51347- if (!hwcur)
51348- info->fbops->fb_cursor = NULL;
51349+ if (!hwcur) {
51350+ pax_open_kernel();
51351+ *(void **)&info->fbops->fb_cursor = NULL;
51352+ pax_close_kernel();
51353+ }
51354
51355 info->var.accel_flags = (!noaccel);
51356
51357diff --git a/drivers/video/output.c b/drivers/video/output.c
51358index 0d6f2cd..6285b97 100644
51359--- a/drivers/video/output.c
51360+++ b/drivers/video/output.c
51361@@ -97,7 +97,7 @@ struct output_device *video_output_register(const char *name,
51362 new_dev->props = op;
51363 new_dev->dev.class = &video_output_class;
51364 new_dev->dev.parent = dev;
51365- dev_set_name(&new_dev->dev, name);
51366+ dev_set_name(&new_dev->dev, "%s", name);
51367 dev_set_drvdata(&new_dev->dev, devdata);
51368 ret_code = device_register(&new_dev->dev);
51369 if (ret_code) {
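The one-line output.c change is a classic format-string fix rather than a PaX change: name is caller-supplied, so it must be passed as an argument, never as the format. An illustrative sketch (untrusted_name is a hypothetical variable standing in for name):

	/* If untrusted_name contains "%x" or "%n", using it as the format
	 * string lets it drive the printf engine; a constant "%s" format
	 * treats it as plain data. */
	dev_set_name(&new_dev->dev, "%s", untrusted_name);	/* safe   */
	/* dev_set_name(&new_dev->dev, untrusted_name);		   unsafe */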
51370diff --git a/drivers/video/s1d13xxxfb.c b/drivers/video/s1d13xxxfb.c
51371index 05c2dc3..ea1f391 100644
51372--- a/drivers/video/s1d13xxxfb.c
51373+++ b/drivers/video/s1d13xxxfb.c
51374@@ -881,8 +881,10 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
51375
51376 switch(prod_id) {
51377 case S1D13506_PROD_ID: /* activate acceleration */
51378- s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
51379- s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
51380+ pax_open_kernel();
51381+ *(void **)&s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
51382+ *(void **)&s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
51383+ pax_close_kernel();
51384 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
51385 FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
51386 break;
51387diff --git a/drivers/video/smscufx.c b/drivers/video/smscufx.c
51388index b2b33fc..f9f4658 100644
51389--- a/drivers/video/smscufx.c
51390+++ b/drivers/video/smscufx.c
51391@@ -1175,7 +1175,9 @@ static int ufx_ops_release(struct fb_info *info, int user)
51392 fb_deferred_io_cleanup(info);
51393 kfree(info->fbdefio);
51394 info->fbdefio = NULL;
51395- info->fbops->fb_mmap = ufx_ops_mmap;
51396+ pax_open_kernel();
51397+ *(void **)&info->fbops->fb_mmap = ufx_ops_mmap;
51398+ pax_close_kernel();
51399 }
51400
51401 pr_debug("released /dev/fb%d user=%d count=%d",
51402diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
51403index ec03e72..f578436 100644
51404--- a/drivers/video/udlfb.c
51405+++ b/drivers/video/udlfb.c
51406@@ -623,11 +623,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
51407 dlfb_urb_completion(urb);
51408
51409 error:
51410- atomic_add(bytes_sent, &dev->bytes_sent);
51411- atomic_add(bytes_identical, &dev->bytes_identical);
51412- atomic_add(width*height*2, &dev->bytes_rendered);
51413+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
51414+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
51415+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
51416 end_cycles = get_cycles();
51417- atomic_add(((unsigned int) ((end_cycles - start_cycles)
51418+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
51419 >> 10)), /* Kcycles */
51420 &dev->cpu_kcycles_used);
51421
51422@@ -748,11 +748,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
51423 dlfb_urb_completion(urb);
51424
51425 error:
51426- atomic_add(bytes_sent, &dev->bytes_sent);
51427- atomic_add(bytes_identical, &dev->bytes_identical);
51428- atomic_add(bytes_rendered, &dev->bytes_rendered);
51429+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
51430+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
51431+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
51432 end_cycles = get_cycles();
51433- atomic_add(((unsigned int) ((end_cycles - start_cycles)
51434+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
51435 >> 10)), /* Kcycles */
51436 &dev->cpu_kcycles_used);
51437 }
51438@@ -993,7 +993,9 @@ static int dlfb_ops_release(struct fb_info *info, int user)
51439 fb_deferred_io_cleanup(info);
51440 kfree(info->fbdefio);
51441 info->fbdefio = NULL;
51442- info->fbops->fb_mmap = dlfb_ops_mmap;
51443+ pax_open_kernel();
51444+ *(void **)&info->fbops->fb_mmap = dlfb_ops_mmap;
51445+ pax_close_kernel();
51446 }
51447
51448 pr_warn("released /dev/fb%d user=%d count=%d\n",
51449@@ -1376,7 +1378,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
51450 struct fb_info *fb_info = dev_get_drvdata(fbdev);
51451 struct dlfb_data *dev = fb_info->par;
51452 return snprintf(buf, PAGE_SIZE, "%u\n",
51453- atomic_read(&dev->bytes_rendered));
51454+ atomic_read_unchecked(&dev->bytes_rendered));
51455 }
51456
51457 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
51458@@ -1384,7 +1386,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
51459 struct fb_info *fb_info = dev_get_drvdata(fbdev);
51460 struct dlfb_data *dev = fb_info->par;
51461 return snprintf(buf, PAGE_SIZE, "%u\n",
51462- atomic_read(&dev->bytes_identical));
51463+ atomic_read_unchecked(&dev->bytes_identical));
51464 }
51465
51466 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
51467@@ -1392,7 +1394,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
51468 struct fb_info *fb_info = dev_get_drvdata(fbdev);
51469 struct dlfb_data *dev = fb_info->par;
51470 return snprintf(buf, PAGE_SIZE, "%u\n",
51471- atomic_read(&dev->bytes_sent));
51472+ atomic_read_unchecked(&dev->bytes_sent));
51473 }
51474
51475 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
51476@@ -1400,7 +1402,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
51477 struct fb_info *fb_info = dev_get_drvdata(fbdev);
51478 struct dlfb_data *dev = fb_info->par;
51479 return snprintf(buf, PAGE_SIZE, "%u\n",
51480- atomic_read(&dev->cpu_kcycles_used));
51481+ atomic_read_unchecked(&dev->cpu_kcycles_used));
51482 }
51483
51484 static ssize_t edid_show(
51485@@ -1460,10 +1462,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
51486 struct fb_info *fb_info = dev_get_drvdata(fbdev);
51487 struct dlfb_data *dev = fb_info->par;
51488
51489- atomic_set(&dev->bytes_rendered, 0);
51490- atomic_set(&dev->bytes_identical, 0);
51491- atomic_set(&dev->bytes_sent, 0);
51492- atomic_set(&dev->cpu_kcycles_used, 0);
51493+ atomic_set_unchecked(&dev->bytes_rendered, 0);
51494+ atomic_set_unchecked(&dev->bytes_identical, 0);
51495+ atomic_set_unchecked(&dev->bytes_sent, 0);
51496+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
51497
51498 return count;
51499 }
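The udlfb hunks above swap every statistics counter to the _unchecked atomic API. Under PAX_REFCOUNT, plain atomic_t operations trap on overflow to catch reference-count bugs; counters that may legitimately wrap have to opt out. A sketch of the convention (tx_bytes and account_tx are hypothetical names):

	/* Assumes CONFIG_PAX_REFCOUNT: atomic_add() on a plain atomic_t
	 * would raise an overflow report once the counter wraps, so
	 * free-running statistics use atomic_unchecked_t instead. */
	static atomic_unchecked_t tx_bytes = ATOMIC_INIT(0);	/* hypothetical */

	static void account_tx(unsigned int len)
	{
		atomic_add_unchecked(len, &tx_bytes);	/* wrapping is fine here */
	}

The matching atomic_read_unchecked()/atomic_set_unchecked() calls in the sysfs show/store handlers above complete the type change.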
51500diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
51501index e328a61..1b08ecb 100644
51502--- a/drivers/video/uvesafb.c
51503+++ b/drivers/video/uvesafb.c
51504@@ -19,6 +19,7 @@
51505 #include <linux/io.h>
51506 #include <linux/mutex.h>
51507 #include <linux/slab.h>
51508+#include <linux/moduleloader.h>
51509 #include <video/edid.h>
51510 #include <video/uvesafb.h>
51511 #ifdef CONFIG_X86
51512@@ -569,10 +570,32 @@ static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
51513 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
51514 par->pmi_setpal = par->ypan = 0;
51515 } else {
51516+
51517+#ifdef CONFIG_PAX_KERNEXEC
51518+#ifdef CONFIG_MODULES
51519+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
51520+#endif
51521+ if (!par->pmi_code) {
51522+ par->pmi_setpal = par->ypan = 0;
51523+ return 0;
51524+ }
51525+#endif
51526+
51527 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
51528 + task->t.regs.edi);
51529+
51530+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
51531+ pax_open_kernel();
51532+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
51533+ pax_close_kernel();
51534+
51535+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
51536+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
51537+#else
51538 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
51539 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
51540+#endif
51541+
51542 printk(KERN_INFO "uvesafb: protected mode interface info at "
51543 "%04x:%04x\n",
51544 (u16)task->t.regs.es, (u16)task->t.regs.edi);
51545@@ -817,13 +840,14 @@ static int uvesafb_vbe_init(struct fb_info *info)
51546 par->ypan = ypan;
51547
51548 if (par->pmi_setpal || par->ypan) {
51549+#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
51550 if (__supported_pte_mask & _PAGE_NX) {
51551 par->pmi_setpal = par->ypan = 0;
51552 printk(KERN_WARNING "uvesafb: NX protection is actively."
51553 "We have better not to use the PMI.\n");
51554- } else {
51555+ } else
51556+#endif
51557 uvesafb_vbe_getpmi(task, par);
51558- }
51559 }
51560 #else
51561 /* The protected mode interface is not available on non-x86. */
51562@@ -1457,8 +1481,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
51563 info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0;
51564
51565 /* Disable blanking if the user requested so. */
51566- if (!blank)
51567- info->fbops->fb_blank = NULL;
51568+ if (!blank) {
51569+ pax_open_kernel();
51570+ *(void **)&info->fbops->fb_blank = NULL;
51571+ pax_close_kernel();
51572+ }
51573
51574 /*
51575 * Find out how much IO memory is required for the mode with
51576@@ -1534,8 +1561,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
51577 info->flags = FBINFO_FLAG_DEFAULT |
51578 (par->ypan ? FBINFO_HWACCEL_YPAN : 0);
51579
51580- if (!par->ypan)
51581- info->fbops->fb_pan_display = NULL;
51582+ if (!par->ypan) {
51583+ pax_open_kernel();
51584+ *(void **)&info->fbops->fb_pan_display = NULL;
51585+ pax_close_kernel();
51586+ }
51587 }
51588
51589 static void uvesafb_init_mtrr(struct fb_info *info)
51590@@ -1836,6 +1866,11 @@ out:
51591 if (par->vbe_modes)
51592 kfree(par->vbe_modes);
51593
51594+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
51595+ if (par->pmi_code)
51596+ module_free_exec(NULL, par->pmi_code);
51597+#endif
51598+
51599 framebuffer_release(info);
51600 return err;
51601 }
51602@@ -1862,6 +1897,12 @@ static int uvesafb_remove(struct platform_device *dev)
51603 kfree(par->vbe_state_orig);
51604 if (par->vbe_state_saved)
51605 kfree(par->vbe_state_saved);
51606+
51607+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
51608+ if (par->pmi_code)
51609+ module_free_exec(NULL, par->pmi_code);
51610+#endif
51611+
51612 }
51613
51614 framebuffer_release(info);
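Taken together, the uvesafb hunks relocate the BIOS protected-mode interface: under KERNEXEC the writable mapping holding the PMI stub is no longer executable, so the stub is copied into an executable module allocation and the entry offsets are rebased onto that copy. A condensed sketch using the patch's own helpers (pmi_size stands in for the (u16)task->t.regs.ecx length; error handling omitted):

	/* Condensed from the hunks above, CONFIG_MODULES + CONFIG_PAX_KERNEXEC. */
	par->pmi_code = module_alloc_exec(pmi_size);	/* executable region */
	if (par->pmi_code) {
		pax_open_kernel();
		memcpy(par->pmi_code, par->pmi_base, pmi_size);	/* copy BIOS stub */
		pax_close_kernel();
		/* entry points now live in the executable copy */
		par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
		par->pmi_pal   = ktva_ktla(par->pmi_code + par->pmi_base[2]);
	}

The matching module_free_exec() calls in the error path and in uvesafb_remove() release the allocation.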
51615diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
51616index 501b340..d80aa17 100644
51617--- a/drivers/video/vesafb.c
51618+++ b/drivers/video/vesafb.c
51619@@ -9,6 +9,7 @@
51620 */
51621
51622 #include <linux/module.h>
51623+#include <linux/moduleloader.h>
51624 #include <linux/kernel.h>
51625 #include <linux/errno.h>
51626 #include <linux/string.h>
51627@@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
51628 static int vram_total __initdata; /* Set total amount of memory */
51629 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
51630 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
51631-static void (*pmi_start)(void) __read_mostly;
51632-static void (*pmi_pal) (void) __read_mostly;
51633+static void (*pmi_start)(void) __read_only;
51634+static void (*pmi_pal) (void) __read_only;
51635 static int depth __read_mostly;
51636 static int vga_compat __read_mostly;
51637 /* --------------------------------------------------------------------- */
51638@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
51639 unsigned int size_vmode;
51640 unsigned int size_remap;
51641 unsigned int size_total;
51642+ void *pmi_code = NULL;
51643
51644 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
51645 return -ENODEV;
51646@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
51647 size_remap = size_total;
51648 vesafb_fix.smem_len = size_remap;
51649
51650-#ifndef __i386__
51651- screen_info.vesapm_seg = 0;
51652-#endif
51653-
51654 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
51655 printk(KERN_WARNING
51656 "vesafb: cannot reserve video memory at 0x%lx\n",
51657@@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
51658 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
51659 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
51660
51661+#ifdef __i386__
51662+
51663+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
51664+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
51665+ if (!pmi_code)
51666+#elif !defined(CONFIG_PAX_KERNEXEC)
51667+ if (0)
51668+#endif
51669+
51670+#endif
51671+ screen_info.vesapm_seg = 0;
51672+
51673 if (screen_info.vesapm_seg) {
51674- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
51675- screen_info.vesapm_seg,screen_info.vesapm_off);
51676+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
51677+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
51678 }
51679
51680 if (screen_info.vesapm_seg < 0xc000)
51681@@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
51682
51683 if (ypan || pmi_setpal) {
51684 unsigned short *pmi_base;
51685+
51686 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
51687- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
51688- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
51689+
51690+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
51691+ pax_open_kernel();
51692+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
51693+#else
51694+ pmi_code = pmi_base;
51695+#endif
51696+
51697+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
51698+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
51699+
51700+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
51701+ pmi_start = ktva_ktla(pmi_start);
51702+ pmi_pal = ktva_ktla(pmi_pal);
51703+ pax_close_kernel();
51704+#endif
51705+
51706 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
51707 if (pmi_base[3]) {
51708 printk(KERN_INFO "vesafb: pmi: ports = ");
51709@@ -472,8 +498,11 @@ static int __init vesafb_probe(struct platform_device *dev)
51710 info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
51711 (ypan ? FBINFO_HWACCEL_YPAN : 0);
51712
51713- if (!ypan)
51714- info->fbops->fb_pan_display = NULL;
51715+ if (!ypan) {
51716+ pax_open_kernel();
51717+ *(void **)&info->fbops->fb_pan_display = NULL;
51718+ pax_close_kernel();
51719+ }
51720
51721 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
51722 err = -ENOMEM;
51723@@ -488,6 +517,11 @@ static int __init vesafb_probe(struct platform_device *dev)
51724 info->node, info->fix.id);
51725 return 0;
51726 err:
51727+
51728+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
51729+ module_free_exec(NULL, pmi_code);
51730+#endif
51731+
51732 if (info->screen_base)
51733 iounmap(info->screen_base);
51734 framebuffer_release(info);
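The vesafb version of the same change hides a dense preprocessor construct worth unpacking: the single statement screen_info.vesapm_seg = 0; is conditioned three different ways depending on configuration. A commented expansion of the cases, as I read the #if chain:

	/* How the #if chain above resolves on i386:
	 *  - MODULES + KERNEXEC:   if (!pmi_code) screen_info.vesapm_seg = 0;
	 *    (give up on the PMI only when the executable allocation failed)
	 *  - no KERNEXEC:          if (0) screen_info.vesapm_seg = 0;
	 *    (never clear; the PMI is used in place, as before the patch)
	 *  - KERNEXEC, no MODULES: screen_info.vesapm_seg = 0;
	 *    (always clear: with no exec allocator the PMI cannot be used)
	 * On non-i386 the statement is unconditional, replacing the old
	 * #ifndef __i386__ block that the hunk removed. */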
51735diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
51736index 88714ae..16c2e11 100644
51737--- a/drivers/video/via/via_clock.h
51738+++ b/drivers/video/via/via_clock.h
51739@@ -56,7 +56,7 @@ struct via_clock {
51740
51741 void (*set_engine_pll_state)(u8 state);
51742 void (*set_engine_pll)(struct via_pll_config config);
51743-};
51744+} __no_const;
51745
51746
51747 static inline u32 get_pll_internal_frequency(u32 ref_freq,
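via_clock.h illustrates the flip side of constification: the plugin would make an all-function-pointer struct const by default, but via_clock is populated at probe time per chipset, so it is tagged __no_const. A sketch of the annotation's effect (hw_ops is a hypothetical struct):

	/* Assumes the constify gcc plugin: a struct containing only
	 * function pointers is implicitly forced const unless annotated. */
	struct hw_ops {
		void (*set_state)(u8 state);
	} __no_const;			/* stays writable */

	static struct hw_ops ops;	/* filled in at runtime, e.g. in probe() */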
51748diff --git a/drivers/xen/events.c b/drivers/xen/events.c
51749index 6a6bbe4..c733886 100644
51750--- a/drivers/xen/events.c
51751+++ b/drivers/xen/events.c
51752@@ -346,7 +346,7 @@ static void init_evtchn_cpu_bindings(void)
51753
51754 for_each_possible_cpu(i)
51755 memset(per_cpu(cpu_evtchn_mask, i),
51756- (i == 0) ? ~0 : 0, sizeof(*per_cpu(cpu_evtchn_mask, i)));
51757+ (i == 0) ? ~0 : 0, NR_EVENT_CHANNELS/8);
51758 }
51759
51760 static inline void clear_evtchn(int port)
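The events.c change replaces a sizeof-based length with the explicit bitmap size. The mask holds one bit per event channel, and under the hardened per-cpu declarations sizeof(*per_cpu(...)) may no longer evaluate to the whole array, so the byte count is spelled out. A restatement of the arithmetic:

	/* One bit per event channel => NR_EVENT_CHANNELS / 8 bytes.
	 * Writing the length explicitly avoids depending on how the
	 * per-cpu array type is declared in the hardened build. */
	memset(per_cpu(cpu_evtchn_mask, i),
	       (i == 0) ? ~0 : 0, NR_EVENT_CHANNELS / 8);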
51761diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
51762index fef20db..d28b1ab 100644
51763--- a/drivers/xen/xenfs/xenstored.c
51764+++ b/drivers/xen/xenfs/xenstored.c
51765@@ -24,7 +24,12 @@ static int xsd_release(struct inode *inode, struct file *file)
51766 static int xsd_kva_open(struct inode *inode, struct file *file)
51767 {
51768 file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
51769+#ifdef CONFIG_GRKERNSEC_HIDESYM
51770+ NULL);
51771+#else
51772 xen_store_interface);
51773+#endif
51774+
51775 if (!file->private_data)
51776 return -ENOMEM;
51777 return 0;
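xenstored.c is one of many GRKERNSEC_HIDESYM sites in the patch: wherever a %p-formatted kernel address would reach userspace, the value is censored. A sketch of the policy as applied here:

	/* Assumes CONFIG_GRKERNSEC_HIDESYM: the file content is still a
	 * syntactically valid "0x..." string, but it encodes NULL instead
	 * of the real kernel virtual address of the xenstore page. */
	#ifdef CONFIG_GRKERNSEC_HIDESYM
		file->private_data = kasprintf(GFP_KERNEL, "0x%p", NULL);
	#else
		file->private_data = kasprintf(GFP_KERNEL, "0x%p", xen_store_interface);
	#endif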
51778diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
51779index 055562c..fdfb10d 100644
51780--- a/fs/9p/vfs_addr.c
51781+++ b/fs/9p/vfs_addr.c
51782@@ -186,7 +186,7 @@ static int v9fs_vfs_writepage_locked(struct page *page)
51783
51784 retval = v9fs_file_write_internal(inode,
51785 v9inode->writeback_fid,
51786- (__force const char __user *)buffer,
51787+ (const char __force_user *)buffer,
51788 len, &offset, 0);
51789 if (retval > 0)
51790 retval = 0;
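The 9p cast is part of a patch-wide annotation scheme: when kernel code deliberately feeds a kernel buffer to a __user-typed API (under set_fs(KERNEL_DS)), the cast is written __force_user so the checker plugins can distinguish sanctioned address-space overrides from bugs. A generic sketch (do_user_write and kbuf are hypothetical):

	/* Sketch: the caller widens the usable address range to kernel
	 * space, passes a kernel buffer through a user-pointer interface,
	 * then restores the original limit. __force_user documents that
	 * the address-space cast is intentional. */
	mm_segment_t old_fs = get_fs();
	set_fs(KERNEL_DS);
	ret = do_user_write(file, (const char __force_user *)kbuf, len, &pos);
	set_fs(old_fs);

The autofs4_write() hunk below applies the same annotation to the same set_fs pattern.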
51791diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
51792index d86edc8..40ff2fb 100644
51793--- a/fs/9p/vfs_inode.c
51794+++ b/fs/9p/vfs_inode.c
51795@@ -1314,7 +1314,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
51796 void
51797 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
51798 {
51799- char *s = nd_get_link(nd);
51800+ const char *s = nd_get_link(nd);
51801
51802 p9_debug(P9_DEBUG_VFS, " %s %s\n",
51803 dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
51804diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
51805index 370b24c..ff0be7b 100644
51806--- a/fs/Kconfig.binfmt
51807+++ b/fs/Kconfig.binfmt
51808@@ -103,7 +103,7 @@ config HAVE_AOUT
51809
51810 config BINFMT_AOUT
51811 tristate "Kernel support for a.out and ECOFF binaries"
51812- depends on HAVE_AOUT
51813+ depends on HAVE_AOUT && BROKEN
51814 ---help---
51815 A.out (Assembler.OUTput) is a set of formats for libraries and
51816 executables used in the earliest versions of UNIX. Linux used
51817diff --git a/fs/aio.c b/fs/aio.c
51818index 2bbcacf..8614116 100644
51819--- a/fs/aio.c
51820+++ b/fs/aio.c
51821@@ -160,7 +160,7 @@ static int aio_setup_ring(struct kioctx *ctx)
51822 size += sizeof(struct io_event) * nr_events;
51823 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
51824
51825- if (nr_pages < 0)
51826+ if (nr_pages <= 0)
51827 return -EINVAL;
51828
51829 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
51830@@ -950,6 +950,7 @@ static ssize_t aio_rw_vect_retry(struct kiocb *iocb, int rw, aio_rw_op *rw_op)
51831 static ssize_t aio_setup_vectored_rw(int rw, struct kiocb *kiocb, bool compat)
51832 {
51833 ssize_t ret;
51834+ struct iovec iovstack;
51835
51836 kiocb->ki_nr_segs = kiocb->ki_nbytes;
51837
51838@@ -957,17 +958,22 @@ static ssize_t aio_setup_vectored_rw(int rw, struct kiocb *kiocb, bool compat)
51839 if (compat)
51840 ret = compat_rw_copy_check_uvector(rw,
51841 (struct compat_iovec __user *)kiocb->ki_buf,
51842- kiocb->ki_nr_segs, 1, &kiocb->ki_inline_vec,
51843+ kiocb->ki_nr_segs, 1, &iovstack,
51844 &kiocb->ki_iovec);
51845 else
51846 #endif
51847 ret = rw_copy_check_uvector(rw,
51848 (struct iovec __user *)kiocb->ki_buf,
51849- kiocb->ki_nr_segs, 1, &kiocb->ki_inline_vec,
51850+ kiocb->ki_nr_segs, 1, &iovstack,
51851 &kiocb->ki_iovec);
51852 if (ret < 0)
51853 return ret;
51854
51855+ if (kiocb->ki_iovec == &iovstack) {
51856+ kiocb->ki_inline_vec = iovstack;
51857+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
51858+ }
51859+
51860 /* ki_nbytes now reflect bytes instead of segs */
51861 kiocb->ki_nbytes = ret;
51862 return 0;
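The aio.c change reorders validation and publication: the user iovec is checked into a stack slot first and only copied into the kiocb once accepted, so the long-lived object never holds a half-validated vector. A distilled sketch of the flow:

	/* Distilled from the hunk above. rw_copy_check_uvector() either
	 * fills the fast single-entry slot it was given or allocates a
	 * larger array and returns it via the last argument. */
	struct iovec iovstack;
	ssize_t ret = rw_copy_check_uvector(rw,
				(struct iovec __user *)kiocb->ki_buf,
				kiocb->ki_nr_segs, 1, &iovstack,
				&kiocb->ki_iovec);
	if (ret < 0)
		return ret;
	if (kiocb->ki_iovec == &iovstack) {		/* stayed on the stack */
		kiocb->ki_inline_vec = iovstack;	/* commit to the kiocb */
		kiocb->ki_iovec = &kiocb->ki_inline_vec;
	}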
51863diff --git a/fs/attr.c b/fs/attr.c
51864index 1449adb..a2038c2 100644
51865--- a/fs/attr.c
51866+++ b/fs/attr.c
51867@@ -102,6 +102,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
51868 unsigned long limit;
51869
51870 limit = rlimit(RLIMIT_FSIZE);
51871+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
51872 if (limit != RLIM_INFINITY && offset > limit)
51873 goto out_sig;
51874 if (offset > inode->i_sb->s_maxbytes)
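attr.c shows the learning-mode hook in its simplest form: gr_learn_resource() is called before the limit check, recording the attempted usage so that grsecurity's learning mode can later derive a policy from observed maxima. A usage sketch (the precise meaning of the final argument is internal to the learning code):

	/* Record the attempted size before the rlimit is enforced; the
	 * hook is effectively a no-op unless learning mode is active. */
	gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
	if (limit != RLIM_INFINITY && offset > limit)
		goto out_sig;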
51875diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
51876index 3db70da..7aeec5b 100644
51877--- a/fs/autofs4/waitq.c
51878+++ b/fs/autofs4/waitq.c
51879@@ -59,7 +59,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
51880 {
51881 unsigned long sigpipe, flags;
51882 mm_segment_t fs;
51883- const char *data = (const char *)addr;
51884+ const char __user *data = (const char __force_user *)addr;
51885 ssize_t wr = 0;
51886
51887 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
51888@@ -346,6 +346,10 @@ static int validate_request(struct autofs_wait_queue **wait,
51889 return 1;
51890 }
51891
51892+#ifdef CONFIG_GRKERNSEC_HIDESYM
51893+static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
51894+#endif
51895+
51896 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
51897 enum autofs_notify notify)
51898 {
51899@@ -379,7 +383,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
51900
51901 /* If this is a direct mount request create a dummy name */
51902 if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
51903+#ifdef CONFIG_GRKERNSEC_HIDESYM
51904+ /* this name does get written to userland via autofs4_write() */
51905+ qstr.len = sprintf(name, "%08x", atomic_inc_return_unchecked(&autofs_dummy_name_id));
51906+#else
51907 qstr.len = sprintf(name, "%p", dentry);
51908+#endif
51909 else {
51910 qstr.len = autofs4_getpath(sbi, dentry, &name);
51911 if (!qstr.len) {
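The autofs change pairs HIDESYM with a functional requirement: the dummy name still has to be unique per wait; it just must not be the dentry address. An unchecked counter provides that. A sketch following the hunk above:

	/* Assumes CONFIG_GRKERNSEC_HIDESYM: a monotonically increasing id
	 * replaces the "%p"-formatted dentry pointer, preserving
	 * uniqueness without leaking a kernel address to the automount
	 * daemon that reads this name. */
	static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);

	qstr.len = sprintf(name, "%08x",
			   atomic_inc_return_unchecked(&autofs_dummy_name_id));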
51912diff --git a/fs/befs/endian.h b/fs/befs/endian.h
51913index 2722387..c8dd2a7 100644
51914--- a/fs/befs/endian.h
51915+++ b/fs/befs/endian.h
51916@@ -11,7 +11,7 @@
51917
51918 #include <asm/byteorder.h>
51919
51920-static inline u64
51921+static inline u64 __intentional_overflow(-1)
51922 fs64_to_cpu(const struct super_block *sb, fs64 n)
51923 {
51924 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
51925@@ -29,7 +29,7 @@ cpu_to_fs64(const struct super_block *sb, u64 n)
51926 return (__force fs64)cpu_to_be64(n);
51927 }
51928
51929-static inline u32
51930+static inline u32 __intentional_overflow(-1)
51931 fs32_to_cpu(const struct super_block *sb, fs32 n)
51932 {
51933 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
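The befs annotations target the size_overflow plugin, which instruments integer expressions and reports unexpected truncation or wraparound; byte-order helpers exist precisely to reinterpret bit patterns, so they are whitelisted. A simplified sketch, on the assumption that the (-1) argument marks the function as a whole (the sketch keeps only the little-endian branch of the real helper):

	/* Assumes the size_overflow gcc plugin. */
	static inline u64 __intentional_overflow(-1)
	fs64_to_cpu_sketch(const struct super_block *sb, fs64 n)	/* hypothetical name */
	{
		return le64_to_cpu((__force __le64)n);
	}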
51934diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
51935index f95dddc..b1e2c1c 100644
51936--- a/fs/befs/linuxvfs.c
51937+++ b/fs/befs/linuxvfs.c
51938@@ -510,7 +510,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
51939 {
51940 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
51941 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
51942- char *link = nd_get_link(nd);
51943+ const char *link = nd_get_link(nd);
51944 if (!IS_ERR(link))
51945 kfree(link);
51946 }
51947diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
51948index bce8769..7fc7544 100644
51949--- a/fs/binfmt_aout.c
51950+++ b/fs/binfmt_aout.c
51951@@ -16,6 +16,7 @@
51952 #include <linux/string.h>
51953 #include <linux/fs.h>
51954 #include <linux/file.h>
51955+#include <linux/security.h>
51956 #include <linux/stat.h>
51957 #include <linux/fcntl.h>
51958 #include <linux/ptrace.h>
51959@@ -59,6 +60,8 @@ static int aout_core_dump(struct coredump_params *cprm)
51960 #endif
51961 # define START_STACK(u) ((void __user *)u.start_stack)
51962
51963+ memset(&dump, 0, sizeof(dump));
51964+
51965 fs = get_fs();
51966 set_fs(KERNEL_DS);
51967 has_dumped = 1;
51968@@ -69,10 +72,12 @@ static int aout_core_dump(struct coredump_params *cprm)
51969
51970 /* If the size of the dump file exceeds the rlimit, then see what would happen
51971 if we wrote the stack, but not the data area. */
51972+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
51973 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
51974 dump.u_dsize = 0;
51975
51976 /* Make sure we have enough room to write the stack and data areas. */
51977+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
51978 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
51979 dump.u_ssize = 0;
51980
51981@@ -233,6 +238,8 @@ static int load_aout_binary(struct linux_binprm * bprm)
51982 rlim = rlimit(RLIMIT_DATA);
51983 if (rlim >= RLIM_INFINITY)
51984 rlim = ~0;
51985+
51986+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
51987 if (ex.a_data + ex.a_bss > rlim)
51988 return -ENOMEM;
51989
51990@@ -267,6 +274,27 @@ static int load_aout_binary(struct linux_binprm * bprm)
51991
51992 install_exec_creds(bprm);
51993
51994+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
51995+ current->mm->pax_flags = 0UL;
51996+#endif
51997+
51998+#ifdef CONFIG_PAX_PAGEEXEC
51999+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
52000+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
52001+
52002+#ifdef CONFIG_PAX_EMUTRAMP
52003+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
52004+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
52005+#endif
52006+
52007+#ifdef CONFIG_PAX_MPROTECT
52008+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
52009+ current->mm->pax_flags |= MF_PAX_MPROTECT;
52010+#endif
52011+
52012+ }
52013+#endif
52014+
52015 if (N_MAGIC(ex) == OMAGIC) {
52016 unsigned long text_addr, map_size;
52017 loff_t pos;
52018@@ -324,7 +352,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
52019 }
52020
52021 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
52022- PROT_READ | PROT_WRITE | PROT_EXEC,
52023+ PROT_READ | PROT_WRITE,
52024 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
52025 fd_offset + ex.a_text);
52026 if (error != N_DATADDR(ex)) {
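Two distinct fixes share the binfmt_aout hunks: the NOEXEC data mapping (PROT_EXEC dropped from the vm_mmap above) and an infoleak fix. The memset is the subtle one; dump is a struct copied wholesale into the core file, so any field the dumper does not set would otherwise carry uninitialized kernel stack bytes. A sketch of the leak and its fix (fill_dump and write_core_file are hypothetical stand-ins for the surrounding dump logic):

	/* `dump` is written verbatim into the core file; without the
	 * memset, padding and unset fields leak whatever happened to be
	 * on the kernel stack. */
	struct user dump;
	memset(&dump, 0, sizeof(dump));		/* scrub before filling */
	fill_dump(&dump);			/* hypothetical: set used fields */
	write_core_file(&dump, sizeof(dump));	/* hypothetical sink */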
52027diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
52028index f8a0b0e..6f036ed 100644
52029--- a/fs/binfmt_elf.c
52030+++ b/fs/binfmt_elf.c
52031@@ -34,6 +34,7 @@
52032 #include <linux/utsname.h>
52033 #include <linux/coredump.h>
52034 #include <linux/sched.h>
52035+#include <linux/xattr.h>
52036 #include <asm/uaccess.h>
52037 #include <asm/param.h>
52038 #include <asm/page.h>
52039@@ -60,6 +61,14 @@ static int elf_core_dump(struct coredump_params *cprm);
52040 #define elf_core_dump NULL
52041 #endif
52042
52043+#ifdef CONFIG_PAX_MPROTECT
52044+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
52045+#endif
52046+
52047+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
52048+static void elf_handle_mmap(struct file *file);
52049+#endif
52050+
52051 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
52052 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
52053 #else
52054@@ -79,6 +88,15 @@ static struct linux_binfmt elf_format = {
52055 .load_binary = load_elf_binary,
52056 .load_shlib = load_elf_library,
52057 .core_dump = elf_core_dump,
52058+
52059+#ifdef CONFIG_PAX_MPROTECT
52060+ .handle_mprotect= elf_handle_mprotect,
52061+#endif
52062+
52063+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
52064+ .handle_mmap = elf_handle_mmap,
52065+#endif
52066+
52067 .min_coredump = ELF_EXEC_PAGESIZE,
52068 };
52069
52070@@ -86,6 +104,8 @@ static struct linux_binfmt elf_format = {
52071
52072 static int set_brk(unsigned long start, unsigned long end)
52073 {
52074+ unsigned long e = end;
52075+
52076 start = ELF_PAGEALIGN(start);
52077 end = ELF_PAGEALIGN(end);
52078 if (end > start) {
52079@@ -94,7 +114,7 @@ static int set_brk(unsigned long start, unsigned long end)
52080 if (BAD_ADDR(addr))
52081 return addr;
52082 }
52083- current->mm->start_brk = current->mm->brk = end;
52084+ current->mm->start_brk = current->mm->brk = e;
52085 return 0;
52086 }
52087
52088@@ -155,12 +175,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
52089 elf_addr_t __user *u_rand_bytes;
52090 const char *k_platform = ELF_PLATFORM;
52091 const char *k_base_platform = ELF_BASE_PLATFORM;
52092- unsigned char k_rand_bytes[16];
52093+ u32 k_rand_bytes[4];
52094 int items;
52095 elf_addr_t *elf_info;
52096 int ei_index = 0;
52097 const struct cred *cred = current_cred();
52098 struct vm_area_struct *vma;
52099+ unsigned long saved_auxv[AT_VECTOR_SIZE];
52100
52101 /*
52102 * In some cases (e.g. Hyper-Threading), we want to avoid L1
52103@@ -202,8 +223,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
52104 * Generate 16 random bytes for userspace PRNG seeding.
52105 */
52106 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
52107- u_rand_bytes = (elf_addr_t __user *)
52108- STACK_ALLOC(p, sizeof(k_rand_bytes));
52109+ prandom_seed(k_rand_bytes[0] ^ prandom_u32());
52110+ prandom_seed(k_rand_bytes[1] ^ prandom_u32());
52111+ prandom_seed(k_rand_bytes[2] ^ prandom_u32());
52112+ prandom_seed(k_rand_bytes[3] ^ prandom_u32());
52113+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
52114+ u_rand_bytes = (elf_addr_t __user *) p;
52115 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
52116 return -EFAULT;
52117
52118@@ -318,9 +343,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
52119 return -EFAULT;
52120 current->mm->env_end = p;
52121
52122+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
52123+
52124 /* Put the elf_info on the stack in the right place. */
52125 sp = (elf_addr_t __user *)envp + 1;
52126- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
52127+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
52128 return -EFAULT;
52129 return 0;
52130 }
52131@@ -388,15 +415,14 @@ static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
52132 an ELF header */
52133
52134 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
52135- struct file *interpreter, unsigned long *interp_map_addr,
52136- unsigned long no_base)
52137+ struct file *interpreter, unsigned long no_base)
52138 {
52139 struct elf_phdr *elf_phdata;
52140 struct elf_phdr *eppnt;
52141- unsigned long load_addr = 0;
52142+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
52143 int load_addr_set = 0;
52144 unsigned long last_bss = 0, elf_bss = 0;
52145- unsigned long error = ~0UL;
52146+ unsigned long error = -EINVAL;
52147 unsigned long total_size;
52148 int retval, i, size;
52149
52150@@ -442,6 +468,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
52151 goto out_close;
52152 }
52153
52154+#ifdef CONFIG_PAX_SEGMEXEC
52155+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
52156+ pax_task_size = SEGMEXEC_TASK_SIZE;
52157+#endif
52158+
52159 eppnt = elf_phdata;
52160 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
52161 if (eppnt->p_type == PT_LOAD) {
52162@@ -465,8 +496,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
52163 map_addr = elf_map(interpreter, load_addr + vaddr,
52164 eppnt, elf_prot, elf_type, total_size);
52165 total_size = 0;
52166- if (!*interp_map_addr)
52167- *interp_map_addr = map_addr;
52168 error = map_addr;
52169 if (BAD_ADDR(map_addr))
52170 goto out_close;
52171@@ -485,8 +514,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
52172 k = load_addr + eppnt->p_vaddr;
52173 if (BAD_ADDR(k) ||
52174 eppnt->p_filesz > eppnt->p_memsz ||
52175- eppnt->p_memsz > TASK_SIZE ||
52176- TASK_SIZE - eppnt->p_memsz < k) {
52177+ eppnt->p_memsz > pax_task_size ||
52178+ pax_task_size - eppnt->p_memsz < k) {
52179 error = -ENOMEM;
52180 goto out_close;
52181 }
52182@@ -538,6 +567,315 @@ out:
52183 return error;
52184 }
52185
52186+#ifdef CONFIG_PAX_PT_PAX_FLAGS
52187+#ifdef CONFIG_PAX_SOFTMODE
52188+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
52189+{
52190+ unsigned long pax_flags = 0UL;
52191+
52192+#ifdef CONFIG_PAX_PAGEEXEC
52193+ if (elf_phdata->p_flags & PF_PAGEEXEC)
52194+ pax_flags |= MF_PAX_PAGEEXEC;
52195+#endif
52196+
52197+#ifdef CONFIG_PAX_SEGMEXEC
52198+ if (elf_phdata->p_flags & PF_SEGMEXEC)
52199+ pax_flags |= MF_PAX_SEGMEXEC;
52200+#endif
52201+
52202+#ifdef CONFIG_PAX_EMUTRAMP
52203+ if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
52204+ pax_flags |= MF_PAX_EMUTRAMP;
52205+#endif
52206+
52207+#ifdef CONFIG_PAX_MPROTECT
52208+ if (elf_phdata->p_flags & PF_MPROTECT)
52209+ pax_flags |= MF_PAX_MPROTECT;
52210+#endif
52211+
52212+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
52213+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
52214+ pax_flags |= MF_PAX_RANDMMAP;
52215+#endif
52216+
52217+ return pax_flags;
52218+}
52219+#endif
52220+
52221+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
52222+{
52223+ unsigned long pax_flags = 0UL;
52224+
52225+#ifdef CONFIG_PAX_PAGEEXEC
52226+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
52227+ pax_flags |= MF_PAX_PAGEEXEC;
52228+#endif
52229+
52230+#ifdef CONFIG_PAX_SEGMEXEC
52231+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
52232+ pax_flags |= MF_PAX_SEGMEXEC;
52233+#endif
52234+
52235+#ifdef CONFIG_PAX_EMUTRAMP
52236+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
52237+ pax_flags |= MF_PAX_EMUTRAMP;
52238+#endif
52239+
52240+#ifdef CONFIG_PAX_MPROTECT
52241+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
52242+ pax_flags |= MF_PAX_MPROTECT;
52243+#endif
52244+
52245+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
52246+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
52247+ pax_flags |= MF_PAX_RANDMMAP;
52248+#endif
52249+
52250+ return pax_flags;
52251+}
52252+#endif
52253+
52254+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
52255+#ifdef CONFIG_PAX_SOFTMODE
52256+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
52257+{
52258+ unsigned long pax_flags = 0UL;
52259+
52260+#ifdef CONFIG_PAX_PAGEEXEC
52261+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
52262+ pax_flags |= MF_PAX_PAGEEXEC;
52263+#endif
52264+
52265+#ifdef CONFIG_PAX_SEGMEXEC
52266+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
52267+ pax_flags |= MF_PAX_SEGMEXEC;
52268+#endif
52269+
52270+#ifdef CONFIG_PAX_EMUTRAMP
52271+ if ((pax_flags_softmode & MF_PAX_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
52272+ pax_flags |= MF_PAX_EMUTRAMP;
52273+#endif
52274+
52275+#ifdef CONFIG_PAX_MPROTECT
52276+ if (pax_flags_softmode & MF_PAX_MPROTECT)
52277+ pax_flags |= MF_PAX_MPROTECT;
52278+#endif
52279+
52280+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
52281+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
52282+ pax_flags |= MF_PAX_RANDMMAP;
52283+#endif
52284+
52285+ return pax_flags;
52286+}
52287+#endif
52288+
52289+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
52290+{
52291+ unsigned long pax_flags = 0UL;
52292+
52293+#ifdef CONFIG_PAX_PAGEEXEC
52294+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
52295+ pax_flags |= MF_PAX_PAGEEXEC;
52296+#endif
52297+
52298+#ifdef CONFIG_PAX_SEGMEXEC
52299+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
52300+ pax_flags |= MF_PAX_SEGMEXEC;
52301+#endif
52302+
52303+#ifdef CONFIG_PAX_EMUTRAMP
52304+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
52305+ pax_flags |= MF_PAX_EMUTRAMP;
52306+#endif
52307+
52308+#ifdef CONFIG_PAX_MPROTECT
52309+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
52310+ pax_flags |= MF_PAX_MPROTECT;
52311+#endif
52312+
52313+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
52314+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
52315+ pax_flags |= MF_PAX_RANDMMAP;
52316+#endif
52317+
52318+ return pax_flags;
52319+}
52320+#endif
52321+
52322+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
52323+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
52324+{
52325+ unsigned long pax_flags = 0UL;
52326+
52327+#ifdef CONFIG_PAX_EI_PAX
52328+
52329+#ifdef CONFIG_PAX_PAGEEXEC
52330+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
52331+ pax_flags |= MF_PAX_PAGEEXEC;
52332+#endif
52333+
52334+#ifdef CONFIG_PAX_SEGMEXEC
52335+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
52336+ pax_flags |= MF_PAX_SEGMEXEC;
52337+#endif
52338+
52339+#ifdef CONFIG_PAX_EMUTRAMP
52340+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
52341+ pax_flags |= MF_PAX_EMUTRAMP;
52342+#endif
52343+
52344+#ifdef CONFIG_PAX_MPROTECT
52345+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
52346+ pax_flags |= MF_PAX_MPROTECT;
52347+#endif
52348+
52349+#ifdef CONFIG_PAX_ASLR
52350+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
52351+ pax_flags |= MF_PAX_RANDMMAP;
52352+#endif
52353+
52354+#else
52355+
52356+#ifdef CONFIG_PAX_PAGEEXEC
52357+ pax_flags |= MF_PAX_PAGEEXEC;
52358+#endif
52359+
52360+#ifdef CONFIG_PAX_SEGMEXEC
52361+ pax_flags |= MF_PAX_SEGMEXEC;
52362+#endif
52363+
52364+#ifdef CONFIG_PAX_MPROTECT
52365+ pax_flags |= MF_PAX_MPROTECT;
52366+#endif
52367+
52368+#ifdef CONFIG_PAX_RANDMMAP
52369+ if (randomize_va_space)
52370+ pax_flags |= MF_PAX_RANDMMAP;
52371+#endif
52372+
52373+#endif
52374+
52375+ return pax_flags;
52376+}
52377+
52378+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
52379+{
52380+
52381+#ifdef CONFIG_PAX_PT_PAX_FLAGS
52382+ unsigned long i;
52383+
52384+ for (i = 0UL; i < elf_ex->e_phnum; i++)
52385+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
52386+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
52387+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
52388+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
52389+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
52390+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
52391+ return ~0UL;
52392+
52393+#ifdef CONFIG_PAX_SOFTMODE
52394+ if (pax_softmode)
52395+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
52396+ else
52397+#endif
52398+
52399+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
52400+ break;
52401+ }
52402+#endif
52403+
52404+ return ~0UL;
52405+}
52406+
52407+static unsigned long pax_parse_xattr_pax(struct file * const file)
52408+{
52409+
52410+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
52411+ ssize_t xattr_size, i;
52412+ unsigned char xattr_value[sizeof("pemrs") - 1];
52413+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
52414+
52415+ xattr_size = pax_getxattr(file->f_path.dentry, xattr_value, sizeof xattr_value);
52416+ if (xattr_size <= 0 || xattr_size > sizeof xattr_value)
52417+ return ~0UL;
52418+
52419+ for (i = 0; i < xattr_size; i++)
52420+ switch (xattr_value[i]) {
52421+ default:
52422+ return ~0UL;
52423+
52424+#define parse_flag(option1, option2, flag) \
52425+ case option1: \
52426+ if (pax_flags_hardmode & MF_PAX_##flag) \
52427+ return ~0UL; \
52428+ pax_flags_hardmode |= MF_PAX_##flag; \
52429+ break; \
52430+ case option2: \
52431+ if (pax_flags_softmode & MF_PAX_##flag) \
52432+ return ~0UL; \
52433+ pax_flags_softmode |= MF_PAX_##flag; \
52434+ break;
52435+
52436+ parse_flag('p', 'P', PAGEEXEC);
52437+ parse_flag('e', 'E', EMUTRAMP);
52438+ parse_flag('m', 'M', MPROTECT);
52439+ parse_flag('r', 'R', RANDMMAP);
52440+ parse_flag('s', 'S', SEGMEXEC);
52441+
52442+#undef parse_flag
52443+ }
52444+
52445+ if (pax_flags_hardmode & pax_flags_softmode)
52446+ return ~0UL;
52447+
52448+#ifdef CONFIG_PAX_SOFTMODE
52449+ if (pax_softmode)
52450+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
52451+ else
52452+#endif
52453+
52454+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
52455+#else
52456+ return ~0UL;
52457+#endif
52458+
52459+}
52460+
52461+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
52462+{
52463+ unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
52464+
52465+ pax_flags = pax_parse_ei_pax(elf_ex);
52466+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
52467+ xattr_pax_flags = pax_parse_xattr_pax(file);
52468+
52469+ if (pt_pax_flags == ~0UL)
52470+ pt_pax_flags = xattr_pax_flags;
52471+ else if (xattr_pax_flags == ~0UL)
52472+ xattr_pax_flags = pt_pax_flags;
52473+ if (pt_pax_flags != xattr_pax_flags)
52474+ return -EINVAL;
52475+ if (pt_pax_flags != ~0UL)
52476+ pax_flags = pt_pax_flags;
52477+
52478+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
52479+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
52480+ if ((__supported_pte_mask & _PAGE_NX))
52481+ pax_flags &= ~MF_PAX_SEGMEXEC;
52482+ else
52483+ pax_flags &= ~MF_PAX_PAGEEXEC;
52484+ }
52485+#endif
52486+
52487+ if (0 > pax_check_flags(&pax_flags))
52488+ return -EINVAL;
52489+
52490+ current->mm->pax_flags = pax_flags;
52491+ return 0;
52492+}
52493+#endif
52494+
52495 /*
52496 * These are the functions used to load ELF style executables and shared
52497 * libraries. There is no binary dependent code anywhere else.
52498@@ -554,6 +892,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
52499 {
52500 unsigned int random_variable = 0;
52501
52502+#ifdef CONFIG_PAX_RANDUSTACK
52503+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
52504+ return stack_top - current->mm->delta_stack;
52505+#endif
52506+
52507 if ((current->flags & PF_RANDOMIZE) &&
52508 !(current->personality & ADDR_NO_RANDOMIZE)) {
52509 random_variable = get_random_int() & STACK_RND_MASK;
52510@@ -572,7 +915,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
52511 unsigned long load_addr = 0, load_bias = 0;
52512 int load_addr_set = 0;
52513 char * elf_interpreter = NULL;
52514- unsigned long error;
52515+ unsigned long error = 0;
52516 struct elf_phdr *elf_ppnt, *elf_phdata;
52517 unsigned long elf_bss, elf_brk;
52518 int retval, i;
52519@@ -582,12 +925,12 @@ static int load_elf_binary(struct linux_binprm *bprm)
52520 unsigned long start_code, end_code, start_data, end_data;
52521 unsigned long reloc_func_desc __maybe_unused = 0;
52522 int executable_stack = EXSTACK_DEFAULT;
52523- unsigned long def_flags = 0;
52524 struct pt_regs *regs = current_pt_regs();
52525 struct {
52526 struct elfhdr elf_ex;
52527 struct elfhdr interp_elf_ex;
52528 } *loc;
52529+ unsigned long pax_task_size = TASK_SIZE;
52530
52531 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
52532 if (!loc) {
52533@@ -723,11 +1066,81 @@ static int load_elf_binary(struct linux_binprm *bprm)
52534 goto out_free_dentry;
52535
52536 /* OK, This is the point of no return */
52537- current->mm->def_flags = def_flags;
52538+
52539+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
52540+ current->mm->pax_flags = 0UL;
52541+#endif
52542+
52543+#ifdef CONFIG_PAX_DLRESOLVE
52544+ current->mm->call_dl_resolve = 0UL;
52545+#endif
52546+
52547+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
52548+ current->mm->call_syscall = 0UL;
52549+#endif
52550+
52551+#ifdef CONFIG_PAX_ASLR
52552+ current->mm->delta_mmap = 0UL;
52553+ current->mm->delta_stack = 0UL;
52554+#endif
52555+
52556+ current->mm->def_flags = 0;
52557+
52558+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
52559+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
52560+ send_sig(SIGKILL, current, 0);
52561+ goto out_free_dentry;
52562+ }
52563+#endif
52564+
52565+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
52566+ pax_set_initial_flags(bprm);
52567+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
52568+ if (pax_set_initial_flags_func)
52569+ (pax_set_initial_flags_func)(bprm);
52570+#endif
52571+
52572+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
52573+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
52574+ current->mm->context.user_cs_limit = PAGE_SIZE;
52575+ current->mm->def_flags |= VM_PAGEEXEC | VM_NOHUGEPAGE;
52576+ }
52577+#endif
52578+
52579+#ifdef CONFIG_PAX_SEGMEXEC
52580+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
52581+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
52582+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
52583+ pax_task_size = SEGMEXEC_TASK_SIZE;
52584+ current->mm->def_flags |= VM_NOHUGEPAGE;
52585+ }
52586+#endif
52587+
52588+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
52589+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
52590+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
52591+ put_cpu();
52592+ }
52593+#endif
52594
52595 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
52596 may depend on the personality. */
52597 SET_PERSONALITY(loc->elf_ex);
52598+
52599+#ifdef CONFIG_PAX_ASLR
52600+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
52601+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
52602+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
52603+ }
52604+#endif
52605+
52606+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
52607+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
52608+ executable_stack = EXSTACK_DISABLE_X;
52609+ current->personality &= ~READ_IMPLIES_EXEC;
52610+ } else
52611+#endif
52612+
52613 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
52614 current->personality |= READ_IMPLIES_EXEC;
52615
52616@@ -819,6 +1232,20 @@ static int load_elf_binary(struct linux_binprm *bprm)
52617 #else
52618 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
52619 #endif
52620+
52621+#ifdef CONFIG_PAX_RANDMMAP
52622+ /* PaX: randomize base address at the default exe base if requested */
52623+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
52624+#ifdef CONFIG_SPARC64
52625+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
52626+#else
52627+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
52628+#endif
52629+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
52630+ elf_flags |= MAP_FIXED;
52631+ }
52632+#endif
52633+
52634 }
52635
52636 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
52637@@ -851,9 +1278,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
52638 * allowed task size. Note that p_filesz must always be
52639 * <= p_memsz so it is only necessary to check p_memsz.
52640 */
52641- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
52642- elf_ppnt->p_memsz > TASK_SIZE ||
52643- TASK_SIZE - elf_ppnt->p_memsz < k) {
52644+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
52645+ elf_ppnt->p_memsz > pax_task_size ||
52646+ pax_task_size - elf_ppnt->p_memsz < k) {
52647 /* set_brk can never work. Avoid overflows. */
52648 send_sig(SIGKILL, current, 0);
52649 retval = -EINVAL;
52650@@ -892,17 +1319,45 @@ static int load_elf_binary(struct linux_binprm *bprm)
52651 goto out_free_dentry;
52652 }
52653 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
52654- send_sig(SIGSEGV, current, 0);
52655- retval = -EFAULT; /* Nobody gets to see this, but.. */
52656- goto out_free_dentry;
52657+ /*
52658+ * This bss-zeroing can fail if the ELF
52659+ * file specifies odd protections. So
52660+ * we don't check the return value
52661+ */
52662 }
52663
52664+#ifdef CONFIG_PAX_RANDMMAP
52665+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
52666+ unsigned long start, size, flags;
52667+ vm_flags_t vm_flags;
52668+
52669+ start = ELF_PAGEALIGN(elf_brk);
52670+ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
52671+ flags = MAP_FIXED | MAP_PRIVATE;
52672+ vm_flags = VM_DONTEXPAND | VM_DONTDUMP;
52673+
52674+ down_write(&current->mm->mmap_sem);
52675+ start = get_unmapped_area(NULL, start, PAGE_ALIGN(size), 0, flags);
52676+ retval = -ENOMEM;
52677+ if (!IS_ERR_VALUE(start) && !find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
52678+// if (current->personality & ADDR_NO_RANDOMIZE)
52679+// vm_flags |= VM_READ | VM_MAYREAD;
52680+ start = mmap_region(NULL, start, PAGE_ALIGN(size), vm_flags, 0);
52681+ retval = IS_ERR_VALUE(start) ? start : 0;
52682+ }
52683+ up_write(&current->mm->mmap_sem);
52684+ if (retval == 0)
52685+ retval = set_brk(start + size, start + size + PAGE_SIZE);
52686+ if (retval < 0) {
52687+ send_sig(SIGKILL, current, 0);
52688+ goto out_free_dentry;
52689+ }
52690+ }
52691+#endif
52692+
52693 if (elf_interpreter) {
52694- unsigned long interp_map_addr = 0;
52695-
52696 elf_entry = load_elf_interp(&loc->interp_elf_ex,
52697 interpreter,
52698- &interp_map_addr,
52699 load_bias);
52700 if (!IS_ERR((void *)elf_entry)) {
52701 /*
52702@@ -1124,7 +1579,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
52703 * Decide what to dump of a segment, part, all or none.
52704 */
52705 static unsigned long vma_dump_size(struct vm_area_struct *vma,
52706- unsigned long mm_flags)
52707+ unsigned long mm_flags, long signr)
52708 {
52709 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
52710
52711@@ -1162,7 +1617,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
52712 if (vma->vm_file == NULL)
52713 return 0;
52714
52715- if (FILTER(MAPPED_PRIVATE))
52716+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
52717 goto whole;
52718
52719 /*
52720@@ -1387,9 +1842,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
52721 {
52722 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
52723 int i = 0;
52724- do
52725+ do {
52726 i += 2;
52727- while (auxv[i - 2] != AT_NULL);
52728+ } while (auxv[i - 2] != AT_NULL);
52729 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
52730 }
52731
52732@@ -1398,7 +1853,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
52733 {
52734 mm_segment_t old_fs = get_fs();
52735 set_fs(KERNEL_DS);
52736- copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo);
52737+ copy_siginfo_to_user((user_siginfo_t __force_user *) csigdata, siginfo);
52738 set_fs(old_fs);
52739 fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
52740 }
52741@@ -2019,14 +2474,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
52742 }
52743
52744 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
52745- unsigned long mm_flags)
52746+ struct coredump_params *cprm)
52747 {
52748 struct vm_area_struct *vma;
52749 size_t size = 0;
52750
52751 for (vma = first_vma(current, gate_vma); vma != NULL;
52752 vma = next_vma(vma, gate_vma))
52753- size += vma_dump_size(vma, mm_flags);
52754+ size += vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
52755 return size;
52756 }
52757
52758@@ -2119,7 +2574,7 @@ static int elf_core_dump(struct coredump_params *cprm)
52759
52760 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
52761
52762- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
52763+ offset += elf_core_vma_data_size(gate_vma, cprm);
52764 offset += elf_core_extra_data_size();
52765 e_shoff = offset;
52766
52767@@ -2133,10 +2588,12 @@ static int elf_core_dump(struct coredump_params *cprm)
52768 offset = dataoff;
52769
52770 size += sizeof(*elf);
52771+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
52772 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
52773 goto end_coredump;
52774
52775 size += sizeof(*phdr4note);
52776+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
52777 if (size > cprm->limit
52778 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
52779 goto end_coredump;
52780@@ -2150,7 +2607,7 @@ static int elf_core_dump(struct coredump_params *cprm)
52781 phdr.p_offset = offset;
52782 phdr.p_vaddr = vma->vm_start;
52783 phdr.p_paddr = 0;
52784- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
52785+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
52786 phdr.p_memsz = vma->vm_end - vma->vm_start;
52787 offset += phdr.p_filesz;
52788 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
52789@@ -2161,6 +2618,7 @@ static int elf_core_dump(struct coredump_params *cprm)
52790 phdr.p_align = ELF_EXEC_PAGESIZE;
52791
52792 size += sizeof(phdr);
52793+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
52794 if (size > cprm->limit
52795 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
52796 goto end_coredump;
52797@@ -2185,7 +2643,7 @@ static int elf_core_dump(struct coredump_params *cprm)
52798 unsigned long addr;
52799 unsigned long end;
52800
52801- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
52802+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
52803
52804 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
52805 struct page *page;
52806@@ -2194,6 +2652,7 @@ static int elf_core_dump(struct coredump_params *cprm)
52807 page = get_dump_page(addr);
52808 if (page) {
52809 void *kaddr = kmap(page);
52810+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
52811 stop = ((size += PAGE_SIZE) > cprm->limit) ||
52812 !dump_write(cprm->file, kaddr,
52813 PAGE_SIZE);
52814@@ -2211,6 +2670,7 @@ static int elf_core_dump(struct coredump_params *cprm)
52815
52816 if (e_phnum == PN_XNUM) {
52817 size += sizeof(*shdr4extnum);
52818+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
52819 if (size > cprm->limit
52820 || !dump_write(cprm->file, shdr4extnum,
52821 sizeof(*shdr4extnum)))
52822@@ -2231,6 +2691,167 @@ out:
52823
52824 #endif /* CONFIG_ELF_CORE */
52825
52826+#ifdef CONFIG_PAX_MPROTECT
52827+/* PaX: non-PIC ELF libraries need relocations on their executable segments;
52828+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
52829+ * we'll remove VM_MAYWRITE for good on RELRO segments.
52830+ *
52831+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
52832+ * basis because we want to allow the common case and not the special ones.
52833+ */
52834+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
52835+{
52836+ struct elfhdr elf_h;
52837+ struct elf_phdr elf_p;
52838+ unsigned long i;
52839+ unsigned long oldflags;
52840+ bool is_textrel_rw, is_textrel_rx, is_relro;
52841+
52842+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT) || !vma->vm_file)
52843+ return;
52844+
52845+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
52846+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
52847+
52848+#ifdef CONFIG_PAX_ELFRELOCS
52849+ /* possible TEXTREL */
52850+ is_textrel_rw = !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
52851+ is_textrel_rx = vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
52852+#else
52853+ is_textrel_rw = false;
52854+ is_textrel_rx = false;
52855+#endif
52856+
52857+ /* possible RELRO */
52858+ is_relro = vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
52859+
52860+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
52861+ return;
52862+
52863+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
52864+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
52865+
52866+#ifdef CONFIG_PAX_ETEXECRELOCS
52867+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
52868+#else
52869+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
52870+#endif
52871+
52872+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
52873+ !elf_check_arch(&elf_h) ||
52874+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
52875+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
52876+ return;
52877+
52878+ for (i = 0UL; i < elf_h.e_phnum; i++) {
52879+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
52880+ return;
52881+ switch (elf_p.p_type) {
52882+ case PT_DYNAMIC:
52883+ if (!is_textrel_rw && !is_textrel_rx)
52884+ continue;
52885+ i = 0UL;
52886+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
52887+ elf_dyn dyn;
52888+
52889+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
52890+ break;
52891+ if (dyn.d_tag == DT_NULL)
52892+ break;
52893+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
52894+ gr_log_textrel(vma);
52895+ if (is_textrel_rw)
52896+ vma->vm_flags |= VM_MAYWRITE;
52897+ else
52898+ /* PaX: disallow write access after relocs are done, hopefully no one else needs it... */
52899+ vma->vm_flags &= ~VM_MAYWRITE;
52900+ break;
52901+ }
52902+ i++;
52903+ }
52904+ is_textrel_rw = false;
52905+ is_textrel_rx = false;
52906+ continue;
52907+
52908+ case PT_GNU_RELRO:
52909+ if (!is_relro)
52910+ continue;
52911+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
52912+ vma->vm_flags &= ~VM_MAYWRITE;
52913+ is_relro = false;
52914+ continue;
52915+
52916+#ifdef CONFIG_PAX_PT_PAX_FLAGS
52917+ case PT_PAX_FLAGS: {
52918+ const char *msg_mprotect = "", *msg_emutramp = "";
52919+ char *buffer_lib, *buffer_exe;
52920+
52921+ if (elf_p.p_flags & PF_NOMPROTECT)
52922+ msg_mprotect = "MPROTECT disabled";
52923+
52924+#ifdef CONFIG_PAX_EMUTRAMP
52925+ if (!(vma->vm_mm->pax_flags & MF_PAX_EMUTRAMP) && !(elf_p.p_flags & PF_NOEMUTRAMP))
52926+ msg_emutramp = "EMUTRAMP enabled";
52927+#endif
52928+
52929+ if (!msg_mprotect[0] && !msg_emutramp[0])
52930+ continue;
52931+
52932+ if (!printk_ratelimit())
52933+ continue;
52934+
52935+ buffer_lib = (char *)__get_free_page(GFP_KERNEL);
52936+ buffer_exe = (char *)__get_free_page(GFP_KERNEL);
52937+ if (buffer_lib && buffer_exe) {
52938+ char *path_lib, *path_exe;
52939+
52940+ path_lib = pax_get_path(&vma->vm_file->f_path, buffer_lib, PAGE_SIZE);
52941+ path_exe = pax_get_path(&vma->vm_mm->exe_file->f_path, buffer_exe, PAGE_SIZE);
52942+
52943+ pr_info("PAX: %s wants %s%s%s on %s\n", path_lib, msg_mprotect,
52944+ (msg_mprotect[0] && msg_emutramp[0] ? " and " : ""), msg_emutramp, path_exe);
52945+
52946+ }
52947+ free_page((unsigned long)buffer_exe);
52948+ free_page((unsigned long)buffer_lib);
52949+ continue;
52950+ }
52951+#endif
52952+
52953+ }
52954+ }
52955+}
52956+#endif
52957+
52958+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
52959+
52960+extern int grsec_enable_log_rwxmaps;
52961+
52962+static void elf_handle_mmap(struct file *file)
52963+{
52964+ struct elfhdr elf_h;
52965+ struct elf_phdr elf_p;
52966+ unsigned long i;
52967+
52968+ if (!grsec_enable_log_rwxmaps)
52969+ return;
52970+
52971+ if (sizeof(elf_h) != kernel_read(file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
52972+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
52973+ (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC) || !elf_check_arch(&elf_h) ||
52974+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
52975+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
52976+ return;
52977+
52978+ for (i = 0UL; i < elf_h.e_phnum; i++) {
52979+ if (sizeof(elf_p) != kernel_read(file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
52980+ return;
52981+ if (elf_p.p_type == PT_GNU_STACK && (elf_p.p_flags & PF_X))
52982+ gr_log_ptgnustack(file);
52983+ }
52984+}
52985+#endif
52986+
52987 static int __init init_elf_binfmt(void)
52988 {
52989 register_binfmt(&elf_format);
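
The PAX_RANDMMAP hunk above inserts a randomly sized VM_DONTEXPAND | VM_DONTDUMP guard mapping between the page-aligned end of the ELF brk and the process heap. The size expression keeps the low 22 bits of a random long and scales them by 16, so the gap ranges from PAGE_SIZE up to PAGE_SIZE + (2^22 - 1) * 16 bytes, just under 64 MiB, in 16-byte steps. A minimal userspace sketch of that arithmetic; rand64() is only a stand-in for the kernel-internal pax_get_random_long():

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL

/* placeholder entropy source; stands in for pax_get_random_long(),
 * which is not available outside the kernel */
static uint64_t rand64(void)
{
	return ((uint64_t)rand() << 32) | (uint64_t)rand();
}

int main(void)
{
	/* same arithmetic as the hunk: keep 22 random bits, scale by 16 */
	uint64_t gap = PAGE_SIZE + ((rand64() & ((1UL << 22) - 1UL)) << 4);
	uint64_t max = PAGE_SIZE + (((1UL << 22) - 1UL) << 4);

	/* gap ranges over [PAGE_SIZE, PAGE_SIZE + (2^22 - 1) * 16] */
	printf("gap: %llu bytes (max %llu)\n",
	       (unsigned long long)gap, (unsigned long long)max);
	return 0;
}
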
52990diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
52991index d50bbe5..af3b649 100644
52992--- a/fs/binfmt_flat.c
52993+++ b/fs/binfmt_flat.c
52994@@ -566,7 +566,9 @@ static int load_flat_file(struct linux_binprm * bprm,
52995 realdatastart = (unsigned long) -ENOMEM;
52996 printk("Unable to allocate RAM for process data, errno %d\n",
52997 (int)-realdatastart);
52998+ down_write(&current->mm->mmap_sem);
52999 vm_munmap(textpos, text_len);
53000+ up_write(&current->mm->mmap_sem);
53001 ret = realdatastart;
53002 goto err;
53003 }
53004@@ -590,8 +592,10 @@ static int load_flat_file(struct linux_binprm * bprm,
53005 }
53006 if (IS_ERR_VALUE(result)) {
53007 printk("Unable to read data+bss, errno %d\n", (int)-result);
53008+ down_write(&current->mm->mmap_sem);
53009 vm_munmap(textpos, text_len);
53010 vm_munmap(realdatastart, len);
53011+ up_write(&current->mm->mmap_sem);
53012 ret = result;
53013 goto err;
53014 }
53015@@ -653,8 +657,10 @@ static int load_flat_file(struct linux_binprm * bprm,
53016 }
53017 if (IS_ERR_VALUE(result)) {
53018 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
53019+ down_write(&current->mm->mmap_sem);
53020 vm_munmap(textpos, text_len + data_len + extra +
53021 MAX_SHARED_LIBS * sizeof(unsigned long));
53022+ up_write(&current->mm->mmap_sem);
53023 ret = result;
53024 goto err;
53025 }
53026diff --git a/fs/bio.c b/fs/bio.c
53027index 94bbc04..599e3cf 100644
53028--- a/fs/bio.c
53029+++ b/fs/bio.c
53030@@ -1045,12 +1045,22 @@ static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
53031 int bio_uncopy_user(struct bio *bio)
53032 {
53033 struct bio_map_data *bmd = bio->bi_private;
53034- int ret = 0;
53035+ struct bio_vec *bvec;
53036+ int ret = 0, i;
53037
53038- if (!bio_flagged(bio, BIO_NULL_MAPPED))
53039- ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
53040- bmd->nr_sgvecs, bio_data_dir(bio) == READ,
53041- 0, bmd->is_our_pages);
53042+ if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
53043+ /*
53044+ * if we're in a workqueue, the request is orphaned, so
53045+ * don't copy into a random user address space, just free.
53046+ */
53047+ if (current->mm)
53048+ ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
53049+ bmd->nr_sgvecs, bio_data_dir(bio) == READ,
53050+ 0, bmd->is_our_pages);
53051+ else if (bmd->is_our_pages)
53052+ bio_for_each_segment_all(bvec, bio, i)
53053+ __free_page(bvec->bv_page);
53054+ }
53055 bio_free_map_data(bmd);
53056 bio_put(bio);
53057 return ret;
53058@@ -1096,7 +1106,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
53059 /*
53060 * Overflow, abort
53061 */
53062- if (end < start)
53063+ if (end < start || end - start > INT_MAX - nr_pages)
53064 return ERR_PTR(-EINVAL);
53065
53066 nr_pages += end - start;
53067@@ -1230,7 +1240,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
53068 /*
53069 * Overflow, abort
53070 */
53071- if (end < start)
53072+ if (end < start || end - start > INT_MAX - nr_pages)
53073 return ERR_PTR(-EINVAL);
53074
53075 nr_pages += end - start;
53076@@ -1492,7 +1502,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
53077 const int read = bio_data_dir(bio) == READ;
53078 struct bio_map_data *bmd = bio->bi_private;
53079 int i;
53080- char *p = bmd->sgvecs[0].iov_base;
53081+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
53082
53083 bio_for_each_segment_all(bvec, bio, i) {
53084 char *addr = page_address(bvec->bv_page);
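
Both bio.c hunks above harden the same check: `end < start` alone catches a wrapped range, but not the case where `nr_pages += end - start` itself overflows the signed accumulator, so the patch additionally rejects `end - start > INT_MAX - nr_pages`. A standalone sketch of the pattern, not the kernel code itself:

#include <limits.h>
#include <stdbool.h>

/* Reject a wrapped range and an addition that would overflow the signed
 * page-count accumulator; mirrors the hardened check in the hunks above. */
static bool add_page_range(int *nr_pages, unsigned long start, unsigned long end)
{
	if (end < start)                        /* range wrapped around */
		return false;
	if (end - start > (unsigned long)(INT_MAX - *nr_pages))
		return false;                   /* total would overflow */
	*nr_pages += (int)(end - start);
	return true;
}

int main(void)
{
	int nr_pages = 0;
	return add_page_range(&nr_pages, 0UL, 16UL) ? 0 : 1;
}
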
53085diff --git a/fs/block_dev.c b/fs/block_dev.c
53086index 85f5c85..d6f0b1a 100644
53087--- a/fs/block_dev.c
53088+++ b/fs/block_dev.c
53089@@ -658,7 +658,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
53090 else if (bdev->bd_contains == bdev)
53091 return true; /* is a whole device which isn't held */
53092
53093- else if (whole->bd_holder == bd_may_claim)
53094+ else if (whole->bd_holder == (void *)bd_may_claim)
53095 return true; /* is a partition of a device that is being partitioned */
53096 else if (whole->bd_holder != NULL)
53097 return false; /* is a partition of a held device */
53098diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
53099index 7fb054b..ad36c67 100644
53100--- a/fs/btrfs/ctree.c
53101+++ b/fs/btrfs/ctree.c
53102@@ -1076,9 +1076,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
53103 free_extent_buffer(buf);
53104 add_root_to_dirty_list(root);
53105 } else {
53106- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
53107- parent_start = parent->start;
53108- else
53109+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
53110+ if (parent)
53111+ parent_start = parent->start;
53112+ else
53113+ parent_start = 0;
53114+ } else
53115 parent_start = 0;
53116
53117 WARN_ON(trans->transid != btrfs_header_generation(parent));
53118diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
53119index 0f81d67..0ad55fe 100644
53120--- a/fs/btrfs/ioctl.c
53121+++ b/fs/btrfs/ioctl.c
53122@@ -3084,9 +3084,12 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
53123 for (i = 0; i < num_types; i++) {
53124 struct btrfs_space_info *tmp;
53125
53126+ /* Don't copy in more than we allocated */
53127 if (!slot_count)
53128 break;
53129
53130+ slot_count--;
53131+
53132 info = NULL;
53133 rcu_read_lock();
53134 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
53135@@ -3108,10 +3111,7 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
53136 memcpy(dest, &space, sizeof(space));
53137 dest++;
53138 space_args.total_spaces++;
53139- slot_count--;
53140 }
53141- if (!slot_count)
53142- break;
53143 }
53144 up_read(&info->groups_sem);
53145 }
53146diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
53147index f0857e0..e7023c5 100644
53148--- a/fs/btrfs/super.c
53149+++ b/fs/btrfs/super.c
53150@@ -265,7 +265,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
53151 function, line, errstr);
53152 return;
53153 }
53154- ACCESS_ONCE(trans->transaction->aborted) = errno;
53155+ ACCESS_ONCE_RW(trans->transaction->aborted) = errno;
53156 __btrfs_std_error(root->fs_info, function, line, errno, NULL);
53157 }
53158 /*
53159diff --git a/fs/buffer.c b/fs/buffer.c
53160index d2a4d1b..df798ca 100644
53161--- a/fs/buffer.c
53162+++ b/fs/buffer.c
53163@@ -3367,7 +3367,7 @@ void __init buffer_init(void)
53164 bh_cachep = kmem_cache_create("buffer_head",
53165 sizeof(struct buffer_head), 0,
53166 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
53167- SLAB_MEM_SPREAD),
53168+ SLAB_MEM_SPREAD|SLAB_NO_SANITIZE),
53169 NULL);
53170
53171 /*
53172diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
53173index 622f469..e8d2d55 100644
53174--- a/fs/cachefiles/bind.c
53175+++ b/fs/cachefiles/bind.c
53176@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
53177 args);
53178
53179 /* start by checking things over */
53180- ASSERT(cache->fstop_percent >= 0 &&
53181- cache->fstop_percent < cache->fcull_percent &&
53182+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
53183 cache->fcull_percent < cache->frun_percent &&
53184 cache->frun_percent < 100);
53185
53186- ASSERT(cache->bstop_percent >= 0 &&
53187- cache->bstop_percent < cache->bcull_percent &&
53188+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
53189 cache->bcull_percent < cache->brun_percent &&
53190 cache->brun_percent < 100);
53191
53192diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
53193index 0a1467b..6a53245 100644
53194--- a/fs/cachefiles/daemon.c
53195+++ b/fs/cachefiles/daemon.c
53196@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
53197 if (n > buflen)
53198 return -EMSGSIZE;
53199
53200- if (copy_to_user(_buffer, buffer, n) != 0)
53201+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
53202 return -EFAULT;
53203
53204 return n;
53205@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
53206 if (test_bit(CACHEFILES_DEAD, &cache->flags))
53207 return -EIO;
53208
53209- if (datalen < 0 || datalen > PAGE_SIZE - 1)
53210+ if (datalen > PAGE_SIZE - 1)
53211 return -EOPNOTSUPP;
53212
53213 /* drag the command string into the kernel so we can parse it */
53214@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
53215 if (args[0] != '%' || args[1] != '\0')
53216 return -EINVAL;
53217
53218- if (fstop < 0 || fstop >= cache->fcull_percent)
53219+ if (fstop >= cache->fcull_percent)
53220 return cachefiles_daemon_range_error(cache, args);
53221
53222 cache->fstop_percent = fstop;
53223@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
53224 if (args[0] != '%' || args[1] != '\0')
53225 return -EINVAL;
53226
53227- if (bstop < 0 || bstop >= cache->bcull_percent)
53228+ if (bstop >= cache->bcull_percent)
53229 return cachefiles_daemon_range_error(cache, args);
53230
53231 cache->bstop_percent = bstop;
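
The cachefiles_daemon_read() fix caps the reported length against the size of the kernel buffer the status line was formatted into, instead of trusting the computed n alone; the other hunks drop comparisons that are dead once the values are unsigned. A userspace sketch of the bounded copy, with memcpy() standing in for copy_to_user():

#include <string.h>

/* Cap the reported length against both the caller's buffer and the
 * kernel buffer the text was formatted into. */
static long bounded_read(char *dst, size_t dstlen,
                         const char *kbuf, size_t kbufsz, size_t n)
{
	if (n > dstlen)
		return -1;          /* -EMSGSIZE in the kernel version */
	if (n > kbufsz)
		return -2;          /* the added check: never read past kbuf */
	memcpy(dst, kbuf, n);       /* stands in for copy_to_user() */
	return (long)n;
}

int main(void)
{
	char out[64], kbuf[32] = "frun=7% fcull=5% fstop=3%";
	return bounded_read(out, sizeof(out), kbuf, sizeof(kbuf),
	                    strlen(kbuf)) > 0 ? 0 : 1;
}
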
53232diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
53233index 4938251..7e01445 100644
53234--- a/fs/cachefiles/internal.h
53235+++ b/fs/cachefiles/internal.h
53236@@ -59,7 +59,7 @@ struct cachefiles_cache {
53237 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
53238 struct rb_root active_nodes; /* active nodes (can't be culled) */
53239 rwlock_t active_lock; /* lock for active_nodes */
53240- atomic_t gravecounter; /* graveyard uniquifier */
53241+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
53242 unsigned frun_percent; /* when to stop culling (% files) */
53243 unsigned fcull_percent; /* when to start culling (% files) */
53244 unsigned fstop_percent; /* when to stop allocating (% files) */
53245@@ -171,19 +171,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
53246 * proc.c
53247 */
53248 #ifdef CONFIG_CACHEFILES_HISTOGRAM
53249-extern atomic_t cachefiles_lookup_histogram[HZ];
53250-extern atomic_t cachefiles_mkdir_histogram[HZ];
53251-extern atomic_t cachefiles_create_histogram[HZ];
53252+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
53253+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
53254+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
53255
53256 extern int __init cachefiles_proc_init(void);
53257 extern void cachefiles_proc_cleanup(void);
53258 static inline
53259-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
53260+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
53261 {
53262 unsigned long jif = jiffies - start_jif;
53263 if (jif >= HZ)
53264 jif = HZ - 1;
53265- atomic_inc(&histogram[jif]);
53266+ atomic_inc_unchecked(&histogram[jif]);
53267 }
53268
53269 #else
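
The atomic_t to atomic_unchecked_t conversions here, and in the cifs, ceph, coda, and coredump sections below, all follow one PaX REFCOUNT idea: ordinary atomic_t operations are instrumented to trap on overflow (to stop refcount-overflow exploits), so counters where wrap-around is harmless (statistics, histograms, uniquifiers) move to an unchecked variant that keeps plain wrapping semantics. A conceptual model only, not the per-architecture grsecurity implementation, which instruments the atomics in assembly:

#include <limits.h>
#include <stdlib.h>

typedef struct { volatile int counter; } atomic_t;
typedef struct { volatile int counter; } atomic_unchecked_t;

/* models the PaX overflow trap */
static void atomic_inc(atomic_t *v)
{
	if (v->counter == INT_MAX)
		abort();
	__atomic_add_fetch(&v->counter, 1, __ATOMIC_RELAXED);
}

/* statistics counter: silent wrap-around is acceptable here */
static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	__atomic_add_fetch(&v->counter, 1, __ATOMIC_RELAXED);
}

int main(void)
{
	atomic_unchecked_t hist = { 0 };
	atomic_inc_unchecked(&hist);
	return hist.counter == 1 ? 0 : 1;
}
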
53270diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
53271index 8c01c5fc..15f982e 100644
53272--- a/fs/cachefiles/namei.c
53273+++ b/fs/cachefiles/namei.c
53274@@ -317,7 +317,7 @@ try_again:
53275 /* first step is to make up a grave dentry in the graveyard */
53276 sprintf(nbuffer, "%08x%08x",
53277 (uint32_t) get_seconds(),
53278- (uint32_t) atomic_inc_return(&cache->gravecounter));
53279+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
53280
53281 /* do the multiway lock magic */
53282 trap = lock_rename(cache->graveyard, dir);
53283diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
53284index eccd339..4c1d995 100644
53285--- a/fs/cachefiles/proc.c
53286+++ b/fs/cachefiles/proc.c
53287@@ -14,9 +14,9 @@
53288 #include <linux/seq_file.h>
53289 #include "internal.h"
53290
53291-atomic_t cachefiles_lookup_histogram[HZ];
53292-atomic_t cachefiles_mkdir_histogram[HZ];
53293-atomic_t cachefiles_create_histogram[HZ];
53294+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
53295+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
53296+atomic_unchecked_t cachefiles_create_histogram[HZ];
53297
53298 /*
53299 * display the latency histogram
53300@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
53301 return 0;
53302 default:
53303 index = (unsigned long) v - 3;
53304- x = atomic_read(&cachefiles_lookup_histogram[index]);
53305- y = atomic_read(&cachefiles_mkdir_histogram[index]);
53306- z = atomic_read(&cachefiles_create_histogram[index]);
53307+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
53308+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
53309+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
53310 if (x == 0 && y == 0 && z == 0)
53311 return 0;
53312
53313diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
53314index 317f9ee..3d24511 100644
53315--- a/fs/cachefiles/rdwr.c
53316+++ b/fs/cachefiles/rdwr.c
53317@@ -966,7 +966,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
53318 old_fs = get_fs();
53319 set_fs(KERNEL_DS);
53320 ret = file->f_op->write(
53321- file, (const void __user *) data, len, &pos);
53322+ file, (const void __force_user *) data, len, &pos);
53323 set_fs(old_fs);
53324 kunmap(page);
53325 file_end_write(file);
53326diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
53327index f02d82b..2632cf86 100644
53328--- a/fs/ceph/dir.c
53329+++ b/fs/ceph/dir.c
53330@@ -243,7 +243,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
53331 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
53332 struct ceph_mds_client *mdsc = fsc->mdsc;
53333 unsigned frag = fpos_frag(filp->f_pos);
53334- int off = fpos_off(filp->f_pos);
53335+ unsigned int off = fpos_off(filp->f_pos);
53336 int err;
53337 u32 ftype;
53338 struct ceph_mds_reply_info_parsed *rinfo;
53339diff --git a/fs/ceph/super.c b/fs/ceph/super.c
53340index 7d377c9..3fb6559 100644
53341--- a/fs/ceph/super.c
53342+++ b/fs/ceph/super.c
53343@@ -839,7 +839,7 @@ static int ceph_compare_super(struct super_block *sb, void *data)
53344 /*
53345 * construct our own bdi so we can control readahead, etc.
53346 */
53347-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
53348+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
53349
53350 static int ceph_register_bdi(struct super_block *sb,
53351 struct ceph_fs_client *fsc)
53352@@ -856,7 +856,7 @@ static int ceph_register_bdi(struct super_block *sb,
53353 default_backing_dev_info.ra_pages;
53354
53355 err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld",
53356- atomic_long_inc_return(&bdi_seq));
53357+ atomic_long_inc_return_unchecked(&bdi_seq));
53358 if (!err)
53359 sb->s_bdi = &fsc->backing_dev_info;
53360 return err;
53361diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
53362index d597483..747901b 100644
53363--- a/fs/cifs/cifs_debug.c
53364+++ b/fs/cifs/cifs_debug.c
53365@@ -284,8 +284,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
53366
53367 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
53368 #ifdef CONFIG_CIFS_STATS2
53369- atomic_set(&totBufAllocCount, 0);
53370- atomic_set(&totSmBufAllocCount, 0);
53371+ atomic_set_unchecked(&totBufAllocCount, 0);
53372+ atomic_set_unchecked(&totSmBufAllocCount, 0);
53373 #endif /* CONFIG_CIFS_STATS2 */
53374 spin_lock(&cifs_tcp_ses_lock);
53375 list_for_each(tmp1, &cifs_tcp_ses_list) {
53376@@ -298,7 +298,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
53377 tcon = list_entry(tmp3,
53378 struct cifs_tcon,
53379 tcon_list);
53380- atomic_set(&tcon->num_smbs_sent, 0);
53381+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
53382 if (server->ops->clear_stats)
53383 server->ops->clear_stats(tcon);
53384 }
53385@@ -330,8 +330,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
53386 smBufAllocCount.counter, cifs_min_small);
53387 #ifdef CONFIG_CIFS_STATS2
53388 seq_printf(m, "Total Large %d Small %d Allocations\n",
53389- atomic_read(&totBufAllocCount),
53390- atomic_read(&totSmBufAllocCount));
53391+ atomic_read_unchecked(&totBufAllocCount),
53392+ atomic_read_unchecked(&totSmBufAllocCount));
53393 #endif /* CONFIG_CIFS_STATS2 */
53394
53395 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
53396@@ -360,7 +360,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
53397 if (tcon->need_reconnect)
53398 seq_puts(m, "\tDISCONNECTED ");
53399 seq_printf(m, "\nSMBs: %d",
53400- atomic_read(&tcon->num_smbs_sent));
53401+ atomic_read_unchecked(&tcon->num_smbs_sent));
53402 if (server->ops->print_stats)
53403 server->ops->print_stats(m, tcon);
53404 }
53405diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
53406index 3752b9f..8db5569 100644
53407--- a/fs/cifs/cifsfs.c
53408+++ b/fs/cifs/cifsfs.c
53409@@ -1035,7 +1035,7 @@ cifs_init_request_bufs(void)
53410 */
53411 cifs_req_cachep = kmem_cache_create("cifs_request",
53412 CIFSMaxBufSize + max_hdr_size, 0,
53413- SLAB_HWCACHE_ALIGN, NULL);
53414+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
53415 if (cifs_req_cachep == NULL)
53416 return -ENOMEM;
53417
53418@@ -1062,7 +1062,7 @@ cifs_init_request_bufs(void)
53419 efficient to alloc 1 per page off the slab compared to 17K (5page)
53420 alloc of large cifs buffers even when page debugging is on */
53421 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
53422- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
53423+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
53424 NULL);
53425 if (cifs_sm_req_cachep == NULL) {
53426 mempool_destroy(cifs_req_poolp);
53427@@ -1147,8 +1147,8 @@ init_cifs(void)
53428 atomic_set(&bufAllocCount, 0);
53429 atomic_set(&smBufAllocCount, 0);
53430 #ifdef CONFIG_CIFS_STATS2
53431- atomic_set(&totBufAllocCount, 0);
53432- atomic_set(&totSmBufAllocCount, 0);
53433+ atomic_set_unchecked(&totBufAllocCount, 0);
53434+ atomic_set_unchecked(&totSmBufAllocCount, 0);
53435 #endif /* CONFIG_CIFS_STATS2 */
53436
53437 atomic_set(&midCount, 0);
53438diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
53439index ea3a0b3..0194e39 100644
53440--- a/fs/cifs/cifsglob.h
53441+++ b/fs/cifs/cifsglob.h
53442@@ -752,35 +752,35 @@ struct cifs_tcon {
53443 __u16 Flags; /* optional support bits */
53444 enum statusEnum tidStatus;
53445 #ifdef CONFIG_CIFS_STATS
53446- atomic_t num_smbs_sent;
53447+ atomic_unchecked_t num_smbs_sent;
53448 union {
53449 struct {
53450- atomic_t num_writes;
53451- atomic_t num_reads;
53452- atomic_t num_flushes;
53453- atomic_t num_oplock_brks;
53454- atomic_t num_opens;
53455- atomic_t num_closes;
53456- atomic_t num_deletes;
53457- atomic_t num_mkdirs;
53458- atomic_t num_posixopens;
53459- atomic_t num_posixmkdirs;
53460- atomic_t num_rmdirs;
53461- atomic_t num_renames;
53462- atomic_t num_t2renames;
53463- atomic_t num_ffirst;
53464- atomic_t num_fnext;
53465- atomic_t num_fclose;
53466- atomic_t num_hardlinks;
53467- atomic_t num_symlinks;
53468- atomic_t num_locks;
53469- atomic_t num_acl_get;
53470- atomic_t num_acl_set;
53471+ atomic_unchecked_t num_writes;
53472+ atomic_unchecked_t num_reads;
53473+ atomic_unchecked_t num_flushes;
53474+ atomic_unchecked_t num_oplock_brks;
53475+ atomic_unchecked_t num_opens;
53476+ atomic_unchecked_t num_closes;
53477+ atomic_unchecked_t num_deletes;
53478+ atomic_unchecked_t num_mkdirs;
53479+ atomic_unchecked_t num_posixopens;
53480+ atomic_unchecked_t num_posixmkdirs;
53481+ atomic_unchecked_t num_rmdirs;
53482+ atomic_unchecked_t num_renames;
53483+ atomic_unchecked_t num_t2renames;
53484+ atomic_unchecked_t num_ffirst;
53485+ atomic_unchecked_t num_fnext;
53486+ atomic_unchecked_t num_fclose;
53487+ atomic_unchecked_t num_hardlinks;
53488+ atomic_unchecked_t num_symlinks;
53489+ atomic_unchecked_t num_locks;
53490+ atomic_unchecked_t num_acl_get;
53491+ atomic_unchecked_t num_acl_set;
53492 } cifs_stats;
53493 #ifdef CONFIG_CIFS_SMB2
53494 struct {
53495- atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
53496- atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
53497+ atomic_unchecked_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
53498+ atomic_unchecked_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
53499 } smb2_stats;
53500 #endif /* CONFIG_CIFS_SMB2 */
53501 } stats;
53502@@ -1081,7 +1081,7 @@ convert_delimiter(char *path, char delim)
53503 }
53504
53505 #ifdef CONFIG_CIFS_STATS
53506-#define cifs_stats_inc atomic_inc
53507+#define cifs_stats_inc atomic_inc_unchecked
53508
53509 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
53510 unsigned int bytes)
53511@@ -1446,8 +1446,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
53512 /* Various Debug counters */
53513 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
53514 #ifdef CONFIG_CIFS_STATS2
53515-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
53516-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
53517+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
53518+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
53519 #endif
53520 GLOBAL_EXTERN atomic_t smBufAllocCount;
53521 GLOBAL_EXTERN atomic_t midCount;
53522diff --git a/fs/cifs/link.c b/fs/cifs/link.c
53523index b83c3f5..6437caa 100644
53524--- a/fs/cifs/link.c
53525+++ b/fs/cifs/link.c
53526@@ -616,7 +616,7 @@ symlink_exit:
53527
53528 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
53529 {
53530- char *p = nd_get_link(nd);
53531+ const char *p = nd_get_link(nd);
53532 if (!IS_ERR(p))
53533 kfree(p);
53534 }
53535diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
53536index 1bec014..f329411 100644
53537--- a/fs/cifs/misc.c
53538+++ b/fs/cifs/misc.c
53539@@ -169,7 +169,7 @@ cifs_buf_get(void)
53540 memset(ret_buf, 0, buf_size + 3);
53541 atomic_inc(&bufAllocCount);
53542 #ifdef CONFIG_CIFS_STATS2
53543- atomic_inc(&totBufAllocCount);
53544+ atomic_inc_unchecked(&totBufAllocCount);
53545 #endif /* CONFIG_CIFS_STATS2 */
53546 }
53547
53548@@ -204,7 +204,7 @@ cifs_small_buf_get(void)
53549 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
53550 atomic_inc(&smBufAllocCount);
53551 #ifdef CONFIG_CIFS_STATS2
53552- atomic_inc(&totSmBufAllocCount);
53553+ atomic_inc_unchecked(&totSmBufAllocCount);
53554 #endif /* CONFIG_CIFS_STATS2 */
53555
53556 }
53557diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
53558index 3efdb9d..e845a5e 100644
53559--- a/fs/cifs/smb1ops.c
53560+++ b/fs/cifs/smb1ops.c
53561@@ -591,27 +591,27 @@ static void
53562 cifs_clear_stats(struct cifs_tcon *tcon)
53563 {
53564 #ifdef CONFIG_CIFS_STATS
53565- atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
53566- atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
53567- atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
53568- atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
53569- atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
53570- atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
53571- atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
53572- atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
53573- atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
53574- atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
53575- atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
53576- atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
53577- atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
53578- atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
53579- atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
53580- atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
53581- atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
53582- atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
53583- atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
53584- atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
53585- atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
53586+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_writes, 0);
53587+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_reads, 0);
53588+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_flushes, 0);
53589+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_oplock_brks, 0);
53590+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_opens, 0);
53591+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixopens, 0);
53592+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
53593+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_closes, 0);
53594+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_deletes, 0);
53595+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_mkdirs, 0);
53596+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_rmdirs, 0);
53597+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_renames, 0);
53598+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_t2renames, 0);
53599+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_ffirst, 0);
53600+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fnext, 0);
53601+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fclose, 0);
53602+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_hardlinks, 0);
53603+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_symlinks, 0);
53604+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_locks, 0);
53605+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_get, 0);
53606+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_set, 0);
53607 #endif
53608 }
53609
53610@@ -620,36 +620,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
53611 {
53612 #ifdef CONFIG_CIFS_STATS
53613 seq_printf(m, " Oplocks breaks: %d",
53614- atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
53615+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_oplock_brks));
53616 seq_printf(m, "\nReads: %d Bytes: %llu",
53617- atomic_read(&tcon->stats.cifs_stats.num_reads),
53618+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_reads),
53619 (long long)(tcon->bytes_read));
53620 seq_printf(m, "\nWrites: %d Bytes: %llu",
53621- atomic_read(&tcon->stats.cifs_stats.num_writes),
53622+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_writes),
53623 (long long)(tcon->bytes_written));
53624 seq_printf(m, "\nFlushes: %d",
53625- atomic_read(&tcon->stats.cifs_stats.num_flushes));
53626+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_flushes));
53627 seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
53628- atomic_read(&tcon->stats.cifs_stats.num_locks),
53629- atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
53630- atomic_read(&tcon->stats.cifs_stats.num_symlinks));
53631+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_locks),
53632+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_hardlinks),
53633+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_symlinks));
53634 seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
53635- atomic_read(&tcon->stats.cifs_stats.num_opens),
53636- atomic_read(&tcon->stats.cifs_stats.num_closes),
53637- atomic_read(&tcon->stats.cifs_stats.num_deletes));
53638+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_opens),
53639+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_closes),
53640+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_deletes));
53641 seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
53642- atomic_read(&tcon->stats.cifs_stats.num_posixopens),
53643- atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
53644+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixopens),
53645+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs));
53646 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
53647- atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
53648- atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
53649+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_mkdirs),
53650+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_rmdirs));
53651 seq_printf(m, "\nRenames: %d T2 Renames %d",
53652- atomic_read(&tcon->stats.cifs_stats.num_renames),
53653- atomic_read(&tcon->stats.cifs_stats.num_t2renames));
53654+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_renames),
53655+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_t2renames));
53656 seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
53657- atomic_read(&tcon->stats.cifs_stats.num_ffirst),
53658- atomic_read(&tcon->stats.cifs_stats.num_fnext),
53659- atomic_read(&tcon->stats.cifs_stats.num_fclose));
53660+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_ffirst),
53661+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fnext),
53662+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fclose));
53663 #endif
53664 }
53665
53666diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
53667index f2e76f3..c44fac7 100644
53668--- a/fs/cifs/smb2ops.c
53669+++ b/fs/cifs/smb2ops.c
53670@@ -274,8 +274,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
53671 #ifdef CONFIG_CIFS_STATS
53672 int i;
53673 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
53674- atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
53675- atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
53676+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
53677+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
53678 }
53679 #endif
53680 }
53681@@ -284,66 +284,66 @@ static void
53682 smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
53683 {
53684 #ifdef CONFIG_CIFS_STATS
53685- atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
53686- atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
53687+ atomic_unchecked_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
53688+ atomic_unchecked_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
53689 seq_printf(m, "\nNegotiates: %d sent %d failed",
53690- atomic_read(&sent[SMB2_NEGOTIATE_HE]),
53691- atomic_read(&failed[SMB2_NEGOTIATE_HE]));
53692+ atomic_read_unchecked(&sent[SMB2_NEGOTIATE_HE]),
53693+ atomic_read_unchecked(&failed[SMB2_NEGOTIATE_HE]));
53694 seq_printf(m, "\nSessionSetups: %d sent %d failed",
53695- atomic_read(&sent[SMB2_SESSION_SETUP_HE]),
53696- atomic_read(&failed[SMB2_SESSION_SETUP_HE]));
53697+ atomic_read_unchecked(&sent[SMB2_SESSION_SETUP_HE]),
53698+ atomic_read_unchecked(&failed[SMB2_SESSION_SETUP_HE]));
53699 #define SMB2LOGOFF 0x0002 /* trivial request/resp */
53700 seq_printf(m, "\nLogoffs: %d sent %d failed",
53701- atomic_read(&sent[SMB2_LOGOFF_HE]),
53702- atomic_read(&failed[SMB2_LOGOFF_HE]));
53703+ atomic_read_unchecked(&sent[SMB2_LOGOFF_HE]),
53704+ atomic_read_unchecked(&failed[SMB2_LOGOFF_HE]));
53705 seq_printf(m, "\nTreeConnects: %d sent %d failed",
53706- atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
53707- atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
53708+ atomic_read_unchecked(&sent[SMB2_TREE_CONNECT_HE]),
53709+ atomic_read_unchecked(&failed[SMB2_TREE_CONNECT_HE]));
53710 seq_printf(m, "\nTreeDisconnects: %d sent %d failed",
53711- atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
53712- atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
53713+ atomic_read_unchecked(&sent[SMB2_TREE_DISCONNECT_HE]),
53714+ atomic_read_unchecked(&failed[SMB2_TREE_DISCONNECT_HE]));
53715 seq_printf(m, "\nCreates: %d sent %d failed",
53716- atomic_read(&sent[SMB2_CREATE_HE]),
53717- atomic_read(&failed[SMB2_CREATE_HE]));
53718+ atomic_read_unchecked(&sent[SMB2_CREATE_HE]),
53719+ atomic_read_unchecked(&failed[SMB2_CREATE_HE]));
53720 seq_printf(m, "\nCloses: %d sent %d failed",
53721- atomic_read(&sent[SMB2_CLOSE_HE]),
53722- atomic_read(&failed[SMB2_CLOSE_HE]));
53723+ atomic_read_unchecked(&sent[SMB2_CLOSE_HE]),
53724+ atomic_read_unchecked(&failed[SMB2_CLOSE_HE]));
53725 seq_printf(m, "\nFlushes: %d sent %d failed",
53726- atomic_read(&sent[SMB2_FLUSH_HE]),
53727- atomic_read(&failed[SMB2_FLUSH_HE]));
53728+ atomic_read_unchecked(&sent[SMB2_FLUSH_HE]),
53729+ atomic_read_unchecked(&failed[SMB2_FLUSH_HE]));
53730 seq_printf(m, "\nReads: %d sent %d failed",
53731- atomic_read(&sent[SMB2_READ_HE]),
53732- atomic_read(&failed[SMB2_READ_HE]));
53733+ atomic_read_unchecked(&sent[SMB2_READ_HE]),
53734+ atomic_read_unchecked(&failed[SMB2_READ_HE]));
53735 seq_printf(m, "\nWrites: %d sent %d failed",
53736- atomic_read(&sent[SMB2_WRITE_HE]),
53737- atomic_read(&failed[SMB2_WRITE_HE]));
53738+ atomic_read_unchecked(&sent[SMB2_WRITE_HE]),
53739+ atomic_read_unchecked(&failed[SMB2_WRITE_HE]));
53740 seq_printf(m, "\nLocks: %d sent %d failed",
53741- atomic_read(&sent[SMB2_LOCK_HE]),
53742- atomic_read(&failed[SMB2_LOCK_HE]));
53743+ atomic_read_unchecked(&sent[SMB2_LOCK_HE]),
53744+ atomic_read_unchecked(&failed[SMB2_LOCK_HE]));
53745 seq_printf(m, "\nIOCTLs: %d sent %d failed",
53746- atomic_read(&sent[SMB2_IOCTL_HE]),
53747- atomic_read(&failed[SMB2_IOCTL_HE]));
53748+ atomic_read_unchecked(&sent[SMB2_IOCTL_HE]),
53749+ atomic_read_unchecked(&failed[SMB2_IOCTL_HE]));
53750 seq_printf(m, "\nCancels: %d sent %d failed",
53751- atomic_read(&sent[SMB2_CANCEL_HE]),
53752- atomic_read(&failed[SMB2_CANCEL_HE]));
53753+ atomic_read_unchecked(&sent[SMB2_CANCEL_HE]),
53754+ atomic_read_unchecked(&failed[SMB2_CANCEL_HE]));
53755 seq_printf(m, "\nEchos: %d sent %d failed",
53756- atomic_read(&sent[SMB2_ECHO_HE]),
53757- atomic_read(&failed[SMB2_ECHO_HE]));
53758+ atomic_read_unchecked(&sent[SMB2_ECHO_HE]),
53759+ atomic_read_unchecked(&failed[SMB2_ECHO_HE]));
53760 seq_printf(m, "\nQueryDirectories: %d sent %d failed",
53761- atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
53762- atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
53763+ atomic_read_unchecked(&sent[SMB2_QUERY_DIRECTORY_HE]),
53764+ atomic_read_unchecked(&failed[SMB2_QUERY_DIRECTORY_HE]));
53765 seq_printf(m, "\nChangeNotifies: %d sent %d failed",
53766- atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
53767- atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
53768+ atomic_read_unchecked(&sent[SMB2_CHANGE_NOTIFY_HE]),
53769+ atomic_read_unchecked(&failed[SMB2_CHANGE_NOTIFY_HE]));
53770 seq_printf(m, "\nQueryInfos: %d sent %d failed",
53771- atomic_read(&sent[SMB2_QUERY_INFO_HE]),
53772- atomic_read(&failed[SMB2_QUERY_INFO_HE]));
53773+ atomic_read_unchecked(&sent[SMB2_QUERY_INFO_HE]),
53774+ atomic_read_unchecked(&failed[SMB2_QUERY_INFO_HE]));
53775 seq_printf(m, "\nSetInfos: %d sent %d failed",
53776- atomic_read(&sent[SMB2_SET_INFO_HE]),
53777- atomic_read(&failed[SMB2_SET_INFO_HE]));
53778+ atomic_read_unchecked(&sent[SMB2_SET_INFO_HE]),
53779+ atomic_read_unchecked(&failed[SMB2_SET_INFO_HE]));
53780 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
53781- atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
53782- atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
53783+ atomic_read_unchecked(&sent[SMB2_OPLOCK_BREAK_HE]),
53784+ atomic_read_unchecked(&failed[SMB2_OPLOCK_BREAK_HE]));
53785 #endif
53786 }
53787
53788diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
53789index 2b95ce2..d079d75 100644
53790--- a/fs/cifs/smb2pdu.c
53791+++ b/fs/cifs/smb2pdu.c
53792@@ -1760,8 +1760,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
53793 default:
53794 cifs_dbg(VFS, "info level %u isn't supported\n",
53795 srch_inf->info_level);
53796- rc = -EINVAL;
53797- goto qdir_exit;
53798+ return -EINVAL;
53799 }
53800
53801 req->FileIndex = cpu_to_le32(index);
53802diff --git a/fs/coda/cache.c b/fs/coda/cache.c
53803index 1da168c..8bc7ff6 100644
53804--- a/fs/coda/cache.c
53805+++ b/fs/coda/cache.c
53806@@ -24,7 +24,7 @@
53807 #include "coda_linux.h"
53808 #include "coda_cache.h"
53809
53810-static atomic_t permission_epoch = ATOMIC_INIT(0);
53811+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
53812
53813 /* replace or extend an acl cache hit */
53814 void coda_cache_enter(struct inode *inode, int mask)
53815@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
53816 struct coda_inode_info *cii = ITOC(inode);
53817
53818 spin_lock(&cii->c_lock);
53819- cii->c_cached_epoch = atomic_read(&permission_epoch);
53820+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
53821 if (!uid_eq(cii->c_uid, current_fsuid())) {
53822 cii->c_uid = current_fsuid();
53823 cii->c_cached_perm = mask;
53824@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
53825 {
53826 struct coda_inode_info *cii = ITOC(inode);
53827 spin_lock(&cii->c_lock);
53828- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
53829+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
53830 spin_unlock(&cii->c_lock);
53831 }
53832
53833 /* remove all acl caches */
53834 void coda_cache_clear_all(struct super_block *sb)
53835 {
53836- atomic_inc(&permission_epoch);
53837+ atomic_inc_unchecked(&permission_epoch);
53838 }
53839
53840
53841@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
53842 spin_lock(&cii->c_lock);
53843 hit = (mask & cii->c_cached_perm) == mask &&
53844 uid_eq(cii->c_uid, current_fsuid()) &&
53845- cii->c_cached_epoch == atomic_read(&permission_epoch);
53846+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
53847 spin_unlock(&cii->c_lock);
53848
53849 return hit;
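
The coda ACL cache above is a clean example of epoch-based invalidation: every entry records the global permission_epoch at fill time, a hit additionally requires that recorded epoch to match the current one, and coda_cache_clear_all() invalidates everything with a single increment. A userspace sketch of the pattern:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int permission_epoch;

struct acl_cache {
	int epoch;          /* epoch recorded when the entry was filled */
	int cached_perm;
};

static void cache_enter(struct acl_cache *c, int mask)
{
	c->cached_perm = mask;
	c->epoch = atomic_load(&permission_epoch);
}

static bool cache_check(const struct acl_cache *c, int mask)
{
	return (mask & c->cached_perm) == mask &&
	       c->epoch == atomic_load(&permission_epoch);
}

/* one increment invalidates every cached entry at once */
static void cache_clear_all(void)
{
	atomic_fetch_add(&permission_epoch, 1);
}

int main(void)
{
	struct acl_cache c;
	cache_enter(&c, 4);
	cache_clear_all();
	return cache_check(&c, 4) ? 1 : 0;  /* stale after the bump */
}
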
53850diff --git a/fs/compat.c b/fs/compat.c
53851index fc3b55d..7b568ae 100644
53852--- a/fs/compat.c
53853+++ b/fs/compat.c
53854@@ -54,7 +54,7 @@
53855 #include <asm/ioctls.h>
53856 #include "internal.h"
53857
53858-int compat_log = 1;
53859+int compat_log = 0;
53860
53861 int compat_printk(const char *fmt, ...)
53862 {
53863@@ -488,7 +488,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
53864
53865 set_fs(KERNEL_DS);
53866 /* The __user pointer cast is valid because of the set_fs() */
53867- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
53868+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
53869 set_fs(oldfs);
53870 /* truncating is ok because it's a user address */
53871 if (!ret)
53872@@ -546,7 +546,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
53873 goto out;
53874
53875 ret = -EINVAL;
53876- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
53877+ if (nr_segs > UIO_MAXIOV)
53878 goto out;
53879 if (nr_segs > fast_segs) {
53880 ret = -ENOMEM;
53881@@ -833,6 +833,7 @@ struct compat_old_linux_dirent {
53882
53883 struct compat_readdir_callback {
53884 struct compat_old_linux_dirent __user *dirent;
53885+ struct file * file;
53886 int result;
53887 };
53888
53889@@ -850,6 +851,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
53890 buf->result = -EOVERFLOW;
53891 return -EOVERFLOW;
53892 }
53893+
53894+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
53895+ return 0;
53896+
53897 buf->result++;
53898 dirent = buf->dirent;
53899 if (!access_ok(VERIFY_WRITE, dirent,
53900@@ -880,6 +885,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
53901
53902 buf.result = 0;
53903 buf.dirent = dirent;
53904+ buf.file = f.file;
53905
53906 error = vfs_readdir(f.file, compat_fillonedir, &buf);
53907 if (buf.result)
53908@@ -899,6 +905,7 @@ struct compat_linux_dirent {
53909 struct compat_getdents_callback {
53910 struct compat_linux_dirent __user *current_dir;
53911 struct compat_linux_dirent __user *previous;
53912+ struct file * file;
53913 int count;
53914 int error;
53915 };
53916@@ -920,6 +927,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
53917 buf->error = -EOVERFLOW;
53918 return -EOVERFLOW;
53919 }
53920+
53921+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
53922+ return 0;
53923+
53924 dirent = buf->previous;
53925 if (dirent) {
53926 if (__put_user(offset, &dirent->d_off))
53927@@ -965,6 +976,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
53928 buf.previous = NULL;
53929 buf.count = count;
53930 buf.error = 0;
53931+ buf.file = f.file;
53932
53933 error = vfs_readdir(f.file, compat_filldir, &buf);
53934 if (error >= 0)
53935@@ -985,6 +997,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
53936 struct compat_getdents_callback64 {
53937 struct linux_dirent64 __user *current_dir;
53938 struct linux_dirent64 __user *previous;
53939+ struct file * file;
53940 int count;
53941 int error;
53942 };
53943@@ -1001,6 +1014,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
53944 buf->error = -EINVAL; /* only used if we fail.. */
53945 if (reclen > buf->count)
53946 return -EINVAL;
53947+
53948+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
53949+ return 0;
53950+
53951 dirent = buf->previous;
53952
53953 if (dirent) {
53954@@ -1050,13 +1067,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
53955 buf.previous = NULL;
53956 buf.count = count;
53957 buf.error = 0;
53958+ buf.file = f.file;
53959
53960 error = vfs_readdir(f.file, compat_filldir64, &buf);
53961 if (error >= 0)
53962 error = buf.error;
53963 lastdirent = buf.previous;
53964 if (lastdirent) {
53965- typeof(lastdirent->d_off) d_off = f.file->f_pos;
53966+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = f.file->f_pos;
53967 if (__put_user_unaligned(d_off, &lastdirent->d_off))
53968 error = -EFAULT;
53969 else
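
The compat getdents hunks thread the originating struct file into each filldir callback context so that gr_acl_handle_filldir(), a grsecurity RBAC hook, can veto individual directory entries before they reach userspace; a vetoed entry is skipped silently rather than treated as an error. A sketch of the filtering-callback shape, where may_see_entry() is a hypothetical stand-in for the real policy hook:

#include <stdbool.h>
#include <stdio.h>

struct readdir_ctx {
	FILE *file;     /* originating directory handle, threaded through */
	int count;
};

/* hypothetical stand-in for gr_acl_handle_filldir(); the real hook asks
 * the RBAC policy whether this process may see the entry */
static bool may_see_entry(FILE *file, const char *name)
{
	(void)file;
	return name[0] != '.';      /* toy policy: hide dotfiles */
}

static int fillonedir(struct readdir_ctx *ctx, const char *name)
{
	if (!may_see_entry(ctx->file, name))
		return 0;           /* skip silently, not an error */
	ctx->count++;
	printf("%s\n", name);
	return 0;
}

int main(void)
{
	struct readdir_ctx ctx = { stdout, 0 };
	fillonedir(&ctx, ".hidden");
	fillonedir(&ctx, "visible");
	return ctx.count == 1 ? 0 : 1;
}
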
53970diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
53971index a81147e..20bf2b5 100644
53972--- a/fs/compat_binfmt_elf.c
53973+++ b/fs/compat_binfmt_elf.c
53974@@ -30,11 +30,13 @@
53975 #undef elf_phdr
53976 #undef elf_shdr
53977 #undef elf_note
53978+#undef elf_dyn
53979 #undef elf_addr_t
53980 #define elfhdr elf32_hdr
53981 #define elf_phdr elf32_phdr
53982 #define elf_shdr elf32_shdr
53983 #define elf_note elf32_note
53984+#define elf_dyn Elf32_Dyn
53985 #define elf_addr_t Elf32_Addr
53986
53987 /*
53988diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
53989index 996cdc5..15e2f33 100644
53990--- a/fs/compat_ioctl.c
53991+++ b/fs/compat_ioctl.c
53992@@ -622,7 +622,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
53993 return -EFAULT;
53994 if (__get_user(udata, &ss32->iomem_base))
53995 return -EFAULT;
53996- ss.iomem_base = compat_ptr(udata);
53997+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
53998 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
53999 __get_user(ss.port_high, &ss32->port_high))
54000 return -EFAULT;
54001@@ -703,8 +703,8 @@ static int do_i2c_rdwr_ioctl(unsigned int fd, unsigned int cmd,
54002 for (i = 0; i < nmsgs; i++) {
54003 if (copy_in_user(&tmsgs[i].addr, &umsgs[i].addr, 3*sizeof(u16)))
54004 return -EFAULT;
54005- if (get_user(datap, &umsgs[i].buf) ||
54006- put_user(compat_ptr(datap), &tmsgs[i].buf))
54007+ if (get_user(datap, (u8 __user * __user *)&umsgs[i].buf) ||
54008+ put_user(compat_ptr(datap), (u8 __user * __user *)&tmsgs[i].buf))
54009 return -EFAULT;
54010 }
54011 return sys_ioctl(fd, cmd, (unsigned long)tdata);
54012@@ -797,7 +797,7 @@ static int compat_ioctl_preallocate(struct file *file,
54013 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
54014 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
54015 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
54016- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
54017+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
54018 return -EFAULT;
54019
54020 return ioctl_preallocate(file, p);
54021@@ -1619,8 +1619,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
54022 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
54023 {
54024 unsigned int a, b;
54025- a = *(unsigned int *)p;
54026- b = *(unsigned int *)q;
54027+ a = *(const unsigned int *)p;
54028+ b = *(const unsigned int *)q;
54029 if (a > b)
54030 return 1;
54031 if (a < b)
54032diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
54033index 7aabc6a..34c1197 100644
54034--- a/fs/configfs/dir.c
54035+++ b/fs/configfs/dir.c
54036@@ -1565,7 +1565,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
54037 }
54038 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
54039 struct configfs_dirent *next;
54040- const char * name;
54041+ const unsigned char * name;
54042+ char d_name[sizeof(next->s_dentry->d_iname)];
54043 int len;
54044 struct inode *inode = NULL;
54045
54046@@ -1575,7 +1576,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
54047 continue;
54048
54049 name = configfs_get_name(next);
54050- len = strlen(name);
54051+ if (next->s_dentry && name == next->s_dentry->d_iname) {
54052+ len = next->s_dentry->d_name.len;
54053+ memcpy(d_name, name, len);
54054+ name = d_name;
54055+ } else
54056+ len = strlen(name);
54057
54058 /*
54059 * We'll have a dentry and an inode for
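
The configfs_readdir() change avoids calling strlen() on a dentry's inline name, which a concurrent rename may rewrite: when the name points at d_iname, the recorded d_name.len is used to snapshot the bytes into a local buffer first. A plain C sketch of the snapshot-before-use idea:

#include <string.h>

#define INLINE_NAME_LEN 32

struct dent {
	char name[INLINE_NAME_LEN]; /* may be rewritten concurrently */
	unsigned int name_len;      /* length recorded when name was set */
};

/* Copy under the recorded length instead of strlen()ing a buffer that
 * can change underneath us. */
static unsigned int snapshot_name(const struct dent *d,
                                  char *buf, unsigned int bufsz)
{
	unsigned int len = d->name_len < bufsz ? d->name_len : bufsz;

	memcpy(buf, d->name, len);
	return len;
}

int main(void)
{
	struct dent d = { "target", 6 };
	char local[INLINE_NAME_LEN];
	return snapshot_name(&d, local, sizeof(local)) == 6 ? 0 : 1;
}
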
54060diff --git a/fs/coredump.c b/fs/coredump.c
54061index dafafba..10b3b27 100644
54062--- a/fs/coredump.c
54063+++ b/fs/coredump.c
54064@@ -52,7 +52,7 @@ struct core_name {
54065 char *corename;
54066 int used, size;
54067 };
54068-static atomic_t call_count = ATOMIC_INIT(1);
54069+static atomic_unchecked_t call_count = ATOMIC_INIT(1);
54070
54071 /* The maximal length of core_pattern is also specified in sysctl.c */
54072
54073@@ -60,7 +60,7 @@ static int expand_corename(struct core_name *cn)
54074 {
54075 char *old_corename = cn->corename;
54076
54077- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
54078+ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
54079 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
54080
54081 if (!cn->corename) {
54082@@ -157,7 +157,7 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm)
54083 int pid_in_pattern = 0;
54084 int err = 0;
54085
54086- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
54087+ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
54088 cn->corename = kmalloc(cn->size, GFP_KERNEL);
54089 cn->used = 0;
54090
54091@@ -435,8 +435,8 @@ static void wait_for_dump_helpers(struct file *file)
54092 struct pipe_inode_info *pipe = file->private_data;
54093
54094 pipe_lock(pipe);
54095- pipe->readers++;
54096- pipe->writers--;
54097+ atomic_inc(&pipe->readers);
54098+ atomic_dec(&pipe->writers);
54099 wake_up_interruptible_sync(&pipe->wait);
54100 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
54101 pipe_unlock(pipe);
54102@@ -445,11 +445,11 @@ static void wait_for_dump_helpers(struct file *file)
54103 * We actually want wait_event_freezable() but then we need
54104 * to clear TIF_SIGPENDING and improve dump_interrupted().
54105 */
54106- wait_event_interruptible(pipe->wait, pipe->readers == 1);
54107+ wait_event_interruptible(pipe->wait, atomic_read(&pipe->readers) == 1);
54108
54109 pipe_lock(pipe);
54110- pipe->readers--;
54111- pipe->writers++;
54112+ atomic_dec(&pipe->readers);
54113+ atomic_inc(&pipe->writers);
54114 pipe_unlock(pipe);
54115 }
54116
54117@@ -496,7 +496,8 @@ void do_coredump(siginfo_t *siginfo)
54118 struct files_struct *displaced;
54119 bool need_nonrelative = false;
54120 bool core_dumped = false;
54121- static atomic_t core_dump_count = ATOMIC_INIT(0);
54122+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
54123+ long signr = siginfo->si_signo;
54124 struct coredump_params cprm = {
54125 .siginfo = siginfo,
54126 .regs = signal_pt_regs(),
54127@@ -509,7 +510,10 @@ void do_coredump(siginfo_t *siginfo)
54128 .mm_flags = mm->flags,
54129 };
54130
54131- audit_core_dumps(siginfo->si_signo);
54132+ audit_core_dumps(signr);
54133+
54134+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
54135+ gr_handle_brute_attach(cprm.mm_flags);
54136
54137 binfmt = mm->binfmt;
54138 if (!binfmt || !binfmt->core_dump)
54139@@ -533,7 +537,7 @@ void do_coredump(siginfo_t *siginfo)
54140 need_nonrelative = true;
54141 }
54142
54143- retval = coredump_wait(siginfo->si_signo, &core_state);
54144+ retval = coredump_wait(signr, &core_state);
54145 if (retval < 0)
54146 goto fail_creds;
54147
54148@@ -576,7 +580,7 @@ void do_coredump(siginfo_t *siginfo)
54149 }
54150 cprm.limit = RLIM_INFINITY;
54151
54152- dump_count = atomic_inc_return(&core_dump_count);
54153+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
54154 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
54155 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
54156 task_tgid_vnr(current), current->comm);
54157@@ -608,6 +612,8 @@ void do_coredump(siginfo_t *siginfo)
54158 } else {
54159 struct inode *inode;
54160
54161+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
54162+
54163 if (cprm.limit < binfmt->min_coredump)
54164 goto fail_unlock;
54165
54166@@ -666,7 +672,7 @@ close_fail:
54167 filp_close(cprm.file, NULL);
54168 fail_dropcount:
54169 if (ispipe)
54170- atomic_dec(&core_dump_count);
54171+ atomic_dec_unchecked(&core_dump_count);
54172 fail_unlock:
54173 kfree(cn.corename);
54174 fail_corename:
54175@@ -687,7 +693,7 @@ int dump_write(struct file *file, const void *addr, int nr)
54176 {
54177 return !dump_interrupted() &&
54178 access_ok(VERIFY_READ, addr, nr) &&
54179- file->f_op->write(file, addr, nr, &file->f_pos) == nr;
54180+ file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
54181 }
54182 EXPORT_SYMBOL(dump_write);
54183
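
The fs/coredump.c hunks above convert counters whose exact value is only informational (call_count, core_dump_count) from atomic_t to atomic_unchecked_t. Under PAX_REFCOUNT, ordinary atomic_t increments are meant to trap on signed overflow; counters for which wrapping is harmless opt out through the _unchecked variants. A userspace analogue of that split, assuming those semantics on a two's-complement target (the in-kernel implementation uses arch-specific assembly, not this):

	#include <limits.h>
	#include <stdio.h>

	static int inc_checked(int v)
	{
		int r;

		if (__builtin_add_overflow(v, 1, &r)) {	/* would wrap: refuse */
			fprintf(stderr, "refcount overflow detected\n");
			return v;		/* saturate, like the REFCOUNT trap path */
		}
		return r;
	}

	static int inc_unchecked(int v)
	{
		return (int)((unsigned int)v + 1u);	/* deliberate wrap, fine for stats */
	}

	int main(void)
	{
		printf("checked:   %d\n", inc_checked(INT_MAX));	/* stays INT_MAX */
		printf("unchecked: %d\n", inc_unchecked(INT_MAX));	/* wraps to INT_MIN */
		return 0;
	}
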
54184diff --git a/fs/dcache.c b/fs/dcache.c
54185index f09b908..04b9690 100644
54186--- a/fs/dcache.c
54187+++ b/fs/dcache.c
54188@@ -3086,7 +3086,8 @@ void __init vfs_caches_init(unsigned long mempages)
54189 mempages -= reserve;
54190
54191 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
54192- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
54193+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY|
54194+ SLAB_NO_SANITIZE, NULL);
54195
54196 dcache_init();
54197 inode_init();
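
The names_cache change adds SLAB_USERCOPY, marking the cache (which holds getname() path buffers) as one whose objects may legitimately cross the user/kernel boundary, plus SLAB_NO_SANITIZE. A rough kernel-context sketch of how a PAX_USERCOPY-style heap check could consult that flag; helper name and the omitted per-object bounds check are assumptions, not the patch's exact code:

	/* sketch only: reject copies touching slab caches not whitelisted
	 * with SLAB_USERCOPY; returns the offending cache name, else NULL.
	 * (the real check also verifies the range stays inside one object) */
	static const char *heap_usercopy_check(const void *ptr, unsigned long n)
	{
		struct page *page;

		if (!virt_addr_valid(ptr))
			return NULL;			/* not linearly mapped memory */
		page = virt_to_head_page(ptr);
		if (!PageSlab(page))
			return NULL;			/* not a slab object at all */
		if (page->slab_cache->flags & SLAB_USERCOPY)
			return NULL;			/* whitelisted, e.g. names_cache */
		return page->slab_cache->name;		/* report the violating cache */
	}
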
54198diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
54199index c7c83ff..bda9461 100644
54200--- a/fs/debugfs/inode.c
54201+++ b/fs/debugfs/inode.c
54202@@ -415,7 +415,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
54203 */
54204 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
54205 {
54206+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
54207+ return __create_file(name, S_IFDIR | S_IRWXU,
54208+#else
54209 return __create_file(name, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
54210+#endif
54211 parent, NULL, NULL);
54212 }
54213 EXPORT_SYMBOL_GPL(debugfs_create_dir);
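
The debugfs ifdef narrows directory permissions: S_IRWXU | S_IRUGO | S_IXUGO is 0755 (world-readable and searchable), while S_IRWXU alone is 0700 (owner only). A two-line check of that mode arithmetic:

	#include <assert.h>
	#include <sys/stat.h>

	int main(void)
	{
		assert((S_IRWXU | S_IRUGO | S_IXUGO) == 0755);	/* stock debugfs dirs */
		assert(S_IRWXU == 0700);	/* GRKERNSEC_SYSFS_RESTRICT dirs */
		return 0;
	}
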
54214diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
54215index 5eab400..810a3f5 100644
54216--- a/fs/ecryptfs/inode.c
54217+++ b/fs/ecryptfs/inode.c
54218@@ -674,7 +674,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
54219 old_fs = get_fs();
54220 set_fs(get_ds());
54221 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
54222- (char __user *)lower_buf,
54223+ (char __force_user *)lower_buf,
54224 PATH_MAX);
54225 set_fs(old_fs);
54226 if (rc < 0)
54227@@ -706,7 +706,7 @@ out:
54228 static void
54229 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
54230 {
54231- char *buf = nd_get_link(nd);
54232+ const char *buf = nd_get_link(nd);
54233 if (!IS_ERR(buf)) {
54234 /* Free the char* */
54235 kfree(buf);
54236diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
54237index e4141f2..d8263e8 100644
54238--- a/fs/ecryptfs/miscdev.c
54239+++ b/fs/ecryptfs/miscdev.c
54240@@ -304,7 +304,7 @@ check_list:
54241 goto out_unlock_msg_ctx;
54242 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
54243 if (msg_ctx->msg) {
54244- if (copy_to_user(&buf[i], packet_length, packet_length_size))
54245+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
54246 goto out_unlock_msg_ctx;
54247 i += packet_length_size;
54248 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
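
The miscdev.c hunk bounds packet_length_size before copy_to_user() reads from the on-stack packet_length buffer; without the check, an oversized length would leak adjacent stack bytes to userspace. The defensive shape, reduced to a self-contained userspace function with hypothetical names and buffer size:

	#include <errno.h>
	#include <string.h>

	static int copy_header(char *dst, const char *hdr, size_t hdr_len)
	{
		char pkt[4];			/* fixed-size stack staging buffer */

		if (hdr_len > sizeof(pkt))	/* the guard the hunk introduces */
			return -EMSGSIZE;
		memcpy(pkt, hdr, hdr_len);	/* now provably in bounds */
		memcpy(dst, pkt, hdr_len);
		return 0;
	}
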
54249diff --git a/fs/exec.c b/fs/exec.c
54250index 1f44670..3c84660 100644
54251--- a/fs/exec.c
54252+++ b/fs/exec.c
54253@@ -55,8 +55,20 @@
54254 #include <linux/pipe_fs_i.h>
54255 #include <linux/oom.h>
54256 #include <linux/compat.h>
54257+#include <linux/random.h>
54258+#include <linux/seq_file.h>
54259+#include <linux/coredump.h>
54260+#include <linux/mman.h>
54261+
54262+#ifdef CONFIG_PAX_REFCOUNT
54263+#include <linux/kallsyms.h>
54264+#include <linux/kdebug.h>
54265+#endif
54266+
54267+#include <trace/events/fs.h>
54268
54269 #include <asm/uaccess.h>
54270+#include <asm/sections.h>
54271 #include <asm/mmu_context.h>
54272 #include <asm/tlb.h>
54273
54274@@ -66,17 +78,32 @@
54275
54276 #include <trace/events/sched.h>
54277
54278+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
54279+void __weak pax_set_initial_flags(struct linux_binprm *bprm)
54280+{
54281+ pr_warn_once("PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
54282+}
54283+#endif
54284+
54285+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
54286+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
54287+EXPORT_SYMBOL(pax_set_initial_flags_func);
54288+#endif
54289+
54290 int suid_dumpable = 0;
54291
54292 static LIST_HEAD(formats);
54293 static DEFINE_RWLOCK(binfmt_lock);
54294
54295+extern int gr_process_kernel_exec_ban(void);
54296+extern int gr_process_suid_exec_ban(const struct linux_binprm *bprm);
54297+
54298 void __register_binfmt(struct linux_binfmt * fmt, int insert)
54299 {
54300 BUG_ON(!fmt);
54301 write_lock(&binfmt_lock);
54302- insert ? list_add(&fmt->lh, &formats) :
54303- list_add_tail(&fmt->lh, &formats);
54304+ insert ? pax_list_add((struct list_head *)&fmt->lh, &formats) :
54305+ pax_list_add_tail((struct list_head *)&fmt->lh, &formats);
54306 write_unlock(&binfmt_lock);
54307 }
54308
54309@@ -85,7 +112,7 @@ EXPORT_SYMBOL(__register_binfmt);
54310 void unregister_binfmt(struct linux_binfmt * fmt)
54311 {
54312 write_lock(&binfmt_lock);
54313- list_del(&fmt->lh);
54314+ pax_list_del((struct list_head *)&fmt->lh);
54315 write_unlock(&binfmt_lock);
54316 }
54317
54318@@ -180,18 +207,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
54319 int write)
54320 {
54321 struct page *page;
54322- int ret;
54323
54324-#ifdef CONFIG_STACK_GROWSUP
54325- if (write) {
54326- ret = expand_downwards(bprm->vma, pos);
54327- if (ret < 0)
54328- return NULL;
54329- }
54330-#endif
54331- ret = get_user_pages(current, bprm->mm, pos,
54332- 1, write, 1, &page, NULL);
54333- if (ret <= 0)
54334+ if (0 > expand_downwards(bprm->vma, pos))
54335+ return NULL;
54336+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
54337 return NULL;
54338
54339 if (write) {
54340@@ -207,6 +226,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
54341 if (size <= ARG_MAX)
54342 return page;
54343
54344+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54345+ // only allow 512KB for argv+env on suid/sgid binaries
54346+ // to prevent easy ASLR exhaustion
54347+ if (((!uid_eq(bprm->cred->euid, current_euid())) ||
54348+ (!gid_eq(bprm->cred->egid, current_egid()))) &&
54349+ (size > (512 * 1024))) {
54350+ put_page(page);
54351+ return NULL;
54352+ }
54353+#endif
54354+
54355 /*
54356 * Limit to 1/4-th the stack size for the argv+env strings.
54357 * This ensures that:
54358@@ -266,6 +296,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
54359 vma->vm_end = STACK_TOP_MAX;
54360 vma->vm_start = vma->vm_end - PAGE_SIZE;
54361 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
54362+
54363+#ifdef CONFIG_PAX_SEGMEXEC
54364+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
54365+#endif
54366+
54367 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
54368 INIT_LIST_HEAD(&vma->anon_vma_chain);
54369
54370@@ -276,6 +311,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
54371 mm->stack_vm = mm->total_vm = 1;
54372 up_write(&mm->mmap_sem);
54373 bprm->p = vma->vm_end - sizeof(void *);
54374+
54375+#ifdef CONFIG_PAX_RANDUSTACK
54376+ if (randomize_va_space)
54377+ bprm->p ^= prandom_u32() & ~PAGE_MASK;
54378+#endif
54379+
54380 return 0;
54381 err:
54382 up_write(&mm->mmap_sem);
54383@@ -396,7 +437,7 @@ struct user_arg_ptr {
54384 } ptr;
54385 };
54386
54387-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
54388+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
54389 {
54390 const char __user *native;
54391
54392@@ -405,14 +446,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
54393 compat_uptr_t compat;
54394
54395 if (get_user(compat, argv.ptr.compat + nr))
54396- return ERR_PTR(-EFAULT);
54397+ return (const char __force_user *)ERR_PTR(-EFAULT);
54398
54399 return compat_ptr(compat);
54400 }
54401 #endif
54402
54403 if (get_user(native, argv.ptr.native + nr))
54404- return ERR_PTR(-EFAULT);
54405+ return (const char __force_user *)ERR_PTR(-EFAULT);
54406
54407 return native;
54408 }
54409@@ -431,7 +472,7 @@ static int count(struct user_arg_ptr argv, int max)
54410 if (!p)
54411 break;
54412
54413- if (IS_ERR(p))
54414+ if (IS_ERR((const char __force_kernel *)p))
54415 return -EFAULT;
54416
54417 if (i >= max)
54418@@ -466,7 +507,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
54419
54420 ret = -EFAULT;
54421 str = get_user_arg_ptr(argv, argc);
54422- if (IS_ERR(str))
54423+ if (IS_ERR((const char __force_kernel *)str))
54424 goto out;
54425
54426 len = strnlen_user(str, MAX_ARG_STRLEN);
54427@@ -548,7 +589,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
54428 int r;
54429 mm_segment_t oldfs = get_fs();
54430 struct user_arg_ptr argv = {
54431- .ptr.native = (const char __user *const __user *)__argv,
54432+ .ptr.native = (const char __force_user * const __force_user *)__argv,
54433 };
54434
54435 set_fs(KERNEL_DS);
54436@@ -583,7 +624,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
54437 unsigned long new_end = old_end - shift;
54438 struct mmu_gather tlb;
54439
54440- BUG_ON(new_start > new_end);
54441+ if (new_start >= new_end || new_start < mmap_min_addr)
54442+ return -ENOMEM;
54443
54444 /*
54445 * ensure there are no vmas between where we want to go
54446@@ -592,6 +634,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
54447 if (vma != find_vma(mm, new_start))
54448 return -EFAULT;
54449
54450+#ifdef CONFIG_PAX_SEGMEXEC
54451+ BUG_ON(pax_find_mirror_vma(vma));
54452+#endif
54453+
54454 /*
54455 * cover the whole range: [new_start, old_end)
54456 */
54457@@ -672,10 +718,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
54458 stack_top = arch_align_stack(stack_top);
54459 stack_top = PAGE_ALIGN(stack_top);
54460
54461- if (unlikely(stack_top < mmap_min_addr) ||
54462- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
54463- return -ENOMEM;
54464-
54465 stack_shift = vma->vm_end - stack_top;
54466
54467 bprm->p -= stack_shift;
54468@@ -687,8 +729,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
54469 bprm->exec -= stack_shift;
54470
54471 down_write(&mm->mmap_sem);
54472+
54473+ /* Move stack pages down in memory. */
54474+ if (stack_shift) {
54475+ ret = shift_arg_pages(vma, stack_shift);
54476+ if (ret)
54477+ goto out_unlock;
54478+ }
54479+
54480 vm_flags = VM_STACK_FLAGS;
54481
54482+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
54483+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
54484+ vm_flags &= ~VM_EXEC;
54485+
54486+#ifdef CONFIG_PAX_MPROTECT
54487+ if (mm->pax_flags & MF_PAX_MPROTECT)
54488+ vm_flags &= ~VM_MAYEXEC;
54489+#endif
54490+
54491+ }
54492+#endif
54493+
54494 /*
54495 * Adjust stack execute permissions; explicitly enable for
54496 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
54497@@ -707,13 +769,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
54498 goto out_unlock;
54499 BUG_ON(prev != vma);
54500
54501- /* Move stack pages down in memory. */
54502- if (stack_shift) {
54503- ret = shift_arg_pages(vma, stack_shift);
54504- if (ret)
54505- goto out_unlock;
54506- }
54507-
54508 /* mprotect_fixup is overkill to remove the temporary stack flags */
54509 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
54510
54511@@ -737,6 +792,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
54512 #endif
54513 current->mm->start_stack = bprm->p;
54514 ret = expand_stack(vma, stack_base);
54515+
54516+#if !defined(CONFIG_STACK_GROWSUP) && defined(CONFIG_PAX_RANDMMAP)
54517+ if (!ret && (mm->pax_flags & MF_PAX_RANDMMAP) && STACK_TOP <= 0xFFFFFFFFU && STACK_TOP > vma->vm_end) {
54518+ unsigned long size;
54519+ vm_flags_t vm_flags;
54520+
54521+ size = STACK_TOP - vma->vm_end;
54522+ vm_flags = VM_NONE | VM_DONTEXPAND | VM_DONTDUMP;
54523+
54524+ ret = vma->vm_end != mmap_region(NULL, vma->vm_end, size, vm_flags, 0);
54525+
54526+#ifdef CONFIG_X86
54527+ if (!ret) {
54528+ size = PAGE_SIZE + mmap_min_addr + ((mm->delta_mmap ^ mm->delta_stack) & (0xFFUL << PAGE_SHIFT));
54529+ ret = 0 != mmap_region(NULL, 0, PAGE_ALIGN(size), vm_flags, 0);
54530+ }
54531+#endif
54532+
54533+ }
54534+#endif
54535+
54536 if (ret)
54537 ret = -EFAULT;
54538
54539@@ -772,6 +848,8 @@ struct file *open_exec(const char *name)
54540
54541 fsnotify_open(file);
54542
54543+ trace_open_exec(name);
54544+
54545 err = deny_write_access(file);
54546 if (err)
54547 goto exit;
54548@@ -795,7 +873,7 @@ int kernel_read(struct file *file, loff_t offset,
54549 old_fs = get_fs();
54550 set_fs(get_ds());
54551 /* The cast to a user pointer is valid due to the set_fs() */
54552- result = vfs_read(file, (void __user *)addr, count, &pos);
54553+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
54554 set_fs(old_fs);
54555 return result;
54556 }
54557@@ -1251,7 +1329,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
54558 }
54559 rcu_read_unlock();
54560
54561- if (p->fs->users > n_fs) {
54562+ if (atomic_read(&p->fs->users) > n_fs) {
54563 bprm->unsafe |= LSM_UNSAFE_SHARE;
54564 } else {
54565 res = -EAGAIN;
54566@@ -1451,6 +1529,31 @@ int search_binary_handler(struct linux_binprm *bprm)
54567
54568 EXPORT_SYMBOL(search_binary_handler);
54569
54570+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54571+static DEFINE_PER_CPU(u64, exec_counter);
54572+static int __init init_exec_counters(void)
54573+{
54574+ unsigned int cpu;
54575+
54576+ for_each_possible_cpu(cpu) {
54577+ per_cpu(exec_counter, cpu) = (u64)cpu;
54578+ }
54579+
54580+ return 0;
54581+}
54582+early_initcall(init_exec_counters);
54583+static inline void increment_exec_counter(void)
54584+{
54585+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
54586+ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
54587+}
54588+#else
54589+static inline void increment_exec_counter(void) {}
54590+#endif
54591+
54592+extern void gr_handle_exec_args(struct linux_binprm *bprm,
54593+ struct user_arg_ptr argv);
54594+
54595 /*
54596 * sys_execve() executes a new program.
54597 */
54598@@ -1458,6 +1561,11 @@ static int do_execve_common(const char *filename,
54599 struct user_arg_ptr argv,
54600 struct user_arg_ptr envp)
54601 {
54602+#ifdef CONFIG_GRKERNSEC
54603+ struct file *old_exec_file;
54604+ struct acl_subject_label *old_acl;
54605+ struct rlimit old_rlim[RLIM_NLIMITS];
54606+#endif
54607 struct linux_binprm *bprm;
54608 struct file *file;
54609 struct files_struct *displaced;
54610@@ -1465,6 +1573,8 @@ static int do_execve_common(const char *filename,
54611 int retval;
54612 const struct cred *cred = current_cred();
54613
54614+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&cred->user->processes), 1);
54615+
54616 /*
54617 * We move the actual failure in case of RLIMIT_NPROC excess from
54618 * set*uid() to execve() because too many poorly written programs
54619@@ -1505,12 +1615,22 @@ static int do_execve_common(const char *filename,
54620 if (IS_ERR(file))
54621 goto out_unmark;
54622
54623+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
54624+ retval = -EPERM;
54625+ goto out_file;
54626+ }
54627+
54628 sched_exec();
54629
54630 bprm->file = file;
54631 bprm->filename = filename;
54632 bprm->interp = filename;
54633
54634+ if (!gr_acl_handle_execve(file->f_path.dentry, file->f_path.mnt)) {
54635+ retval = -EACCES;
54636+ goto out_file;
54637+ }
54638+
54639 retval = bprm_mm_init(bprm);
54640 if (retval)
54641 goto out_file;
54642@@ -1527,24 +1647,70 @@ static int do_execve_common(const char *filename,
54643 if (retval < 0)
54644 goto out;
54645
54646+#ifdef CONFIG_GRKERNSEC
54647+ old_acl = current->acl;
54648+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
54649+ old_exec_file = current->exec_file;
54650+ get_file(file);
54651+ current->exec_file = file;
54652+#endif
54653+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54654+ /* limit suid stack to 8MB
54655+ * we saved the old limits above and will restore them if this exec fails
54656+ */
54657+ if (((!uid_eq(bprm->cred->euid, current_euid())) || (!gid_eq(bprm->cred->egid, current_egid()))) &&
54658+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
54659+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
54660+#endif
54661+
54662+ if (gr_process_kernel_exec_ban() || gr_process_suid_exec_ban(bprm)) {
54663+ retval = -EPERM;
54664+ goto out_fail;
54665+ }
54666+
54667+ if (!gr_tpe_allow(file)) {
54668+ retval = -EACCES;
54669+ goto out_fail;
54670+ }
54671+
54672+ if (gr_check_crash_exec(file)) {
54673+ retval = -EACCES;
54674+ goto out_fail;
54675+ }
54676+
54677+ retval = gr_set_proc_label(file->f_path.dentry, file->f_path.mnt,
54678+ bprm->unsafe);
54679+ if (retval < 0)
54680+ goto out_fail;
54681+
54682 retval = copy_strings_kernel(1, &bprm->filename, bprm);
54683 if (retval < 0)
54684- goto out;
54685+ goto out_fail;
54686
54687 bprm->exec = bprm->p;
54688 retval = copy_strings(bprm->envc, envp, bprm);
54689 if (retval < 0)
54690- goto out;
54691+ goto out_fail;
54692
54693 retval = copy_strings(bprm->argc, argv, bprm);
54694 if (retval < 0)
54695- goto out;
54696+ goto out_fail;
54697+
54698+ gr_log_chroot_exec(file->f_path.dentry, file->f_path.mnt);
54699+
54700+ gr_handle_exec_args(bprm, argv);
54701
54702 retval = search_binary_handler(bprm);
54703 if (retval < 0)
54704- goto out;
54705+ goto out_fail;
54706+#ifdef CONFIG_GRKERNSEC
54707+ if (old_exec_file)
54708+ fput(old_exec_file);
54709+#endif
54710
54711 /* execve succeeded */
54712+
54713+ increment_exec_counter();
54714 current->fs->in_exec = 0;
54715 current->in_execve = 0;
54716 acct_update_integrals(current);
54717@@ -1553,6 +1719,14 @@ static int do_execve_common(const char *filename,
54718 put_files_struct(displaced);
54719 return retval;
54720
54721+out_fail:
54722+#ifdef CONFIG_GRKERNSEC
54723+ current->acl = old_acl;
54724+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
54725+ fput(current->exec_file);
54726+ current->exec_file = old_exec_file;
54727+#endif
54728+
54729 out:
54730 if (bprm->mm) {
54731 acct_arg_size(bprm, 0);
54732@@ -1701,3 +1875,287 @@ asmlinkage long compat_sys_execve(const char __user * filename,
54733 return error;
54734 }
54735 #endif
54736+
54737+int pax_check_flags(unsigned long *flags)
54738+{
54739+ int retval = 0;
54740+
54741+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
54742+ if (*flags & MF_PAX_SEGMEXEC)
54743+ {
54744+ *flags &= ~MF_PAX_SEGMEXEC;
54745+ retval = -EINVAL;
54746+ }
54747+#endif
54748+
54749+ if ((*flags & MF_PAX_PAGEEXEC)
54750+
54751+#ifdef CONFIG_PAX_PAGEEXEC
54752+ && (*flags & MF_PAX_SEGMEXEC)
54753+#endif
54754+
54755+ )
54756+ {
54757+ *flags &= ~MF_PAX_PAGEEXEC;
54758+ retval = -EINVAL;
54759+ }
54760+
54761+ if ((*flags & MF_PAX_MPROTECT)
54762+
54763+#ifdef CONFIG_PAX_MPROTECT
54764+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
54765+#endif
54766+
54767+ )
54768+ {
54769+ *flags &= ~MF_PAX_MPROTECT;
54770+ retval = -EINVAL;
54771+ }
54772+
54773+ if ((*flags & MF_PAX_EMUTRAMP)
54774+
54775+#ifdef CONFIG_PAX_EMUTRAMP
54776+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
54777+#endif
54778+
54779+ )
54780+ {
54781+ *flags &= ~MF_PAX_EMUTRAMP;
54782+ retval = -EINVAL;
54783+ }
54784+
54785+ return retval;
54786+}
54787+
54788+EXPORT_SYMBOL(pax_check_flags);
54789+
54790+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
54791+char *pax_get_path(const struct path *path, char *buf, int buflen)
54792+{
54793+ char *pathname = d_path(path, buf, buflen);
54794+
54795+ if (IS_ERR(pathname))
54796+ goto toolong;
54797+
54798+ pathname = mangle_path(buf, pathname, "\t\n\\");
54799+ if (!pathname)
54800+ goto toolong;
54801+
54802+ *pathname = 0;
54803+ return buf;
54804+
54805+toolong:
54806+ return "<path too long>";
54807+}
54808+EXPORT_SYMBOL(pax_get_path);
54809+
54810+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
54811+{
54812+ struct task_struct *tsk = current;
54813+ struct mm_struct *mm = current->mm;
54814+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
54815+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
54816+ char *path_exec = NULL;
54817+ char *path_fault = NULL;
54818+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
54819+ siginfo_t info = { };
54820+
54821+ if (buffer_exec && buffer_fault) {
54822+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
54823+
54824+ down_read(&mm->mmap_sem);
54825+ vma = mm->mmap;
54826+ while (vma && (!vma_exec || !vma_fault)) {
54827+ if (vma->vm_file && mm->exe_file == vma->vm_file && (vma->vm_flags & VM_EXEC))
54828+ vma_exec = vma;
54829+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
54830+ vma_fault = vma;
54831+ vma = vma->vm_next;
54832+ }
54833+ if (vma_exec)
54834+ path_exec = pax_get_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
54835+ if (vma_fault) {
54836+ start = vma_fault->vm_start;
54837+ end = vma_fault->vm_end;
54838+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
54839+ if (vma_fault->vm_file)
54840+ path_fault = pax_get_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
54841+ else if ((unsigned long)pc >= mm->start_brk && (unsigned long)pc < mm->brk)
54842+ path_fault = "<heap>";
54843+ else if (vma_fault->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
54844+ path_fault = "<stack>";
54845+ else
54846+ path_fault = "<anonymous mapping>";
54847+ }
54848+ up_read(&mm->mmap_sem);
54849+ }
54850+ if (tsk->signal->curr_ip)
54851+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
54852+ else
54853+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
54854+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
54855+ from_kuid_munged(&init_user_ns, task_uid(tsk)), from_kuid_munged(&init_user_ns, task_euid(tsk)), pc, sp);
54856+ free_page((unsigned long)buffer_exec);
54857+ free_page((unsigned long)buffer_fault);
54858+ pax_report_insns(regs, pc, sp);
54859+ info.si_signo = SIGKILL;
54860+ info.si_errno = 0;
54861+ info.si_code = SI_KERNEL;
54862+ info.si_pid = 0;
54863+ info.si_uid = 0;
54864+ do_coredump(&info);
54865+}
54866+#endif
54867+
54868+#ifdef CONFIG_PAX_REFCOUNT
54869+void pax_report_refcount_overflow(struct pt_regs *regs)
54870+{
54871+ if (current->signal->curr_ip)
54872+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
54873+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
54874+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
54875+ else
54876+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", current->comm, task_pid_nr(current),
54877+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
54878+	print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
54879+ preempt_disable();
54880+ show_regs(regs);
54881+ preempt_enable();
54882+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
54883+}
54884+#endif
54885+
54886+#ifdef CONFIG_PAX_USERCOPY
54887+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
54888+static noinline int check_stack_object(const void *obj, unsigned long len)
54889+{
54890+ const void * const stack = task_stack_page(current);
54891+ const void * const stackend = stack + THREAD_SIZE;
54892+
54893+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
54894+ const void *frame = NULL;
54895+ const void *oldframe;
54896+#endif
54897+
54898+ if (obj + len < obj)
54899+ return -1;
54900+
54901+ if (obj + len <= stack || stackend <= obj)
54902+ return 0;
54903+
54904+ if (obj < stack || stackend < obj + len)
54905+ return -1;
54906+
54907+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
54908+ oldframe = __builtin_frame_address(1);
54909+ if (oldframe)
54910+ frame = __builtin_frame_address(2);
54911+ /*
54912+ low ----------------------------------------------> high
54913+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
54914+ ^----------------^
54915+ allow copies only within here
54916+ */
54917+ while (stack <= frame && frame < stackend) {
54918+ /* if obj + len extends past the last frame, this
54919+ check won't pass and the next frame will be 0,
54920+ causing us to bail out and correctly report
54921+ the copy as invalid
54922+ */
54923+ if (obj + len <= frame)
54924+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
54925+ oldframe = frame;
54926+ frame = *(const void * const *)frame;
54927+ }
54928+ return -1;
54929+#else
54930+ return 1;
54931+#endif
54932+}
54933+
54934+static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to_user, const char *type)
54935+{
54936+ if (current->signal->curr_ip)
54937+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
54938+ &current->signal->curr_ip, to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
54939+ else
54940+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
54941+ to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
54942+ dump_stack();
54943+ gr_handle_kernel_exploit();
54944+ do_group_exit(SIGKILL);
54945+}
54946+#endif
54947+
54948+#ifdef CONFIG_PAX_USERCOPY
54949+static inline bool check_kernel_text_object(unsigned long low, unsigned long high)
54950+{
54951+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
54952+ unsigned long textlow = ktla_ktva((unsigned long)_stext);
54953+#ifdef CONFIG_MODULES
54954+ unsigned long texthigh = (unsigned long)MODULES_EXEC_VADDR;
54955+#else
54956+ unsigned long texthigh = ktla_ktva((unsigned long)_etext);
54957+#endif
54958+
54959+#else
54960+ unsigned long textlow = (unsigned long)_stext;
54961+ unsigned long texthigh = (unsigned long)_etext;
54962+#endif
54963+
54964+ if (high <= textlow || low > texthigh)
54965+ return false;
54966+ else
54967+ return true;
54968+}
54969+#endif
54970+
54971+void __check_object_size(const void *ptr, unsigned long n, bool to_user)
54972+{
54973+
54974+#ifdef CONFIG_PAX_USERCOPY
54975+ const char *type;
54976+
54977+ if (!n)
54978+ return;
54979+
54980+ type = check_heap_object(ptr, n);
54981+ if (!type) {
54982+ int ret = check_stack_object(ptr, n);
54983+ if (ret == 1 || ret == 2)
54984+ return;
54985+ if (ret == 0) {
54986+ if (check_kernel_text_object((unsigned long)ptr, (unsigned long)ptr + n))
54987+ type = "<kernel text>";
54988+ else
54989+ return;
54990+ } else
54991+ type = "<process stack>";
54992+ }
54993+
54994+ pax_report_usercopy(ptr, n, to_user, type);
54995+#endif
54996+
54997+}
54998+EXPORT_SYMBOL(__check_object_size);
54999+
55000+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
55001+void pax_track_stack(void)
55002+{
55003+ unsigned long sp = (unsigned long)&sp;
55004+ if (sp < current_thread_info()->lowest_stack &&
55005+ sp > (unsigned long)task_stack_page(current))
55006+ current_thread_info()->lowest_stack = sp;
55007+}
55008+EXPORT_SYMBOL(pax_track_stack);
55009+#endif
55010+
55011+#ifdef CONFIG_PAX_SIZE_OVERFLOW
55012+void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
55013+{
55014+ printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s", func, file, line, ssa_name);
55015+ dump_stack();
55016+ do_group_exit(SIGKILL);
55017+}
55018+EXPORT_SYMBOL(report_size_overflow);
55019+#endif
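
check_stack_object() in the fs/exec.c additions above walks saved frame pointers: __builtin_frame_address(1) is the caller's frame, each frame starts with a pointer to the next outer one, and a copy counts as "fully inside frame" only when it ends at or below some saved frame while starting above that frame's saved bp/ip pair (the ASCII diagram in the patch). A small userspace look at the same builtin; compile with -fno-omit-frame-pointer, and note that levels above 0 are compiler-specific, so this is illustrative only:

	#include <stdio.h>

	static void __attribute__((noinline)) show(void)
	{
		const void *oldframe = __builtin_frame_address(1);	/* caller's frame */
		const void *frame = __builtin_frame_address(2);		/* caller's caller */

		/* on x86 each frame holds [saved bp][saved ip] below the locals,
		   which is why the walk skips 2 * sizeof(void *) per frame */
		printf("caller frame: %p\n", oldframe);
		printf("outer frame:  %p\n", frame);
	}

	int main(void)
	{
		show();
		return 0;
	}
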
55020diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
55021index 9f9992b..8b59411 100644
55022--- a/fs/ext2/balloc.c
55023+++ b/fs/ext2/balloc.c
55024@@ -1184,10 +1184,10 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
55025
55026 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
55027 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
55028- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
55029+ if (free_blocks < root_blocks + 1 &&
55030 !uid_eq(sbi->s_resuid, current_fsuid()) &&
55031 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
55032- !in_group_p (sbi->s_resgid))) {
55033+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
55034 return 0;
55035 }
55036 return 1;
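
The ext2 hunk (mirrored for ext3 and ext4 below) moves the privilege test to the end of the && chain and switches it to capable_nolog(): capable() has the side effect of emitting an audit record, so evaluating it first logs a spurious capability check every time free space merely runs low. Since && short-circuits left to right, putting the side-effect-free tests first keeps the (assumed) logging check from running in the common case:

	#include <stdbool.h>
	#include <stdio.h>

	static bool capable_logged(void)
	{
		puts("audit: capability check");	/* the side effect to avoid */
		return false;
	}

	int main(void)
	{
		bool uid_ok = true;	/* stands in for the resuid/resgid tests */

		/* capable_logged() is never evaluated: uid_ok already decided it */
		if (!uid_ok && !capable_logged())
			puts("deny reserved blocks");
		return 0;
	}
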
55037diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
55038index 22548f5..41521d8 100644
55039--- a/fs/ext3/balloc.c
55040+++ b/fs/ext3/balloc.c
55041@@ -1438,10 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
55042
55043 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
55044 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
55045- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
55046+ if (free_blocks < root_blocks + 1 &&
55047 !use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
55048 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
55049- !in_group_p (sbi->s_resgid))) {
55050+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
55051 return 0;
55052 }
55053 return 1;
55054diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
55055index 3742e4c..69a797f 100644
55056--- a/fs/ext4/balloc.c
55057+++ b/fs/ext4/balloc.c
55058@@ -528,8 +528,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
55059 /* Hm, nope. Are (enough) root reserved clusters available? */
55060 if (uid_eq(sbi->s_resuid, current_fsuid()) ||
55061 (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
55062- capable(CAP_SYS_RESOURCE) ||
55063- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
55064+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
55065+ capable_nolog(CAP_SYS_RESOURCE)) {
55066
55067 if (free_clusters >= (nclusters + dirty_clusters +
55068 resv_clusters))
55069diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
55070index 5aae3d1..b5da7f8 100644
55071--- a/fs/ext4/ext4.h
55072+++ b/fs/ext4/ext4.h
55073@@ -1252,19 +1252,19 @@ struct ext4_sb_info {
55074 unsigned long s_mb_last_start;
55075
55076 /* stats for buddy allocator */
55077- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
55078- atomic_t s_bal_success; /* we found long enough chunks */
55079- atomic_t s_bal_allocated; /* in blocks */
55080- atomic_t s_bal_ex_scanned; /* total extents scanned */
55081- atomic_t s_bal_goals; /* goal hits */
55082- atomic_t s_bal_breaks; /* too long searches */
55083- atomic_t s_bal_2orders; /* 2^order hits */
55084+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
55085+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
55086+ atomic_unchecked_t s_bal_allocated; /* in blocks */
55087+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
55088+ atomic_unchecked_t s_bal_goals; /* goal hits */
55089+ atomic_unchecked_t s_bal_breaks; /* too long searches */
55090+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
55091 spinlock_t s_bal_lock;
55092 unsigned long s_mb_buddies_generated;
55093 unsigned long long s_mb_generation_time;
55094- atomic_t s_mb_lost_chunks;
55095- atomic_t s_mb_preallocated;
55096- atomic_t s_mb_discarded;
55097+ atomic_unchecked_t s_mb_lost_chunks;
55098+ atomic_unchecked_t s_mb_preallocated;
55099+ atomic_unchecked_t s_mb_discarded;
55100 atomic_t s_lock_busy;
55101
55102 /* locality groups */
55103diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
55104index 59c6750..a549154 100644
55105--- a/fs/ext4/mballoc.c
55106+++ b/fs/ext4/mballoc.c
55107@@ -1865,7 +1865,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
55108 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
55109
55110 if (EXT4_SB(sb)->s_mb_stats)
55111- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
55112+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
55113
55114 break;
55115 }
55116@@ -2170,7 +2170,7 @@ repeat:
55117 ac->ac_status = AC_STATUS_CONTINUE;
55118 ac->ac_flags |= EXT4_MB_HINT_FIRST;
55119 cr = 3;
55120- atomic_inc(&sbi->s_mb_lost_chunks);
55121+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
55122 goto repeat;
55123 }
55124 }
55125@@ -2678,25 +2678,25 @@ int ext4_mb_release(struct super_block *sb)
55126 if (sbi->s_mb_stats) {
55127 ext4_msg(sb, KERN_INFO,
55128 "mballoc: %u blocks %u reqs (%u success)",
55129- atomic_read(&sbi->s_bal_allocated),
55130- atomic_read(&sbi->s_bal_reqs),
55131- atomic_read(&sbi->s_bal_success));
55132+ atomic_read_unchecked(&sbi->s_bal_allocated),
55133+ atomic_read_unchecked(&sbi->s_bal_reqs),
55134+ atomic_read_unchecked(&sbi->s_bal_success));
55135 ext4_msg(sb, KERN_INFO,
55136 "mballoc: %u extents scanned, %u goal hits, "
55137 "%u 2^N hits, %u breaks, %u lost",
55138- atomic_read(&sbi->s_bal_ex_scanned),
55139- atomic_read(&sbi->s_bal_goals),
55140- atomic_read(&sbi->s_bal_2orders),
55141- atomic_read(&sbi->s_bal_breaks),
55142- atomic_read(&sbi->s_mb_lost_chunks));
55143+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
55144+ atomic_read_unchecked(&sbi->s_bal_goals),
55145+ atomic_read_unchecked(&sbi->s_bal_2orders),
55146+ atomic_read_unchecked(&sbi->s_bal_breaks),
55147+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
55148 ext4_msg(sb, KERN_INFO,
55149 "mballoc: %lu generated and it took %Lu",
55150 sbi->s_mb_buddies_generated,
55151 sbi->s_mb_generation_time);
55152 ext4_msg(sb, KERN_INFO,
55153 "mballoc: %u preallocated, %u discarded",
55154- atomic_read(&sbi->s_mb_preallocated),
55155- atomic_read(&sbi->s_mb_discarded));
55156+ atomic_read_unchecked(&sbi->s_mb_preallocated),
55157+ atomic_read_unchecked(&sbi->s_mb_discarded));
55158 }
55159
55160 free_percpu(sbi->s_locality_groups);
55161@@ -3150,16 +3150,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
55162 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
55163
55164 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
55165- atomic_inc(&sbi->s_bal_reqs);
55166- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
55167+ atomic_inc_unchecked(&sbi->s_bal_reqs);
55168+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
55169 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
55170- atomic_inc(&sbi->s_bal_success);
55171- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
55172+ atomic_inc_unchecked(&sbi->s_bal_success);
55173+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
55174 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
55175 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
55176- atomic_inc(&sbi->s_bal_goals);
55177+ atomic_inc_unchecked(&sbi->s_bal_goals);
55178 if (ac->ac_found > sbi->s_mb_max_to_scan)
55179- atomic_inc(&sbi->s_bal_breaks);
55180+ atomic_inc_unchecked(&sbi->s_bal_breaks);
55181 }
55182
55183 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
55184@@ -3559,7 +3559,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
55185 trace_ext4_mb_new_inode_pa(ac, pa);
55186
55187 ext4_mb_use_inode_pa(ac, pa);
55188- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
55189+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
55190
55191 ei = EXT4_I(ac->ac_inode);
55192 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
55193@@ -3619,7 +3619,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
55194 trace_ext4_mb_new_group_pa(ac, pa);
55195
55196 ext4_mb_use_group_pa(ac, pa);
55197- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
55198+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
55199
55200 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
55201 lg = ac->ac_lg;
55202@@ -3708,7 +3708,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
55203 * from the bitmap and continue.
55204 */
55205 }
55206- atomic_add(free, &sbi->s_mb_discarded);
55207+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
55208
55209 return err;
55210 }
55211@@ -3726,7 +3726,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
55212 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
55213 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
55214 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
55215- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
55216+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
55217 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
55218
55219 return 0;
55220diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
55221index 214461e..3614c89 100644
55222--- a/fs/ext4/mmp.c
55223+++ b/fs/ext4/mmp.c
55224@@ -113,7 +113,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
55225 void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
55226 const char *function, unsigned int line, const char *msg)
55227 {
55228- __ext4_warning(sb, function, line, msg);
55229+ __ext4_warning(sb, function, line, "%s", msg);
55230 __ext4_warning(sb, function, line,
55231 "MMP failure info: last update time: %llu, last update "
55232 "node: %s, last update device: %s\n",
55233diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
55234index 49d3c01..9579efd 100644
55235--- a/fs/ext4/resize.c
55236+++ b/fs/ext4/resize.c
55237@@ -79,12 +79,20 @@ static int verify_group_input(struct super_block *sb,
55238 ext4_fsblk_t end = start + input->blocks_count;
55239 ext4_group_t group = input->group;
55240 ext4_fsblk_t itend = input->inode_table + sbi->s_itb_per_group;
55241- unsigned overhead = ext4_group_overhead_blocks(sb, group);
55242- ext4_fsblk_t metaend = start + overhead;
55243+ unsigned overhead;
55244+ ext4_fsblk_t metaend;
55245 struct buffer_head *bh = NULL;
55246 ext4_grpblk_t free_blocks_count, offset;
55247 int err = -EINVAL;
55248
55249+ if (group != sbi->s_groups_count) {
55250+ ext4_warning(sb, "Cannot add at group %u (only %u groups)",
55251+ input->group, sbi->s_groups_count);
55252+ return -EINVAL;
55253+ }
55254+
55255+ overhead = ext4_group_overhead_blocks(sb, group);
55256+ metaend = start + overhead;
55257 input->free_blocks_count = free_blocks_count =
55258 input->blocks_count - 2 - overhead - sbi->s_itb_per_group;
55259
55260@@ -96,10 +104,7 @@ static int verify_group_input(struct super_block *sb,
55261 free_blocks_count, input->reserved_blocks);
55262
55263 ext4_get_group_no_and_offset(sb, start, NULL, &offset);
55264- if (group != sbi->s_groups_count)
55265- ext4_warning(sb, "Cannot add at group %u (only %u groups)",
55266- input->group, sbi->s_groups_count);
55267- else if (offset != 0)
55268+ if (offset != 0)
55269 ext4_warning(sb, "Last group not full");
55270 else if (input->reserved_blocks > input->blocks_count / 5)
55271 ext4_warning(sb, "Reserved blocks too high (%u)",
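
The resize.c hunk hoists the group-number check ahead of ext4_group_overhead_blocks(): previously overhead and metaend were computed from input->group before anyone verified the group was the one being added, so a bogus group fed garbage into the later arithmetic. The general validate-before-compute shape, with hypothetical names:

	#include <errno.h>
	#include <stdio.h>

	static int verify_group_input(unsigned group, unsigned groups_count)
	{
		if (group != groups_count)	/* reject before any derived math */
			return -EINVAL;

		/* only now is it safe to compute overhead/metaend from 'group' */
		return 0;
	}

	int main(void)
	{
		printf("%d\n", verify_group_input(7, 8));	/* -22 (EINVAL) */
		return 0;
	}
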
55272diff --git a/fs/ext4/super.c b/fs/ext4/super.c
55273index 3f7c39e..227f24f 100644
55274--- a/fs/ext4/super.c
55275+++ b/fs/ext4/super.c
55276@@ -1236,7 +1236,7 @@ static ext4_fsblk_t get_sb_block(void **data)
55277 }
55278
55279 #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
55280-static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
55281+static const char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
55282 "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
55283
55284 #ifdef CONFIG_QUOTA
55285@@ -2372,7 +2372,7 @@ struct ext4_attr {
55286 ssize_t (*store)(struct ext4_attr *, struct ext4_sb_info *,
55287 const char *, size_t);
55288 int offset;
55289-};
55290+} __do_const;
55291
55292 static int parse_strtoull(const char *buf,
55293 unsigned long long max, unsigned long long *value)
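
The super.c hunks const-qualify data that never changes: deprecated_msg becomes const, and struct ext4_attr gets __do_const, the patch's annotation for tables of function pointers that should live in read-only memory so their callbacks cannot be redirected at runtime. A userspace analogue of the same idea (the exact __do_const plugin semantics are assumed, not shown here):

	#include <stdio.h>

	struct attr_ops {
		ssize_t (*show)(char *buf);
	};

	static ssize_t show_impl(char *buf)
	{
		return sprintf(buf, "42\n");
	}

	/* const-qualified at file scope: placed in .rodata, not writable */
	static const struct attr_ops ops = { .show = show_impl };

	int main(void)
	{
		char buf[16];

		ops.show(buf);
		fputs(buf, stdout);
		return 0;
	}
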
55294diff --git a/fs/fcntl.c b/fs/fcntl.c
55295index 6599222..e7bf0de 100644
55296--- a/fs/fcntl.c
55297+++ b/fs/fcntl.c
55298@@ -107,6 +107,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
55299 if (err)
55300 return err;
55301
55302+ if (gr_handle_chroot_fowner(pid, type))
55303+ return -ENOENT;
55304+ if (gr_check_protected_task_fowner(pid, type))
55305+ return -EACCES;
55306+
55307 f_modown(filp, pid, type, force);
55308 return 0;
55309 }
55310diff --git a/fs/fhandle.c b/fs/fhandle.c
55311index 999ff5c..41f4109 100644
55312--- a/fs/fhandle.c
55313+++ b/fs/fhandle.c
55314@@ -67,8 +67,7 @@ static long do_sys_name_to_handle(struct path *path,
55315 } else
55316 retval = 0;
55317 /* copy the mount id */
55318- if (copy_to_user(mnt_id, &real_mount(path->mnt)->mnt_id,
55319- sizeof(*mnt_id)) ||
55320+ if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) ||
55321 copy_to_user(ufh, handle,
55322 sizeof(struct file_handle) + handle_bytes))
55323 retval = -EFAULT;
55324diff --git a/fs/file.c b/fs/file.c
55325index 4a78f98..9447397 100644
55326--- a/fs/file.c
55327+++ b/fs/file.c
55328@@ -16,6 +16,7 @@
55329 #include <linux/slab.h>
55330 #include <linux/vmalloc.h>
55331 #include <linux/file.h>
55332+#include <linux/security.h>
55333 #include <linux/fdtable.h>
55334 #include <linux/bitops.h>
55335 #include <linux/interrupt.h>
55336@@ -828,6 +829,7 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
55337 if (!file)
55338 return __close_fd(files, fd);
55339
55340+ gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
55341 if (fd >= rlimit(RLIMIT_NOFILE))
55342 return -EBADF;
55343
55344@@ -854,6 +856,7 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
55345 if (unlikely(oldfd == newfd))
55346 return -EINVAL;
55347
55348+ gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
55349 if (newfd >= rlimit(RLIMIT_NOFILE))
55350 return -EBADF;
55351
55352@@ -909,6 +912,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
55353 int f_dupfd(unsigned int from, struct file *file, unsigned flags)
55354 {
55355 int err;
55356+ gr_learn_resource(current, RLIMIT_NOFILE, from, 0);
55357 if (from >= rlimit(RLIMIT_NOFILE))
55358 return -EINVAL;
55359 err = alloc_fd(from, flags);
55360diff --git a/fs/filesystems.c b/fs/filesystems.c
55361index 92567d9..fcd8cbf 100644
55362--- a/fs/filesystems.c
55363+++ b/fs/filesystems.c
55364@@ -273,7 +273,11 @@ struct file_system_type *get_fs_type(const char *name)
55365 int len = dot ? dot - name : strlen(name);
55366
55367 fs = __get_fs_type(name, len);
55368+#ifdef CONFIG_GRKERNSEC_MODHARDEN
55369+ if (!fs && (___request_module(true, "grsec_modharden_fs", "fs-%.*s", len, name) == 0))
55370+#else
55371 if (!fs && (request_module("fs-%.*s", len, name) == 0))
55372+#endif
55373 fs = __get_fs_type(name, len);
55374
55375 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
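
Both branches of the MODHARDEN ifdef format the module name as "fs-%.*s": the %.*s conversion takes an explicit maximum length, here the bytes before the optional '.' subtype separator, so a request for "ext4.subtype" loads the fs-ext4 module. The same formatting in isolation:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		const char *name = "ext4.subtype";
		const char *dot = strchr(name, '.');
		int len = dot ? (int)(dot - name) : (int)strlen(name);

		printf("request_module(\"fs-%.*s\")\n", len, name);	/* fs-ext4 */
		return 0;
	}
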
55376diff --git a/fs/fs_struct.c b/fs/fs_struct.c
55377index d8ac61d..79a36f0 100644
55378--- a/fs/fs_struct.c
55379+++ b/fs/fs_struct.c
55380@@ -4,6 +4,7 @@
55381 #include <linux/path.h>
55382 #include <linux/slab.h>
55383 #include <linux/fs_struct.h>
55384+#include <linux/grsecurity.h>
55385 #include "internal.h"
55386
55387 /*
55388@@ -19,6 +20,7 @@ void set_fs_root(struct fs_struct *fs, const struct path *path)
55389 write_seqcount_begin(&fs->seq);
55390 old_root = fs->root;
55391 fs->root = *path;
55392+ gr_set_chroot_entries(current, path);
55393 write_seqcount_end(&fs->seq);
55394 spin_unlock(&fs->lock);
55395 if (old_root.dentry)
55396@@ -67,6 +69,10 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
55397 int hits = 0;
55398 spin_lock(&fs->lock);
55399 write_seqcount_begin(&fs->seq);
55400+	/* this root replacement is only done by pivot_root;
55401+ leave grsec's chroot tagging alone for this task
55402+ so that a pivoted root isn't treated as a chroot
55403+ */
55404 hits += replace_path(&fs->root, old_root, new_root);
55405 hits += replace_path(&fs->pwd, old_root, new_root);
55406 write_seqcount_end(&fs->seq);
55407@@ -99,7 +105,8 @@ void exit_fs(struct task_struct *tsk)
55408 task_lock(tsk);
55409 spin_lock(&fs->lock);
55410 tsk->fs = NULL;
55411- kill = !--fs->users;
55412+ gr_clear_chroot_entries(tsk);
55413+ kill = !atomic_dec_return(&fs->users);
55414 spin_unlock(&fs->lock);
55415 task_unlock(tsk);
55416 if (kill)
55417@@ -112,7 +119,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
55418 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
55419 /* We don't need to lock fs - think why ;-) */
55420 if (fs) {
55421- fs->users = 1;
55422+ atomic_set(&fs->users, 1);
55423 fs->in_exec = 0;
55424 spin_lock_init(&fs->lock);
55425 seqcount_init(&fs->seq);
55426@@ -121,6 +128,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
55427 spin_lock(&old->lock);
55428 fs->root = old->root;
55429 path_get(&fs->root);
55430+ /* instead of calling gr_set_chroot_entries here,
55431+ we call it from every caller of this function
55432+ */
55433 fs->pwd = old->pwd;
55434 path_get(&fs->pwd);
55435 spin_unlock(&old->lock);
55436@@ -139,8 +149,9 @@ int unshare_fs_struct(void)
55437
55438 task_lock(current);
55439 spin_lock(&fs->lock);
55440- kill = !--fs->users;
55441+ kill = !atomic_dec_return(&fs->users);
55442 current->fs = new_fs;
55443+ gr_set_chroot_entries(current, &new_fs->root);
55444 spin_unlock(&fs->lock);
55445 task_unlock(current);
55446
55447@@ -153,13 +164,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
55448
55449 int current_umask(void)
55450 {
55451- return current->fs->umask;
55452+ return current->fs->umask | gr_acl_umask();
55453 }
55454 EXPORT_SYMBOL(current_umask);
55455
55456 /* to be mentioned only in INIT_TASK */
55457 struct fs_struct init_fs = {
55458- .users = 1,
55459+ .users = ATOMIC_INIT(1),
55460 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
55461 .seq = SEQCNT_ZERO,
55462 .umask = 0022,
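
fs_struct.c converts fs->users from a plain int to an atomic_t, so the drop-and-test in exit_fs() and unshare_fs_struct() no longer depends on fs->lock for the counter's integrity. With C11 atomics the same "decrement and detect last user" idiom looks like this (an analogue, not the kernel API):

	#include <stdatomic.h>
	#include <stdio.h>

	int main(void)
	{
		atomic_int users = 1;

		/* fetch_sub returns the old value, so old == 1 means we were last;
		   equivalent to the patch's atomic_dec_return(&fs->users) == 0 */
		int kill = atomic_fetch_sub(&users, 1) == 1;

		printf("kill=%d users=%d\n", kill, atomic_load(&users));
		return 0;
	}
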
55463diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
55464index e2cba1f..17a25bb 100644
55465--- a/fs/fscache/cookie.c
55466+++ b/fs/fscache/cookie.c
55467@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
55468 parent ? (char *) parent->def->name : "<no-parent>",
55469 def->name, netfs_data);
55470
55471- fscache_stat(&fscache_n_acquires);
55472+ fscache_stat_unchecked(&fscache_n_acquires);
55473
55474 /* if there's no parent cookie, then we don't create one here either */
55475 if (!parent) {
55476- fscache_stat(&fscache_n_acquires_null);
55477+ fscache_stat_unchecked(&fscache_n_acquires_null);
55478 _leave(" [no parent]");
55479 return NULL;
55480 }
55481@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
55482 /* allocate and initialise a cookie */
55483 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
55484 if (!cookie) {
55485- fscache_stat(&fscache_n_acquires_oom);
55486+ fscache_stat_unchecked(&fscache_n_acquires_oom);
55487 _leave(" [ENOMEM]");
55488 return NULL;
55489 }
55490@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
55491
55492 switch (cookie->def->type) {
55493 case FSCACHE_COOKIE_TYPE_INDEX:
55494- fscache_stat(&fscache_n_cookie_index);
55495+ fscache_stat_unchecked(&fscache_n_cookie_index);
55496 break;
55497 case FSCACHE_COOKIE_TYPE_DATAFILE:
55498- fscache_stat(&fscache_n_cookie_data);
55499+ fscache_stat_unchecked(&fscache_n_cookie_data);
55500 break;
55501 default:
55502- fscache_stat(&fscache_n_cookie_special);
55503+ fscache_stat_unchecked(&fscache_n_cookie_special);
55504 break;
55505 }
55506
55507@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
55508 if (fscache_acquire_non_index_cookie(cookie) < 0) {
55509 atomic_dec(&parent->n_children);
55510 __fscache_cookie_put(cookie);
55511- fscache_stat(&fscache_n_acquires_nobufs);
55512+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
55513 _leave(" = NULL");
55514 return NULL;
55515 }
55516 }
55517
55518- fscache_stat(&fscache_n_acquires_ok);
55519+ fscache_stat_unchecked(&fscache_n_acquires_ok);
55520 _leave(" = %p", cookie);
55521 return cookie;
55522 }
55523@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
55524 cache = fscache_select_cache_for_object(cookie->parent);
55525 if (!cache) {
55526 up_read(&fscache_addremove_sem);
55527- fscache_stat(&fscache_n_acquires_no_cache);
55528+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
55529 _leave(" = -ENOMEDIUM [no cache]");
55530 return -ENOMEDIUM;
55531 }
55532@@ -255,12 +255,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
55533 object = cache->ops->alloc_object(cache, cookie);
55534 fscache_stat_d(&fscache_n_cop_alloc_object);
55535 if (IS_ERR(object)) {
55536- fscache_stat(&fscache_n_object_no_alloc);
55537+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
55538 ret = PTR_ERR(object);
55539 goto error;
55540 }
55541
55542- fscache_stat(&fscache_n_object_alloc);
55543+ fscache_stat_unchecked(&fscache_n_object_alloc);
55544
55545 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
55546
55547@@ -376,7 +376,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie)
55548
55549 _enter("{%s}", cookie->def->name);
55550
55551- fscache_stat(&fscache_n_invalidates);
55552+ fscache_stat_unchecked(&fscache_n_invalidates);
55553
55554 /* Only permit invalidation of data files. Invalidating an index will
55555 * require the caller to release all its attachments to the tree rooted
55556@@ -434,10 +434,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
55557 {
55558 struct fscache_object *object;
55559
55560- fscache_stat(&fscache_n_updates);
55561+ fscache_stat_unchecked(&fscache_n_updates);
55562
55563 if (!cookie) {
55564- fscache_stat(&fscache_n_updates_null);
55565+ fscache_stat_unchecked(&fscache_n_updates_null);
55566 _leave(" [no cookie]");
55567 return;
55568 }
55569@@ -471,12 +471,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
55570 struct fscache_object *object;
55571 unsigned long event;
55572
55573- fscache_stat(&fscache_n_relinquishes);
55574+ fscache_stat_unchecked(&fscache_n_relinquishes);
55575 if (retire)
55576- fscache_stat(&fscache_n_relinquishes_retire);
55577+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
55578
55579 if (!cookie) {
55580- fscache_stat(&fscache_n_relinquishes_null);
55581+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
55582 _leave(" [no cookie]");
55583 return;
55584 }
55585@@ -492,7 +492,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
55586
55587 /* wait for the cookie to finish being instantiated (or to fail) */
55588 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
55589- fscache_stat(&fscache_n_relinquishes_waitcrt);
55590+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
55591 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
55592 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
55593 }
55594diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
55595index ee38fef..0a326d4 100644
55596--- a/fs/fscache/internal.h
55597+++ b/fs/fscache/internal.h
55598@@ -148,101 +148,101 @@ extern void fscache_proc_cleanup(void);
55599 * stats.c
55600 */
55601 #ifdef CONFIG_FSCACHE_STATS
55602-extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
55603-extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
55604+extern atomic_unchecked_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
55605+extern atomic_unchecked_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
55606
55607-extern atomic_t fscache_n_op_pend;
55608-extern atomic_t fscache_n_op_run;
55609-extern atomic_t fscache_n_op_enqueue;
55610-extern atomic_t fscache_n_op_deferred_release;
55611-extern atomic_t fscache_n_op_release;
55612-extern atomic_t fscache_n_op_gc;
55613-extern atomic_t fscache_n_op_cancelled;
55614-extern atomic_t fscache_n_op_rejected;
55615+extern atomic_unchecked_t fscache_n_op_pend;
55616+extern atomic_unchecked_t fscache_n_op_run;
55617+extern atomic_unchecked_t fscache_n_op_enqueue;
55618+extern atomic_unchecked_t fscache_n_op_deferred_release;
55619+extern atomic_unchecked_t fscache_n_op_release;
55620+extern atomic_unchecked_t fscache_n_op_gc;
55621+extern atomic_unchecked_t fscache_n_op_cancelled;
55622+extern atomic_unchecked_t fscache_n_op_rejected;
55623
55624-extern atomic_t fscache_n_attr_changed;
55625-extern atomic_t fscache_n_attr_changed_ok;
55626-extern atomic_t fscache_n_attr_changed_nobufs;
55627-extern atomic_t fscache_n_attr_changed_nomem;
55628-extern atomic_t fscache_n_attr_changed_calls;
55629+extern atomic_unchecked_t fscache_n_attr_changed;
55630+extern atomic_unchecked_t fscache_n_attr_changed_ok;
55631+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
55632+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
55633+extern atomic_unchecked_t fscache_n_attr_changed_calls;
55634
55635-extern atomic_t fscache_n_allocs;
55636-extern atomic_t fscache_n_allocs_ok;
55637-extern atomic_t fscache_n_allocs_wait;
55638-extern atomic_t fscache_n_allocs_nobufs;
55639-extern atomic_t fscache_n_allocs_intr;
55640-extern atomic_t fscache_n_allocs_object_dead;
55641-extern atomic_t fscache_n_alloc_ops;
55642-extern atomic_t fscache_n_alloc_op_waits;
55643+extern atomic_unchecked_t fscache_n_allocs;
55644+extern atomic_unchecked_t fscache_n_allocs_ok;
55645+extern atomic_unchecked_t fscache_n_allocs_wait;
55646+extern atomic_unchecked_t fscache_n_allocs_nobufs;
55647+extern atomic_unchecked_t fscache_n_allocs_intr;
55648+extern atomic_unchecked_t fscache_n_allocs_object_dead;
55649+extern atomic_unchecked_t fscache_n_alloc_ops;
55650+extern atomic_unchecked_t fscache_n_alloc_op_waits;
55651
55652-extern atomic_t fscache_n_retrievals;
55653-extern atomic_t fscache_n_retrievals_ok;
55654-extern atomic_t fscache_n_retrievals_wait;
55655-extern atomic_t fscache_n_retrievals_nodata;
55656-extern atomic_t fscache_n_retrievals_nobufs;
55657-extern atomic_t fscache_n_retrievals_intr;
55658-extern atomic_t fscache_n_retrievals_nomem;
55659-extern atomic_t fscache_n_retrievals_object_dead;
55660-extern atomic_t fscache_n_retrieval_ops;
55661-extern atomic_t fscache_n_retrieval_op_waits;
55662+extern atomic_unchecked_t fscache_n_retrievals;
55663+extern atomic_unchecked_t fscache_n_retrievals_ok;
55664+extern atomic_unchecked_t fscache_n_retrievals_wait;
55665+extern atomic_unchecked_t fscache_n_retrievals_nodata;
55666+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
55667+extern atomic_unchecked_t fscache_n_retrievals_intr;
55668+extern atomic_unchecked_t fscache_n_retrievals_nomem;
55669+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
55670+extern atomic_unchecked_t fscache_n_retrieval_ops;
55671+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
55672
55673-extern atomic_t fscache_n_stores;
55674-extern atomic_t fscache_n_stores_ok;
55675-extern atomic_t fscache_n_stores_again;
55676-extern atomic_t fscache_n_stores_nobufs;
55677-extern atomic_t fscache_n_stores_oom;
55678-extern atomic_t fscache_n_store_ops;
55679-extern atomic_t fscache_n_store_calls;
55680-extern atomic_t fscache_n_store_pages;
55681-extern atomic_t fscache_n_store_radix_deletes;
55682-extern atomic_t fscache_n_store_pages_over_limit;
55683+extern atomic_unchecked_t fscache_n_stores;
55684+extern atomic_unchecked_t fscache_n_stores_ok;
55685+extern atomic_unchecked_t fscache_n_stores_again;
55686+extern atomic_unchecked_t fscache_n_stores_nobufs;
55687+extern atomic_unchecked_t fscache_n_stores_oom;
55688+extern atomic_unchecked_t fscache_n_store_ops;
55689+extern atomic_unchecked_t fscache_n_store_calls;
55690+extern atomic_unchecked_t fscache_n_store_pages;
55691+extern atomic_unchecked_t fscache_n_store_radix_deletes;
55692+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
55693
55694-extern atomic_t fscache_n_store_vmscan_not_storing;
55695-extern atomic_t fscache_n_store_vmscan_gone;
55696-extern atomic_t fscache_n_store_vmscan_busy;
55697-extern atomic_t fscache_n_store_vmscan_cancelled;
55698-extern atomic_t fscache_n_store_vmscan_wait;
55699+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
55700+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
55701+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
55702+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
55703+extern atomic_unchecked_t fscache_n_store_vmscan_wait;
55704
55705-extern atomic_t fscache_n_marks;
55706-extern atomic_t fscache_n_uncaches;
55707+extern atomic_unchecked_t fscache_n_marks;
55708+extern atomic_unchecked_t fscache_n_uncaches;
55709
55710-extern atomic_t fscache_n_acquires;
55711-extern atomic_t fscache_n_acquires_null;
55712-extern atomic_t fscache_n_acquires_no_cache;
55713-extern atomic_t fscache_n_acquires_ok;
55714-extern atomic_t fscache_n_acquires_nobufs;
55715-extern atomic_t fscache_n_acquires_oom;
55716+extern atomic_unchecked_t fscache_n_acquires;
55717+extern atomic_unchecked_t fscache_n_acquires_null;
55718+extern atomic_unchecked_t fscache_n_acquires_no_cache;
55719+extern atomic_unchecked_t fscache_n_acquires_ok;
55720+extern atomic_unchecked_t fscache_n_acquires_nobufs;
55721+extern atomic_unchecked_t fscache_n_acquires_oom;
55722
55723-extern atomic_t fscache_n_invalidates;
55724-extern atomic_t fscache_n_invalidates_run;
55725+extern atomic_unchecked_t fscache_n_invalidates;
55726+extern atomic_unchecked_t fscache_n_invalidates_run;
55727
55728-extern atomic_t fscache_n_updates;
55729-extern atomic_t fscache_n_updates_null;
55730-extern atomic_t fscache_n_updates_run;
55731+extern atomic_unchecked_t fscache_n_updates;
55732+extern atomic_unchecked_t fscache_n_updates_null;
55733+extern atomic_unchecked_t fscache_n_updates_run;
55734
55735-extern atomic_t fscache_n_relinquishes;
55736-extern atomic_t fscache_n_relinquishes_null;
55737-extern atomic_t fscache_n_relinquishes_waitcrt;
55738-extern atomic_t fscache_n_relinquishes_retire;
55739+extern atomic_unchecked_t fscache_n_relinquishes;
55740+extern atomic_unchecked_t fscache_n_relinquishes_null;
55741+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
55742+extern atomic_unchecked_t fscache_n_relinquishes_retire;
55743
55744-extern atomic_t fscache_n_cookie_index;
55745-extern atomic_t fscache_n_cookie_data;
55746-extern atomic_t fscache_n_cookie_special;
55747+extern atomic_unchecked_t fscache_n_cookie_index;
55748+extern atomic_unchecked_t fscache_n_cookie_data;
55749+extern atomic_unchecked_t fscache_n_cookie_special;
55750
55751-extern atomic_t fscache_n_object_alloc;
55752-extern atomic_t fscache_n_object_no_alloc;
55753-extern atomic_t fscache_n_object_lookups;
55754-extern atomic_t fscache_n_object_lookups_negative;
55755-extern atomic_t fscache_n_object_lookups_positive;
55756-extern atomic_t fscache_n_object_lookups_timed_out;
55757-extern atomic_t fscache_n_object_created;
55758-extern atomic_t fscache_n_object_avail;
55759-extern atomic_t fscache_n_object_dead;
55760+extern atomic_unchecked_t fscache_n_object_alloc;
55761+extern atomic_unchecked_t fscache_n_object_no_alloc;
55762+extern atomic_unchecked_t fscache_n_object_lookups;
55763+extern atomic_unchecked_t fscache_n_object_lookups_negative;
55764+extern atomic_unchecked_t fscache_n_object_lookups_positive;
55765+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
55766+extern atomic_unchecked_t fscache_n_object_created;
55767+extern atomic_unchecked_t fscache_n_object_avail;
55768+extern atomic_unchecked_t fscache_n_object_dead;
55769
55770-extern atomic_t fscache_n_checkaux_none;
55771-extern atomic_t fscache_n_checkaux_okay;
55772-extern atomic_t fscache_n_checkaux_update;
55773-extern atomic_t fscache_n_checkaux_obsolete;
55774+extern atomic_unchecked_t fscache_n_checkaux_none;
55775+extern atomic_unchecked_t fscache_n_checkaux_okay;
55776+extern atomic_unchecked_t fscache_n_checkaux_update;
55777+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
55778
55779 extern atomic_t fscache_n_cop_alloc_object;
55780 extern atomic_t fscache_n_cop_lookup_object;
55781@@ -267,6 +267,11 @@ static inline void fscache_stat(atomic_t *stat)
55782 atomic_inc(stat);
55783 }
55784
55785+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
55786+{
55787+ atomic_inc_unchecked(stat);
55788+}
55789+
55790 static inline void fscache_stat_d(atomic_t *stat)
55791 {
55792 atomic_dec(stat);
55793@@ -279,6 +284,7 @@ extern const struct file_operations fscache_stats_fops;
55794
55795 #define __fscache_stat(stat) (NULL)
55796 #define fscache_stat(stat) do {} while (0)
55797+#define fscache_stat_unchecked(stat) do {} while (0)
55798 #define fscache_stat_d(stat) do {} while (0)
55799 #endif
55800
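Note on the conversion above: these fscache counters are pure event statistics, so wrap-around is harmless, and under the PaX REFCOUNT feature the checked atomic_inc() would otherwise trap on a saturating counter as if a reference count had overflowed. Moving them to atomic_unchecked_t opts them out of that check. A minimal sketch of how the unchecked type degrades when the feature is disabled (the real per-architecture definitions appear earlier in this patch; treat the fallback below as an illustration, not the patch's literal text):

    #ifndef CONFIG_PAX_REFCOUNT
    /* With REFCOUNT disabled, the unchecked type is a plain alias. */
    typedef atomic_t atomic_unchecked_t;
    #define atomic_read_unchecked(v)        atomic_read(v)
    #define atomic_inc_unchecked(v)         atomic_inc(v)
    #define atomic_inc_return_unchecked(v)  atomic_inc_return(v)
    #endif
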
55801diff --git a/fs/fscache/object.c b/fs/fscache/object.c
55802index 50d41c1..10ee117 100644
55803--- a/fs/fscache/object.c
55804+++ b/fs/fscache/object.c
55805@@ -143,7 +143,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
55806 /* Invalidate an object on disk */
55807 case FSCACHE_OBJECT_INVALIDATING:
55808 clear_bit(FSCACHE_OBJECT_EV_INVALIDATE, &object->events);
55809- fscache_stat(&fscache_n_invalidates_run);
55810+ fscache_stat_unchecked(&fscache_n_invalidates_run);
55811 fscache_stat(&fscache_n_cop_invalidate_object);
55812 fscache_invalidate_object(object);
55813 fscache_stat_d(&fscache_n_cop_invalidate_object);
55814@@ -153,7 +153,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
55815 /* update the object metadata on disk */
55816 case FSCACHE_OBJECT_UPDATING:
55817 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
55818- fscache_stat(&fscache_n_updates_run);
55819+ fscache_stat_unchecked(&fscache_n_updates_run);
55820 fscache_stat(&fscache_n_cop_update_object);
55821 object->cache->ops->update_object(object);
55822 fscache_stat_d(&fscache_n_cop_update_object);
55823@@ -242,7 +242,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
55824 spin_lock(&object->lock);
55825 object->state = FSCACHE_OBJECT_DEAD;
55826 spin_unlock(&object->lock);
55827- fscache_stat(&fscache_n_object_dead);
55828+ fscache_stat_unchecked(&fscache_n_object_dead);
55829 goto terminal_transit;
55830
55831 /* handle the parent cache of this object being withdrawn from
55832@@ -257,7 +257,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
55833 spin_lock(&object->lock);
55834 object->state = FSCACHE_OBJECT_DEAD;
55835 spin_unlock(&object->lock);
55836- fscache_stat(&fscache_n_object_dead);
55837+ fscache_stat_unchecked(&fscache_n_object_dead);
55838 goto terminal_transit;
55839
55840 /* complain about the object being woken up once it is
55841@@ -495,7 +495,7 @@ static void fscache_lookup_object(struct fscache_object *object)
55842 parent->cookie->def->name, cookie->def->name,
55843 object->cache->tag->name);
55844
55845- fscache_stat(&fscache_n_object_lookups);
55846+ fscache_stat_unchecked(&fscache_n_object_lookups);
55847 fscache_stat(&fscache_n_cop_lookup_object);
55848 ret = object->cache->ops->lookup_object(object);
55849 fscache_stat_d(&fscache_n_cop_lookup_object);
55850@@ -506,7 +506,7 @@ static void fscache_lookup_object(struct fscache_object *object)
55851 if (ret == -ETIMEDOUT) {
55852 /* probably stuck behind another object, so move this one to
55853 * the back of the queue */
55854- fscache_stat(&fscache_n_object_lookups_timed_out);
55855+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
55856 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
55857 }
55858
55859@@ -529,7 +529,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
55860
55861 spin_lock(&object->lock);
55862 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
55863- fscache_stat(&fscache_n_object_lookups_negative);
55864+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
55865
55866 /* transit here to allow write requests to begin stacking up
55867 * and read requests to begin returning ENODATA */
55868@@ -575,7 +575,7 @@ void fscache_obtained_object(struct fscache_object *object)
55869 * result, in which case there may be data available */
55870 spin_lock(&object->lock);
55871 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
55872- fscache_stat(&fscache_n_object_lookups_positive);
55873+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
55874
55875 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
55876
55877@@ -589,7 +589,7 @@ void fscache_obtained_object(struct fscache_object *object)
55878 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
55879 } else {
55880 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
55881- fscache_stat(&fscache_n_object_created);
55882+ fscache_stat_unchecked(&fscache_n_object_created);
55883
55884 object->state = FSCACHE_OBJECT_AVAILABLE;
55885 spin_unlock(&object->lock);
55886@@ -634,7 +634,7 @@ static void fscache_object_available(struct fscache_object *object)
55887 fscache_enqueue_dependents(object);
55888
55889 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
55890- fscache_stat(&fscache_n_object_avail);
55891+ fscache_stat_unchecked(&fscache_n_object_avail);
55892
55893 _leave("");
55894 }
55895@@ -894,7 +894,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
55896 enum fscache_checkaux result;
55897
55898 if (!object->cookie->def->check_aux) {
55899- fscache_stat(&fscache_n_checkaux_none);
55900+ fscache_stat_unchecked(&fscache_n_checkaux_none);
55901 return FSCACHE_CHECKAUX_OKAY;
55902 }
55903
55904@@ -903,17 +903,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
55905 switch (result) {
55906 /* entry okay as is */
55907 case FSCACHE_CHECKAUX_OKAY:
55908- fscache_stat(&fscache_n_checkaux_okay);
55909+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
55910 break;
55911
55912 /* entry requires update */
55913 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
55914- fscache_stat(&fscache_n_checkaux_update);
55915+ fscache_stat_unchecked(&fscache_n_checkaux_update);
55916 break;
55917
55918 /* entry requires deletion */
55919 case FSCACHE_CHECKAUX_OBSOLETE:
55920- fscache_stat(&fscache_n_checkaux_obsolete);
55921+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
55922 break;
55923
55924 default:
55925diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
55926index 762a9ec..2023284 100644
55927--- a/fs/fscache/operation.c
55928+++ b/fs/fscache/operation.c
55929@@ -17,7 +17,7 @@
55930 #include <linux/slab.h>
55931 #include "internal.h"
55932
55933-atomic_t fscache_op_debug_id;
55934+atomic_unchecked_t fscache_op_debug_id;
55935 EXPORT_SYMBOL(fscache_op_debug_id);
55936
55937 /**
55938@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
55939 ASSERTCMP(atomic_read(&op->usage), >, 0);
55940 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
55941
55942- fscache_stat(&fscache_n_op_enqueue);
55943+ fscache_stat_unchecked(&fscache_n_op_enqueue);
55944 switch (op->flags & FSCACHE_OP_TYPE) {
55945 case FSCACHE_OP_ASYNC:
55946 _debug("queue async");
55947@@ -73,7 +73,7 @@ static void fscache_run_op(struct fscache_object *object,
55948 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
55949 if (op->processor)
55950 fscache_enqueue_operation(op);
55951- fscache_stat(&fscache_n_op_run);
55952+ fscache_stat_unchecked(&fscache_n_op_run);
55953 }
55954
55955 /*
55956@@ -105,11 +105,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
55957 if (object->n_in_progress > 0) {
55958 atomic_inc(&op->usage);
55959 list_add_tail(&op->pend_link, &object->pending_ops);
55960- fscache_stat(&fscache_n_op_pend);
55961+ fscache_stat_unchecked(&fscache_n_op_pend);
55962 } else if (!list_empty(&object->pending_ops)) {
55963 atomic_inc(&op->usage);
55964 list_add_tail(&op->pend_link, &object->pending_ops);
55965- fscache_stat(&fscache_n_op_pend);
55966+ fscache_stat_unchecked(&fscache_n_op_pend);
55967 fscache_start_operations(object);
55968 } else {
55969 ASSERTCMP(object->n_in_progress, ==, 0);
55970@@ -125,7 +125,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
55971 object->n_exclusive++; /* reads and writes must wait */
55972 atomic_inc(&op->usage);
55973 list_add_tail(&op->pend_link, &object->pending_ops);
55974- fscache_stat(&fscache_n_op_pend);
55975+ fscache_stat_unchecked(&fscache_n_op_pend);
55976 ret = 0;
55977 } else {
55978 /* If we're in any other state, there must have been an I/O
55979@@ -215,11 +215,11 @@ int fscache_submit_op(struct fscache_object *object,
55980 if (object->n_exclusive > 0) {
55981 atomic_inc(&op->usage);
55982 list_add_tail(&op->pend_link, &object->pending_ops);
55983- fscache_stat(&fscache_n_op_pend);
55984+ fscache_stat_unchecked(&fscache_n_op_pend);
55985 } else if (!list_empty(&object->pending_ops)) {
55986 atomic_inc(&op->usage);
55987 list_add_tail(&op->pend_link, &object->pending_ops);
55988- fscache_stat(&fscache_n_op_pend);
55989+ fscache_stat_unchecked(&fscache_n_op_pend);
55990 fscache_start_operations(object);
55991 } else {
55992 ASSERTCMP(object->n_exclusive, ==, 0);
55993@@ -231,12 +231,12 @@ int fscache_submit_op(struct fscache_object *object,
55994 object->n_ops++;
55995 atomic_inc(&op->usage);
55996 list_add_tail(&op->pend_link, &object->pending_ops);
55997- fscache_stat(&fscache_n_op_pend);
55998+ fscache_stat_unchecked(&fscache_n_op_pend);
55999 ret = 0;
56000 } else if (object->state == FSCACHE_OBJECT_DYING ||
56001 object->state == FSCACHE_OBJECT_LC_DYING ||
56002 object->state == FSCACHE_OBJECT_WITHDRAWING) {
56003- fscache_stat(&fscache_n_op_rejected);
56004+ fscache_stat_unchecked(&fscache_n_op_rejected);
56005 op->state = FSCACHE_OP_ST_CANCELLED;
56006 ret = -ENOBUFS;
56007 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
56008@@ -315,7 +315,7 @@ int fscache_cancel_op(struct fscache_operation *op,
56009 ret = -EBUSY;
56010 if (op->state == FSCACHE_OP_ST_PENDING) {
56011 ASSERT(!list_empty(&op->pend_link));
56012- fscache_stat(&fscache_n_op_cancelled);
56013+ fscache_stat_unchecked(&fscache_n_op_cancelled);
56014 list_del_init(&op->pend_link);
56015 if (do_cancel)
56016 do_cancel(op);
56017@@ -347,7 +347,7 @@ void fscache_cancel_all_ops(struct fscache_object *object)
56018 while (!list_empty(&object->pending_ops)) {
56019 op = list_entry(object->pending_ops.next,
56020 struct fscache_operation, pend_link);
56021- fscache_stat(&fscache_n_op_cancelled);
56022+ fscache_stat_unchecked(&fscache_n_op_cancelled);
56023 list_del_init(&op->pend_link);
56024
56025 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
56026@@ -419,7 +419,7 @@ void fscache_put_operation(struct fscache_operation *op)
56027 op->state, ==, FSCACHE_OP_ST_CANCELLED);
56028 op->state = FSCACHE_OP_ST_DEAD;
56029
56030- fscache_stat(&fscache_n_op_release);
56031+ fscache_stat_unchecked(&fscache_n_op_release);
56032
56033 if (op->release) {
56034 op->release(op);
56035@@ -442,7 +442,7 @@ void fscache_put_operation(struct fscache_operation *op)
56036 * lock, and defer it otherwise */
56037 if (!spin_trylock(&object->lock)) {
56038 _debug("defer put");
56039- fscache_stat(&fscache_n_op_deferred_release);
56040+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
56041
56042 cache = object->cache;
56043 spin_lock(&cache->op_gc_list_lock);
56044@@ -495,7 +495,7 @@ void fscache_operation_gc(struct work_struct *work)
56045
56046 _debug("GC DEFERRED REL OBJ%x OP%x",
56047 object->debug_id, op->debug_id);
56048- fscache_stat(&fscache_n_op_gc);
56049+ fscache_stat_unchecked(&fscache_n_op_gc);
56050
56051 ASSERTCMP(atomic_read(&op->usage), ==, 0);
56052 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);
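fscache_op_debug_id above is an ID dispenser rather than a reference count: each operation takes the next value purely for log correlation, so overflow is benign and the unchecked increment is the right tool. The pattern, as it appears in this patch's page.c hunk below:

    /* allocate a debug ID; the value is used once and may wrap */
    op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
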
56053diff --git a/fs/fscache/page.c b/fs/fscache/page.c
56054index ff000e5..c44ec6d 100644
56055--- a/fs/fscache/page.c
56056+++ b/fs/fscache/page.c
56057@@ -61,7 +61,7 @@ try_again:
56058 val = radix_tree_lookup(&cookie->stores, page->index);
56059 if (!val) {
56060 rcu_read_unlock();
56061- fscache_stat(&fscache_n_store_vmscan_not_storing);
56062+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
56063 __fscache_uncache_page(cookie, page);
56064 return true;
56065 }
56066@@ -91,11 +91,11 @@ try_again:
56067 spin_unlock(&cookie->stores_lock);
56068
56069 if (xpage) {
56070- fscache_stat(&fscache_n_store_vmscan_cancelled);
56071- fscache_stat(&fscache_n_store_radix_deletes);
56072+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
56073+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
56074 ASSERTCMP(xpage, ==, page);
56075 } else {
56076- fscache_stat(&fscache_n_store_vmscan_gone);
56077+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
56078 }
56079
56080 wake_up_bit(&cookie->flags, 0);
56081@@ -110,11 +110,11 @@ page_busy:
56082 * sleeping on memory allocation, so we may need to impose a timeout
56083 * too. */
56084 if (!(gfp & __GFP_WAIT)) {
56085- fscache_stat(&fscache_n_store_vmscan_busy);
56086+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
56087 return false;
56088 }
56089
56090- fscache_stat(&fscache_n_store_vmscan_wait);
56091+ fscache_stat_unchecked(&fscache_n_store_vmscan_wait);
56092 __fscache_wait_on_page_write(cookie, page);
56093 gfp &= ~__GFP_WAIT;
56094 goto try_again;
56095@@ -140,7 +140,7 @@ static void fscache_end_page_write(struct fscache_object *object,
56096 FSCACHE_COOKIE_STORING_TAG);
56097 if (!radix_tree_tag_get(&cookie->stores, page->index,
56098 FSCACHE_COOKIE_PENDING_TAG)) {
56099- fscache_stat(&fscache_n_store_radix_deletes);
56100+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
56101 xpage = radix_tree_delete(&cookie->stores, page->index);
56102 }
56103 spin_unlock(&cookie->stores_lock);
56104@@ -161,7 +161,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
56105
56106 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
56107
56108- fscache_stat(&fscache_n_attr_changed_calls);
56109+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
56110
56111 if (fscache_object_is_active(object)) {
56112 fscache_stat(&fscache_n_cop_attr_changed);
56113@@ -187,11 +187,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
56114
56115 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
56116
56117- fscache_stat(&fscache_n_attr_changed);
56118+ fscache_stat_unchecked(&fscache_n_attr_changed);
56119
56120 op = kzalloc(sizeof(*op), GFP_KERNEL);
56121 if (!op) {
56122- fscache_stat(&fscache_n_attr_changed_nomem);
56123+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
56124 _leave(" = -ENOMEM");
56125 return -ENOMEM;
56126 }
56127@@ -209,7 +209,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
56128 if (fscache_submit_exclusive_op(object, op) < 0)
56129 goto nobufs;
56130 spin_unlock(&cookie->lock);
56131- fscache_stat(&fscache_n_attr_changed_ok);
56132+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
56133 fscache_put_operation(op);
56134 _leave(" = 0");
56135 return 0;
56136@@ -217,7 +217,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
56137 nobufs:
56138 spin_unlock(&cookie->lock);
56139 kfree(op);
56140- fscache_stat(&fscache_n_attr_changed_nobufs);
56141+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
56142 _leave(" = %d", -ENOBUFS);
56143 return -ENOBUFS;
56144 }
56145@@ -255,7 +255,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
56146 /* allocate a retrieval operation and attempt to submit it */
56147 op = kzalloc(sizeof(*op), GFP_NOIO);
56148 if (!op) {
56149- fscache_stat(&fscache_n_retrievals_nomem);
56150+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
56151 return NULL;
56152 }
56153
56154@@ -283,13 +283,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
56155 return 0;
56156 }
56157
56158- fscache_stat(&fscache_n_retrievals_wait);
56159+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
56160
56161 jif = jiffies;
56162 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
56163 fscache_wait_bit_interruptible,
56164 TASK_INTERRUPTIBLE) != 0) {
56165- fscache_stat(&fscache_n_retrievals_intr);
56166+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
56167 _leave(" = -ERESTARTSYS");
56168 return -ERESTARTSYS;
56169 }
56170@@ -318,8 +318,8 @@ static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
56171 */
56172 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
56173 struct fscache_retrieval *op,
56174- atomic_t *stat_op_waits,
56175- atomic_t *stat_object_dead)
56176+ atomic_unchecked_t *stat_op_waits,
56177+ atomic_unchecked_t *stat_object_dead)
56178 {
56179 int ret;
56180
56181@@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
56182 goto check_if_dead;
56183
56184 _debug(">>> WT");
56185- fscache_stat(stat_op_waits);
56186+ fscache_stat_unchecked(stat_op_waits);
56187 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
56188 fscache_wait_bit_interruptible,
56189 TASK_INTERRUPTIBLE) != 0) {
56190@@ -344,14 +344,14 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
56191
56192 check_if_dead:
56193 if (op->op.state == FSCACHE_OP_ST_CANCELLED) {
56194- fscache_stat(stat_object_dead);
56195+ fscache_stat_unchecked(stat_object_dead);
56196 _leave(" = -ENOBUFS [cancelled]");
56197 return -ENOBUFS;
56198 }
56199 if (unlikely(fscache_object_is_dead(object))) {
56200 pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->op.state);
56201 fscache_cancel_op(&op->op, fscache_do_cancel_retrieval);
56202- fscache_stat(stat_object_dead);
56203+ fscache_stat_unchecked(stat_object_dead);
56204 return -ENOBUFS;
56205 }
56206 return 0;
56207@@ -378,7 +378,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
56208
56209 _enter("%p,%p,,,", cookie, page);
56210
56211- fscache_stat(&fscache_n_retrievals);
56212+ fscache_stat_unchecked(&fscache_n_retrievals);
56213
56214 if (hlist_empty(&cookie->backing_objects))
56215 goto nobufs;
56216@@ -417,7 +417,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
56217 goto nobufs_unlock_dec;
56218 spin_unlock(&cookie->lock);
56219
56220- fscache_stat(&fscache_n_retrieval_ops);
56221+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
56222
56223 /* pin the netfs read context in case we need to do the actual netfs
56224 * read because we've encountered a cache read failure */
56225@@ -447,15 +447,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
56226
56227 error:
56228 if (ret == -ENOMEM)
56229- fscache_stat(&fscache_n_retrievals_nomem);
56230+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
56231 else if (ret == -ERESTARTSYS)
56232- fscache_stat(&fscache_n_retrievals_intr);
56233+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
56234 else if (ret == -ENODATA)
56235- fscache_stat(&fscache_n_retrievals_nodata);
56236+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
56237 else if (ret < 0)
56238- fscache_stat(&fscache_n_retrievals_nobufs);
56239+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
56240 else
56241- fscache_stat(&fscache_n_retrievals_ok);
56242+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
56243
56244 fscache_put_retrieval(op);
56245 _leave(" = %d", ret);
56246@@ -467,7 +467,7 @@ nobufs_unlock:
56247 spin_unlock(&cookie->lock);
56248 kfree(op);
56249 nobufs:
56250- fscache_stat(&fscache_n_retrievals_nobufs);
56251+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
56252 _leave(" = -ENOBUFS");
56253 return -ENOBUFS;
56254 }
56255@@ -505,7 +505,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
56256
56257 _enter("%p,,%d,,,", cookie, *nr_pages);
56258
56259- fscache_stat(&fscache_n_retrievals);
56260+ fscache_stat_unchecked(&fscache_n_retrievals);
56261
56262 if (hlist_empty(&cookie->backing_objects))
56263 goto nobufs;
56264@@ -541,7 +541,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
56265 goto nobufs_unlock_dec;
56266 spin_unlock(&cookie->lock);
56267
56268- fscache_stat(&fscache_n_retrieval_ops);
56269+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
56270
56271 /* pin the netfs read context in case we need to do the actual netfs
56272 * read because we've encountered a cache read failure */
56273@@ -571,15 +571,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
56274
56275 error:
56276 if (ret == -ENOMEM)
56277- fscache_stat(&fscache_n_retrievals_nomem);
56278+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
56279 else if (ret == -ERESTARTSYS)
56280- fscache_stat(&fscache_n_retrievals_intr);
56281+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
56282 else if (ret == -ENODATA)
56283- fscache_stat(&fscache_n_retrievals_nodata);
56284+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
56285 else if (ret < 0)
56286- fscache_stat(&fscache_n_retrievals_nobufs);
56287+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
56288 else
56289- fscache_stat(&fscache_n_retrievals_ok);
56290+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
56291
56292 fscache_put_retrieval(op);
56293 _leave(" = %d", ret);
56294@@ -591,7 +591,7 @@ nobufs_unlock:
56295 spin_unlock(&cookie->lock);
56296 kfree(op);
56297 nobufs:
56298- fscache_stat(&fscache_n_retrievals_nobufs);
56299+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
56300 _leave(" = -ENOBUFS");
56301 return -ENOBUFS;
56302 }
56303@@ -615,7 +615,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
56304
56305 _enter("%p,%p,,,", cookie, page);
56306
56307- fscache_stat(&fscache_n_allocs);
56308+ fscache_stat_unchecked(&fscache_n_allocs);
56309
56310 if (hlist_empty(&cookie->backing_objects))
56311 goto nobufs;
56312@@ -647,7 +647,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
56313 goto nobufs_unlock;
56314 spin_unlock(&cookie->lock);
56315
56316- fscache_stat(&fscache_n_alloc_ops);
56317+ fscache_stat_unchecked(&fscache_n_alloc_ops);
56318
56319 ret = fscache_wait_for_retrieval_activation(
56320 object, op,
56321@@ -663,11 +663,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
56322
56323 error:
56324 if (ret == -ERESTARTSYS)
56325- fscache_stat(&fscache_n_allocs_intr);
56326+ fscache_stat_unchecked(&fscache_n_allocs_intr);
56327 else if (ret < 0)
56328- fscache_stat(&fscache_n_allocs_nobufs);
56329+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
56330 else
56331- fscache_stat(&fscache_n_allocs_ok);
56332+ fscache_stat_unchecked(&fscache_n_allocs_ok);
56333
56334 fscache_put_retrieval(op);
56335 _leave(" = %d", ret);
56336@@ -677,7 +677,7 @@ nobufs_unlock:
56337 spin_unlock(&cookie->lock);
56338 kfree(op);
56339 nobufs:
56340- fscache_stat(&fscache_n_allocs_nobufs);
56341+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
56342 _leave(" = -ENOBUFS");
56343 return -ENOBUFS;
56344 }
56345@@ -736,7 +736,7 @@ static void fscache_write_op(struct fscache_operation *_op)
56346
56347 spin_lock(&cookie->stores_lock);
56348
56349- fscache_stat(&fscache_n_store_calls);
56350+ fscache_stat_unchecked(&fscache_n_store_calls);
56351
56352 /* find a page to store */
56353 page = NULL;
56354@@ -747,7 +747,7 @@ static void fscache_write_op(struct fscache_operation *_op)
56355 page = results[0];
56356 _debug("gang %d [%lx]", n, page->index);
56357 if (page->index > op->store_limit) {
56358- fscache_stat(&fscache_n_store_pages_over_limit);
56359+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
56360 goto superseded;
56361 }
56362
56363@@ -759,7 +759,7 @@ static void fscache_write_op(struct fscache_operation *_op)
56364 spin_unlock(&cookie->stores_lock);
56365 spin_unlock(&object->lock);
56366
56367- fscache_stat(&fscache_n_store_pages);
56368+ fscache_stat_unchecked(&fscache_n_store_pages);
56369 fscache_stat(&fscache_n_cop_write_page);
56370 ret = object->cache->ops->write_page(op, page);
56371 fscache_stat_d(&fscache_n_cop_write_page);
56372@@ -860,7 +860,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
56373 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
56374 ASSERT(PageFsCache(page));
56375
56376- fscache_stat(&fscache_n_stores);
56377+ fscache_stat_unchecked(&fscache_n_stores);
56378
56379 if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
56380 _leave(" = -ENOBUFS [invalidating]");
56381@@ -916,7 +916,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
56382 spin_unlock(&cookie->stores_lock);
56383 spin_unlock(&object->lock);
56384
56385- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
56386+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
56387 op->store_limit = object->store_limit;
56388
56389 if (fscache_submit_op(object, &op->op) < 0)
56390@@ -924,8 +924,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
56391
56392 spin_unlock(&cookie->lock);
56393 radix_tree_preload_end();
56394- fscache_stat(&fscache_n_store_ops);
56395- fscache_stat(&fscache_n_stores_ok);
56396+ fscache_stat_unchecked(&fscache_n_store_ops);
56397+ fscache_stat_unchecked(&fscache_n_stores_ok);
56398
56399 /* the work queue now carries its own ref on the object */
56400 fscache_put_operation(&op->op);
56401@@ -933,14 +933,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
56402 return 0;
56403
56404 already_queued:
56405- fscache_stat(&fscache_n_stores_again);
56406+ fscache_stat_unchecked(&fscache_n_stores_again);
56407 already_pending:
56408 spin_unlock(&cookie->stores_lock);
56409 spin_unlock(&object->lock);
56410 spin_unlock(&cookie->lock);
56411 radix_tree_preload_end();
56412 kfree(op);
56413- fscache_stat(&fscache_n_stores_ok);
56414+ fscache_stat_unchecked(&fscache_n_stores_ok);
56415 _leave(" = 0");
56416 return 0;
56417
56418@@ -959,14 +959,14 @@ nobufs:
56419 spin_unlock(&cookie->lock);
56420 radix_tree_preload_end();
56421 kfree(op);
56422- fscache_stat(&fscache_n_stores_nobufs);
56423+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
56424 _leave(" = -ENOBUFS");
56425 return -ENOBUFS;
56426
56427 nomem_free:
56428 kfree(op);
56429 nomem:
56430- fscache_stat(&fscache_n_stores_oom);
56431+ fscache_stat_unchecked(&fscache_n_stores_oom);
56432 _leave(" = -ENOMEM");
56433 return -ENOMEM;
56434 }
56435@@ -984,7 +984,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
56436 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
56437 ASSERTCMP(page, !=, NULL);
56438
56439- fscache_stat(&fscache_n_uncaches);
56440+ fscache_stat_unchecked(&fscache_n_uncaches);
56441
56442 /* cache withdrawal may beat us to it */
56443 if (!PageFsCache(page))
56444@@ -1035,7 +1035,7 @@ void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
56445 struct fscache_cookie *cookie = op->op.object->cookie;
56446
56447 #ifdef CONFIG_FSCACHE_STATS
56448- atomic_inc(&fscache_n_marks);
56449+ atomic_inc_unchecked(&fscache_n_marks);
56450 #endif
56451
56452 _debug("- mark %p{%lx}", page, page->index);
56453diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
56454index 40d13c7..ddf52b9 100644
56455--- a/fs/fscache/stats.c
56456+++ b/fs/fscache/stats.c
56457@@ -18,99 +18,99 @@
56458 /*
56459 * operation counters
56460 */
56461-atomic_t fscache_n_op_pend;
56462-atomic_t fscache_n_op_run;
56463-atomic_t fscache_n_op_enqueue;
56464-atomic_t fscache_n_op_requeue;
56465-atomic_t fscache_n_op_deferred_release;
56466-atomic_t fscache_n_op_release;
56467-atomic_t fscache_n_op_gc;
56468-atomic_t fscache_n_op_cancelled;
56469-atomic_t fscache_n_op_rejected;
56470+atomic_unchecked_t fscache_n_op_pend;
56471+atomic_unchecked_t fscache_n_op_run;
56472+atomic_unchecked_t fscache_n_op_enqueue;
56473+atomic_unchecked_t fscache_n_op_requeue;
56474+atomic_unchecked_t fscache_n_op_deferred_release;
56475+atomic_unchecked_t fscache_n_op_release;
56476+atomic_unchecked_t fscache_n_op_gc;
56477+atomic_unchecked_t fscache_n_op_cancelled;
56478+atomic_unchecked_t fscache_n_op_rejected;
56479
56480-atomic_t fscache_n_attr_changed;
56481-atomic_t fscache_n_attr_changed_ok;
56482-atomic_t fscache_n_attr_changed_nobufs;
56483-atomic_t fscache_n_attr_changed_nomem;
56484-atomic_t fscache_n_attr_changed_calls;
56485+atomic_unchecked_t fscache_n_attr_changed;
56486+atomic_unchecked_t fscache_n_attr_changed_ok;
56487+atomic_unchecked_t fscache_n_attr_changed_nobufs;
56488+atomic_unchecked_t fscache_n_attr_changed_nomem;
56489+atomic_unchecked_t fscache_n_attr_changed_calls;
56490
56491-atomic_t fscache_n_allocs;
56492-atomic_t fscache_n_allocs_ok;
56493-atomic_t fscache_n_allocs_wait;
56494-atomic_t fscache_n_allocs_nobufs;
56495-atomic_t fscache_n_allocs_intr;
56496-atomic_t fscache_n_allocs_object_dead;
56497-atomic_t fscache_n_alloc_ops;
56498-atomic_t fscache_n_alloc_op_waits;
56499+atomic_unchecked_t fscache_n_allocs;
56500+atomic_unchecked_t fscache_n_allocs_ok;
56501+atomic_unchecked_t fscache_n_allocs_wait;
56502+atomic_unchecked_t fscache_n_allocs_nobufs;
56503+atomic_unchecked_t fscache_n_allocs_intr;
56504+atomic_unchecked_t fscache_n_allocs_object_dead;
56505+atomic_unchecked_t fscache_n_alloc_ops;
56506+atomic_unchecked_t fscache_n_alloc_op_waits;
56507
56508-atomic_t fscache_n_retrievals;
56509-atomic_t fscache_n_retrievals_ok;
56510-atomic_t fscache_n_retrievals_wait;
56511-atomic_t fscache_n_retrievals_nodata;
56512-atomic_t fscache_n_retrievals_nobufs;
56513-atomic_t fscache_n_retrievals_intr;
56514-atomic_t fscache_n_retrievals_nomem;
56515-atomic_t fscache_n_retrievals_object_dead;
56516-atomic_t fscache_n_retrieval_ops;
56517-atomic_t fscache_n_retrieval_op_waits;
56518+atomic_unchecked_t fscache_n_retrievals;
56519+atomic_unchecked_t fscache_n_retrievals_ok;
56520+atomic_unchecked_t fscache_n_retrievals_wait;
56521+atomic_unchecked_t fscache_n_retrievals_nodata;
56522+atomic_unchecked_t fscache_n_retrievals_nobufs;
56523+atomic_unchecked_t fscache_n_retrievals_intr;
56524+atomic_unchecked_t fscache_n_retrievals_nomem;
56525+atomic_unchecked_t fscache_n_retrievals_object_dead;
56526+atomic_unchecked_t fscache_n_retrieval_ops;
56527+atomic_unchecked_t fscache_n_retrieval_op_waits;
56528
56529-atomic_t fscache_n_stores;
56530-atomic_t fscache_n_stores_ok;
56531-atomic_t fscache_n_stores_again;
56532-atomic_t fscache_n_stores_nobufs;
56533-atomic_t fscache_n_stores_oom;
56534-atomic_t fscache_n_store_ops;
56535-atomic_t fscache_n_store_calls;
56536-atomic_t fscache_n_store_pages;
56537-atomic_t fscache_n_store_radix_deletes;
56538-atomic_t fscache_n_store_pages_over_limit;
56539+atomic_unchecked_t fscache_n_stores;
56540+atomic_unchecked_t fscache_n_stores_ok;
56541+atomic_unchecked_t fscache_n_stores_again;
56542+atomic_unchecked_t fscache_n_stores_nobufs;
56543+atomic_unchecked_t fscache_n_stores_oom;
56544+atomic_unchecked_t fscache_n_store_ops;
56545+atomic_unchecked_t fscache_n_store_calls;
56546+atomic_unchecked_t fscache_n_store_pages;
56547+atomic_unchecked_t fscache_n_store_radix_deletes;
56548+atomic_unchecked_t fscache_n_store_pages_over_limit;
56549
56550-atomic_t fscache_n_store_vmscan_not_storing;
56551-atomic_t fscache_n_store_vmscan_gone;
56552-atomic_t fscache_n_store_vmscan_busy;
56553-atomic_t fscache_n_store_vmscan_cancelled;
56554-atomic_t fscache_n_store_vmscan_wait;
56555+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
56556+atomic_unchecked_t fscache_n_store_vmscan_gone;
56557+atomic_unchecked_t fscache_n_store_vmscan_busy;
56558+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
56559+atomic_unchecked_t fscache_n_store_vmscan_wait;
56560
56561-atomic_t fscache_n_marks;
56562-atomic_t fscache_n_uncaches;
56563+atomic_unchecked_t fscache_n_marks;
56564+atomic_unchecked_t fscache_n_uncaches;
56565
56566-atomic_t fscache_n_acquires;
56567-atomic_t fscache_n_acquires_null;
56568-atomic_t fscache_n_acquires_no_cache;
56569-atomic_t fscache_n_acquires_ok;
56570-atomic_t fscache_n_acquires_nobufs;
56571-atomic_t fscache_n_acquires_oom;
56572+atomic_unchecked_t fscache_n_acquires;
56573+atomic_unchecked_t fscache_n_acquires_null;
56574+atomic_unchecked_t fscache_n_acquires_no_cache;
56575+atomic_unchecked_t fscache_n_acquires_ok;
56576+atomic_unchecked_t fscache_n_acquires_nobufs;
56577+atomic_unchecked_t fscache_n_acquires_oom;
56578
56579-atomic_t fscache_n_invalidates;
56580-atomic_t fscache_n_invalidates_run;
56581+atomic_unchecked_t fscache_n_invalidates;
56582+atomic_unchecked_t fscache_n_invalidates_run;
56583
56584-atomic_t fscache_n_updates;
56585-atomic_t fscache_n_updates_null;
56586-atomic_t fscache_n_updates_run;
56587+atomic_unchecked_t fscache_n_updates;
56588+atomic_unchecked_t fscache_n_updates_null;
56589+atomic_unchecked_t fscache_n_updates_run;
56590
56591-atomic_t fscache_n_relinquishes;
56592-atomic_t fscache_n_relinquishes_null;
56593-atomic_t fscache_n_relinquishes_waitcrt;
56594-atomic_t fscache_n_relinquishes_retire;
56595+atomic_unchecked_t fscache_n_relinquishes;
56596+atomic_unchecked_t fscache_n_relinquishes_null;
56597+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
56598+atomic_unchecked_t fscache_n_relinquishes_retire;
56599
56600-atomic_t fscache_n_cookie_index;
56601-atomic_t fscache_n_cookie_data;
56602-atomic_t fscache_n_cookie_special;
56603+atomic_unchecked_t fscache_n_cookie_index;
56604+atomic_unchecked_t fscache_n_cookie_data;
56605+atomic_unchecked_t fscache_n_cookie_special;
56606
56607-atomic_t fscache_n_object_alloc;
56608-atomic_t fscache_n_object_no_alloc;
56609-atomic_t fscache_n_object_lookups;
56610-atomic_t fscache_n_object_lookups_negative;
56611-atomic_t fscache_n_object_lookups_positive;
56612-atomic_t fscache_n_object_lookups_timed_out;
56613-atomic_t fscache_n_object_created;
56614-atomic_t fscache_n_object_avail;
56615-atomic_t fscache_n_object_dead;
56616+atomic_unchecked_t fscache_n_object_alloc;
56617+atomic_unchecked_t fscache_n_object_no_alloc;
56618+atomic_unchecked_t fscache_n_object_lookups;
56619+atomic_unchecked_t fscache_n_object_lookups_negative;
56620+atomic_unchecked_t fscache_n_object_lookups_positive;
56621+atomic_unchecked_t fscache_n_object_lookups_timed_out;
56622+atomic_unchecked_t fscache_n_object_created;
56623+atomic_unchecked_t fscache_n_object_avail;
56624+atomic_unchecked_t fscache_n_object_dead;
56625
56626-atomic_t fscache_n_checkaux_none;
56627-atomic_t fscache_n_checkaux_okay;
56628-atomic_t fscache_n_checkaux_update;
56629-atomic_t fscache_n_checkaux_obsolete;
56630+atomic_unchecked_t fscache_n_checkaux_none;
56631+atomic_unchecked_t fscache_n_checkaux_okay;
56632+atomic_unchecked_t fscache_n_checkaux_update;
56633+atomic_unchecked_t fscache_n_checkaux_obsolete;
56634
56635 atomic_t fscache_n_cop_alloc_object;
56636 atomic_t fscache_n_cop_lookup_object;
56637@@ -138,118 +138,118 @@ static int fscache_stats_show(struct seq_file *m, void *v)
56638 seq_puts(m, "FS-Cache statistics\n");
56639
56640 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
56641- atomic_read(&fscache_n_cookie_index),
56642- atomic_read(&fscache_n_cookie_data),
56643- atomic_read(&fscache_n_cookie_special));
56644+ atomic_read_unchecked(&fscache_n_cookie_index),
56645+ atomic_read_unchecked(&fscache_n_cookie_data),
56646+ atomic_read_unchecked(&fscache_n_cookie_special));
56647
56648 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
56649- atomic_read(&fscache_n_object_alloc),
56650- atomic_read(&fscache_n_object_no_alloc),
56651- atomic_read(&fscache_n_object_avail),
56652- atomic_read(&fscache_n_object_dead));
56653+ atomic_read_unchecked(&fscache_n_object_alloc),
56654+ atomic_read_unchecked(&fscache_n_object_no_alloc),
56655+ atomic_read_unchecked(&fscache_n_object_avail),
56656+ atomic_read_unchecked(&fscache_n_object_dead));
56657 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
56658- atomic_read(&fscache_n_checkaux_none),
56659- atomic_read(&fscache_n_checkaux_okay),
56660- atomic_read(&fscache_n_checkaux_update),
56661- atomic_read(&fscache_n_checkaux_obsolete));
56662+ atomic_read_unchecked(&fscache_n_checkaux_none),
56663+ atomic_read_unchecked(&fscache_n_checkaux_okay),
56664+ atomic_read_unchecked(&fscache_n_checkaux_update),
56665+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
56666
56667 seq_printf(m, "Pages : mrk=%u unc=%u\n",
56668- atomic_read(&fscache_n_marks),
56669- atomic_read(&fscache_n_uncaches));
56670+ atomic_read_unchecked(&fscache_n_marks),
56671+ atomic_read_unchecked(&fscache_n_uncaches));
56672
56673 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
56674 " oom=%u\n",
56675- atomic_read(&fscache_n_acquires),
56676- atomic_read(&fscache_n_acquires_null),
56677- atomic_read(&fscache_n_acquires_no_cache),
56678- atomic_read(&fscache_n_acquires_ok),
56679- atomic_read(&fscache_n_acquires_nobufs),
56680- atomic_read(&fscache_n_acquires_oom));
56681+ atomic_read_unchecked(&fscache_n_acquires),
56682+ atomic_read_unchecked(&fscache_n_acquires_null),
56683+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
56684+ atomic_read_unchecked(&fscache_n_acquires_ok),
56685+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
56686+ atomic_read_unchecked(&fscache_n_acquires_oom));
56687
56688 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
56689- atomic_read(&fscache_n_object_lookups),
56690- atomic_read(&fscache_n_object_lookups_negative),
56691- atomic_read(&fscache_n_object_lookups_positive),
56692- atomic_read(&fscache_n_object_created),
56693- atomic_read(&fscache_n_object_lookups_timed_out));
56694+ atomic_read_unchecked(&fscache_n_object_lookups),
56695+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
56696+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
56697+ atomic_read_unchecked(&fscache_n_object_created),
56698+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
56699
56700 seq_printf(m, "Invals : n=%u run=%u\n",
56701- atomic_read(&fscache_n_invalidates),
56702- atomic_read(&fscache_n_invalidates_run));
56703+ atomic_read_unchecked(&fscache_n_invalidates),
56704+ atomic_read_unchecked(&fscache_n_invalidates_run));
56705
56706 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
56707- atomic_read(&fscache_n_updates),
56708- atomic_read(&fscache_n_updates_null),
56709- atomic_read(&fscache_n_updates_run));
56710+ atomic_read_unchecked(&fscache_n_updates),
56711+ atomic_read_unchecked(&fscache_n_updates_null),
56712+ atomic_read_unchecked(&fscache_n_updates_run));
56713
56714 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
56715- atomic_read(&fscache_n_relinquishes),
56716- atomic_read(&fscache_n_relinquishes_null),
56717- atomic_read(&fscache_n_relinquishes_waitcrt),
56718- atomic_read(&fscache_n_relinquishes_retire));
56719+ atomic_read_unchecked(&fscache_n_relinquishes),
56720+ atomic_read_unchecked(&fscache_n_relinquishes_null),
56721+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
56722+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
56723
56724 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
56725- atomic_read(&fscache_n_attr_changed),
56726- atomic_read(&fscache_n_attr_changed_ok),
56727- atomic_read(&fscache_n_attr_changed_nobufs),
56728- atomic_read(&fscache_n_attr_changed_nomem),
56729- atomic_read(&fscache_n_attr_changed_calls));
56730+ atomic_read_unchecked(&fscache_n_attr_changed),
56731+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
56732+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
56733+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
56734+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
56735
56736 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
56737- atomic_read(&fscache_n_allocs),
56738- atomic_read(&fscache_n_allocs_ok),
56739- atomic_read(&fscache_n_allocs_wait),
56740- atomic_read(&fscache_n_allocs_nobufs),
56741- atomic_read(&fscache_n_allocs_intr));
56742+ atomic_read_unchecked(&fscache_n_allocs),
56743+ atomic_read_unchecked(&fscache_n_allocs_ok),
56744+ atomic_read_unchecked(&fscache_n_allocs_wait),
56745+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
56746+ atomic_read_unchecked(&fscache_n_allocs_intr));
56747 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
56748- atomic_read(&fscache_n_alloc_ops),
56749- atomic_read(&fscache_n_alloc_op_waits),
56750- atomic_read(&fscache_n_allocs_object_dead));
56751+ atomic_read_unchecked(&fscache_n_alloc_ops),
56752+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
56753+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
56754
56755 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
56756 " int=%u oom=%u\n",
56757- atomic_read(&fscache_n_retrievals),
56758- atomic_read(&fscache_n_retrievals_ok),
56759- atomic_read(&fscache_n_retrievals_wait),
56760- atomic_read(&fscache_n_retrievals_nodata),
56761- atomic_read(&fscache_n_retrievals_nobufs),
56762- atomic_read(&fscache_n_retrievals_intr),
56763- atomic_read(&fscache_n_retrievals_nomem));
56764+ atomic_read_unchecked(&fscache_n_retrievals),
56765+ atomic_read_unchecked(&fscache_n_retrievals_ok),
56766+ atomic_read_unchecked(&fscache_n_retrievals_wait),
56767+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
56768+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
56769+ atomic_read_unchecked(&fscache_n_retrievals_intr),
56770+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
56771 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
56772- atomic_read(&fscache_n_retrieval_ops),
56773- atomic_read(&fscache_n_retrieval_op_waits),
56774- atomic_read(&fscache_n_retrievals_object_dead));
56775+ atomic_read_unchecked(&fscache_n_retrieval_ops),
56776+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
56777+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
56778
56779 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
56780- atomic_read(&fscache_n_stores),
56781- atomic_read(&fscache_n_stores_ok),
56782- atomic_read(&fscache_n_stores_again),
56783- atomic_read(&fscache_n_stores_nobufs),
56784- atomic_read(&fscache_n_stores_oom));
56785+ atomic_read_unchecked(&fscache_n_stores),
56786+ atomic_read_unchecked(&fscache_n_stores_ok),
56787+ atomic_read_unchecked(&fscache_n_stores_again),
56788+ atomic_read_unchecked(&fscache_n_stores_nobufs),
56789+ atomic_read_unchecked(&fscache_n_stores_oom));
56790 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
56791- atomic_read(&fscache_n_store_ops),
56792- atomic_read(&fscache_n_store_calls),
56793- atomic_read(&fscache_n_store_pages),
56794- atomic_read(&fscache_n_store_radix_deletes),
56795- atomic_read(&fscache_n_store_pages_over_limit));
56796+ atomic_read_unchecked(&fscache_n_store_ops),
56797+ atomic_read_unchecked(&fscache_n_store_calls),
56798+ atomic_read_unchecked(&fscache_n_store_pages),
56799+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
56800+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
56801
56802 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
56803- atomic_read(&fscache_n_store_vmscan_not_storing),
56804- atomic_read(&fscache_n_store_vmscan_gone),
56805- atomic_read(&fscache_n_store_vmscan_busy),
56806- atomic_read(&fscache_n_store_vmscan_cancelled),
56807- atomic_read(&fscache_n_store_vmscan_wait));
56808+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
56809+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
56810+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
56811+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled),
56812+ atomic_read_unchecked(&fscache_n_store_vmscan_wait));
56813
56814 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
56815- atomic_read(&fscache_n_op_pend),
56816- atomic_read(&fscache_n_op_run),
56817- atomic_read(&fscache_n_op_enqueue),
56818- atomic_read(&fscache_n_op_cancelled),
56819- atomic_read(&fscache_n_op_rejected));
56820+ atomic_read_unchecked(&fscache_n_op_pend),
56821+ atomic_read_unchecked(&fscache_n_op_run),
56822+ atomic_read_unchecked(&fscache_n_op_enqueue),
56823+ atomic_read_unchecked(&fscache_n_op_cancelled),
56824+ atomic_read_unchecked(&fscache_n_op_rejected));
56825 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
56826- atomic_read(&fscache_n_op_deferred_release),
56827- atomic_read(&fscache_n_op_release),
56828- atomic_read(&fscache_n_op_gc));
56829+ atomic_read_unchecked(&fscache_n_op_deferred_release),
56830+ atomic_read_unchecked(&fscache_n_op_release),
56831+ atomic_read_unchecked(&fscache_n_op_gc));
56832
56833 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
56834 atomic_read(&fscache_n_cop_alloc_object),
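The fscache_n_cop_* counters visible at the tail of this hunk are deliberately left as checked atomic_t: unlike the event statistics, they are incremented before each cache-operation callback and decremented by fscache_stat_d() when it returns, so they behave like in-flight reference counts where the REFCOUNT overflow check stays meaningful. The balanced pattern, taken from the object.c hunk above:

    fscache_stat(&fscache_n_cop_lookup_object);     /* checked inc */
    ret = object->cache->ops->lookup_object(object);
    fscache_stat_d(&fscache_n_cop_lookup_object);   /* matching dec */
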
56835diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
56836index aef34b1..59bfd7b 100644
56837--- a/fs/fuse/cuse.c
56838+++ b/fs/fuse/cuse.c
56839@@ -600,10 +600,12 @@ static int __init cuse_init(void)
56840 INIT_LIST_HEAD(&cuse_conntbl[i]);
56841
56842 /* inherit and extend fuse_dev_operations */
56843- cuse_channel_fops = fuse_dev_operations;
56844- cuse_channel_fops.owner = THIS_MODULE;
56845- cuse_channel_fops.open = cuse_channel_open;
56846- cuse_channel_fops.release = cuse_channel_release;
56847+ pax_open_kernel();
56848+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
56849+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
56850+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
56851+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
56852+ pax_close_kernel();
56853
56854 cuse_class = class_create(THIS_MODULE, "cuse");
56855 if (IS_ERR(cuse_class))
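This cuse.c hunk is a consequence of the constification hardening elsewhere in the patch: struct file_operations instances end up in read-only memory, so patching a copied ops table with ordinary assignments would fault. pax_open_kernel()/pax_close_kernel() bracket a deliberate write window. A rough, assumption-labelled x86 sketch of what such a window involves (the real helpers are arch-specific and defined elsewhere in this patch; the names below are illustrative):

    /* Sketch only: drop CR0.WP so the kernel may write read-only pages.
     * The task must not be preempted or migrated while the window is open. */
    static inline void open_kernel_write_window(void)
    {
            preempt_disable();
            barrier();
            write_cr0(read_cr0() & ~X86_CR0_WP);
    }

    static inline void close_kernel_write_window(void)
    {
            write_cr0(read_cr0() | X86_CR0_WP);
            barrier();
            preempt_enable();
    }
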
56856diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
56857index 1d55f94..088da65 100644
56858--- a/fs/fuse/dev.c
56859+++ b/fs/fuse/dev.c
56860@@ -1339,7 +1339,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
56861 ret = 0;
56862 pipe_lock(pipe);
56863
56864- if (!pipe->readers) {
56865+ if (!atomic_read(&pipe->readers)) {
56866 send_sig(SIGPIPE, current, 0);
56867 if (!ret)
56868 ret = -EPIPE;
56869@@ -1364,7 +1364,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
56870 page_nr++;
56871 ret += buf->len;
56872
56873- if (pipe->files)
56874+ if (atomic_read(&pipe->files))
56875 do_wakeup = 1;
56876 }
56877
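The two fuse hunks above only track a structure change made elsewhere in this patch: the reader/writer/file counts in struct pipe_inode_info become atomic_t (plausibly so they can be examined safely from contexts that do not hold the pipe mutex), and every user, fuse's splice path included, switches to atomic_read(). Assumed shape of that change:

    /* Sketch of the assumed pipe counter change (made elsewhere in the patch): */
    struct pipe_counters_sketch {
            atomic_t readers;   /* was: unsigned int readers */
            atomic_t writers;   /* was: unsigned int writers */
            atomic_t files;     /* was: unsigned int files   */
    };
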
56878diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
56879index 5b12746..b481b03 100644
56880--- a/fs/fuse/dir.c
56881+++ b/fs/fuse/dir.c
56882@@ -1437,7 +1437,7 @@ static char *read_link(struct dentry *dentry)
56883 return link;
56884 }
56885
56886-static void free_link(char *link)
56887+static void free_link(const char *link)
56888 {
56889 if (!IS_ERR(link))
56890 free_page((unsigned long) link);
56891diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
56892index 62b484e..0f9a140 100644
56893--- a/fs/gfs2/inode.c
56894+++ b/fs/gfs2/inode.c
56895@@ -1441,7 +1441,7 @@ out:
56896
56897 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
56898 {
56899- char *s = nd_get_link(nd);
56900+ const char *s = nd_get_link(nd);
56901 if (!IS_ERR(s))
56902 kfree(s);
56903 }
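Both the fuse and gfs2 hunks here are const-propagation fixes rather than behavioural changes: with ->follow_link() const-ified elsewhere in the patch, nd_get_link() hands back a const char *, and the teardown helpers are widened to accept it. This costs nothing because the freeing primitive is already const-clean:

    /* Existing kernel signature: the qualifier is dropped inside kfree(). */
    void kfree(const void *objp);
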
56904diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
56905index a3f868a..bb308ae 100644
56906--- a/fs/hugetlbfs/inode.c
56907+++ b/fs/hugetlbfs/inode.c
56908@@ -152,6 +152,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
56909 struct mm_struct *mm = current->mm;
56910 struct vm_area_struct *vma;
56911 struct hstate *h = hstate_file(file);
56912+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
56913 struct vm_unmapped_area_info info;
56914
56915 if (len & ~huge_page_mask(h))
56916@@ -165,17 +166,26 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
56917 return addr;
56918 }
56919
56920+#ifdef CONFIG_PAX_RANDMMAP
56921+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
56922+#endif
56923+
56924 if (addr) {
56925 addr = ALIGN(addr, huge_page_size(h));
56926 vma = find_vma(mm, addr);
56927- if (TASK_SIZE - len >= addr &&
56928- (!vma || addr + len <= vma->vm_start))
56929+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
56930 return addr;
56931 }
56932
56933 info.flags = 0;
56934 info.length = len;
56935 info.low_limit = TASK_UNMAPPED_BASE;
56936+
56937+#ifdef CONFIG_PAX_RANDMMAP
56938+ if (mm->pax_flags & MF_PAX_RANDMMAP)
56939+ info.low_limit += mm->delta_mmap;
56940+#endif
56941+
56942 info.high_limit = TASK_SIZE;
56943 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
56944 info.align_offset = 0;
56945@@ -898,7 +908,7 @@ static struct file_system_type hugetlbfs_fs_type = {
56946 };
56947 MODULE_ALIAS_FS("hugetlbfs");
56948
56949-static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
56950+struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
56951
56952 static int can_do_hugetlb_shm(void)
56953 {
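The hugetlbfs hunk applies the same RANDMMAP treatment the patch gives ordinary mmap: the caller's address hint is ignored when randomization is active, the search floor is shifted by the per-mm delta_mmap, and a candidate range must clear a configurable gap below any stack VMA instead of merely not overlapping the next mapping. A hedged sketch of the gap test's shape (the real check_heap_stack_gap() and gr_rand_threadstack_offset() are defined elsewhere in this patch):

    /* Sketch: reject addr..addr+len if it would land closer to a
     * grows-down (stack) VMA than the reserved gap allows. */
    static bool heap_stack_gap_ok(const struct vm_area_struct *vma,
                                  unsigned long addr, unsigned long len,
                                  unsigned long gap)
    {
            if (!vma)
                    return true;
            if (addr + len > vma->vm_start)
                    return false;                   /* plain overlap */
            if (vma->vm_flags & VM_GROWSDOWN)
                    return addr + len + gap <= vma->vm_start;
            return true;
    }
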
56954diff --git a/fs/inode.c b/fs/inode.c
56955index 00d5fc3..98ce7d7 100644
56956--- a/fs/inode.c
56957+++ b/fs/inode.c
56958@@ -878,8 +878,8 @@ unsigned int get_next_ino(void)
56959
56960 #ifdef CONFIG_SMP
56961 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
56962- static atomic_t shared_last_ino;
56963- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
56964+ static atomic_unchecked_t shared_last_ino;
56965+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
56966
56967 res = next - LAST_INO_BATCH;
56968 }
56969diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
56970index 4a6cf28..d3a29d3 100644
56971--- a/fs/jffs2/erase.c
56972+++ b/fs/jffs2/erase.c
56973@@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
56974 struct jffs2_unknown_node marker = {
56975 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
56976 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
56977- .totlen = cpu_to_je32(c->cleanmarker_size)
56978+ .totlen = cpu_to_je32(c->cleanmarker_size),
56979+ .hdr_crc = cpu_to_je32(0)
56980 };
56981
56982 jffs2_prealloc_raw_node_refs(c, jeb, 1);
56983diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
56984index a6597d6..41b30ec 100644
56985--- a/fs/jffs2/wbuf.c
56986+++ b/fs/jffs2/wbuf.c
56987@@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
56988 {
56989 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
56990 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
56991- .totlen = constant_cpu_to_je32(8)
56992+ .totlen = constant_cpu_to_je32(8),
56993+ .hdr_crc = constant_cpu_to_je32(0)
56994 };
56995
56996 /*
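The jffs2 hunks add an explicit .hdr_crc to two cleanmarker templates. Under C99 initialization rules this is a no-op, since members omitted from a designated initializer are zero-initialized anyway; spelling the field out documents that a zero header CRC is intentional (and, plausibly, keeps initialization-checking tooling used by this patch quiet). Self-contained illustration of the rule:

    /* C99 6.7.9: members not named in a designated initializer are
     * zeroed, so adding .hdr_crc = 0 changes nothing at runtime. */
    struct node_hdr { unsigned magic, nodetype, totlen, hdr_crc; };
    static const struct node_hdr marker = {
            .magic   = 0x1985,
            .totlen  = 8,
            .hdr_crc = 0,       /* now explicit */
    };
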
56997diff --git a/fs/jfs/super.c b/fs/jfs/super.c
56998index 788e0a9..8433098 100644
56999--- a/fs/jfs/super.c
57000+++ b/fs/jfs/super.c
57001@@ -878,7 +878,7 @@ static int __init init_jfs_fs(void)
57002
57003 jfs_inode_cachep =
57004 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
57005- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
57006+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
57007 init_once);
57008 if (jfs_inode_cachep == NULL)
57009 return -ENOMEM;
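SLAB_USERCOPY is a whitelist flag introduced by this patch's PAX_USERCOPY feature: copy_to_user()/copy_from_user() spanning slab objects are refused unless the object's cache carries the flag, so caches whose objects legitimately cross the user boundary must opt in. jfs inodes qualify presumably because their inline data (e.g. short symlink bodies) is copied straight out to user space. Opting in looks like this (cache name and struct below are illustrative):

    cachep = kmem_cache_create("example_ip", sizeof(struct example_inode), 0,
                               SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD |
                               SLAB_USERCOPY, init_once);
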
57010diff --git a/fs/libfs.c b/fs/libfs.c
57011index 916da8c..1588998 100644
57012--- a/fs/libfs.c
57013+++ b/fs/libfs.c
57014@@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
57015
57016 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
57017 struct dentry *next;
57018+ char d_name[sizeof(next->d_iname)];
57019+ const unsigned char *name;
57020+
57021 next = list_entry(p, struct dentry, d_u.d_child);
57022 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
57023 if (!simple_positive(next)) {
57024@@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
57025
57026 spin_unlock(&next->d_lock);
57027 spin_unlock(&dentry->d_lock);
57028- if (filldir(dirent, next->d_name.name,
57029+ name = next->d_name.name;
57030+ if (name == next->d_iname) {
57031+ memcpy(d_name, name, next->d_name.len);
57032+ name = d_name;
57033+ }
57034+ if (filldir(dirent, name,
57035 next->d_name.len, filp->f_pos,
57036 next->d_inode->i_ino,
57037 dt_type(next->d_inode)) < 0)
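The libfs hunk closes a real use-after-free window rather than hardening for its own sake: dcache_readdir() must drop both d_lock spinlocks before filldir(), which may fault on the user buffer, but for short names d_name.name points into d_iname, storage embedded in the dentry itself that a concurrent rename can overwrite once the locks are gone. Snapshotting such names into a stack buffer first closes the window; long names live in a separate, stable allocation and need no copy. The invariant the copy relies on:

    /* Names stored inline satisfy d_name.name == next->d_iname and
     * d_name.len < sizeof(next->d_iname), so the memcpy above into
     * char d_name[sizeof(next->d_iname)] can never overrun. */
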
57038diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
57039index acd3947..1f896e2 100644
57040--- a/fs/lockd/clntproc.c
57041+++ b/fs/lockd/clntproc.c
57042@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
57043 /*
57044 * Cookie counter for NLM requests
57045 */
57046-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
57047+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
57048
57049 void nlmclnt_next_cookie(struct nlm_cookie *c)
57050 {
57051- u32 cookie = atomic_inc_return(&nlm_cookie);
57052+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
57053
57054 memcpy(c->data, &cookie, 4);
57055 c->len=4;
57056diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
57057index a2aa97d..10d6c41 100644
57058--- a/fs/lockd/svc.c
57059+++ b/fs/lockd/svc.c
57060@@ -305,7 +305,7 @@ static int lockd_start_svc(struct svc_serv *serv)
57061 svc_sock_update_bufs(serv);
57062 serv->sv_maxconn = nlm_max_connections;
57063
57064- nlmsvc_task = kthread_run(lockd, nlmsvc_rqst, serv->sv_name);
57065+ nlmsvc_task = kthread_run(lockd, nlmsvc_rqst, "%s", serv->sv_name);
57066 if (IS_ERR(nlmsvc_task)) {
57067 error = PTR_ERR(nlmsvc_task);
57068 printk(KERN_WARNING
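The lockd change is classic format-string hygiene: kthread_run()'s third parameter is a printf-style name format, so a service name containing '%' would be interpreted rather than printed. Routing the data through a constant "%s" makes it inert:

    /* Wrong: variable data used as the format string.
     *   task = kthread_run(lockd, nlmsvc_rqst, serv->sv_name);
     * Right: constant format, data passed as an argument. */
    task = kthread_run(lockd, nlmsvc_rqst, "%s", serv->sv_name);
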
57069diff --git a/fs/locks.c b/fs/locks.c
57070index cb424a4..850e4dd 100644
57071--- a/fs/locks.c
57072+++ b/fs/locks.c
57073@@ -2064,16 +2064,16 @@ void locks_remove_flock(struct file *filp)
57074 return;
57075
57076 if (filp->f_op && filp->f_op->flock) {
57077- struct file_lock fl = {
57078+ struct file_lock flock = {
57079 .fl_pid = current->tgid,
57080 .fl_file = filp,
57081 .fl_flags = FL_FLOCK,
57082 .fl_type = F_UNLCK,
57083 .fl_end = OFFSET_MAX,
57084 };
57085- filp->f_op->flock(filp, F_SETLKW, &fl);
57086- if (fl.fl_ops && fl.fl_ops->fl_release_private)
57087- fl.fl_ops->fl_release_private(&fl);
57088+ filp->f_op->flock(filp, F_SETLKW, &flock);
57089+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
57090+ flock.fl_ops->fl_release_private(&flock);
57091 }
57092
57093 lock_flocks();
57094diff --git a/fs/namei.c b/fs/namei.c
57095index 9ed9361..2b72db1 100644
57096--- a/fs/namei.c
57097+++ b/fs/namei.c
57098@@ -319,16 +319,32 @@ int generic_permission(struct inode *inode, int mask)
57099 if (ret != -EACCES)
57100 return ret;
57101
57102+#ifdef CONFIG_GRKERNSEC
57103+ /* we'll block if we have to log due to a denied capability use */
57104+ if (mask & MAY_NOT_BLOCK)
57105+ return -ECHILD;
57106+#endif
57107+
57108 if (S_ISDIR(inode->i_mode)) {
57109 /* DACs are overridable for directories */
57110- if (inode_capable(inode, CAP_DAC_OVERRIDE))
57111- return 0;
57112 if (!(mask & MAY_WRITE))
57113- if (inode_capable(inode, CAP_DAC_READ_SEARCH))
57114+ if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
57115+ inode_capable(inode, CAP_DAC_READ_SEARCH))
57116 return 0;
57117+ if (inode_capable(inode, CAP_DAC_OVERRIDE))
57118+ return 0;
57119 return -EACCES;
57120 }
57121 /*
57122+ * Searching includes executable on directories, else just read.
57123+ */
57124+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
57125+ if (mask == MAY_READ)
57126+ if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
57127+ inode_capable(inode, CAP_DAC_READ_SEARCH))
57128+ return 0;
57129+
57130+ /*
57131 * Read/write DACs are always overridable.
57132 * Executable DACs are overridable when there is
57133 * at least one exec bit set.
57134@@ -337,14 +353,6 @@ int generic_permission(struct inode *inode, int mask)
57135 if (inode_capable(inode, CAP_DAC_OVERRIDE))
57136 return 0;
57137
57138- /*
57139- * Searching includes executable on directories, else just read.
57140- */
57141- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
57142- if (mask == MAY_READ)
57143- if (inode_capable(inode, CAP_DAC_READ_SEARCH))
57144- return 0;
57145-
57146 return -EACCES;
57147 }
57148
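
The generic_permission() reordering above tries the narrower CAP_DAC_READ_SEARCH before falling back to CAP_DAC_OVERRIDE, and probes with a non-logging variant first, so read-only directory access is neither granted via nor logged against the broader capability when a narrower one would do. A runnable schematic of that ordering; capable() and capable_nolog() here are simplified stand-ins for the patch's inode_capable()/inode_capable_nolog():

#include <stdio.h>

#define CAP_DAC_OVERRIDE    0
#define CAP_DAC_READ_SEARCH 1
enum { MAY_WRITE = 2 };

static int caps = 1 << CAP_DAC_READ_SEARCH;   /* pretend task caps */

static int capable_nolog(int cap)   /* silent probe */
{
    return caps & (1 << cap);
}

static int capable(int cap)         /* a real kernel would audit here */
{
    int ok = caps & (1 << cap);

    fprintf(stderr, "cap %d %s\n", cap, ok ? "granted" : "denied");
    return ok;
}

static int dir_permission(int mask)
{
    if (!(mask & MAY_WRITE))
        if (capable_nolog(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH))
            return 0;   /* granted without invoking the big hammer */
    if (capable(CAP_DAC_OVERRIDE))
        return 0;       /* last resort, and it gets logged */
    return -1;          /* -EACCES in the kernel */
}

int main(void)
{
    return dir_permission(0);
}
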
57149@@ -820,7 +828,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
57150 {
57151 struct dentry *dentry = link->dentry;
57152 int error;
57153- char *s;
57154+ const char *s;
57155
57156 BUG_ON(nd->flags & LOOKUP_RCU);
57157
57158@@ -841,6 +849,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
57159 if (error)
57160 goto out_put_nd_path;
57161
57162+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
57163+ dentry->d_inode, dentry, nd->path.mnt)) {
57164+ error = -EACCES;
57165+ goto out_put_nd_path;
57166+ }
57167+
57168 nd->last_type = LAST_BIND;
57169 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
57170 error = PTR_ERR(*p);
57171@@ -1588,6 +1602,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
57172 if (res)
57173 break;
57174 res = walk_component(nd, path, LOOKUP_FOLLOW);
57175+ if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
57176+ res = -EACCES;
57177 put_link(nd, &link, cookie);
57178 } while (res > 0);
57179
57180@@ -1686,7 +1702,7 @@ EXPORT_SYMBOL(full_name_hash);
57181 static inline unsigned long hash_name(const char *name, unsigned int *hashp)
57182 {
57183 unsigned long a, b, adata, bdata, mask, hash, len;
57184- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
57185+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
57186
57187 hash = a = 0;
57188 len = -sizeof(unsigned long);
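
The one-word hash_name() change above ('static const' instead of 'const') moves the WORD_AT_A_TIME constant table into .rodata: without 'static', the struct is a fresh automatic object initialized on every call of this hot path. A minimal illustration, using the usual 64-bit repeated-byte constants:

struct consts {
    unsigned long one_bits, high_bits;
};

unsigned long f_stack(unsigned long x)
{
    /* materialized on the stack each call */
    const struct consts c = { 0x0101010101010101UL, 0x8080808080808080UL };

    return x & c.high_bits;
}

unsigned long f_static(unsigned long x)
{
    /* built once, lives in .rodata */
    static const struct consts c = { 0x0101010101010101UL, 0x8080808080808080UL };

    return x & c.high_bits;
}
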
57189@@ -1968,6 +1984,8 @@ static int path_lookupat(int dfd, const char *name,
57190 if (err)
57191 break;
57192 err = lookup_last(nd, &path);
57193+ if (!err && gr_handle_symlink_owner(&link, nd->inode))
57194+ err = -EACCES;
57195 put_link(nd, &link, cookie);
57196 }
57197 }
57198@@ -1975,6 +1993,13 @@ static int path_lookupat(int dfd, const char *name,
57199 if (!err)
57200 err = complete_walk(nd);
57201
57202+ if (!err && !(nd->flags & LOOKUP_PARENT)) {
57203+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
57204+ path_put(&nd->path);
57205+ err = -ENOENT;
57206+ }
57207+ }
57208+
57209 if (!err && nd->flags & LOOKUP_DIRECTORY) {
57210 if (!can_lookup(nd->inode)) {
57211 path_put(&nd->path);
57212@@ -2002,8 +2027,15 @@ static int filename_lookup(int dfd, struct filename *name,
57213 retval = path_lookupat(dfd, name->name,
57214 flags | LOOKUP_REVAL, nd);
57215
57216- if (likely(!retval))
57217+ if (likely(!retval)) {
57218 audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
57219+ if (name->name[0] != '/' && nd->path.dentry && nd->inode) {
57220+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt)) {
57221+ path_put(&nd->path);
57222+ return -ENOENT;
57223+ }
57224+ }
57225+ }
57226 return retval;
57227 }
57228
57229@@ -2381,6 +2413,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
57230 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
57231 return -EPERM;
57232
57233+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
57234+ return -EPERM;
57235+ if (gr_handle_rawio(inode))
57236+ return -EPERM;
57237+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
57238+ return -EACCES;
57239+
57240 return 0;
57241 }
57242
57243@@ -2602,7 +2641,7 @@ looked_up:
57244 * cleared otherwise prior to returning.
57245 */
57246 static int lookup_open(struct nameidata *nd, struct path *path,
57247- struct file *file,
57248+ struct path *link, struct file *file,
57249 const struct open_flags *op,
57250 bool got_write, int *opened)
57251 {
57252@@ -2637,6 +2676,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
57253 /* Negative dentry, just create the file */
57254 if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
57255 umode_t mode = op->mode;
57256+
57257+ if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
57258+ error = -EACCES;
57259+ goto out_dput;
57260+ }
57261+
57262+ if (!gr_acl_handle_creat(dentry, dir, nd->path.mnt, op->open_flag, op->acc_mode, mode)) {
57263+ error = -EACCES;
57264+ goto out_dput;
57265+ }
57266+
57267 if (!IS_POSIXACL(dir->d_inode))
57268 mode &= ~current_umask();
57269 /*
57270@@ -2658,6 +2708,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
57271 nd->flags & LOOKUP_EXCL);
57272 if (error)
57273 goto out_dput;
57274+ else
57275+ gr_handle_create(dentry, nd->path.mnt);
57276 }
57277 out_no_open:
57278 path->dentry = dentry;
57279@@ -2672,7 +2724,7 @@ out_dput:
57280 /*
57281 * Handle the last step of open()
57282 */
57283-static int do_last(struct nameidata *nd, struct path *path,
57284+static int do_last(struct nameidata *nd, struct path *path, struct path *link,
57285 struct file *file, const struct open_flags *op,
57286 int *opened, struct filename *name)
57287 {
57288@@ -2701,16 +2753,32 @@ static int do_last(struct nameidata *nd, struct path *path,
57289 error = complete_walk(nd);
57290 if (error)
57291 return error;
57292+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
57293+ error = -ENOENT;
57294+ goto out;
57295+ }
57296 audit_inode(name, nd->path.dentry, 0);
57297 if (open_flag & O_CREAT) {
57298 error = -EISDIR;
57299 goto out;
57300 }
57301+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
57302+ error = -EACCES;
57303+ goto out;
57304+ }
57305 goto finish_open;
57306 case LAST_BIND:
57307 error = complete_walk(nd);
57308 if (error)
57309 return error;
57310+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
57311+ error = -ENOENT;
57312+ goto out;
57313+ }
57314+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
57315+ error = -EACCES;
57316+ goto out;
57317+ }
57318 audit_inode(name, dir, 0);
57319 goto finish_open;
57320 }
57321@@ -2759,7 +2827,7 @@ retry_lookup:
57322 */
57323 }
57324 mutex_lock(&dir->d_inode->i_mutex);
57325- error = lookup_open(nd, path, file, op, got_write, opened);
57326+ error = lookup_open(nd, path, link, file, op, got_write, opened);
57327 mutex_unlock(&dir->d_inode->i_mutex);
57328
57329 if (error <= 0) {
57330@@ -2783,11 +2851,28 @@ retry_lookup:
57331 goto finish_open_created;
57332 }
57333
57334+ if (!gr_acl_handle_hidden_file(path->dentry, nd->path.mnt)) {
57335+ error = -ENOENT;
57336+ goto exit_dput;
57337+ }
57338+ if (link && gr_handle_symlink_owner(link, path->dentry->d_inode)) {
57339+ error = -EACCES;
57340+ goto exit_dput;
57341+ }
57342+
57343 /*
57344 * create/update audit record if it already exists.
57345 */
57346- if (path->dentry->d_inode)
57347+ if (path->dentry->d_inode) {
 57348+			/* only check if O_CREAT is specified; all other checks
 57349+			   need to go into may_open */
57350+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
57351+ error = -EACCES;
57352+ goto exit_dput;
57353+ }
57354+
57355 audit_inode(name, path->dentry, 0);
57356+ }
57357
57358 /*
57359 * If atomic_open() acquired write access it is dropped now due to
57360@@ -2828,6 +2913,11 @@ finish_lookup:
57361 }
57362 }
57363 BUG_ON(inode != path->dentry->d_inode);
57364+ /* if we're resolving a symlink to another symlink */
57365+ if (link && gr_handle_symlink_owner(link, inode)) {
57366+ error = -EACCES;
57367+ goto out;
57368+ }
57369 return 1;
57370 }
57371
57372@@ -2837,7 +2927,6 @@ finish_lookup:
57373 save_parent.dentry = nd->path.dentry;
57374 save_parent.mnt = mntget(path->mnt);
57375 nd->path.dentry = path->dentry;
57376-
57377 }
57378 nd->inode = inode;
57379 /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
57380@@ -2846,6 +2935,16 @@ finish_lookup:
57381 path_put(&save_parent);
57382 return error;
57383 }
57384+
57385+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
57386+ error = -ENOENT;
57387+ goto out;
57388+ }
57389+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
57390+ error = -EACCES;
57391+ goto out;
57392+ }
57393+
57394 error = -EISDIR;
57395 if ((open_flag & O_CREAT) && S_ISDIR(nd->inode->i_mode))
57396 goto out;
57397@@ -2944,7 +3043,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
57398 if (unlikely(error))
57399 goto out;
57400
57401- error = do_last(nd, &path, file, op, &opened, pathname);
57402+ error = do_last(nd, &path, NULL, file, op, &opened, pathname);
57403 while (unlikely(error > 0)) { /* trailing symlink */
57404 struct path link = path;
57405 void *cookie;
57406@@ -2962,7 +3061,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
57407 error = follow_link(&link, nd, &cookie);
57408 if (unlikely(error))
57409 break;
57410- error = do_last(nd, &path, file, op, &opened, pathname);
57411+ error = do_last(nd, &path, &link, file, op, &opened, pathname);
57412 put_link(nd, &link, cookie);
57413 }
57414 out:
57415@@ -3062,8 +3161,12 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
57416 goto unlock;
57417
57418 error = -EEXIST;
57419- if (dentry->d_inode)
57420+ if (dentry->d_inode) {
57421+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
57422+ error = -ENOENT;
57423+ }
57424 goto fail;
57425+ }
57426 /*
57427 * Special case - lookup gave negative, but... we had foo/bar/
57428 * From the vfs_mknod() POV we just have a negative dentry -
57429@@ -3115,6 +3218,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
57430 }
57431 EXPORT_SYMBOL(user_path_create);
57432
57433+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, struct filename **to, unsigned int lookup_flags)
57434+{
57435+ struct filename *tmp = getname(pathname);
57436+ struct dentry *res;
57437+ if (IS_ERR(tmp))
57438+ return ERR_CAST(tmp);
57439+ res = kern_path_create(dfd, tmp->name, path, lookup_flags);
57440+ if (IS_ERR(res))
57441+ putname(tmp);
57442+ else
57443+ *to = tmp;
57444+ return res;
57445+}
57446+
57447 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
57448 {
57449 int error = may_create(dir, dentry);
57450@@ -3177,6 +3294,17 @@ retry:
57451
57452 if (!IS_POSIXACL(path.dentry->d_inode))
57453 mode &= ~current_umask();
57454+
57455+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
57456+ error = -EPERM;
57457+ goto out;
57458+ }
57459+
57460+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
57461+ error = -EACCES;
57462+ goto out;
57463+ }
57464+
57465 error = security_path_mknod(&path, dentry, mode, dev);
57466 if (error)
57467 goto out;
57468@@ -3193,6 +3321,8 @@ retry:
57469 break;
57470 }
57471 out:
57472+ if (!error)
57473+ gr_handle_create(dentry, path.mnt);
57474 done_path_create(&path, dentry);
57475 if (retry_estale(error, lookup_flags)) {
57476 lookup_flags |= LOOKUP_REVAL;
57477@@ -3245,9 +3375,16 @@ retry:
57478
57479 if (!IS_POSIXACL(path.dentry->d_inode))
57480 mode &= ~current_umask();
57481+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
57482+ error = -EACCES;
57483+ goto out;
57484+ }
57485 error = security_path_mkdir(&path, dentry, mode);
57486 if (!error)
57487 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
57488+ if (!error)
57489+ gr_handle_create(dentry, path.mnt);
57490+out:
57491 done_path_create(&path, dentry);
57492 if (retry_estale(error, lookup_flags)) {
57493 lookup_flags |= LOOKUP_REVAL;
57494@@ -3328,6 +3465,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
57495 struct filename *name;
57496 struct dentry *dentry;
57497 struct nameidata nd;
57498+ ino_t saved_ino = 0;
57499+ dev_t saved_dev = 0;
57500 unsigned int lookup_flags = 0;
57501 retry:
57502 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
57503@@ -3360,10 +3499,21 @@ retry:
57504 error = -ENOENT;
57505 goto exit3;
57506 }
57507+
57508+ saved_ino = dentry->d_inode->i_ino;
57509+ saved_dev = gr_get_dev_from_dentry(dentry);
57510+
57511+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
57512+ error = -EACCES;
57513+ goto exit3;
57514+ }
57515+
57516 error = security_path_rmdir(&nd.path, dentry);
57517 if (error)
57518 goto exit3;
57519 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
57520+ if (!error && (saved_dev || saved_ino))
57521+ gr_handle_delete(saved_ino, saved_dev);
57522 exit3:
57523 dput(dentry);
57524 exit2:
57525@@ -3429,6 +3579,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
57526 struct dentry *dentry;
57527 struct nameidata nd;
57528 struct inode *inode = NULL;
57529+ ino_t saved_ino = 0;
57530+ dev_t saved_dev = 0;
57531 unsigned int lookup_flags = 0;
57532 retry:
57533 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
57534@@ -3455,10 +3607,22 @@ retry:
57535 if (!inode)
57536 goto slashes;
57537 ihold(inode);
57538+
57539+ if (inode->i_nlink <= 1) {
57540+ saved_ino = inode->i_ino;
57541+ saved_dev = gr_get_dev_from_dentry(dentry);
57542+ }
57543+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
57544+ error = -EACCES;
57545+ goto exit2;
57546+ }
57547+
57548 error = security_path_unlink(&nd.path, dentry);
57549 if (error)
57550 goto exit2;
57551 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
57552+ if (!error && (saved_ino || saved_dev))
57553+ gr_handle_delete(saved_ino, saved_dev);
57554 exit2:
57555 dput(dentry);
57556 }
57557@@ -3536,9 +3700,17 @@ retry:
57558 if (IS_ERR(dentry))
57559 goto out_putname;
57560
57561+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
57562+ error = -EACCES;
57563+ goto out;
57564+ }
57565+
57566 error = security_path_symlink(&path, dentry, from->name);
57567 if (!error)
57568 error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
57569+ if (!error)
57570+ gr_handle_create(dentry, path.mnt);
57571+out:
57572 done_path_create(&path, dentry);
57573 if (retry_estale(error, lookup_flags)) {
57574 lookup_flags |= LOOKUP_REVAL;
57575@@ -3612,6 +3784,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
57576 {
57577 struct dentry *new_dentry;
57578 struct path old_path, new_path;
57579+ struct filename *to = NULL;
57580 int how = 0;
57581 int error;
57582
57583@@ -3635,7 +3808,7 @@ retry:
57584 if (error)
57585 return error;
57586
57587- new_dentry = user_path_create(newdfd, newname, &new_path,
57588+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to,
57589 (how & LOOKUP_REVAL));
57590 error = PTR_ERR(new_dentry);
57591 if (IS_ERR(new_dentry))
57592@@ -3647,11 +3820,28 @@ retry:
57593 error = may_linkat(&old_path);
57594 if (unlikely(error))
57595 goto out_dput;
57596+
57597+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
57598+ old_path.dentry->d_inode,
57599+ old_path.dentry->d_inode->i_mode, to)) {
57600+ error = -EACCES;
57601+ goto out_dput;
57602+ }
57603+
57604+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
57605+ old_path.dentry, old_path.mnt, to)) {
57606+ error = -EACCES;
57607+ goto out_dput;
57608+ }
57609+
57610 error = security_path_link(old_path.dentry, &new_path, new_dentry);
57611 if (error)
57612 goto out_dput;
57613 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
57614+ if (!error)
57615+ gr_handle_create(new_dentry, new_path.mnt);
57616 out_dput:
57617+ putname(to);
57618 done_path_create(&new_path, new_dentry);
57619 if (retry_estale(error, how)) {
57620 how |= LOOKUP_REVAL;
57621@@ -3897,12 +4087,21 @@ retry:
57622 if (new_dentry == trap)
57623 goto exit5;
57624
57625+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
57626+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
57627+ to);
57628+ if (error)
57629+ goto exit5;
57630+
57631 error = security_path_rename(&oldnd.path, old_dentry,
57632 &newnd.path, new_dentry);
57633 if (error)
57634 goto exit5;
57635 error = vfs_rename(old_dir->d_inode, old_dentry,
57636 new_dir->d_inode, new_dentry);
57637+ if (!error)
57638+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
57639+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
57640 exit5:
57641 dput(new_dentry);
57642 exit4:
57643@@ -3934,6 +4133,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
57644
57645 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
57646 {
57647+ char tmpbuf[64];
57648+ const char *newlink;
57649 int len;
57650
57651 len = PTR_ERR(link);
57652@@ -3943,7 +4144,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
57653 len = strlen(link);
57654 if (len > (unsigned) buflen)
57655 len = buflen;
57656- if (copy_to_user(buffer, link, len))
57657+
57658+ if (len < sizeof(tmpbuf)) {
57659+ memcpy(tmpbuf, link, len);
57660+ newlink = tmpbuf;
57661+ } else
57662+ newlink = link;
57663+
57664+ if (copy_to_user(buffer, newlink, len))
57665 len = -EFAULT;
57666 out:
57667 return len;
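
The vfs_readlink() change above bounces short link bodies through a stack buffer before copy_to_user(), which can sleep on a page fault; the snapshot means a concurrent update of the link text cannot tear the bytes userspace receives mid-copy. A userspace mirror of the hunk, with a plain memcpy() standing in for copy_to_user():

#include <stdio.h>
#include <string.h>

/* Stand-in for copy_to_user(): in the kernel this can block, which is
 * the window the bounce buffer closes. */
static int copy_to_user_stub(void *dst, const void *src, size_t n)
{
    memcpy(dst, src, n);
    return 0;
}

static int readlink_copy(char *buffer, size_t buflen, const char *link)
{
    char tmpbuf[64];
    const char *newlink;
    size_t len = strlen(link);

    if (len > buflen)
        len = buflen;

    if (len < sizeof(tmpbuf)) {
        memcpy(tmpbuf, link, len);   /* snapshot before we may block */
        newlink = tmpbuf;
    } else {
        newlink = link;              /* too big: fall back to the source */
    }
    return copy_to_user_stub(buffer, newlink, len);
}

int main(void)
{
    char out[16];

    return readlink_copy(out, sizeof(out), "target");
}
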
57668diff --git a/fs/namespace.c b/fs/namespace.c
57669index 7b1ca9b..6faeccf 100644
57670--- a/fs/namespace.c
57671+++ b/fs/namespace.c
57672@@ -1265,6 +1265,9 @@ static int do_umount(struct mount *mnt, int flags)
57673 if (!(sb->s_flags & MS_RDONLY))
57674 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
57675 up_write(&sb->s_umount);
57676+
57677+ gr_log_remount(mnt->mnt_devname, retval);
57678+
57679 return retval;
57680 }
57681
57682@@ -1283,6 +1286,9 @@ static int do_umount(struct mount *mnt, int flags)
57683 }
57684 br_write_unlock(&vfsmount_lock);
57685 namespace_unlock();
57686+
57687+ gr_log_unmount(mnt->mnt_devname, retval);
57688+
57689 return retval;
57690 }
57691
57692@@ -1302,7 +1308,7 @@ static inline bool may_mount(void)
57693 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
57694 */
57695
57696-SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
57697+SYSCALL_DEFINE2(umount, const char __user *, name, int, flags)
57698 {
57699 struct path path;
57700 struct mount *mnt;
57701@@ -1342,7 +1348,7 @@ out:
57702 /*
57703 * The 2.0 compatible umount. No flags.
57704 */
57705-SYSCALL_DEFINE1(oldumount, char __user *, name)
57706+SYSCALL_DEFINE1(oldumount, const char __user *, name)
57707 {
57708 return sys_umount(name, 0);
57709 }
57710@@ -2313,6 +2319,16 @@ long do_mount(const char *dev_name, const char *dir_name,
57711 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
57712 MS_STRICTATIME);
57713
57714+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
57715+ retval = -EPERM;
57716+ goto dput_out;
57717+ }
57718+
57719+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
57720+ retval = -EPERM;
57721+ goto dput_out;
57722+ }
57723+
57724 if (flags & MS_REMOUNT)
57725 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
57726 data_page);
57727@@ -2327,6 +2343,9 @@ long do_mount(const char *dev_name, const char *dir_name,
57728 dev_name, data_page);
57729 dput_out:
57730 path_put(&path);
57731+
57732+ gr_log_mount(dev_name, dir_name, retval);
57733+
57734 return retval;
57735 }
57736
57737@@ -2500,8 +2519,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
57738 }
57739 EXPORT_SYMBOL(mount_subtree);
57740
57741-SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
57742- char __user *, type, unsigned long, flags, void __user *, data)
57743+SYSCALL_DEFINE5(mount, const char __user *, dev_name, const char __user *, dir_name,
57744+ const char __user *, type, unsigned long, flags, void __user *, data)
57745 {
57746 int ret;
57747 char *kernel_type;
57748@@ -2614,6 +2633,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
57749 if (error)
57750 goto out2;
57751
57752+ if (gr_handle_chroot_pivot()) {
57753+ error = -EPERM;
57754+ goto out2;
57755+ }
57756+
57757 get_fs_root(current->fs, &root);
57758 old_mp = lock_mount(&old);
57759 error = PTR_ERR(old_mp);
57760@@ -2864,7 +2888,7 @@ static int mntns_install(struct nsproxy *nsproxy, void *ns)
57761 !nsown_capable(CAP_SYS_ADMIN))
57762 return -EPERM;
57763
57764- if (fs->users != 1)
57765+ if (atomic_read(&fs->users) != 1)
57766 return -EINVAL;
57767
57768 get_mnt_ns(mnt_ns);
57769diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
57770index cff089a..4c3d57a 100644
57771--- a/fs/nfs/callback.c
57772+++ b/fs/nfs/callback.c
57773@@ -211,7 +211,6 @@ static int nfs_callback_start_svc(int minorversion, struct rpc_xprt *xprt,
57774 struct svc_rqst *rqstp;
57775 int (*callback_svc)(void *vrqstp);
57776 struct nfs_callback_data *cb_info = &nfs_callback_info[minorversion];
57777- char svc_name[12];
57778 int ret;
57779
57780 nfs_callback_bc_serv(minorversion, xprt, serv);
57781@@ -235,10 +234,9 @@ static int nfs_callback_start_svc(int minorversion, struct rpc_xprt *xprt,
57782
57783 svc_sock_update_bufs(serv);
57784
57785- sprintf(svc_name, "nfsv4.%u-svc", minorversion);
57786 cb_info->serv = serv;
57787 cb_info->rqst = rqstp;
57788- cb_info->task = kthread_run(callback_svc, cb_info->rqst, svc_name);
57789+ cb_info->task = kthread_run(callback_svc, cb_info->rqst, "nfsv4.%u-svc", minorversion);
57790 if (IS_ERR(cb_info->task)) {
57791 ret = PTR_ERR(cb_info->task);
57792 svc_exit_thread(cb_info->rqst);
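
Besides the format-string hygiene, the NFS callback change above retires a fixed 12-byte name buffer: "nfsv4.%u-svc" fits in 12 bytes (with the NUL) only while the minor version stays single-digit, so letting kthread_run() format the name itself removes a latent overflow. Where a local buffer is unavoidable, snprintf() bounds it:

#include <stdio.h>

int main(void)
{
    unsigned minorversion = 12;   /* two digits: 13 bytes needed */
    char name[12];
    int n;

    /* sprintf(name, "nfsv4.%u-svc", minorversion);  -- overflows */
    n = snprintf(name, sizeof(name), "nfsv4.%u-svc", minorversion);
    if (n >= (int)sizeof(name))
        fprintf(stderr, "truncated (%d bytes needed)\n", n + 1);
    printf("%s\n", name);
    return 0;
}
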
57793diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
57794index a35582c..ebbdcd5 100644
57795--- a/fs/nfs/callback_xdr.c
57796+++ b/fs/nfs/callback_xdr.c
57797@@ -51,7 +51,7 @@ struct callback_op {
57798 callback_decode_arg_t decode_args;
57799 callback_encode_res_t encode_res;
57800 long res_maxsize;
57801-};
57802+} __do_const;
57803
57804 static struct callback_op callback_ops[];
57805
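
__do_const above is a PaX constify-plugin annotation: an ops structure that holds only function pointers and sizes is forced into read-only memory, so a kernel write primitive cannot redirect the callbacks. In plain C the same intent is spelled 'static const':

/* Read-only ops table: 'const' plus static initialization puts the
 * function pointers in .rodata, out of reach of stray writes. */
#include <stdio.h>

struct callback_op {
    int (*decode)(const char *in);
    long res_maxsize;
};

static int decode_null(const char *in)
{
    (void)in;
    return 0;
}

static const struct callback_op callback_ops[] = {
    { .decode = decode_null, .res_maxsize = 0 },
};

int main(void)
{
    return callback_ops[0].decode("x");
}
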
57806diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
57807index c1c7a9d..7afa0b8 100644
57808--- a/fs/nfs/inode.c
57809+++ b/fs/nfs/inode.c
57810@@ -1043,16 +1043,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
57811 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
57812 }
57813
57814-static atomic_long_t nfs_attr_generation_counter;
57815+static atomic_long_unchecked_t nfs_attr_generation_counter;
57816
57817 static unsigned long nfs_read_attr_generation_counter(void)
57818 {
57819- return atomic_long_read(&nfs_attr_generation_counter);
57820+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
57821 }
57822
57823 unsigned long nfs_inc_attr_generation_counter(void)
57824 {
57825- return atomic_long_inc_return(&nfs_attr_generation_counter);
57826+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
57827 }
57828
57829 void nfs_fattr_init(struct nfs_fattr *fattr)
57830diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
57831index 2c37442..9b9538b 100644
57832--- a/fs/nfs/nfs4state.c
57833+++ b/fs/nfs/nfs4state.c
57834@@ -1193,7 +1193,7 @@ void nfs4_schedule_state_manager(struct nfs_client *clp)
57835 snprintf(buf, sizeof(buf), "%s-manager",
57836 rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR));
57837 rcu_read_unlock();
57838- task = kthread_run(nfs4_run_state_manager, clp, buf);
57839+ task = kthread_run(nfs4_run_state_manager, clp, "%s", buf);
57840 if (IS_ERR(task)) {
57841 printk(KERN_ERR "%s: kthread_run: %ld\n",
57842 __func__, PTR_ERR(task));
57843diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
57844index 27d74a2..c4c2a73 100644
57845--- a/fs/nfsd/nfs4proc.c
57846+++ b/fs/nfsd/nfs4proc.c
57847@@ -1126,7 +1126,7 @@ struct nfsd4_operation {
57848 nfsd4op_rsize op_rsize_bop;
57849 stateid_getter op_get_currentstateid;
57850 stateid_setter op_set_currentstateid;
57851-};
57852+} __do_const;
57853
57854 static struct nfsd4_operation nfsd4_ops[];
57855
57856diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
57857index 582321a..0224663 100644
57858--- a/fs/nfsd/nfs4xdr.c
57859+++ b/fs/nfsd/nfs4xdr.c
57860@@ -1458,7 +1458,7 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
57861
57862 typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);
57863
57864-static nfsd4_dec nfsd4_dec_ops[] = {
57865+static const nfsd4_dec nfsd4_dec_ops[] = {
57866 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
57867 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
57868 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
57869@@ -1498,7 +1498,7 @@ static nfsd4_dec nfsd4_dec_ops[] = {
57870 [OP_RELEASE_LOCKOWNER] = (nfsd4_dec)nfsd4_decode_release_lockowner,
57871 };
57872
57873-static nfsd4_dec nfsd41_dec_ops[] = {
57874+static const nfsd4_dec nfsd41_dec_ops[] = {
57875 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
57876 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
57877 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
57878@@ -1560,7 +1560,7 @@ static nfsd4_dec nfsd41_dec_ops[] = {
57879 };
57880
57881 struct nfsd4_minorversion_ops {
57882- nfsd4_dec *decoders;
57883+ const nfsd4_dec *decoders;
57884 int nops;
57885 };
57886
57887diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
57888index e76244e..9fe8f2f1 100644
57889--- a/fs/nfsd/nfscache.c
57890+++ b/fs/nfsd/nfscache.c
57891@@ -526,14 +526,17 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
57892 {
57893 struct svc_cacherep *rp = rqstp->rq_cacherep;
57894 struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
57895- int len;
57896+ long len;
57897 size_t bufsize = 0;
57898
57899 if (!rp)
57900 return;
57901
57902- len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
57903- len >>= 2;
57904+ if (statp) {
57905+ len = (char*)statp - (char*)resv->iov_base;
57906+ len = resv->iov_len - len;
57907+ len >>= 2;
57908+ }
57909
57910 /* Don't cache excessive amounts of data and XDR failures */
57911 if (!statp || len > (256 >> 2)) {
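
Two fixes travel together in the nfsd_cache_update() hunk above: the pointer difference is kept in a long (it is ptrdiff_t-sized and could truncate in an int on 64-bit), and it is computed only when statp is non-NULL, whereas the old code subtracted through the pointer before the !statp test. A sketch of the corrected arithmetic:

#include <stddef.h>
#include <stdio.h>

static long payload_words(const char *base, size_t total, const char *cursor)
{
    long len = 0;   /* callers must still test cursor != NULL */

    if (cursor) {
        len = cursor - base;        /* ptrdiff_t-sized, not int */
        len = (long)total - len;    /* bytes past the cursor */
        len >>= 2;                  /* 32-bit words */
    }
    return len;
}

int main(void)
{
    char buf[32];

    printf("%ld\n", payload_words(buf, sizeof(buf), buf + 8));
    return 0;
}
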
57912diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
57913index baf149a..76b86ad 100644
57914--- a/fs/nfsd/vfs.c
57915+++ b/fs/nfsd/vfs.c
57916@@ -940,7 +940,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
57917 } else {
57918 oldfs = get_fs();
57919 set_fs(KERNEL_DS);
57920- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
57921+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
57922 set_fs(oldfs);
57923 }
57924
57925@@ -1027,7 +1027,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
57926
57927 /* Write the data. */
57928 oldfs = get_fs(); set_fs(KERNEL_DS);
57929- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos);
57930+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &pos);
57931 set_fs(oldfs);
57932 if (host_err < 0)
57933 goto out_nfserr;
57934@@ -1573,7 +1573,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
57935 */
57936
57937 oldfs = get_fs(); set_fs(KERNEL_DS);
57938- host_err = inode->i_op->readlink(path.dentry, (char __user *)buf, *lenp);
57939+ host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
57940 set_fs(oldfs);
57941
57942 if (host_err < 0)
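
The __force_user casts above serve sparse, the kernel's static checker: these nfsd paths run under set_fs(KERNEL_DS) and deliberately feed kernel buffers to interfaces typed __user, and the annotation marks the address-space override as intentional so genuine kernel/user mixups still warn. A self-contained illustration of the annotation scheme (checkable with sparse, a no-op under a normal compiler):

#include <string.h>

#ifdef __CHECKER__
# define __user  __attribute__((noderef, address_space(1)))
# define __force __attribute__((force))
#else
# define __user
# define __force
#endif

static int copy_out(void __user *udst, const void *src, unsigned long n)
{
    /* the one sanctioned crossing point, made explicit with __force */
    memcpy((__force void *)udst, src, n);
    return 0;
}

int main(void)
{
    char kbuf[8] = "data", out[8];

    /* copy_out(out, ...) without the cast would make sparse complain
     * about the wrong address space; spelling out the override is what
     * the __force_user casts in the hunk above do. */
    return copy_out((__force void __user *)out, kbuf, sizeof(kbuf));
}
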
57943diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
57944index fea6bd5..8ee9d81 100644
57945--- a/fs/nls/nls_base.c
57946+++ b/fs/nls/nls_base.c
57947@@ -234,20 +234,22 @@ EXPORT_SYMBOL(utf16s_to_utf8s);
57948
57949 int register_nls(struct nls_table * nls)
57950 {
57951- struct nls_table ** tmp = &tables;
57952+ struct nls_table *tmp = tables;
57953
57954 if (nls->next)
57955 return -EBUSY;
57956
57957 spin_lock(&nls_lock);
57958- while (*tmp) {
57959- if (nls == *tmp) {
57960+ while (tmp) {
57961+ if (nls == tmp) {
57962 spin_unlock(&nls_lock);
57963 return -EBUSY;
57964 }
57965- tmp = &(*tmp)->next;
57966+ tmp = tmp->next;
57967 }
57968- nls->next = tables;
57969+ pax_open_kernel();
57970+ *(struct nls_table **)&nls->next = tables;
57971+ pax_close_kernel();
57972 tables = nls;
57973 spin_unlock(&nls_lock);
57974 return 0;
57975@@ -255,12 +257,14 @@ int register_nls(struct nls_table * nls)
57976
57977 int unregister_nls(struct nls_table * nls)
57978 {
57979- struct nls_table ** tmp = &tables;
57980+ struct nls_table * const * tmp = &tables;
57981
57982 spin_lock(&nls_lock);
57983 while (*tmp) {
57984 if (nls == *tmp) {
57985- *tmp = nls->next;
57986+ pax_open_kernel();
57987+ *(struct nls_table **)tmp = nls->next;
57988+ pax_close_kernel();
57989 spin_unlock(&nls_lock);
57990 return 0;
57991 }
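
The pax_open_kernel()/pax_close_kernel() pairs above bracket stores into structures that PaX's constification places in read-only memory; the helpers briefly lift write protection (on x86, roughly by toggling CR0.WP) for a deliberate update. A userspace sketch of the same write-rarely pattern using mprotect(), assuming a 4096-byte page:

#include <stdio.h>
#include <sys/mman.h>

static long *counter;   /* lives in an otherwise read-only page */

static void open_window(void)
{
    (void)mprotect(counter, 4096, PROT_READ | PROT_WRITE);
}

static void close_window(void)
{
    (void)mprotect(counter, 4096, PROT_READ);
}

int main(void)
{
    counter = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (counter == MAP_FAILED)
        return 1;
    *counter = 41;
    close_window();         /* normal state: read-only */

    open_window();          /* deliberate update window */
    *counter += 1;
    close_window();

    printf("%ld\n", *counter);   /* reads stay legal */
    return 0;
}
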
57992diff --git a/fs/nls/nls_euc-jp.c b/fs/nls/nls_euc-jp.c
57993index 7424929..35f6be5 100644
57994--- a/fs/nls/nls_euc-jp.c
57995+++ b/fs/nls/nls_euc-jp.c
57996@@ -561,8 +561,10 @@ static int __init init_nls_euc_jp(void)
57997 p_nls = load_nls("cp932");
57998
57999 if (p_nls) {
58000- table.charset2upper = p_nls->charset2upper;
58001- table.charset2lower = p_nls->charset2lower;
58002+ pax_open_kernel();
58003+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
58004+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
58005+ pax_close_kernel();
58006 return register_nls(&table);
58007 }
58008
58009diff --git a/fs/nls/nls_koi8-ru.c b/fs/nls/nls_koi8-ru.c
58010index e7bc1d7..06bd4bb 100644
58011--- a/fs/nls/nls_koi8-ru.c
58012+++ b/fs/nls/nls_koi8-ru.c
58013@@ -63,8 +63,10 @@ static int __init init_nls_koi8_ru(void)
58014 p_nls = load_nls("koi8-u");
58015
58016 if (p_nls) {
58017- table.charset2upper = p_nls->charset2upper;
58018- table.charset2lower = p_nls->charset2lower;
58019+ pax_open_kernel();
58020+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
58021+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
58022+ pax_close_kernel();
58023 return register_nls(&table);
58024 }
58025
58026diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
58027index 77cc85d..a1e6299 100644
58028--- a/fs/notify/fanotify/fanotify_user.c
58029+++ b/fs/notify/fanotify/fanotify_user.c
58030@@ -253,8 +253,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
58031
58032 fd = fanotify_event_metadata.fd;
58033 ret = -EFAULT;
58034- if (copy_to_user(buf, &fanotify_event_metadata,
58035- fanotify_event_metadata.event_len))
58036+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
58037+ copy_to_user(buf, &fanotify_event_metadata, fanotify_event_metadata.event_len))
58038 goto out_close_fd;
58039
58040 ret = prepare_for_access_response(group, event, fd);
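
The fanotify change above validates the self-described event_len against the real size of the on-stack metadata struct before using it as the copy_to_user() length, closing a potential kernel stack info leak. The pattern in isolation:

/* Bounds-check a self-described length before honoring it. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct metadata {
    uint32_t event_len;   /* claims the record's total size */
    uint32_t fd;
};

static int copy_event(void *ubuf, const struct metadata *m)
{
    /* trusting event_len blindly could copy stack bytes past *m */
    if (m->event_len > sizeof(*m))
        return -1;                     /* -EFAULT in the hunk above */
    memcpy(ubuf, m, m->event_len);     /* stand-in for copy_to_user() */
    return 0;
}

int main(void)
{
    struct metadata m = { .event_len = sizeof(m), .fd = 3 };
    char buf[sizeof(m)];

    printf("%d\n", copy_event(buf, &m));
    return 0;
}
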
58041diff --git a/fs/notify/notification.c b/fs/notify/notification.c
58042index 7b51b05..5ea5ef6 100644
58043--- a/fs/notify/notification.c
58044+++ b/fs/notify/notification.c
58045@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
58046 * get set to 0 so it will never get 'freed'
58047 */
58048 static struct fsnotify_event *q_overflow_event;
58049-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
58050+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
58051
58052 /**
58053 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
58054@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
58055 */
58056 u32 fsnotify_get_cookie(void)
58057 {
58058- return atomic_inc_return(&fsnotify_sync_cookie);
58059+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
58060 }
58061 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
58062
58063diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
58064index aa411c3..c260a84 100644
58065--- a/fs/ntfs/dir.c
58066+++ b/fs/ntfs/dir.c
58067@@ -1329,7 +1329,7 @@ find_next_index_buffer:
58068 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
58069 ~(s64)(ndir->itype.index.block_size - 1)));
58070 /* Bounds checks. */
58071- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
58072+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
58073 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
58074 "inode 0x%lx or driver bug.", vdir->i_ino);
58075 goto err_out;
58076diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
58077index c5670b8..2b43d9b 100644
58078--- a/fs/ntfs/file.c
58079+++ b/fs/ntfs/file.c
58080@@ -1282,7 +1282,7 @@ static inline size_t ntfs_copy_from_user(struct page **pages,
58081 char *addr;
58082 size_t total = 0;
58083 unsigned len;
58084- int left;
58085+ unsigned left;
58086
58087 do {
58088 len = PAGE_CACHE_SIZE - ofs;
58089@@ -2241,6 +2241,6 @@ const struct inode_operations ntfs_file_inode_ops = {
58090 #endif /* NTFS_RW */
58091 };
58092
58093-const struct file_operations ntfs_empty_file_ops = {};
58094+const struct file_operations ntfs_empty_file_ops __read_only;
58095
58096-const struct inode_operations ntfs_empty_inode_ops = {};
58097+const struct inode_operations ntfs_empty_inode_ops __read_only;
58098diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
58099index 20dfec7..e238cb7 100644
58100--- a/fs/ocfs2/aops.c
58101+++ b/fs/ocfs2/aops.c
58102@@ -1756,7 +1756,7 @@ try_again:
58103 goto out;
58104 } else if (ret == 1) {
58105 clusters_need = wc->w_clen;
58106- ret = ocfs2_refcount_cow(inode, filp, di_bh,
58107+ ret = ocfs2_refcount_cow(inode, di_bh,
58108 wc->w_cpos, wc->w_clen, UINT_MAX);
58109 if (ret) {
58110 mlog_errno(ret);
58111diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
58112index ff54014..ff125fd 100644
58113--- a/fs/ocfs2/file.c
58114+++ b/fs/ocfs2/file.c
58115@@ -370,7 +370,7 @@ static int ocfs2_cow_file_pos(struct inode *inode,
58116 if (!(ext_flags & OCFS2_EXT_REFCOUNTED))
58117 goto out;
58118
58119- return ocfs2_refcount_cow(inode, NULL, fe_bh, cpos, 1, cpos+1);
58120+ return ocfs2_refcount_cow(inode, fe_bh, cpos, 1, cpos+1);
58121
58122 out:
58123 return status;
58124@@ -899,7 +899,7 @@ static int ocfs2_zero_extend_get_range(struct inode *inode,
58125 zero_clusters = last_cpos - zero_cpos;
58126
58127 if (needs_cow) {
58128- rc = ocfs2_refcount_cow(inode, NULL, di_bh, zero_cpos,
58129+ rc = ocfs2_refcount_cow(inode, di_bh, zero_cpos,
58130 zero_clusters, UINT_MAX);
58131 if (rc) {
58132 mlog_errno(rc);
58133@@ -2078,7 +2078,7 @@ static int ocfs2_prepare_inode_for_refcount(struct inode *inode,
58134
58135 *meta_level = 1;
58136
58137- ret = ocfs2_refcount_cow(inode, file, di_bh, cpos, clusters, UINT_MAX);
58138+ ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX);
58139 if (ret)
58140 mlog_errno(ret);
58141 out:
58142diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
58143index aebeacd..0dcdd26 100644
58144--- a/fs/ocfs2/localalloc.c
58145+++ b/fs/ocfs2/localalloc.c
58146@@ -1278,7 +1278,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
58147 goto bail;
58148 }
58149
58150- atomic_inc(&osb->alloc_stats.moves);
58151+ atomic_inc_unchecked(&osb->alloc_stats.moves);
58152
58153 bail:
58154 if (handle)
58155diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c
58156index f1fc172..452068b 100644
58157--- a/fs/ocfs2/move_extents.c
58158+++ b/fs/ocfs2/move_extents.c
58159@@ -69,7 +69,7 @@ static int __ocfs2_move_extent(handle_t *handle,
58160 u64 ino = ocfs2_metadata_cache_owner(context->et.et_ci);
58161 u64 old_blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cpos);
58162
58163- ret = ocfs2_duplicate_clusters_by_page(handle, context->file, cpos,
58164+ ret = ocfs2_duplicate_clusters_by_page(handle, inode, cpos,
58165 p_cpos, new_p_cpos, len);
58166 if (ret) {
58167 mlog_errno(ret);
58168diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
58169index d355e6e..578d905 100644
58170--- a/fs/ocfs2/ocfs2.h
58171+++ b/fs/ocfs2/ocfs2.h
58172@@ -235,11 +235,11 @@ enum ocfs2_vol_state
58173
58174 struct ocfs2_alloc_stats
58175 {
58176- atomic_t moves;
58177- atomic_t local_data;
58178- atomic_t bitmap_data;
58179- atomic_t bg_allocs;
58180- atomic_t bg_extends;
58181+ atomic_unchecked_t moves;
58182+ atomic_unchecked_t local_data;
58183+ atomic_unchecked_t bitmap_data;
58184+ atomic_unchecked_t bg_allocs;
58185+ atomic_unchecked_t bg_extends;
58186 };
58187
58188 enum ocfs2_local_alloc_state
58189diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
58190index 998b17e..aefe414 100644
58191--- a/fs/ocfs2/refcounttree.c
58192+++ b/fs/ocfs2/refcounttree.c
58193@@ -49,7 +49,6 @@
58194
58195 struct ocfs2_cow_context {
58196 struct inode *inode;
58197- struct file *file;
58198 u32 cow_start;
58199 u32 cow_len;
58200 struct ocfs2_extent_tree data_et;
58201@@ -66,7 +65,7 @@ struct ocfs2_cow_context {
58202 u32 *num_clusters,
58203 unsigned int *extent_flags);
58204 int (*cow_duplicate_clusters)(handle_t *handle,
58205- struct file *file,
58206+ struct inode *inode,
58207 u32 cpos, u32 old_cluster,
58208 u32 new_cluster, u32 new_len);
58209 };
58210@@ -2922,14 +2921,12 @@ static int ocfs2_clear_cow_buffer(handle_t *handle, struct buffer_head *bh)
58211 }
58212
58213 int ocfs2_duplicate_clusters_by_page(handle_t *handle,
58214- struct file *file,
58215+ struct inode *inode,
58216 u32 cpos, u32 old_cluster,
58217 u32 new_cluster, u32 new_len)
58218 {
58219 int ret = 0, partial;
58220- struct inode *inode = file_inode(file);
58221- struct ocfs2_caching_info *ci = INODE_CACHE(inode);
58222- struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
58223+ struct super_block *sb = inode->i_sb;
58224 u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster);
58225 struct page *page;
58226 pgoff_t page_index;
58227@@ -2973,13 +2970,6 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
58228 if (PAGE_CACHE_SIZE <= OCFS2_SB(sb)->s_clustersize)
58229 BUG_ON(PageDirty(page));
58230
58231- if (PageReadahead(page)) {
58232- page_cache_async_readahead(mapping,
58233- &file->f_ra, file,
58234- page, page_index,
58235- readahead_pages);
58236- }
58237-
58238 if (!PageUptodate(page)) {
58239 ret = block_read_full_page(page, ocfs2_get_block);
58240 if (ret) {
58241@@ -2999,7 +2989,8 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
58242 }
58243 }
58244
58245- ocfs2_map_and_dirty_page(inode, handle, from, to,
58246+ ocfs2_map_and_dirty_page(inode,
58247+ handle, from, to,
58248 page, 0, &new_block);
58249 mark_page_accessed(page);
58250 unlock:
58251@@ -3015,12 +3006,11 @@ unlock:
58252 }
58253
58254 int ocfs2_duplicate_clusters_by_jbd(handle_t *handle,
58255- struct file *file,
58256+ struct inode *inode,
58257 u32 cpos, u32 old_cluster,
58258 u32 new_cluster, u32 new_len)
58259 {
58260 int ret = 0;
58261- struct inode *inode = file_inode(file);
58262 struct super_block *sb = inode->i_sb;
58263 struct ocfs2_caching_info *ci = INODE_CACHE(inode);
58264 int i, blocks = ocfs2_clusters_to_blocks(sb, new_len);
58265@@ -3145,7 +3135,7 @@ static int ocfs2_replace_clusters(handle_t *handle,
58266
 58267 	/* If the old clusters are unwritten, no need to duplicate. */
58268 if (!(ext_flags & OCFS2_EXT_UNWRITTEN)) {
58269- ret = context->cow_duplicate_clusters(handle, context->file,
58270+ ret = context->cow_duplicate_clusters(handle, context->inode,
58271 cpos, old, new, len);
58272 if (ret) {
58273 mlog_errno(ret);
58274@@ -3423,35 +3413,12 @@ static int ocfs2_replace_cow(struct ocfs2_cow_context *context)
58275 return ret;
58276 }
58277
58278-static void ocfs2_readahead_for_cow(struct inode *inode,
58279- struct file *file,
58280- u32 start, u32 len)
58281-{
58282- struct address_space *mapping;
58283- pgoff_t index;
58284- unsigned long num_pages;
58285- int cs_bits = OCFS2_SB(inode->i_sb)->s_clustersize_bits;
58286-
58287- if (!file)
58288- return;
58289-
58290- mapping = file->f_mapping;
58291- num_pages = (len << cs_bits) >> PAGE_CACHE_SHIFT;
58292- if (!num_pages)
58293- num_pages = 1;
58294-
58295- index = ((loff_t)start << cs_bits) >> PAGE_CACHE_SHIFT;
58296- page_cache_sync_readahead(mapping, &file->f_ra, file,
58297- index, num_pages);
58298-}
58299-
58300 /*
58301 * Starting at cpos, try to CoW write_len clusters. Don't CoW
58302 * past max_cpos. This will stop when it runs into a hole or an
58303 * unrefcounted extent.
58304 */
58305 static int ocfs2_refcount_cow_hunk(struct inode *inode,
58306- struct file *file,
58307 struct buffer_head *di_bh,
58308 u32 cpos, u32 write_len, u32 max_cpos)
58309 {
58310@@ -3480,8 +3447,6 @@ static int ocfs2_refcount_cow_hunk(struct inode *inode,
58311
58312 BUG_ON(cow_len == 0);
58313
58314- ocfs2_readahead_for_cow(inode, file, cow_start, cow_len);
58315-
58316 context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS);
58317 if (!context) {
58318 ret = -ENOMEM;
58319@@ -3503,7 +3468,6 @@ static int ocfs2_refcount_cow_hunk(struct inode *inode,
58320 context->ref_root_bh = ref_root_bh;
58321 context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_page;
58322 context->get_clusters = ocfs2_di_get_clusters;
58323- context->file = file;
58324
58325 ocfs2_init_dinode_extent_tree(&context->data_et,
58326 INODE_CACHE(inode), di_bh);
58327@@ -3532,7 +3496,6 @@ out:
58328 * clusters between cpos and cpos+write_len are safe to modify.
58329 */
58330 int ocfs2_refcount_cow(struct inode *inode,
58331- struct file *file,
58332 struct buffer_head *di_bh,
58333 u32 cpos, u32 write_len, u32 max_cpos)
58334 {
58335@@ -3552,7 +3515,7 @@ int ocfs2_refcount_cow(struct inode *inode,
58336 num_clusters = write_len;
58337
58338 if (ext_flags & OCFS2_EXT_REFCOUNTED) {
58339- ret = ocfs2_refcount_cow_hunk(inode, file, di_bh, cpos,
58340+ ret = ocfs2_refcount_cow_hunk(inode, di_bh, cpos,
58341 num_clusters, max_cpos);
58342 if (ret) {
58343 mlog_errno(ret);
58344diff --git a/fs/ocfs2/refcounttree.h b/fs/ocfs2/refcounttree.h
58345index 7754608..6422bbcdb 100644
58346--- a/fs/ocfs2/refcounttree.h
58347+++ b/fs/ocfs2/refcounttree.h
58348@@ -53,7 +53,7 @@ int ocfs2_prepare_refcount_change_for_del(struct inode *inode,
58349 int *credits,
58350 int *ref_blocks);
58351 int ocfs2_refcount_cow(struct inode *inode,
58352- struct file *filep, struct buffer_head *di_bh,
58353+ struct buffer_head *di_bh,
58354 u32 cpos, u32 write_len, u32 max_cpos);
58355
58356 typedef int (ocfs2_post_refcount_func)(struct inode *inode,
58357@@ -85,11 +85,11 @@ int ocfs2_refcount_cow_xattr(struct inode *inode,
58358 u32 cpos, u32 write_len,
58359 struct ocfs2_post_refcount *post);
58360 int ocfs2_duplicate_clusters_by_page(handle_t *handle,
58361- struct file *file,
58362+ struct inode *inode,
58363 u32 cpos, u32 old_cluster,
58364 u32 new_cluster, u32 new_len);
58365 int ocfs2_duplicate_clusters_by_jbd(handle_t *handle,
58366- struct file *file,
58367+ struct inode *inode,
58368 u32 cpos, u32 old_cluster,
58369 u32 new_cluster, u32 new_len);
58370 int ocfs2_cow_sync_writeback(struct super_block *sb,
58371diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
58372index b7e74b5..19c6536 100644
58373--- a/fs/ocfs2/suballoc.c
58374+++ b/fs/ocfs2/suballoc.c
58375@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
58376 mlog_errno(status);
58377 goto bail;
58378 }
58379- atomic_inc(&osb->alloc_stats.bg_extends);
58380+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
58381
58382 /* You should never ask for this much metadata */
58383 BUG_ON(bits_wanted >
58384@@ -2007,7 +2007,7 @@ int ocfs2_claim_metadata(handle_t *handle,
58385 mlog_errno(status);
58386 goto bail;
58387 }
58388- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
58389+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
58390
58391 *suballoc_loc = res.sr_bg_blkno;
58392 *suballoc_bit_start = res.sr_bit_offset;
58393@@ -2171,7 +2171,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
58394 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
58395 res->sr_bits);
58396
58397- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
58398+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
58399
58400 BUG_ON(res->sr_bits != 1);
58401
58402@@ -2213,7 +2213,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
58403 mlog_errno(status);
58404 goto bail;
58405 }
58406- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
58407+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
58408
58409 BUG_ON(res.sr_bits != 1);
58410
58411@@ -2317,7 +2317,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
58412 cluster_start,
58413 num_clusters);
58414 if (!status)
58415- atomic_inc(&osb->alloc_stats.local_data);
58416+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
58417 } else {
58418 if (min_clusters > (osb->bitmap_cpg - 1)) {
58419 /* The only paths asking for contiguousness
58420@@ -2343,7 +2343,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
58421 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
58422 res.sr_bg_blkno,
58423 res.sr_bit_offset);
58424- atomic_inc(&osb->alloc_stats.bitmap_data);
58425+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
58426 *num_clusters = res.sr_bits;
58427 }
58428 }
58429diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
58430index 01b8516..579c4df 100644
58431--- a/fs/ocfs2/super.c
58432+++ b/fs/ocfs2/super.c
58433@@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
58434 "%10s => GlobalAllocs: %d LocalAllocs: %d "
58435 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
58436 "Stats",
58437- atomic_read(&osb->alloc_stats.bitmap_data),
58438- atomic_read(&osb->alloc_stats.local_data),
58439- atomic_read(&osb->alloc_stats.bg_allocs),
58440- atomic_read(&osb->alloc_stats.moves),
58441- atomic_read(&osb->alloc_stats.bg_extends));
58442+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
58443+ atomic_read_unchecked(&osb->alloc_stats.local_data),
58444+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
58445+ atomic_read_unchecked(&osb->alloc_stats.moves),
58446+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
58447
58448 out += snprintf(buf + out, len - out,
58449 "%10s => State: %u Descriptor: %llu Size: %u bits "
58450@@ -2122,11 +2122,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
58451 spin_lock_init(&osb->osb_xattr_lock);
58452 ocfs2_init_steal_slots(osb);
58453
58454- atomic_set(&osb->alloc_stats.moves, 0);
58455- atomic_set(&osb->alloc_stats.local_data, 0);
58456- atomic_set(&osb->alloc_stats.bitmap_data, 0);
58457- atomic_set(&osb->alloc_stats.bg_allocs, 0);
58458- atomic_set(&osb->alloc_stats.bg_extends, 0);
58459+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
58460+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
58461+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
58462+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
58463+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
58464
58465 /* Copy the blockcheck stats from the superblock probe */
58466 osb->osb_ecc_stats = *stats;
58467diff --git a/fs/open.c b/fs/open.c
58468index 8c74100..4239c48 100644
58469--- a/fs/open.c
58470+++ b/fs/open.c
58471@@ -32,6 +32,8 @@
58472 #include <linux/dnotify.h>
58473 #include <linux/compat.h>
58474
58475+#define CREATE_TRACE_POINTS
58476+#include <trace/events/fs.h>
58477 #include "internal.h"
58478
58479 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
58480@@ -102,6 +104,8 @@ long vfs_truncate(struct path *path, loff_t length)
58481 error = locks_verify_truncate(inode, NULL, length);
58482 if (!error)
58483 error = security_path_truncate(path);
58484+ if (!error && !gr_acl_handle_truncate(path->dentry, path->mnt))
58485+ error = -EACCES;
58486 if (!error)
58487 error = do_truncate(path->dentry, length, 0, NULL);
58488
58489@@ -186,6 +190,8 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
58490 error = locks_verify_truncate(inode, f.file, length);
58491 if (!error)
58492 error = security_path_truncate(&f.file->f_path);
58493+ if (!error && !gr_acl_handle_truncate(f.file->f_path.dentry, f.file->f_path.mnt))
58494+ error = -EACCES;
58495 if (!error)
58496 error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
58497 sb_end_write(inode->i_sb);
58498@@ -360,6 +366,9 @@ retry:
58499 if (__mnt_is_readonly(path.mnt))
58500 res = -EROFS;
58501
58502+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
58503+ res = -EACCES;
58504+
58505 out_path_release:
58506 path_put(&path);
58507 if (retry_estale(res, lookup_flags)) {
58508@@ -391,6 +400,8 @@ retry:
58509 if (error)
58510 goto dput_and_out;
58511
58512+ gr_log_chdir(path.dentry, path.mnt);
58513+
58514 set_fs_pwd(current->fs, &path);
58515
58516 dput_and_out:
58517@@ -420,6 +431,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
58518 goto out_putf;
58519
58520 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
58521+
58522+ if (!error && !gr_chroot_fchdir(f.file->f_path.dentry, f.file->f_path.mnt))
58523+ error = -EPERM;
58524+
58525+ if (!error)
58526+ gr_log_chdir(f.file->f_path.dentry, f.file->f_path.mnt);
58527+
58528 if (!error)
58529 set_fs_pwd(current->fs, &f.file->f_path);
58530 out_putf:
58531@@ -449,7 +467,13 @@ retry:
58532 if (error)
58533 goto dput_and_out;
58534
58535+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
58536+ goto dput_and_out;
58537+
58538 set_fs_root(current->fs, &path);
58539+
58540+ gr_handle_chroot_chdir(&path);
58541+
58542 error = 0;
58543 dput_and_out:
58544 path_put(&path);
58545@@ -471,6 +495,16 @@ static int chmod_common(struct path *path, umode_t mode)
58546 if (error)
58547 return error;
58548 mutex_lock(&inode->i_mutex);
58549+
58550+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
58551+ error = -EACCES;
58552+ goto out_unlock;
58553+ }
58554+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
58555+ error = -EACCES;
58556+ goto out_unlock;
58557+ }
58558+
58559 error = security_path_chmod(path, mode);
58560 if (error)
58561 goto out_unlock;
58562@@ -531,6 +565,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
58563 uid = make_kuid(current_user_ns(), user);
58564 gid = make_kgid(current_user_ns(), group);
58565
58566+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
58567+ return -EACCES;
58568+
58569 newattrs.ia_valid = ATTR_CTIME;
58570 if (user != (uid_t) -1) {
58571 if (!uid_valid(uid))
58572@@ -946,6 +983,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
58573 } else {
58574 fsnotify_open(f);
58575 fd_install(fd, f);
58576+ trace_do_sys_open(tmp->name, flags, mode);
58577 }
58578 }
58579 putname(tmp);
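
The fs/open.c hunk above instantiates tracepoints (CREATE_TRACE_POINTS before including trace/events/fs.h, a header provided elsewhere in this patch) and fires trace_do_sys_open() on each successful open. A TRACE_EVENT definition of that shape would look roughly like the following in-tree header sketch; the field names are inferred from the call site:

/* Sketch of the trace/events/fs.h side, using the standard TRACE_EVENT
 * machinery.  Not standalone-compilable: this is in-tree kernel code. */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM fs

#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_FS_H

#include <linux/tracepoint.h>

TRACE_EVENT(do_sys_open,

    TP_PROTO(const char *filename, int flags, int mode),

    TP_ARGS(filename, flags, mode),

    TP_STRUCT__entry(
        __string(filename, filename)
        __field(int, flags)
        __field(int, mode)
    ),

    TP_fast_assign(
        __assign_str(filename, filename);
        __entry->flags = flags;
        __entry->mode = mode;
    ),

    TP_printk("\"%s\" %x %o",
              __get_str(filename), __entry->flags, __entry->mode)
);

#endif /* _TRACE_FS_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
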
58580diff --git a/fs/pipe.c b/fs/pipe.c
58581index d2c45e1..009fe1c 100644
58582--- a/fs/pipe.c
58583+++ b/fs/pipe.c
58584@@ -56,7 +56,7 @@ unsigned int pipe_min_size = PAGE_SIZE;
58585
58586 static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
58587 {
58588- if (pipe->files)
58589+ if (atomic_read(&pipe->files))
58590 mutex_lock_nested(&pipe->mutex, subclass);
58591 }
58592
58593@@ -71,7 +71,7 @@ EXPORT_SYMBOL(pipe_lock);
58594
58595 void pipe_unlock(struct pipe_inode_info *pipe)
58596 {
58597- if (pipe->files)
58598+ if (atomic_read(&pipe->files))
58599 mutex_unlock(&pipe->mutex);
58600 }
58601 EXPORT_SYMBOL(pipe_unlock);
58602@@ -449,9 +449,9 @@ redo:
58603 }
58604 if (bufs) /* More to do? */
58605 continue;
58606- if (!pipe->writers)
58607+ if (!atomic_read(&pipe->writers))
58608 break;
58609- if (!pipe->waiting_writers) {
58610+ if (!atomic_read(&pipe->waiting_writers)) {
58611 /* syscall merging: Usually we must not sleep
58612 * if O_NONBLOCK is set, or if we got some data.
58613 * But if a writer sleeps in kernel space, then
58614@@ -513,7 +513,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
58615 ret = 0;
58616 __pipe_lock(pipe);
58617
58618- if (!pipe->readers) {
58619+ if (!atomic_read(&pipe->readers)) {
58620 send_sig(SIGPIPE, current, 0);
58621 ret = -EPIPE;
58622 goto out;
58623@@ -562,7 +562,7 @@ redo1:
58624 for (;;) {
58625 int bufs;
58626
58627- if (!pipe->readers) {
58628+ if (!atomic_read(&pipe->readers)) {
58629 send_sig(SIGPIPE, current, 0);
58630 if (!ret)
58631 ret = -EPIPE;
58632@@ -653,9 +653,9 @@ redo2:
58633 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
58634 do_wakeup = 0;
58635 }
58636- pipe->waiting_writers++;
58637+ atomic_inc(&pipe->waiting_writers);
58638 pipe_wait(pipe);
58639- pipe->waiting_writers--;
58640+ atomic_dec(&pipe->waiting_writers);
58641 }
58642 out:
58643 __pipe_unlock(pipe);
58644@@ -709,7 +709,7 @@ pipe_poll(struct file *filp, poll_table *wait)
58645 mask = 0;
58646 if (filp->f_mode & FMODE_READ) {
58647 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
58648- if (!pipe->writers && filp->f_version != pipe->w_counter)
58649+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
58650 mask |= POLLHUP;
58651 }
58652
58653@@ -719,7 +719,7 @@ pipe_poll(struct file *filp, poll_table *wait)
58654 * Most Unices do not set POLLERR for FIFOs but on Linux they
58655 * behave exactly like pipes for poll().
58656 */
58657- if (!pipe->readers)
58658+ if (!atomic_read(&pipe->readers))
58659 mask |= POLLERR;
58660 }
58661
58662@@ -734,17 +734,17 @@ pipe_release(struct inode *inode, struct file *file)
58663
58664 __pipe_lock(pipe);
58665 if (file->f_mode & FMODE_READ)
58666- pipe->readers--;
58667+ atomic_dec(&pipe->readers);
58668 if (file->f_mode & FMODE_WRITE)
58669- pipe->writers--;
58670+ atomic_dec(&pipe->writers);
58671
58672- if (pipe->readers || pipe->writers) {
58673+ if (atomic_read(&pipe->readers) || atomic_read(&pipe->writers)) {
58674 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
58675 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
58676 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
58677 }
58678 spin_lock(&inode->i_lock);
58679- if (!--pipe->files) {
58680+ if (atomic_dec_and_test(&pipe->files)) {
58681 inode->i_pipe = NULL;
58682 kill = 1;
58683 }
58684@@ -811,7 +811,7 @@ void free_pipe_info(struct pipe_inode_info *pipe)
58685 kfree(pipe);
58686 }
58687
58688-static struct vfsmount *pipe_mnt __read_mostly;
58689+struct vfsmount *pipe_mnt __read_mostly;
58690
58691 /*
58692 * pipefs_dname() is called from d_path().
58693@@ -841,8 +841,9 @@ static struct inode * get_pipe_inode(void)
58694 goto fail_iput;
58695
58696 inode->i_pipe = pipe;
58697- pipe->files = 2;
58698- pipe->readers = pipe->writers = 1;
58699+ atomic_set(&pipe->files, 2);
58700+ atomic_set(&pipe->readers, 1);
58701+ atomic_set(&pipe->writers, 1);
58702 inode->i_fop = &pipefifo_fops;
58703
58704 /*
58705@@ -1022,17 +1023,17 @@ static int fifo_open(struct inode *inode, struct file *filp)
58706 spin_lock(&inode->i_lock);
58707 if (inode->i_pipe) {
58708 pipe = inode->i_pipe;
58709- pipe->files++;
58710+ atomic_inc(&pipe->files);
58711 spin_unlock(&inode->i_lock);
58712 } else {
58713 spin_unlock(&inode->i_lock);
58714 pipe = alloc_pipe_info();
58715 if (!pipe)
58716 return -ENOMEM;
58717- pipe->files = 1;
58718+ atomic_set(&pipe->files, 1);
58719 spin_lock(&inode->i_lock);
58720 if (unlikely(inode->i_pipe)) {
58721- inode->i_pipe->files++;
58722+ atomic_inc(&inode->i_pipe->files);
58723 spin_unlock(&inode->i_lock);
58724 free_pipe_info(pipe);
58725 pipe = inode->i_pipe;
58726@@ -1057,10 +1058,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
58727 * opened, even when there is no process writing the FIFO.
58728 */
58729 pipe->r_counter++;
58730- if (pipe->readers++ == 0)
58731+ if (atomic_inc_return(&pipe->readers) == 1)
58732 wake_up_partner(pipe);
58733
58734- if (!is_pipe && !pipe->writers) {
58735+ if (!is_pipe && !atomic_read(&pipe->writers)) {
58736 if ((filp->f_flags & O_NONBLOCK)) {
58737 /* suppress POLLHUP until we have
58738 * seen a writer */
58739@@ -1079,14 +1080,14 @@ static int fifo_open(struct inode *inode, struct file *filp)
58740 * errno=ENXIO when there is no process reading the FIFO.
58741 */
58742 ret = -ENXIO;
58743- if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
58744+ if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
58745 goto err;
58746
58747 pipe->w_counter++;
58748- if (!pipe->writers++)
58749+ if (atomic_inc_return(&pipe->writers) == 1)
58750 wake_up_partner(pipe);
58751
58752- if (!is_pipe && !pipe->readers) {
58753+ if (!is_pipe && !atomic_read(&pipe->readers)) {
58754 if (wait_for_partner(pipe, &pipe->r_counter))
58755 goto err_wr;
58756 }
58757@@ -1100,11 +1101,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
58758 * the process can at least talk to itself.
58759 */
58760
58761- pipe->readers++;
58762- pipe->writers++;
58763+ atomic_inc(&pipe->readers);
58764+ atomic_inc(&pipe->writers);
58765 pipe->r_counter++;
58766 pipe->w_counter++;
58767- if (pipe->readers == 1 || pipe->writers == 1)
58768+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
58769 wake_up_partner(pipe);
58770 break;
58771
58772@@ -1118,20 +1119,20 @@ static int fifo_open(struct inode *inode, struct file *filp)
58773 return 0;
58774
58775 err_rd:
58776- if (!--pipe->readers)
58777+ if (atomic_dec_and_test(&pipe->readers))
58778 wake_up_interruptible(&pipe->wait);
58779 ret = -ERESTARTSYS;
58780 goto err;
58781
58782 err_wr:
58783- if (!--pipe->writers)
58784+ if (atomic_dec_and_test(&pipe->writers))
58785 wake_up_interruptible(&pipe->wait);
58786 ret = -ERESTARTSYS;
58787 goto err;
58788
58789 err:
58790 spin_lock(&inode->i_lock);
58791- if (!--pipe->files) {
58792+ if (atomic_dec_and_test(&pipe->files)) {
58793 inode->i_pipe = NULL;
58794 kill = 1;
58795 }
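
The fs/pipe.c hunks above convert the readers, writers, and files counters from plain ints to atomic_t, so every open/release path updates them with atomic primitives. A minimal userspace sketch of the same accounting using C11 atomics; the struct and function names here are illustrative, not kernel API:

#include <stdatomic.h>
#include <stdbool.h>

struct pipe_counters {
	atomic_int readers;
	atomic_int writers;
	atomic_int files;
};

static bool open_reader(struct pipe_counters *p)
{
	/* mirrors atomic_inc_return(&pipe->readers) == 1: first reader */
	return atomic_fetch_add(&p->readers, 1) + 1 == 1;
}

static bool release_file(struct pipe_counters *p)
{
	/* mirrors atomic_dec_and_test(&pipe->files): last reference gone */
	return atomic_fetch_sub(&p->files, 1) - 1 == 0;
}

The key detail is that "pipe->readers++ == 0" becomes "atomic_inc_return(&pipe->readers) == 1": both test the same 0 -> 1 transition, read before and after the increment respectively.
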
58796diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
58797index 15af622..0e9f4467 100644
58798--- a/fs/proc/Kconfig
58799+++ b/fs/proc/Kconfig
58800@@ -30,12 +30,12 @@ config PROC_FS
58801
58802 config PROC_KCORE
58803 bool "/proc/kcore support" if !ARM
58804- depends on PROC_FS && MMU
58805+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
58806
58807 config PROC_VMCORE
58808 bool "/proc/vmcore support"
58809- depends on PROC_FS && CRASH_DUMP
58810- default y
58811+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
58812+ default n
58813 help
58814 Exports the dump image of crashed kernel in ELF format.
58815
58816@@ -59,8 +59,8 @@ config PROC_SYSCTL
58817 limited in memory.
58818
58819 config PROC_PAGE_MONITOR
58820- default y
58821- depends on PROC_FS && MMU
58822+ default n
58823+ depends on PROC_FS && MMU && !GRKERNSEC
58824 bool "Enable /proc page monitoring" if EXPERT
58825 help
58826 Various /proc files exist to monitor process memory utilization:
58827diff --git a/fs/proc/array.c b/fs/proc/array.c
58828index cbd0f1b..adec3f0 100644
58829--- a/fs/proc/array.c
58830+++ b/fs/proc/array.c
58831@@ -60,6 +60,7 @@
58832 #include <linux/tty.h>
58833 #include <linux/string.h>
58834 #include <linux/mman.h>
58835+#include <linux/grsecurity.h>
58836 #include <linux/proc_fs.h>
58837 #include <linux/ioport.h>
58838 #include <linux/uaccess.h>
58839@@ -363,6 +364,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
58840 seq_putc(m, '\n');
58841 }
58842
58843+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
58844+static inline void task_pax(struct seq_file *m, struct task_struct *p)
58845+{
58846+ if (p->mm)
58847+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
58848+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
58849+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
58850+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
58851+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
58852+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
58853+ else
58854+ seq_printf(m, "PaX:\t-----\n");
58855+}
58856+#endif
58857+
58858 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
58859 struct pid *pid, struct task_struct *task)
58860 {
58861@@ -381,9 +397,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
58862 task_cpus_allowed(m, task);
58863 cpuset_task_status_allowed(m, task);
58864 task_context_switch_counts(m, task);
58865+
58866+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
58867+ task_pax(m, task);
58868+#endif
58869+
58870+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
58871+ task_grsec_rbac(m, task);
58872+#endif
58873+
58874 return 0;
58875 }
58876
58877+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
58878+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
58879+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
58880+ _mm->pax_flags & MF_PAX_SEGMEXEC))
58881+#endif
58882+
58883 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
58884 struct pid *pid, struct task_struct *task, int whole)
58885 {
58886@@ -405,6 +436,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
58887 char tcomm[sizeof(task->comm)];
58888 unsigned long flags;
58889
58890+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
58891+ if (current->exec_id != m->exec_id) {
58892+ gr_log_badprocpid("stat");
58893+ return 0;
58894+ }
58895+#endif
58896+
58897 state = *get_task_state(task);
58898 vsize = eip = esp = 0;
58899 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
58900@@ -476,6 +514,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
58901 gtime = task_gtime(task);
58902 }
58903
58904+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
58905+ if (PAX_RAND_FLAGS(mm)) {
58906+ eip = 0;
58907+ esp = 0;
58908+ wchan = 0;
58909+ }
58910+#endif
58911+#ifdef CONFIG_GRKERNSEC_HIDESYM
58912+ wchan = 0;
58913+ eip = 0;
58914+ esp = 0;
58915+#endif
58916+
58917 /* scale priority and nice values from timeslices to -20..20 */
58918 /* to make it look like a "normal" Unix priority/nice value */
58919 priority = task_prio(task);
58920@@ -512,9 +563,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
58921 seq_put_decimal_ull(m, ' ', vsize);
58922 seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
58923 seq_put_decimal_ull(m, ' ', rsslim);
58924+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
58925+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
58926+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
58927+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
58928+#else
58929 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
58930 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
58931 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
58932+#endif
58933 seq_put_decimal_ull(m, ' ', esp);
58934 seq_put_decimal_ull(m, ' ', eip);
58935 /* The signal information here is obsolete.
58936@@ -536,7 +593,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
58937 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
58938 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
58939
58940- if (mm && permitted) {
58941+ if (mm && permitted
58942+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
58943+ && !PAX_RAND_FLAGS(mm)
58944+#endif
58945+ ) {
58946 seq_put_decimal_ull(m, ' ', mm->start_data);
58947 seq_put_decimal_ull(m, ' ', mm->end_data);
58948 seq_put_decimal_ull(m, ' ', mm->start_brk);
58949@@ -574,8 +635,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
58950 struct pid *pid, struct task_struct *task)
58951 {
58952 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
58953- struct mm_struct *mm = get_task_mm(task);
58954+ struct mm_struct *mm;
58955
58956+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
58957+ if (current->exec_id != m->exec_id) {
58958+ gr_log_badprocpid("statm");
58959+ return 0;
58960+ }
58961+#endif
58962+ mm = get_task_mm(task);
58963 if (mm) {
58964 size = task_statm(mm, &shared, &text, &data, &resident);
58965 mmput(mm);
58966@@ -598,6 +666,13 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
58967 return 0;
58968 }
58969
58970+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
58971+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
58972+{
58973+ return sprintf(buffer, "%pI4\n", &task->signal->curr_ip);
58974+}
58975+#endif
58976+
58977 #ifdef CONFIG_CHECKPOINT_RESTORE
58978 static struct pid *
58979 get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
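
The PAX_RAND_FLAGS(mm) macro introduced above answers one question: is this someone else's mm with a randomized layout (MF_PAX_RANDMMAP or MF_PAX_SEGMEXEC set)? When it is, do_task_stat() substitutes placeholder values (0 or 1) for the address fields. A standalone sketch of the predicate and the substitution; the flag values and struct layout are simplified stand-ins for the PaX definitions:

#include <stdbool.h>
#include <stdio.h>

/* simplified stand-ins for the PaX MF_PAX_* bits */
#define MF_PAX_RANDMMAP  0x08UL
#define MF_PAX_SEGMEXEC  0x10UL

struct mm { unsigned long pax_flags; };

/* same predicate as the macro: a foreign mm with randomized layout */
static bool pax_rand_flags(const struct mm *target, const struct mm *self)
{
	return target != NULL && target != self &&
	       (target->pax_flags & (MF_PAX_RANDMMAP | MF_PAX_SEGMEXEC));
}

int main(void)
{
	struct mm me = { 0 }, other = { MF_PAX_RANDMMAP };
	unsigned long start_code = 0x400000UL;

	/* do_task_stat() reports 1 instead of the real address when it fires */
	printf("%lu\n", pax_rand_flags(&other, &me) ? 1UL : start_code);
	return 0;
}
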
58980diff --git a/fs/proc/base.c b/fs/proc/base.c
58981index c3834da..b402b2b 100644
58982--- a/fs/proc/base.c
58983+++ b/fs/proc/base.c
58984@@ -113,6 +113,14 @@ struct pid_entry {
58985 union proc_op op;
58986 };
58987
58988+struct getdents_callback {
58989+ struct linux_dirent __user * current_dir;
58990+ struct linux_dirent __user * previous;
58991+ struct file * file;
58992+ int count;
58993+ int error;
58994+};
58995+
58996 #define NOD(NAME, MODE, IOP, FOP, OP) { \
58997 .name = (NAME), \
58998 .len = sizeof(NAME) - 1, \
58999@@ -210,6 +218,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
59000 if (!mm->arg_end)
59001 goto out_mm; /* Shh! No looking before we're done */
59002
59003+ if (gr_acl_handle_procpidmem(task))
59004+ goto out_mm;
59005+
59006 len = mm->arg_end - mm->arg_start;
59007
59008 if (len > PAGE_SIZE)
59009@@ -237,12 +248,28 @@ out:
59010 return res;
59011 }
59012
59013+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59014+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
59015+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
59016+ _mm->pax_flags & MF_PAX_SEGMEXEC))
59017+#endif
59018+
59019 static int proc_pid_auxv(struct task_struct *task, char *buffer)
59020 {
59021 struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
59022 int res = PTR_ERR(mm);
59023 if (mm && !IS_ERR(mm)) {
59024 unsigned int nwords = 0;
59025+
59026+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59027+ /* allow if we're currently ptracing this task */
59028+ if (PAX_RAND_FLAGS(mm) &&
59029+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
59030+ mmput(mm);
59031+ return 0;
59032+ }
59033+#endif
59034+
59035 do {
59036 nwords += 2;
59037 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
59038@@ -256,7 +283,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
59039 }
59040
59041
59042-#ifdef CONFIG_KALLSYMS
59043+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
59044 /*
59045 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
59046 * Returns the resolved symbol. If that fails, simply return the address.
59047@@ -295,7 +322,7 @@ static void unlock_trace(struct task_struct *task)
59048 mutex_unlock(&task->signal->cred_guard_mutex);
59049 }
59050
59051-#ifdef CONFIG_STACKTRACE
59052+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
59053
59054 #define MAX_STACK_TRACE_DEPTH 64
59055
59056@@ -518,7 +545,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
59057 return count;
59058 }
59059
59060-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
59061+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
59062 static int proc_pid_syscall(struct task_struct *task, char *buffer)
59063 {
59064 long nr;
59065@@ -547,7 +574,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
59066 /************************************************************************/
59067
59068 /* permission checks */
59069-static int proc_fd_access_allowed(struct inode *inode)
59070+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
59071 {
59072 struct task_struct *task;
59073 int allowed = 0;
59074@@ -557,7 +584,10 @@ static int proc_fd_access_allowed(struct inode *inode)
59075 */
59076 task = get_proc_task(inode);
59077 if (task) {
59078- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
59079+ if (log)
59080+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
59081+ else
59082+ allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
59083 put_task_struct(task);
59084 }
59085 return allowed;
59086@@ -588,10 +618,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
59087 struct task_struct *task,
59088 int hide_pid_min)
59089 {
59090+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
59091+ return false;
59092+
59093+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59094+ rcu_read_lock();
59095+ {
59096+ const struct cred *tmpcred = current_cred();
59097+ const struct cred *cred = __task_cred(task);
59098+
59099+ if (uid_eq(tmpcred->uid, GLOBAL_ROOT_UID) || uid_eq(tmpcred->uid, cred->uid)
59100+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
59101+ || in_group_p(grsec_proc_gid)
59102+#endif
59103+ ) {
59104+ rcu_read_unlock();
59105+ return true;
59106+ }
59107+ }
59108+ rcu_read_unlock();
59109+
59110+ if (!pid->hide_pid)
59111+ return false;
59112+#endif
59113+
59114 if (pid->hide_pid < hide_pid_min)
59115 return true;
59116 if (in_group_p(pid->pid_gid))
59117 return true;
59118+
59119 return ptrace_may_access(task, PTRACE_MODE_READ);
59120 }
59121
59122@@ -609,7 +664,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
59123 put_task_struct(task);
59124
59125 if (!has_perms) {
59126+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59127+ {
59128+#else
59129 if (pid->hide_pid == 2) {
59130+#endif
59131 /*
59132 * Let's make getdents(), stat(), and open()
59133 * consistent with each other. If a process
59134@@ -707,6 +766,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
59135 if (!task)
59136 return -ESRCH;
59137
59138+ if (gr_acl_handle_procpidmem(task)) {
59139+ put_task_struct(task);
59140+ return -EPERM;
59141+ }
59142+
59143 mm = mm_access(task, mode);
59144 put_task_struct(task);
59145
59146@@ -722,6 +786,10 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
59147
59148 file->private_data = mm;
59149
59150+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59151+ file->f_version = current->exec_id;
59152+#endif
59153+
59154 return 0;
59155 }
59156
59157@@ -743,6 +811,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
59158 ssize_t copied;
59159 char *page;
59160
59161+#ifdef CONFIG_GRKERNSEC
59162+ if (write)
59163+ return -EPERM;
59164+#endif
59165+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59166+ if (file->f_version != current->exec_id) {
59167+ gr_log_badprocpid("mem");
59168+ return 0;
59169+ }
59170+#endif
59171+
59172 if (!mm)
59173 return 0;
59174
59175@@ -755,7 +834,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
59176 goto free;
59177
59178 while (count > 0) {
59179- int this_len = min_t(int, count, PAGE_SIZE);
59180+ ssize_t this_len = min_t(ssize_t, count, PAGE_SIZE);
59181
59182 if (write && copy_from_user(page, buf, this_len)) {
59183 copied = -EFAULT;
59184@@ -847,6 +926,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
59185 if (!mm)
59186 return 0;
59187
59188+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59189+ if (file->f_version != current->exec_id) {
59190+ gr_log_badprocpid("environ");
59191+ return 0;
59192+ }
59193+#endif
59194+
59195 page = (char *)__get_free_page(GFP_TEMPORARY);
59196 if (!page)
59197 return -ENOMEM;
59198@@ -856,7 +942,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
59199 goto free;
59200 while (count > 0) {
59201 size_t this_len, max_len;
59202- int retval;
59203+ ssize_t retval;
59204
59205 if (src >= (mm->env_end - mm->env_start))
59206 break;
59207@@ -1461,7 +1547,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
59208 int error = -EACCES;
59209
59210 /* Are we allowed to snoop on the tasks file descriptors? */
59211- if (!proc_fd_access_allowed(inode))
59212+ if (!proc_fd_access_allowed(inode, 0))
59213 goto out;
59214
59215 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
59216@@ -1505,8 +1591,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
59217 struct path path;
59218
59219 /* Are we allowed to snoop on the tasks file descriptors? */
59220- if (!proc_fd_access_allowed(inode))
59221- goto out;
59222+ /* logging this is needed for learning on chromium to work properly,
59223+ but we don't want to flood the logs from 'ps' which does a readlink
59224+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
59225+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
59226+ */
59227+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
59228+ if (!proc_fd_access_allowed(inode,0))
59229+ goto out;
59230+ } else {
59231+ if (!proc_fd_access_allowed(inode,1))
59232+ goto out;
59233+ }
59234
59235 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
59236 if (error)
59237@@ -1556,7 +1652,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
59238 rcu_read_lock();
59239 cred = __task_cred(task);
59240 inode->i_uid = cred->euid;
59241+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
59242+ inode->i_gid = grsec_proc_gid;
59243+#else
59244 inode->i_gid = cred->egid;
59245+#endif
59246 rcu_read_unlock();
59247 }
59248 security_task_to_inode(task, inode);
59249@@ -1592,10 +1692,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
59250 return -ENOENT;
59251 }
59252 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
59253+#ifdef CONFIG_GRKERNSEC_PROC_USER
59254+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
59255+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59256+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
59257+#endif
59258 task_dumpable(task)) {
59259 cred = __task_cred(task);
59260 stat->uid = cred->euid;
59261+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
59262+ stat->gid = grsec_proc_gid;
59263+#else
59264 stat->gid = cred->egid;
59265+#endif
59266 }
59267 }
59268 rcu_read_unlock();
59269@@ -1633,11 +1742,20 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
59270
59271 if (task) {
59272 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
59273+#ifdef CONFIG_GRKERNSEC_PROC_USER
59274+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
59275+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59276+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
59277+#endif
59278 task_dumpable(task)) {
59279 rcu_read_lock();
59280 cred = __task_cred(task);
59281 inode->i_uid = cred->euid;
59282+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
59283+ inode->i_gid = grsec_proc_gid;
59284+#else
59285 inode->i_gid = cred->egid;
59286+#endif
59287 rcu_read_unlock();
59288 } else {
59289 inode->i_uid = GLOBAL_ROOT_UID;
59290@@ -2196,6 +2314,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
59291 if (!task)
59292 goto out_no_task;
59293
59294+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
59295+ goto out;
59296+
59297 /*
59298 * Yes, it does not scale. And it should not. Don't add
59299 * new entries into /proc/<tgid>/ without very good reasons.
59300@@ -2240,6 +2361,9 @@ static int proc_pident_readdir(struct file *filp,
59301 if (!task)
59302 goto out_no_task;
59303
59304+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
59305+ goto out;
59306+
59307 ret = 0;
59308 i = filp->f_pos;
59309 switch (i) {
59310@@ -2653,7 +2777,7 @@ static const struct pid_entry tgid_base_stuff[] = {
59311 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
59312 #endif
59313 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
59314-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
59315+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
59316 INF("syscall", S_IRUGO, proc_pid_syscall),
59317 #endif
59318 INF("cmdline", S_IRUGO, proc_pid_cmdline),
59319@@ -2678,10 +2802,10 @@ static const struct pid_entry tgid_base_stuff[] = {
59320 #ifdef CONFIG_SECURITY
59321 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
59322 #endif
59323-#ifdef CONFIG_KALLSYMS
59324+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
59325 INF("wchan", S_IRUGO, proc_pid_wchan),
59326 #endif
59327-#ifdef CONFIG_STACKTRACE
59328+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
59329 ONE("stack", S_IRUGO, proc_pid_stack),
59330 #endif
59331 #ifdef CONFIG_SCHEDSTATS
59332@@ -2715,6 +2839,9 @@ static const struct pid_entry tgid_base_stuff[] = {
59333 #ifdef CONFIG_HARDWALL
59334 INF("hardwall", S_IRUGO, proc_pid_hardwall),
59335 #endif
59336+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
59337+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
59338+#endif
59339 #ifdef CONFIG_USER_NS
59340 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
59341 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
59342@@ -2847,7 +2974,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
59343 if (!inode)
59344 goto out;
59345
59346+#ifdef CONFIG_GRKERNSEC_PROC_USER
59347+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
59348+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59349+ inode->i_gid = grsec_proc_gid;
59350+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
59351+#else
59352 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
59353+#endif
59354 inode->i_op = &proc_tgid_base_inode_operations;
59355 inode->i_fop = &proc_tgid_base_operations;
59356 inode->i_flags|=S_IMMUTABLE;
59357@@ -2885,7 +3019,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsign
59358 if (!task)
59359 goto out;
59360
59361+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
59362+ goto out_put_task;
59363+
59364 result = proc_pid_instantiate(dir, dentry, task, NULL);
59365+out_put_task:
59366 put_task_struct(task);
59367 out:
59368 return result;
59369@@ -2948,6 +3086,8 @@ static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldi
59370 static int fake_filldir(void *buf, const char *name, int namelen,
59371 loff_t offset, u64 ino, unsigned d_type)
59372 {
59373+ struct getdents_callback * __buf = (struct getdents_callback *) buf;
59374+ __buf->error = -EINVAL;
59375 return 0;
59376 }
59377
59378@@ -3007,7 +3147,7 @@ static const struct pid_entry tid_base_stuff[] = {
59379 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
59380 #endif
59381 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
59382-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
59383+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
59384 INF("syscall", S_IRUGO, proc_pid_syscall),
59385 #endif
59386 INF("cmdline", S_IRUGO, proc_pid_cmdline),
59387@@ -3034,10 +3174,10 @@ static const struct pid_entry tid_base_stuff[] = {
59388 #ifdef CONFIG_SECURITY
59389 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
59390 #endif
59391-#ifdef CONFIG_KALLSYMS
59392+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
59393 INF("wchan", S_IRUGO, proc_pid_wchan),
59394 #endif
59395-#ifdef CONFIG_STACKTRACE
59396+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
59397 ONE("stack", S_IRUGO, proc_pid_stack),
59398 #endif
59399 #ifdef CONFIG_SCHEDSTATS
59400diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
59401index 82676e3..5f8518a 100644
59402--- a/fs/proc/cmdline.c
59403+++ b/fs/proc/cmdline.c
59404@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
59405
59406 static int __init proc_cmdline_init(void)
59407 {
59408+#ifdef CONFIG_GRKERNSEC_PROC_ADD
59409+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
59410+#else
59411 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
59412+#endif
59413 return 0;
59414 }
59415 module_init(proc_cmdline_init);
59416diff --git a/fs/proc/devices.c b/fs/proc/devices.c
59417index b143471..bb105e5 100644
59418--- a/fs/proc/devices.c
59419+++ b/fs/proc/devices.c
59420@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
59421
59422 static int __init proc_devices_init(void)
59423 {
59424+#ifdef CONFIG_GRKERNSEC_PROC_ADD
59425+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
59426+#else
59427 proc_create("devices", 0, NULL, &proc_devinfo_operations);
59428+#endif
59429 return 0;
59430 }
59431 module_init(proc_devices_init);
59432diff --git a/fs/proc/fd.c b/fs/proc/fd.c
59433index d7a4a28..0201742 100644
59434--- a/fs/proc/fd.c
59435+++ b/fs/proc/fd.c
59436@@ -25,7 +25,8 @@ static int seq_show(struct seq_file *m, void *v)
59437 if (!task)
59438 return -ENOENT;
59439
59440- files = get_files_struct(task);
59441+ if (!gr_acl_handle_procpidmem(task))
59442+ files = get_files_struct(task);
59443 put_task_struct(task);
59444
59445 if (files) {
59446@@ -302,11 +303,21 @@ static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
59447 */
59448 int proc_fd_permission(struct inode *inode, int mask)
59449 {
59450+ struct task_struct *task;
59451 int rv = generic_permission(inode, mask);
59452- if (rv == 0)
59453- return 0;
59454+
59455 if (task_pid(current) == proc_pid(inode))
59456 rv = 0;
59457+
59458+ task = get_proc_task(inode);
59459+ if (task == NULL)
59460+ return rv;
59461+
59462+ if (gr_acl_handle_procpidmem(task))
59463+ rv = -EACCES;
59464+
59465+ put_task_struct(task);
59466+
59467 return rv;
59468 }
59469
59470diff --git a/fs/proc/inode.c b/fs/proc/inode.c
59471index 073aea6..0630370 100644
59472--- a/fs/proc/inode.c
59473+++ b/fs/proc/inode.c
59474@@ -23,11 +23,17 @@
59475 #include <linux/slab.h>
59476 #include <linux/mount.h>
59477 #include <linux/magic.h>
59478+#include <linux/grsecurity.h>
59479
59480 #include <asm/uaccess.h>
59481
59482 #include "internal.h"
59483
59484+#ifdef CONFIG_PROC_SYSCTL
59485+extern const struct inode_operations proc_sys_inode_operations;
59486+extern const struct inode_operations proc_sys_dir_operations;
59487+#endif
59488+
59489 static void proc_evict_inode(struct inode *inode)
59490 {
59491 struct proc_dir_entry *de;
59492@@ -55,6 +61,13 @@ static void proc_evict_inode(struct inode *inode)
59493 ns = PROC_I(inode)->ns.ns;
59494 if (ns_ops && ns)
59495 ns_ops->put(ns);
59496+
59497+#ifdef CONFIG_PROC_SYSCTL
59498+ if (inode->i_op == &proc_sys_inode_operations ||
59499+ inode->i_op == &proc_sys_dir_operations)
59500+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
59501+#endif
59502+
59503 }
59504
59505 static struct kmem_cache * proc_inode_cachep;
59506@@ -385,7 +398,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
59507 if (de->mode) {
59508 inode->i_mode = de->mode;
59509 inode->i_uid = de->uid;
59510+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
59511+ inode->i_gid = grsec_proc_gid;
59512+#else
59513 inode->i_gid = de->gid;
59514+#endif
59515 }
59516 if (de->size)
59517 inode->i_size = de->size;
59518diff --git a/fs/proc/internal.h b/fs/proc/internal.h
59519index d600fb0..3b495fe 100644
59520--- a/fs/proc/internal.h
59521+++ b/fs/proc/internal.h
59522@@ -155,6 +155,9 @@ extern int proc_pid_status(struct seq_file *, struct pid_namespace *,
59523 struct pid *, struct task_struct *);
59524 extern int proc_pid_statm(struct seq_file *, struct pid_namespace *,
59525 struct pid *, struct task_struct *);
59526+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
59527+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
59528+#endif
59529
59530 /*
59531 * base.c
59532diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
59533index 0a22194..a9fc8c1 100644
59534--- a/fs/proc/kcore.c
59535+++ b/fs/proc/kcore.c
59536@@ -484,9 +484,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
59537 * the addresses in the elf_phdr on our list.
59538 */
59539 start = kc_offset_to_vaddr(*fpos - elf_buflen);
59540- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
59541+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
59542+ if (tsz > buflen)
59543 tsz = buflen;
59544-
59545+
59546 while (buflen) {
59547 struct kcore_list *m;
59548
59549@@ -515,20 +516,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
59550 kfree(elf_buf);
59551 } else {
59552 if (kern_addr_valid(start)) {
59553- unsigned long n;
59554+ char *elf_buf;
59555+ mm_segment_t oldfs;
59556
59557- n = copy_to_user(buffer, (char *)start, tsz);
59558- /*
59559- * We cannot distinguish between fault on source
59560- * and fault on destination. When this happens
59561- * we clear too and hope it will trigger the
59562- * EFAULT again.
59563- */
59564- if (n) {
59565- if (clear_user(buffer + tsz - n,
59566- n))
59567+ elf_buf = kmalloc(tsz, GFP_KERNEL);
59568+ if (!elf_buf)
59569+ return -ENOMEM;
59570+ oldfs = get_fs();
59571+ set_fs(KERNEL_DS);
59572+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
59573+ set_fs(oldfs);
59574+ if (copy_to_user(buffer, elf_buf, tsz)) {
59575+ kfree(elf_buf);
59576 return -EFAULT;
59577+ }
59578 }
59579+ set_fs(oldfs);
59580+ kfree(elf_buf);
59581 } else {
59582 if (clear_user(buffer, tsz))
59583 return -EFAULT;
59584@@ -548,6 +552,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
59585
59586 static int open_kcore(struct inode *inode, struct file *filp)
59587 {
59588+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
59589+ return -EPERM;
59590+#endif
59591 if (!capable(CAP_SYS_RAWIO))
59592 return -EPERM;
59593 if (kcore_need_update)
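
The read_kcore() rewrite replaces the direct copy_to_user() from a kernel address with a two-step copy through a kmalloc() bounce buffer: the source is read under set_fs(KERNEL_DS) with __copy_from_user(), then the staged bytes go to the caller with copy_to_user(). That separates a fault on the kernel source from a fault on the user destination, which the old code (by its own comment) could not distinguish. A userspace analogue of the staging, with both copies simulated by memcpy:

#include <errno.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative bounce-buffer copy. In the kernel, step 1 is
 * __copy_from_user() under KERNEL_DS and step 2 is copy_to_user();
 * a failure in each step blames a different side. */
static int bounce_copy(char *dst, const char *src, size_t len)
{
	char *bounce = malloc(len);
	if (!bounce)
		return -ENOMEM;

	memcpy(bounce, src, len);	/* step 1: a fault here blames the source */
	memcpy(dst, bounce, len);	/* step 2: a fault here blames the destination */

	free(bounce);
	return 0;
}
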
59594diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
59595index 5aa847a..f77c8d4 100644
59596--- a/fs/proc/meminfo.c
59597+++ b/fs/proc/meminfo.c
59598@@ -159,7 +159,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
59599 vmi.used >> 10,
59600 vmi.largest_chunk >> 10
59601 #ifdef CONFIG_MEMORY_FAILURE
59602- ,atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10)
59603+ ,atomic_long_read_unchecked(&num_poisoned_pages) << (PAGE_SHIFT - 10)
59604 #endif
59605 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
59606 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
59607diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
59608index ccfd99b..1b7e255 100644
59609--- a/fs/proc/nommu.c
59610+++ b/fs/proc/nommu.c
59611@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
59612 if (len < 1)
59613 len = 1;
59614 seq_printf(m, "%*c", len, ' ');
59615- seq_path(m, &file->f_path, "");
59616+ seq_path(m, &file->f_path, "\n\\");
59617 }
59618
59619 seq_putc(m, '\n');
59620diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
59621index 986e832..6e8e859 100644
59622--- a/fs/proc/proc_net.c
59623+++ b/fs/proc/proc_net.c
59624@@ -23,6 +23,7 @@
59625 #include <linux/nsproxy.h>
59626 #include <net/net_namespace.h>
59627 #include <linux/seq_file.h>
59628+#include <linux/grsecurity.h>
59629
59630 #include "internal.h"
59631
59632@@ -109,6 +110,17 @@ static struct net *get_proc_task_net(struct inode *dir)
59633 struct task_struct *task;
59634 struct nsproxy *ns;
59635 struct net *net = NULL;
59636+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59637+ const struct cred *cred = current_cred();
59638+#endif
59639+
59640+#ifdef CONFIG_GRKERNSEC_PROC_USER
59641+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID))
59642+ return net;
59643+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59644+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID) && !in_group_p(grsec_proc_gid))
59645+ return net;
59646+#endif
59647
59648 rcu_read_lock();
59649 task = pid_task(proc_pid(dir), PIDTYPE_PID);
59650diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
59651index ac05f33..1e6dc7e 100644
59652--- a/fs/proc/proc_sysctl.c
59653+++ b/fs/proc/proc_sysctl.c
59654@@ -13,11 +13,15 @@
59655 #include <linux/module.h>
59656 #include "internal.h"
59657
59658+extern int gr_handle_chroot_sysctl(const int op);
59659+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
59660+ const int op);
59661+
59662 static const struct dentry_operations proc_sys_dentry_operations;
59663 static const struct file_operations proc_sys_file_operations;
59664-static const struct inode_operations proc_sys_inode_operations;
59665+const struct inode_operations proc_sys_inode_operations;
59666 static const struct file_operations proc_sys_dir_file_operations;
59667-static const struct inode_operations proc_sys_dir_operations;
59668+const struct inode_operations proc_sys_dir_operations;
59669
59670 void proc_sys_poll_notify(struct ctl_table_poll *poll)
59671 {
59672@@ -467,6 +471,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
59673
59674 err = NULL;
59675 d_set_d_op(dentry, &proc_sys_dentry_operations);
59676+
59677+ gr_handle_proc_create(dentry, inode);
59678+
59679 d_add(dentry, inode);
59680
59681 out:
59682@@ -482,6 +489,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
59683 struct inode *inode = file_inode(filp);
59684 struct ctl_table_header *head = grab_header(inode);
59685 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
59686+ int op = write ? MAY_WRITE : MAY_READ;
59687 ssize_t error;
59688 size_t res;
59689
59690@@ -493,7 +501,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
59691 * and won't be until we finish.
59692 */
59693 error = -EPERM;
59694- if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ))
59695+ if (sysctl_perm(head, table, op))
59696 goto out;
59697
59698 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
59699@@ -501,6 +509,22 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
59700 if (!table->proc_handler)
59701 goto out;
59702
59703+#ifdef CONFIG_GRKERNSEC
59704+ error = -EPERM;
59705+ if (gr_handle_chroot_sysctl(op))
59706+ goto out;
59707+ dget(filp->f_path.dentry);
59708+ if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
59709+ dput(filp->f_path.dentry);
59710+ goto out;
59711+ }
59712+ dput(filp->f_path.dentry);
59713+ if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
59714+ goto out;
59715+ if (write && !capable(CAP_SYS_ADMIN))
59716+ goto out;
59717+#endif
59718+
59719 /* careful: calling conventions are nasty here */
59720 res = count;
59721 error = table->proc_handler(table, write, buf, &res, ppos);
59722@@ -598,6 +622,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
59723 return -ENOMEM;
59724 } else {
59725 d_set_d_op(child, &proc_sys_dentry_operations);
59726+
59727+ gr_handle_proc_create(child, inode);
59728+
59729 d_add(child, inode);
59730 }
59731 } else {
59732@@ -641,6 +668,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
59733 if ((*pos)++ < file->f_pos)
59734 return 0;
59735
59736+ if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
59737+ return 0;
59738+
59739 if (unlikely(S_ISLNK(table->mode)))
59740 res = proc_sys_link_fill_cache(file, dirent, filldir, head, table);
59741 else
59742@@ -751,6 +781,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
59743 if (IS_ERR(head))
59744 return PTR_ERR(head);
59745
59746+ if (table && !gr_acl_handle_hidden_file(dentry, mnt))
59747+ return -ENOENT;
59748+
59749 generic_fillattr(inode, stat);
59750 if (table)
59751 stat->mode = (stat->mode & S_IFMT) | table->mode;
59752@@ -773,13 +806,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
59753 .llseek = generic_file_llseek,
59754 };
59755
59756-static const struct inode_operations proc_sys_inode_operations = {
59757+const struct inode_operations proc_sys_inode_operations = {
59758 .permission = proc_sys_permission,
59759 .setattr = proc_sys_setattr,
59760 .getattr = proc_sys_getattr,
59761 };
59762
59763-static const struct inode_operations proc_sys_dir_operations = {
59764+const struct inode_operations proc_sys_dir_operations = {
59765 .lookup = proc_sys_lookup,
59766 .permission = proc_sys_permission,
59767 .setattr = proc_sys_setattr,
59768@@ -855,7 +888,7 @@ static struct ctl_dir *find_subdir(struct ctl_dir *dir,
59769 static struct ctl_dir *new_dir(struct ctl_table_set *set,
59770 const char *name, int namelen)
59771 {
59772- struct ctl_table *table;
59773+ ctl_table_no_const *table;
59774 struct ctl_dir *new;
59775 struct ctl_node *node;
59776 char *new_name;
59777@@ -867,7 +900,7 @@ static struct ctl_dir *new_dir(struct ctl_table_set *set,
59778 return NULL;
59779
59780 node = (struct ctl_node *)(new + 1);
59781- table = (struct ctl_table *)(node + 1);
59782+ table = (ctl_table_no_const *)(node + 1);
59783 new_name = (char *)(table + 2);
59784 memcpy(new_name, name, namelen);
59785 new_name[namelen] = '\0';
59786@@ -1036,7 +1069,8 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
59787 static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table *table,
59788 struct ctl_table_root *link_root)
59789 {
59790- struct ctl_table *link_table, *entry, *link;
59791+ ctl_table_no_const *link_table, *link;
59792+ struct ctl_table *entry;
59793 struct ctl_table_header *links;
59794 struct ctl_node *node;
59795 char *link_name;
59796@@ -1059,7 +1093,7 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table
59797 return NULL;
59798
59799 node = (struct ctl_node *)(links + 1);
59800- link_table = (struct ctl_table *)(node + nr_entries);
59801+ link_table = (ctl_table_no_const *)(node + nr_entries);
59802 link_name = (char *)&link_table[nr_entries + 1];
59803
59804 for (link = link_table, entry = table; entry->procname; link++, entry++) {
59805@@ -1307,8 +1341,8 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
59806 struct ctl_table_header ***subheader, struct ctl_table_set *set,
59807 struct ctl_table *table)
59808 {
59809- struct ctl_table *ctl_table_arg = NULL;
59810- struct ctl_table *entry, *files;
59811+ ctl_table_no_const *ctl_table_arg = NULL, *files = NULL;
59812+ struct ctl_table *entry;
59813 int nr_files = 0;
59814 int nr_dirs = 0;
59815 int err = -ENOMEM;
59816@@ -1320,10 +1354,9 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
59817 nr_files++;
59818 }
59819
59820- files = table;
59821 /* If there are mixed files and directories we need a new table */
59822 if (nr_dirs && nr_files) {
59823- struct ctl_table *new;
59824+ ctl_table_no_const *new;
59825 files = kzalloc(sizeof(struct ctl_table) * (nr_files + 1),
59826 GFP_KERNEL);
59827 if (!files)
59828@@ -1341,7 +1374,7 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
59829 /* Register everything except a directory full of subdirectories */
59830 if (nr_files || !nr_dirs) {
59831 struct ctl_table_header *header;
59832- header = __register_sysctl_table(set, path, files);
59833+ header = __register_sysctl_table(set, path, files ? files : table);
59834 if (!header) {
59835 kfree(ctl_table_arg);
59836 goto out;
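
The proc_sys_call_handler() hunk inserts a fixed gate order in front of every sysctl handler: chroot policy first, then the RBAC path checks, then a blanket refusal of writes from tasks without CAP_SYS_ADMIN. A compressed sketch of that order; the boolean parameters stand in for the gr_* hooks and capable(), which are not reproduced here:

#include <errno.h>
#include <stdbool.h>

/* Minimal sketch of the added gate order. chroot_denies mirrors
 * gr_handle_chroot_sysctl(op), acl_denies mirrors the
 * gr_handle_sysctl_mod()/gr_acl_handle_open() pair, and
 * has_cap_sys_admin mirrors capable(CAP_SYS_ADMIN). */
static int sysctl_gate(bool write, bool chroot_denies, bool acl_denies,
		       bool has_cap_sys_admin)
{
	if (chroot_denies)
		return -EPERM;
	if (acl_denies)
		return -EPERM;
	if (write && !has_cap_sys_admin)
		return -EPERM;
	return 0;	/* fall through to table->proc_handler() */
}
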
59837diff --git a/fs/proc/root.c b/fs/proc/root.c
59838index 41a6ea9..23eaa92 100644
59839--- a/fs/proc/root.c
59840+++ b/fs/proc/root.c
59841@@ -182,7 +182,15 @@ void __init proc_root_init(void)
59842 #ifdef CONFIG_PROC_DEVICETREE
59843 proc_device_tree_init();
59844 #endif
59845+#ifdef CONFIG_GRKERNSEC_PROC_ADD
59846+#ifdef CONFIG_GRKERNSEC_PROC_USER
59847+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
59848+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59849+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
59850+#endif
59851+#else
59852 proc_mkdir("bus", NULL);
59853+#endif
59854 proc_sys_init();
59855 }
59856
59857diff --git a/fs/proc/self.c b/fs/proc/self.c
59858index 6b6a993..807cccc 100644
59859--- a/fs/proc/self.c
59860+++ b/fs/proc/self.c
59861@@ -39,7 +39,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
59862 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
59863 void *cookie)
59864 {
59865- char *s = nd_get_link(nd);
59866+ const char *s = nd_get_link(nd);
59867 if (!IS_ERR(s))
59868 kfree(s);
59869 }
59870diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
59871index 65fc60a..350cc48 100644
59872--- a/fs/proc/task_mmu.c
59873+++ b/fs/proc/task_mmu.c
59874@@ -11,12 +11,19 @@
59875 #include <linux/rmap.h>
59876 #include <linux/swap.h>
59877 #include <linux/swapops.h>
59878+#include <linux/grsecurity.h>
59879
59880 #include <asm/elf.h>
59881 #include <asm/uaccess.h>
59882 #include <asm/tlbflush.h>
59883 #include "internal.h"
59884
59885+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59886+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
59887+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
59888+ _mm->pax_flags & MF_PAX_SEGMEXEC))
59889+#endif
59890+
59891 void task_mem(struct seq_file *m, struct mm_struct *mm)
59892 {
59893 unsigned long data, text, lib, swap;
59894@@ -52,8 +59,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
59895 "VmExe:\t%8lu kB\n"
59896 "VmLib:\t%8lu kB\n"
59897 "VmPTE:\t%8lu kB\n"
59898- "VmSwap:\t%8lu kB\n",
59899- hiwater_vm << (PAGE_SHIFT-10),
59900+ "VmSwap:\t%8lu kB\n"
59901+
59902+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
59903+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
59904+#endif
59905+
59906+ ,hiwater_vm << (PAGE_SHIFT-10),
59907 total_vm << (PAGE_SHIFT-10),
59908 mm->locked_vm << (PAGE_SHIFT-10),
59909 mm->pinned_vm << (PAGE_SHIFT-10),
59910@@ -62,7 +74,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
59911 data << (PAGE_SHIFT-10),
59912 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
59913 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
59914- swap << (PAGE_SHIFT-10));
59915+ swap << (PAGE_SHIFT-10)
59916+
59917+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
59918+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59919+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
59920+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
59921+#else
59922+ , mm->context.user_cs_base
59923+ , mm->context.user_cs_limit
59924+#endif
59925+#endif
59926+
59927+ );
59928 }
59929
59930 unsigned long task_vsize(struct mm_struct *mm)
59931@@ -277,13 +301,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
59932 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
59933 }
59934
59935- /* We don't show the stack guard page in /proc/maps */
59936+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59937+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
59938+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
59939+#else
59940 start = vma->vm_start;
59941- if (stack_guard_page_start(vma, start))
59942- start += PAGE_SIZE;
59943 end = vma->vm_end;
59944- if (stack_guard_page_end(vma, end))
59945- end -= PAGE_SIZE;
59946+#endif
59947
59948 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
59949 start,
59950@@ -292,7 +316,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
59951 flags & VM_WRITE ? 'w' : '-',
59952 flags & VM_EXEC ? 'x' : '-',
59953 flags & VM_MAYSHARE ? 's' : 'p',
59954+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59955+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
59956+#else
59957 pgoff,
59958+#endif
59959 MAJOR(dev), MINOR(dev), ino, &len);
59960
59961 /*
59962@@ -301,7 +329,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
59963 */
59964 if (file) {
59965 pad_len_spaces(m, len);
59966- seq_path(m, &file->f_path, "\n");
59967+ seq_path(m, &file->f_path, "\n\\");
59968 goto done;
59969 }
59970
59971@@ -327,8 +355,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
59972 * Thread stack in /proc/PID/task/TID/maps or
59973 * the main process stack.
59974 */
59975- if (!is_pid || (vma->vm_start <= mm->start_stack &&
59976- vma->vm_end >= mm->start_stack)) {
59977+ if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
59978+ (vma->vm_start <= mm->start_stack &&
59979+ vma->vm_end >= mm->start_stack)) {
59980 name = "[stack]";
59981 } else {
59982 /* Thread stack in /proc/PID/maps */
59983@@ -352,6 +381,13 @@ static int show_map(struct seq_file *m, void *v, int is_pid)
59984 struct proc_maps_private *priv = m->private;
59985 struct task_struct *task = priv->task;
59986
59987+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59988+ if (current->exec_id != m->exec_id) {
59989+ gr_log_badprocpid("maps");
59990+ return 0;
59991+ }
59992+#endif
59993+
59994 show_map_vma(m, vma, is_pid);
59995
59996 if (m->count < m->size) /* vma is copied successfully */
59997@@ -589,12 +625,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
59998 .private = &mss,
59999 };
60000
60001+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60002+ if (current->exec_id != m->exec_id) {
60003+ gr_log_badprocpid("smaps");
60004+ return 0;
60005+ }
60006+#endif
60007 memset(&mss, 0, sizeof mss);
60008- mss.vma = vma;
60009- /* mmap_sem is held in m_start */
60010- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
60011- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
60012-
60013+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60014+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
60015+#endif
60016+ mss.vma = vma;
60017+ /* mmap_sem is held in m_start */
60018+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
60019+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
60020+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60021+ }
60022+#endif
60023 show_map_vma(m, vma, is_pid);
60024
60025 seq_printf(m,
60026@@ -612,7 +659,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
60027 "KernelPageSize: %8lu kB\n"
60028 "MMUPageSize: %8lu kB\n"
60029 "Locked: %8lu kB\n",
60030+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60031+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
60032+#else
60033 (vma->vm_end - vma->vm_start) >> 10,
60034+#endif
60035 mss.resident >> 10,
60036 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
60037 mss.shared_clean >> 10,
60038@@ -1264,6 +1315,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
60039 int n;
60040 char buffer[50];
60041
60042+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60043+ if (current->exec_id != m->exec_id) {
60044+ gr_log_badprocpid("numa_maps");
60045+ return 0;
60046+ }
60047+#endif
60048+
60049 if (!mm)
60050 return 0;
60051
60052@@ -1281,11 +1339,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
60053 mpol_to_str(buffer, sizeof(buffer), pol);
60054 mpol_cond_put(pol);
60055
60056+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60057+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
60058+#else
60059 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
60060+#endif
60061
60062 if (file) {
60063 seq_printf(m, " file=");
60064- seq_path(m, &file->f_path, "\n\t= ");
60065+ seq_path(m, &file->f_path, "\n\t\\= ");
60066 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
60067 seq_printf(m, " heap");
60068 } else {
60069diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
60070index 56123a6..5a2f6ec 100644
60071--- a/fs/proc/task_nommu.c
60072+++ b/fs/proc/task_nommu.c
60073@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
60074 else
60075 bytes += kobjsize(mm);
60076
60077- if (current->fs && current->fs->users > 1)
60078+ if (current->fs && atomic_read(&current->fs->users) > 1)
60079 sbytes += kobjsize(current->fs);
60080 else
60081 bytes += kobjsize(current->fs);
60082@@ -168,7 +168,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
60083
60084 if (file) {
60085 pad_len_spaces(m, len);
60086- seq_path(m, &file->f_path, "");
60087+ seq_path(m, &file->f_path, "\n\\");
60088 } else if (mm) {
60089 pid_t tid = vm_is_stack(priv->task, vma, is_pid);
60090
60091diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
60092index 17f7e08..e4b1529 100644
60093--- a/fs/proc/vmcore.c
60094+++ b/fs/proc/vmcore.c
60095@@ -99,9 +99,13 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
60096 nr_bytes = count;
60097
60098 /* If pfn is not ram, return zeros for sparse dump files */
60099- if (pfn_is_ram(pfn) == 0)
60100- memset(buf, 0, nr_bytes);
60101- else {
60102+ if (pfn_is_ram(pfn) == 0) {
60103+ if (userbuf) {
60104+ if (clear_user((char __force_user *)buf, nr_bytes))
60105+ return -EFAULT;
60106+ } else
60107+ memset(buf, 0, nr_bytes);
60108+ } else {
60109 tmp = copy_oldmem_page(pfn, buf, nr_bytes,
60110 offset, userbuf);
60111 if (tmp < 0)
60112@@ -186,7 +190,7 @@ static ssize_t read_vmcore(struct file *file, char __user *buffer,
60113 if (tsz > nr_bytes)
60114 tsz = nr_bytes;
60115
60116- tmp = read_from_oldmem(buffer, tsz, &start, 1);
60117+ tmp = read_from_oldmem((char __force_kernel *)buffer, tsz, &start, 1);
60118 if (tmp < 0)
60119 return tmp;
60120 buflen -= tsz;
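
read_from_oldmem() zero-fills holes where a pfn is not RAM; the fix above distinguishes a user-space destination, which must be zeroed with clear_user() because the store can fault, from a kernel buffer, where a plain memset() is safe. A small sketch of the branch; fake_clear_user() is a stand-in mirroring clear_user()'s contract of returning the number of bytes it could not zero:

#include <errno.h>
#include <stddef.h>
#include <string.h>

static size_t fake_clear_user(char *ubuf, size_t n)
{
	memset(ubuf, 0, n);	/* real clear_user() may fault partway */
	return 0;
}

static int zero_fill(char *buf, size_t n, int userbuf)
{
	if (userbuf) {
		if (fake_clear_user(buf, n))
			return -EFAULT;	/* user page may be unmapped */
	} else {
		memset(buf, 0, n);	/* kernel buffer: cannot fault */
	}
	return 0;
}
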
60121diff --git a/fs/qnx6/qnx6.h b/fs/qnx6/qnx6.h
60122index b00fcc9..e0c6381 100644
60123--- a/fs/qnx6/qnx6.h
60124+++ b/fs/qnx6/qnx6.h
60125@@ -74,7 +74,7 @@ enum {
60126 BYTESEX_BE,
60127 };
60128
60129-static inline __u64 fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
60130+static inline __u64 __intentional_overflow(-1) fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
60131 {
60132 if (sbi->s_bytesex == BYTESEX_LE)
60133 return le64_to_cpu((__force __le64)n);
60134@@ -90,7 +90,7 @@ static inline __fs64 cpu_to_fs64(struct qnx6_sb_info *sbi, __u64 n)
60135 return (__force __fs64)cpu_to_be64(n);
60136 }
60137
60138-static inline __u32 fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
60139+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
60140 {
60141 if (sbi->s_bytesex == BYTESEX_LE)
60142 return le32_to_cpu((__force __le32)n);
60143diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
60144index 16e8abb..2dcf914 100644
60145--- a/fs/quota/netlink.c
60146+++ b/fs/quota/netlink.c
60147@@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
60148 void quota_send_warning(struct kqid qid, dev_t dev,
60149 const char warntype)
60150 {
60151- static atomic_t seq;
60152+ static atomic_unchecked_t seq;
60153 struct sk_buff *skb;
60154 void *msg_head;
60155 int ret;
60156@@ -49,7 +49,7 @@ void quota_send_warning(struct kqid qid, dev_t dev,
60157 "VFS: Not enough memory to send quota warning.\n");
60158 return;
60159 }
60160- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
60161+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
60162 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
60163 if (!msg_head) {
60164 printk(KERN_ERR
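
quota_send_warning()'s message sequence number is switched from atomic_t to atomic_unchecked_t, the same substitution made for meminfo.c earlier and the reiserfs generation counter below. Under grsecurity's refcount hardening, plain atomic_t arithmetic is overflow-checked, so counters that are allowed to wrap, such as sequence numbers and statistics, use the unchecked variant. A userspace analogue with a deliberately wrapping unsigned counter:

#include <stdatomic.h>

/* Wrap-around on an unsigned counter is well defined; this is the
 * behaviour atomic_add_return_unchecked(1, &seq) preserves where a
 * hardened atomic_t would flag the overflow. */
static atomic_uint seq;

static unsigned int next_seq(void)
{
	return atomic_fetch_add(&seq, 1) + 1;
}
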
60165diff --git a/fs/read_write.c b/fs/read_write.c
60166index 2cefa41..c7e2fe0 100644
60167--- a/fs/read_write.c
60168+++ b/fs/read_write.c
60169@@ -411,7 +411,7 @@ ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t
60170
60171 old_fs = get_fs();
60172 set_fs(get_ds());
60173- p = (__force const char __user *)buf;
60174+ p = (const char __force_user *)buf;
60175 if (count > MAX_RW_COUNT)
60176 count = MAX_RW_COUNT;
60177 if (file->f_op->write)
60178diff --git a/fs/readdir.c b/fs/readdir.c
60179index fee38e0..12fdf47 100644
60180--- a/fs/readdir.c
60181+++ b/fs/readdir.c
60182@@ -17,6 +17,7 @@
60183 #include <linux/security.h>
60184 #include <linux/syscalls.h>
60185 #include <linux/unistd.h>
60186+#include <linux/namei.h>
60187
60188 #include <asm/uaccess.h>
60189
60190@@ -67,6 +68,7 @@ struct old_linux_dirent {
60191
60192 struct readdir_callback {
60193 struct old_linux_dirent __user * dirent;
60194+ struct file * file;
60195 int result;
60196 };
60197
60198@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
60199 buf->result = -EOVERFLOW;
60200 return -EOVERFLOW;
60201 }
60202+
60203+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
60204+ return 0;
60205+
60206 buf->result++;
60207 dirent = buf->dirent;
60208 if (!access_ok(VERIFY_WRITE, dirent,
60209@@ -114,6 +120,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
60210
60211 buf.result = 0;
60212 buf.dirent = dirent;
60213+ buf.file = f.file;
60214
60215 error = vfs_readdir(f.file, fillonedir, &buf);
60216 if (buf.result)
60217@@ -139,6 +146,7 @@ struct linux_dirent {
60218 struct getdents_callback {
60219 struct linux_dirent __user * current_dir;
60220 struct linux_dirent __user * previous;
60221+ struct file * file;
60222 int count;
60223 int error;
60224 };
60225@@ -160,6 +168,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
60226 buf->error = -EOVERFLOW;
60227 return -EOVERFLOW;
60228 }
60229+
60230+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
60231+ return 0;
60232+
60233 dirent = buf->previous;
60234 if (dirent) {
60235 if (__put_user(offset, &dirent->d_off))
60236@@ -205,6 +217,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
60237 buf.previous = NULL;
60238 buf.count = count;
60239 buf.error = 0;
60240+ buf.file = f.file;
60241
60242 error = vfs_readdir(f.file, filldir, &buf);
60243 if (error >= 0)
60244@@ -223,6 +236,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
60245 struct getdents_callback64 {
60246 struct linux_dirent64 __user * current_dir;
60247 struct linux_dirent64 __user * previous;
60248+ struct file *file;
60249 int count;
60250 int error;
60251 };
60252@@ -238,6 +252,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
60253 buf->error = -EINVAL; /* only used if we fail.. */
60254 if (reclen > buf->count)
60255 return -EINVAL;
60256+
60257+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
60258+ return 0;
60259+
60260 dirent = buf->previous;
60261 if (dirent) {
60262 if (__put_user(offset, &dirent->d_off))
60263@@ -283,6 +301,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
60264
60265 buf.current_dir = dirent;
60266 buf.previous = NULL;
60267+ buf.file = f.file;
60268 buf.count = count;
60269 buf.error = 0;
60270
60271@@ -291,7 +310,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
60272 error = buf.error;
60273 lastdirent = buf.previous;
60274 if (lastdirent) {
60275- typeof(lastdirent->d_off) d_off = f.file->f_pos;
60276+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = f.file->f_pos;
60277 if (__put_user(d_off, &lastdirent->d_off))
60278 error = -EFAULT;
60279 else
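
Each flavour of getdents above gains a struct file * in its callback context so gr_acl_handle_filldir() can consult the opened directory when deciding whether to reveal each entry; a filtered entry is skipped by returning 0 from the filldir callback, which keeps iteration going without emitting anything. A toy version of the pattern, where acl_allows() is an illustrative stand-in policy:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct ctx { const char *dir_name; int emitted; };

/* stand-in for gr_acl_handle_filldir(buf->file, name, namlen, ino) */
static bool acl_allows(const struct ctx *c, const char *name)
{
	(void)c;
	return strcmp(name, "hidden") != 0;	/* toy policy */
}

static int filldir(struct ctx *c, const char *name)
{
	if (!acl_allows(c, name))
		return 0;	/* skip silently, keep iterating */
	printf("%s/%s\n", c->dir_name, name);
	c->emitted++;
	return 0;
}
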
60280diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
60281index 2b7882b..1c5ef48 100644
60282--- a/fs/reiserfs/do_balan.c
60283+++ b/fs/reiserfs/do_balan.c
60284@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
60285 return;
60286 }
60287
60288- atomic_inc(&(fs_generation(tb->tb_sb)));
60289+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
60290 do_balance_starts(tb);
60291
60292 /* balance leaf returns 0 except if combining L R and S into
60293diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
60294index 1d48974..2f8f4e0 100644
60295--- a/fs/reiserfs/procfs.c
60296+++ b/fs/reiserfs/procfs.c
60297@@ -114,7 +114,7 @@ static int show_super(struct seq_file *m, void *unused)
60298 "SMALL_TAILS " : "NO_TAILS ",
60299 replay_only(sb) ? "REPLAY_ONLY " : "",
60300 convert_reiserfs(sb) ? "CONV " : "",
60301- atomic_read(&r->s_generation_counter),
60302+ atomic_read_unchecked(&r->s_generation_counter),
60303 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
60304 SF(s_do_balance), SF(s_unneeded_left_neighbor),
60305 SF(s_good_search_by_key_reada), SF(s_bmaps),
60306diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
60307index 157e474..65a6114 100644
60308--- a/fs/reiserfs/reiserfs.h
60309+++ b/fs/reiserfs/reiserfs.h
60310@@ -453,7 +453,7 @@ struct reiserfs_sb_info {
60311 /* Comment? -Hans */
60312 wait_queue_head_t s_wait;
60313 /* To be obsoleted soon by per buffer seals.. -Hans */
60314- atomic_t s_generation_counter; // increased by one every time the
60315+ atomic_unchecked_t s_generation_counter; // increased by one every time the
60316 // tree gets re-balanced
60317 unsigned long s_properties; /* File system properties. Currently holds
60318 on-disk FS format */
60319@@ -1978,7 +1978,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
60320 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
60321
60322 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
60323-#define get_generation(s) atomic_read (&fs_generation(s))
60324+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
60325 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
60326 #define __fs_changed(gen,s) (gen != get_generation (s))
60327 #define fs_changed(gen,s) \
60328diff --git a/fs/select.c b/fs/select.c
60329index 8c1c96c..a0f9b6d 100644
60330--- a/fs/select.c
60331+++ b/fs/select.c
60332@@ -20,6 +20,7 @@
60333 #include <linux/export.h>
60334 #include <linux/slab.h>
60335 #include <linux/poll.h>
60336+#include <linux/security.h>
60337 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
60338 #include <linux/file.h>
60339 #include <linux/fdtable.h>
60340@@ -827,6 +828,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
60341 struct poll_list *walk = head;
60342 unsigned long todo = nfds;
60343
60344+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
60345 if (nfds > rlimit(RLIMIT_NOFILE))
60346 return -EINVAL;
60347
60348diff --git a/fs/seq_file.c b/fs/seq_file.c
60349index 774c1eb..b67582a 100644
60350--- a/fs/seq_file.c
60351+++ b/fs/seq_file.c
60352@@ -10,6 +10,7 @@
60353 #include <linux/seq_file.h>
60354 #include <linux/slab.h>
60355 #include <linux/cred.h>
60356+#include <linux/sched.h>
60357
60358 #include <asm/uaccess.h>
60359 #include <asm/page.h>
60360@@ -60,6 +61,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
60361 #ifdef CONFIG_USER_NS
60362 p->user_ns = file->f_cred->user_ns;
60363 #endif
60364+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60365+ p->exec_id = current->exec_id;
60366+#endif
60367
60368 /*
60369 * Wrappers around seq_open(e.g. swaps_open) need to be
60370@@ -96,7 +100,7 @@ static int traverse(struct seq_file *m, loff_t offset)
60371 return 0;
60372 }
60373 if (!m->buf) {
60374- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
60375+ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
60376 if (!m->buf)
60377 return -ENOMEM;
60378 }
60379@@ -136,7 +140,7 @@ static int traverse(struct seq_file *m, loff_t offset)
60380 Eoverflow:
60381 m->op->stop(m, p);
60382 kfree(m->buf);
60383- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
60384+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
60385 return !m->buf ? -ENOMEM : -EAGAIN;
60386 }
60387
60388@@ -191,7 +195,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
60389
60390 /* grab buffer if we didn't have one */
60391 if (!m->buf) {
60392- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
60393+ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
60394 if (!m->buf)
60395 goto Enomem;
60396 }
60397@@ -232,7 +236,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
60398 goto Fill;
60399 m->op->stop(m, p);
60400 kfree(m->buf);
60401- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
60402+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
60403 if (!m->buf)
60404 goto Enomem;
60405 m->count = 0;
60406@@ -581,7 +585,7 @@ static void single_stop(struct seq_file *p, void *v)
60407 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
60408 void *data)
60409 {
60410- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
60411+ seq_operations_no_const *op = kzalloc(sizeof(*op), GFP_KERNEL);
60412 int res = -ENOMEM;
60413
60414 if (op) {
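
For context on where these buffers live: the kmalloc() sites patched above allocate the seq_file read buffer, and tagging them GFP_USERCOPY places them in slab caches whitelisted for copies to userland under PAX_USERCOPY; the single_open() change also heap-allocates a non-const seq_operations (the grsecurity seq_operations_no_const typedef) with kzalloc so unset callbacks stay NULL. A minimal, hypothetical module of this kernel era shows the call path that ends in those allocations (the "seqdemo" name and file are invented for illustration):

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int demo_show(struct seq_file *m, void *v)
{
	/* seq_printf() writes into the kmalloc'd m->buf that the
	 * patch tags with GFP_USERCOPY above. */
	seq_printf(m, "hello from seq_file\n");
	return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
	/* single_open() is the kzalloc'd seq_operations path above. */
	return single_open(file, demo_show, NULL);
}

static const struct file_operations demo_fops = {
	.owner   = THIS_MODULE,
	.open    = demo_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

static int __init demo_init(void)
{
	proc_create("seqdemo", 0444, NULL, &demo_fops);
	return 0;
}

static void __exit demo_exit(void)
{
	remove_proc_entry("seqdemo", NULL);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
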
60415diff --git a/fs/splice.c b/fs/splice.c
60416index d37431d..81c3044 100644
60417--- a/fs/splice.c
60418+++ b/fs/splice.c
60419@@ -196,7 +196,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
60420 pipe_lock(pipe);
60421
60422 for (;;) {
60423- if (!pipe->readers) {
60424+ if (!atomic_read(&pipe->readers)) {
60425 send_sig(SIGPIPE, current, 0);
60426 if (!ret)
60427 ret = -EPIPE;
60428@@ -219,7 +219,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
60429 page_nr++;
60430 ret += buf->len;
60431
60432- if (pipe->files)
60433+ if (atomic_read(&pipe->files))
60434 do_wakeup = 1;
60435
60436 if (!--spd->nr_pages)
60437@@ -250,9 +250,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
60438 do_wakeup = 0;
60439 }
60440
60441- pipe->waiting_writers++;
60442+ atomic_inc(&pipe->waiting_writers);
60443 pipe_wait(pipe);
60444- pipe->waiting_writers--;
60445+ atomic_dec(&pipe->waiting_writers);
60446 }
60447
60448 pipe_unlock(pipe);
60449@@ -565,7 +565,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
60450 old_fs = get_fs();
60451 set_fs(get_ds());
60452 /* The cast to a user pointer is valid due to the set_fs() */
60453- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
60454+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
60455 set_fs(old_fs);
60456
60457 return res;
60458@@ -580,7 +580,7 @@ ssize_t kernel_write(struct file *file, const char *buf, size_t count,
60459 old_fs = get_fs();
60460 set_fs(get_ds());
60461 /* The cast to a user pointer is valid due to the set_fs() */
60462- res = vfs_write(file, (__force const char __user *)buf, count, &pos);
60463+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
60464 set_fs(old_fs);
60465
60466 return res;
60467@@ -633,7 +633,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
60468 goto err;
60469
60470 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
60471- vec[i].iov_base = (void __user *) page_address(page);
60472+ vec[i].iov_base = (void __force_user *) page_address(page);
60473 vec[i].iov_len = this_len;
60474 spd.pages[i] = page;
60475 spd.nr_pages++;
60476@@ -829,7 +829,7 @@ int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_desc *sd,
60477 ops->release(pipe, buf);
60478 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
60479 pipe->nrbufs--;
60480- if (pipe->files)
60481+ if (atomic_read(&pipe->files))
60482 sd->need_wakeup = true;
60483 }
60484
60485@@ -854,10 +854,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
60486 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
60487 {
60488 while (!pipe->nrbufs) {
60489- if (!pipe->writers)
60490+ if (!atomic_read(&pipe->writers))
60491 return 0;
60492
60493- if (!pipe->waiting_writers && sd->num_spliced)
60494+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
60495 return 0;
60496
60497 if (sd->flags & SPLICE_F_NONBLOCK)
60498@@ -1193,7 +1193,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
60499 * out of the pipe right after the splice_to_pipe(). So set
60500 * PIPE_READERS appropriately.
60501 */
60502- pipe->readers = 1;
60503+ atomic_set(&pipe->readers, 1);
60504
60505 current->splice_pipe = pipe;
60506 }
60507@@ -1769,9 +1769,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
60508 ret = -ERESTARTSYS;
60509 break;
60510 }
60511- if (!pipe->writers)
60512+ if (!atomic_read(&pipe->writers))
60513 break;
60514- if (!pipe->waiting_writers) {
60515+ if (!atomic_read(&pipe->waiting_writers)) {
60516 if (flags & SPLICE_F_NONBLOCK) {
60517 ret = -EAGAIN;
60518 break;
60519@@ -1803,7 +1803,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
60520 pipe_lock(pipe);
60521
60522 while (pipe->nrbufs >= pipe->buffers) {
60523- if (!pipe->readers) {
60524+ if (!atomic_read(&pipe->readers)) {
60525 send_sig(SIGPIPE, current, 0);
60526 ret = -EPIPE;
60527 break;
60528@@ -1816,9 +1816,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
60529 ret = -ERESTARTSYS;
60530 break;
60531 }
60532- pipe->waiting_writers++;
60533+ atomic_inc(&pipe->waiting_writers);
60534 pipe_wait(pipe);
60535- pipe->waiting_writers--;
60536+ atomic_dec(&pipe->waiting_writers);
60537 }
60538
60539 pipe_unlock(pipe);
60540@@ -1854,14 +1854,14 @@ retry:
60541 pipe_double_lock(ipipe, opipe);
60542
60543 do {
60544- if (!opipe->readers) {
60545+ if (!atomic_read(&opipe->readers)) {
60546 send_sig(SIGPIPE, current, 0);
60547 if (!ret)
60548 ret = -EPIPE;
60549 break;
60550 }
60551
60552- if (!ipipe->nrbufs && !ipipe->writers)
60553+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
60554 break;
60555
60556 /*
60557@@ -1958,7 +1958,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
60558 pipe_double_lock(ipipe, opipe);
60559
60560 do {
60561- if (!opipe->readers) {
60562+ if (!atomic_read(&opipe->readers)) {
60563 send_sig(SIGPIPE, current, 0);
60564 if (!ret)
60565 ret = -EPIPE;
60566@@ -2003,7 +2003,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
60567 * return EAGAIN if we have the potential of some data in the
60568 * future, otherwise just return 0
60569 */
60570- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
60571+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
60572 ret = -EAGAIN;
60573
60574 pipe_unlock(ipipe);
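
All of the splice changes above follow one pattern: pipe->readers, writers, waiting_writers and files become atomic_t so the REFCOUNT instrumentation can watch them for overflow, with plain accesses converted to atomic_read()/atomic_inc()/atomic_dec(). The reader-count check they guard is the ordinary EPIPE/SIGPIPE path, observable from userspace:

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	int fds[2];

	signal(SIGPIPE, SIG_IGN);	/* report EPIPE instead of dying */
	if (pipe(fds) < 0)
		return 1;
	close(fds[0]);			/* reader count drops to zero */
	if (write(fds[1], "x", 1) < 0)	/* the !readers check above fires */
		printf("write to readerless pipe: %s\n", strerror(errno));
	close(fds[1]);
	return 0;
}
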
60575diff --git a/fs/stat.c b/fs/stat.c
60576index 04ce1ac..a13dd1e 100644
60577--- a/fs/stat.c
60578+++ b/fs/stat.c
60579@@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
60580 stat->gid = inode->i_gid;
60581 stat->rdev = inode->i_rdev;
60582 stat->size = i_size_read(inode);
60583- stat->atime = inode->i_atime;
60584- stat->mtime = inode->i_mtime;
60585+ if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
60586+ stat->atime = inode->i_ctime;
60587+ stat->mtime = inode->i_ctime;
60588+ } else {
60589+ stat->atime = inode->i_atime;
60590+ stat->mtime = inode->i_mtime;
60591+ }
60592 stat->ctime = inode->i_ctime;
60593 stat->blksize = (1 << inode->i_blkbits);
60594 stat->blocks = inode->i_blocks;
60595@@ -46,8 +51,14 @@ int vfs_getattr(struct path *path, struct kstat *stat)
60596 if (retval)
60597 return retval;
60598
60599- if (inode->i_op->getattr)
60600- return inode->i_op->getattr(path->mnt, path->dentry, stat);
60601+ if (inode->i_op->getattr) {
60602+ retval = inode->i_op->getattr(path->mnt, path->dentry, stat);
60603+ if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
60604+ stat->atime = stat->ctime;
60605+ stat->mtime = stat->ctime;
60606+ }
60607+ return retval;
60608+ }
60609
60610 generic_fillattr(inode, stat);
60611 return 0;
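
The stat() changes implement the GRKERNSEC_DEVICE_SIDECHANNEL behavior described later in this patch's Kconfig: for callers lacking CAP_MKNOD, the access and modify times of device nodes are reported as the ctime value, so watching timestamps on something like /dev/ptmx cannot leak activity timing. A quick userspace probe (on a kernel with this patch and the option enabled, the three values below match for an unprivileged caller):

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	struct stat st;

	if (stat("/dev/ptmx", &st) < 0) {
		perror("stat /dev/ptmx");
		return 1;
	}
	/* With the hunk above active and no CAP_MKNOD:
	 * atime == mtime == ctime. */
	printf("atime=%ld mtime=%ld ctime=%ld\n",
	       (long)st.st_atime, (long)st.st_mtime, (long)st.st_ctime);
	return 0;
}
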
60612diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c
60613index 15c68f9..36a8b3e 100644
60614--- a/fs/sysfs/bin.c
60615+++ b/fs/sysfs/bin.c
60616@@ -235,13 +235,13 @@ static int bin_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
60617 return ret;
60618 }
60619
60620-static int bin_access(struct vm_area_struct *vma, unsigned long addr,
60621- void *buf, int len, int write)
60622+static ssize_t bin_access(struct vm_area_struct *vma, unsigned long addr,
60623+ void *buf, size_t len, int write)
60624 {
60625 struct file *file = vma->vm_file;
60626 struct bin_buffer *bb = file->private_data;
60627 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
60628- int ret;
60629+ ssize_t ret;
60630
60631 if (!bb->vm_ops)
60632 return -EINVAL;
60633diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
60634index e8e0e71..79c28ac5 100644
60635--- a/fs/sysfs/dir.c
60636+++ b/fs/sysfs/dir.c
60637@@ -40,7 +40,7 @@ static DEFINE_IDA(sysfs_ino_ida);
60638 *
60639 * Returns 31 bit hash of ns + name (so it fits in an off_t )
60640 */
60641-static unsigned int sysfs_name_hash(const void *ns, const char *name)
60642+static unsigned int sysfs_name_hash(const void *ns, const unsigned char *name)
60643 {
60644 unsigned long hash = init_name_hash();
60645 unsigned int len = strlen(name);
60646@@ -679,6 +679,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
60647 struct sysfs_dirent *sd;
60648 int rc;
60649
60650+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
60651+ const char *parent_name = parent_sd->s_name;
60652+
60653+ mode = S_IFDIR | S_IRWXU;
60654+
60655+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
60656+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
60657+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse") || !strcmp(name, "ecryptfs"))) ||
60658+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
60659+ mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
60660+#endif
60661+
60662 /* allocate */
60663 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
60664 if (!sd)
60665diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
60666index 602f56d..6853db8 100644
60667--- a/fs/sysfs/file.c
60668+++ b/fs/sysfs/file.c
60669@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
60670
60671 struct sysfs_open_dirent {
60672 atomic_t refcnt;
60673- atomic_t event;
60674+ atomic_unchecked_t event;
60675 wait_queue_head_t poll;
60676 struct list_head buffers; /* goes through sysfs_buffer.list */
60677 };
60678@@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
60679 if (!sysfs_get_active(attr_sd))
60680 return -ENODEV;
60681
60682- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
60683+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
60684 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
60685
60686 sysfs_put_active(attr_sd);
60687@@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
60688 return -ENOMEM;
60689
60690 atomic_set(&new_od->refcnt, 0);
60691- atomic_set(&new_od->event, 1);
60692+ atomic_set_unchecked(&new_od->event, 1);
60693 init_waitqueue_head(&new_od->poll);
60694 INIT_LIST_HEAD(&new_od->buffers);
60695 goto retry;
60696@@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
60697
60698 sysfs_put_active(attr_sd);
60699
60700- if (buffer->event != atomic_read(&od->event))
60701+ if (buffer->event != atomic_read_unchecked(&od->event))
60702 goto trigger;
60703
60704 return DEFAULT_POLLMASK;
60705@@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
60706
60707 od = sd->s_attr.open;
60708 if (od) {
60709- atomic_inc(&od->event);
60710+ atomic_inc_unchecked(&od->event);
60711 wake_up_interruptible(&od->poll);
60712 }
60713
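
The event counter converted to atomic_unchecked_t above drives sysfs's poll support: sysfs_notify_dirent() bumps od->event and wakes pollers, and any open buffer whose recorded event lags the counter reports POLLERR|POLLPRI. The usual userspace pattern looks like this (the attribute path is hypothetical; any sysfs file whose driver calls sysfs_notify() behaves the same):

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *attr = "/sys/class/power_supply/BAT0/capacity";
	char buf[32];
	int fd = open(attr, O_RDONLY);
	struct pollfd pfd;

	if (fd < 0)
		return 1;
	if (read(fd, buf, sizeof(buf)) < 0)	/* records the current event count */
		return 1;
	pfd.fd = fd;
	pfd.events = POLLPRI;
	/* Blocks until the kernel side bumps od->event via sysfs_notify(). */
	if (poll(&pfd, 1, 5000) > 0 && (pfd.revents & (POLLPRI | POLLERR)))
		puts("attribute changed");
	close(fd);
	return 0;
}
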
60714diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
60715index 8c940df..25b733e 100644
60716--- a/fs/sysfs/symlink.c
60717+++ b/fs/sysfs/symlink.c
60718@@ -305,7 +305,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
60719
60720 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
60721 {
60722- char *page = nd_get_link(nd);
60723+ const char *page = nd_get_link(nd);
60724 if (!IS_ERR(page))
60725 free_page((unsigned long)page);
60726 }
60727diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
60728index 69d4889..a810bd4 100644
60729--- a/fs/sysv/sysv.h
60730+++ b/fs/sysv/sysv.h
60731@@ -188,7 +188,7 @@ static inline u32 PDP_swab(u32 x)
60732 #endif
60733 }
60734
60735-static inline __u32 fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
60736+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
60737 {
60738 if (sbi->s_bytesex == BYTESEX_PDP)
60739 return PDP_swab((__force __u32)n);
60740diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
60741index e18b988..f1d4ad0f 100644
60742--- a/fs/ubifs/io.c
60743+++ b/fs/ubifs/io.c
60744@@ -155,7 +155,7 @@ int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
60745 return err;
60746 }
60747
60748-int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
60749+int __intentional_overflow(-1) ubifs_leb_unmap(struct ubifs_info *c, int lnum)
60750 {
60751 int err;
60752
60753diff --git a/fs/udf/misc.c b/fs/udf/misc.c
60754index c175b4d..8f36a16 100644
60755--- a/fs/udf/misc.c
60756+++ b/fs/udf/misc.c
60757@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
60758
60759 u8 udf_tag_checksum(const struct tag *t)
60760 {
60761- u8 *data = (u8 *)t;
60762+ const u8 *data = (const u8 *)t;
60763 u8 checksum = 0;
60764 int i;
60765 for (i = 0; i < sizeof(struct tag); ++i)
60766diff --git a/fs/ufs/swab.h b/fs/ufs/swab.h
60767index 8d974c4..b82f6ec 100644
60768--- a/fs/ufs/swab.h
60769+++ b/fs/ufs/swab.h
60770@@ -22,7 +22,7 @@ enum {
60771 BYTESEX_BE
60772 };
60773
60774-static inline u64
60775+static inline u64 __intentional_overflow(-1)
60776 fs64_to_cpu(struct super_block *sbp, __fs64 n)
60777 {
60778 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
60779@@ -40,7 +40,7 @@ cpu_to_fs64(struct super_block *sbp, u64 n)
60780 return (__force __fs64)cpu_to_be64(n);
60781 }
60782
60783-static inline u32
60784+static inline u32 __intentional_overflow(-1)
60785 fs32_to_cpu(struct super_block *sbp, __fs32 n)
60786 {
60787 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
60788diff --git a/fs/utimes.c b/fs/utimes.c
60789index f4fb7ec..3fe03c0 100644
60790--- a/fs/utimes.c
60791+++ b/fs/utimes.c
60792@@ -1,6 +1,7 @@
60793 #include <linux/compiler.h>
60794 #include <linux/file.h>
60795 #include <linux/fs.h>
60796+#include <linux/security.h>
60797 #include <linux/linkage.h>
60798 #include <linux/mount.h>
60799 #include <linux/namei.h>
60800@@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
60801 goto mnt_drop_write_and_out;
60802 }
60803 }
60804+
60805+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
60806+ error = -EACCES;
60807+ goto mnt_drop_write_and_out;
60808+ }
60809+
60810 mutex_lock(&inode->i_mutex);
60811 error = notify_change(path->dentry, &newattrs);
60812 mutex_unlock(&inode->i_mutex);
60813diff --git a/fs/xattr.c b/fs/xattr.c
60814index 3377dff..4d074d9 100644
60815--- a/fs/xattr.c
60816+++ b/fs/xattr.c
60817@@ -227,6 +227,27 @@ int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name,
60818 return rc;
60819 }
60820
60821+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
60822+ssize_t
60823+pax_getxattr(struct dentry *dentry, void *value, size_t size)
60824+{
60825+ struct inode *inode = dentry->d_inode;
60826+ ssize_t error;
60827+
60828+ error = inode_permission(inode, MAY_EXEC);
60829+ if (error)
60830+ return error;
60831+
60832+ if (inode->i_op->getxattr)
60833+ error = inode->i_op->getxattr(dentry, XATTR_NAME_PAX_FLAGS, value, size);
60834+ else
60835+ error = -EOPNOTSUPP;
60836+
60837+ return error;
60838+}
60839+EXPORT_SYMBOL(pax_getxattr);
60840+#endif
60841+
60842 ssize_t
60843 vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
60844 {
60845@@ -319,7 +340,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
60846 * Extended attribute SET operations
60847 */
60848 static long
60849-setxattr(struct dentry *d, const char __user *name, const void __user *value,
60850+setxattr(struct path *path, const char __user *name, const void __user *value,
60851 size_t size, int flags)
60852 {
60853 int error;
60854@@ -355,7 +376,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
60855 posix_acl_fix_xattr_from_user(kvalue, size);
60856 }
60857
60858- error = vfs_setxattr(d, kname, kvalue, size, flags);
60859+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
60860+ error = -EACCES;
60861+ goto out;
60862+ }
60863+
60864+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
60865 out:
60866 if (vvalue)
60867 vfree(vvalue);
60868@@ -377,7 +403,7 @@ retry:
60869 return error;
60870 error = mnt_want_write(path.mnt);
60871 if (!error) {
60872- error = setxattr(path.dentry, name, value, size, flags);
60873+ error = setxattr(&path, name, value, size, flags);
60874 mnt_drop_write(path.mnt);
60875 }
60876 path_put(&path);
60877@@ -401,7 +427,7 @@ retry:
60878 return error;
60879 error = mnt_want_write(path.mnt);
60880 if (!error) {
60881- error = setxattr(path.dentry, name, value, size, flags);
60882+ error = setxattr(&path, name, value, size, flags);
60883 mnt_drop_write(path.mnt);
60884 }
60885 path_put(&path);
60886@@ -416,16 +442,14 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
60887 const void __user *,value, size_t, size, int, flags)
60888 {
60889 struct fd f = fdget(fd);
60890- struct dentry *dentry;
60891 int error = -EBADF;
60892
60893 if (!f.file)
60894 return error;
60895- dentry = f.file->f_path.dentry;
60896- audit_inode(NULL, dentry, 0);
60897+ audit_inode(NULL, f.file->f_path.dentry, 0);
60898 error = mnt_want_write_file(f.file);
60899 if (!error) {
60900- error = setxattr(dentry, name, value, size, flags);
60901+ error = setxattr(&f.file->f_path, name, value, size, flags);
60902 mnt_drop_write_file(f.file);
60903 }
60904 fdput(f);
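
pax_getxattr() added above is an in-kernel helper that reads a file's PaX flags attribute (XATTR_NAME_PAX_FLAGS, conventionally "user.pax.flags") after an execute-permission check. From userspace the same attribute is reachable through the standard getxattr(2) interface; a sketch, assuming the flags have been set on the target file:

#include <stdio.h>
#include <sys/types.h>
#include <sys/xattr.h>

int main(int argc, char **argv)
{
	char buf[32];
	ssize_t n;

	if (argc < 2)
		return 1;
	n = getxattr(argv[1], "user.pax.flags", buf, sizeof(buf) - 1);
	if (n < 0) {
		perror("getxattr");	/* ENODATA if no flags are set */
		return 1;
	}
	buf[n] = '\0';
	printf("PaX flags: %s\n", buf);
	return 0;
}
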
60905diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
60906index 9fbea87..6b19972 100644
60907--- a/fs/xattr_acl.c
60908+++ b/fs/xattr_acl.c
60909@@ -76,8 +76,8 @@ struct posix_acl *
60910 posix_acl_from_xattr(struct user_namespace *user_ns,
60911 const void *value, size_t size)
60912 {
60913- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
60914- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
60915+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
60916+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
60917 int count;
60918 struct posix_acl *acl;
60919 struct posix_acl_entry *acl_e;
60920diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
60921index 8904284..ee0e14b 100644
60922--- a/fs/xfs/xfs_bmap.c
60923+++ b/fs/xfs/xfs_bmap.c
60924@@ -765,7 +765,7 @@ xfs_bmap_validate_ret(
60925
60926 #else
60927 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0)
60928-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
60929+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do { } while (0)
60930 #endif /* DEBUG */
60931
60932 /*
60933diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
60934index 6157424..ac98f6d 100644
60935--- a/fs/xfs/xfs_dir2_sf.c
60936+++ b/fs/xfs/xfs_dir2_sf.c
60937@@ -851,7 +851,15 @@ xfs_dir2_sf_getdents(
60938 }
60939
60940 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
60941- if (filldir(dirent, (char *)sfep->name, sfep->namelen,
60942+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
60943+ char name[sfep->namelen];
60944+ memcpy(name, sfep->name, sfep->namelen);
60945+ if (filldir(dirent, name, sfep->namelen,
60946+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
60947+ *offset = off & 0x7fffffff;
60948+ return 0;
60949+ }
60950+ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
60951 off & 0x7fffffff, ino, DT_UNKNOWN)) {
60952 *offset = off & 0x7fffffff;
60953 return 0;
60954diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
60955index 5e99968..45bd327 100644
60956--- a/fs/xfs/xfs_ioctl.c
60957+++ b/fs/xfs/xfs_ioctl.c
60958@@ -127,7 +127,7 @@ xfs_find_handle(
60959 }
60960
60961 error = -EFAULT;
60962- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
60963+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
60964 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
60965 goto out_put;
60966
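
The xfs_find_handle() change is a belt-and-braces bounds check: hsize is validated against sizeof(handle) before copy_to_user(), so a miscomputed size can never copy kernel stack beyond the handle object. The same defensive idiom in plain userspace C:

#include <stdio.h>
#include <string.h>

/* Userspace analogue of the hardening above: refuse to copy more
 * bytes out of a fixed-size object than the object holds. */
static int copy_bounded(void *dst, const void *src, size_t req, size_t max)
{
	if (req > max)		/* the added check in the patch */
		return -1;
	memcpy(dst, src, req);
	return 0;
}

int main(void)
{
	char handle[16] = "handle-data";
	char out[64];

	if (copy_bounded(out, handle, sizeof(out), sizeof(handle)) < 0)
		puts("rejected oversized copy");
	if (copy_bounded(out, handle, 8, sizeof(handle)) == 0)
		puts("bounded copy ok");
	return 0;
}
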
60967diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
60968index ca9ecaa..60100c7 100644
60969--- a/fs/xfs/xfs_iops.c
60970+++ b/fs/xfs/xfs_iops.c
60971@@ -395,7 +395,7 @@ xfs_vn_put_link(
60972 struct nameidata *nd,
60973 void *p)
60974 {
60975- char *s = nd_get_link(nd);
60976+ const char *s = nd_get_link(nd);
60977
60978 if (!IS_ERR(s))
60979 kfree(s);
60980diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
60981new file mode 100644
60982index 0000000..76e84b9
60983--- /dev/null
60984+++ b/grsecurity/Kconfig
60985@@ -0,0 +1,1063 @@
60986+#
60987+# grsecurity configuration
60988+#
60989+menu "Memory Protections"
60990+depends on GRKERNSEC
60991+
60992+config GRKERNSEC_KMEM
60993+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
60994+ default y if GRKERNSEC_CONFIG_AUTO
60995+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
60996+ help
60997+	  If you say Y here, reads from and writes to /dev/kmem and /dev/mem
60998+	  will be denied, preventing their use to modify or leak the contents
60999+	  of the running kernel. /dev/port will also not be allowed to be opened and support
61000+ for /dev/cpu/*/msr will be removed. If you have module
61001+ support disabled, enabling this will close up five ways that are
61002+ currently used to insert malicious code into the running kernel.
61003+
61004+ Even with all these features enabled, we still highly recommend that
61005+ you use the RBAC system, as it is still possible for an attacker to
61006+ modify the running kernel through privileged I/O granted by ioperm/iopl.
61007+
61008+ If you are not using XFree86, you may be able to stop this additional
61009+ case by enabling the 'Disable privileged I/O' option. Though nothing
61010+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
61011+ but only to video memory, which is the only writing we allow in this
61012+ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
61013+ not be allowed to mprotect it with PROT_WRITE later.
61014+ Enabling this feature will prevent the "cpupower" and "powertop" tools
61015+ from working.
61016+
61017+ It is highly recommended that you say Y here if you meet all the
61018+ conditions above.
61019+
61020+config GRKERNSEC_VM86
61021+ bool "Restrict VM86 mode"
61022+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
61023+ depends on X86_32
61024+
61025+ help
61026+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
61027+ make use of a special execution mode on 32bit x86 processors called
61028+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
61029+ video cards and will still work with this option enabled. The purpose
61030+ of the option is to prevent exploitation of emulation errors in
61031+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
61032+ Nearly all users should be able to enable this option.
61033+
61034+config GRKERNSEC_IO
61035+ bool "Disable privileged I/O"
61036+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
61037+ depends on X86
61038+ select RTC_CLASS
61039+ select RTC_INTF_DEV
61040+ select RTC_DRV_CMOS
61041+
61042+ help
61043+ If you say Y here, all ioperm and iopl calls will return an error.
61044+ Ioperm and iopl can be used to modify the running kernel.
61045+ Unfortunately, some programs need this access to operate properly,
61046+ the most notable of which are XFree86 and hwclock. hwclock can be
61047+ remedied by having RTC support in the kernel, so real-time
61048+ clock support is enabled if this option is enabled, to ensure
61049+ that hwclock operates correctly. XFree86 still will not
61050+ operate correctly with this option enabled, so DO NOT CHOOSE Y
61051+ IF YOU USE XFree86. If you use XFree86 and you still want to
61052+ protect your kernel against modification, use the RBAC system.
61053+
61054+config GRKERNSEC_JIT_HARDEN
61055+ bool "Harden BPF JIT against spray attacks"
61056+ default y if GRKERNSEC_CONFIG_AUTO
61057+ depends on BPF_JIT
61058+ help
61059+ If you say Y here, the native code generated by the kernel's Berkeley
61060+ Packet Filter (BPF) JIT engine will be hardened against JIT-spraying
61061+ attacks that attempt to fit attacker-beneficial instructions in
61062+ 32bit immediate fields of JIT-generated native instructions. The
61063+ attacker will generally aim to cause an unintended instruction sequence
61064+ of JIT-generated native code to execute by jumping into the middle of
61065+ a generated instruction. This feature effectively randomizes the 32bit
61066+ immediate constants present in the generated code to thwart such attacks.
61067+
61068+ If you're using KERNEXEC, it's recommended that you enable this option
61069+ to supplement the hardening of the kernel.
61070+
61071+config GRKERNSEC_PERF_HARDEN
61072+ bool "Disable unprivileged PERF_EVENTS usage by default"
61073+ default y if GRKERNSEC_CONFIG_AUTO
61074+ depends on PERF_EVENTS
61075+ help
61076+ If you say Y here, the range of acceptable values for the
61077+	  /proc/sys/kernel/perf_event_paranoid sysctl will be expanded to allow
61078+	  a new, default value of 3. When the sysctl is set to this value, no
61079+ unprivileged use of the PERF_EVENTS syscall interface will be permitted.
61080+
61081+ Though PERF_EVENTS can be used legitimately for performance monitoring
61082+ and low-level application profiling, it is forced on regardless of
61083+ configuration, has been at fault for several vulnerabilities, and
61084+ creates new opportunities for side channels and other information leaks.
61085+
61086+ This feature puts PERF_EVENTS into a secure default state and permits
61087+ the administrator to change out of it temporarily if unprivileged
61088+ application profiling is needed.
61089+
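The hardening described above extends the perf_event_paranoid sysctl with a new maximum level, 3, which blocks perf_event_open(2) for unprivileged callers entirely. A small reader to check the running setting (standard procfs path):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/perf_event_paranoid", "r");
	int level;

	if (!f) {
		perror("perf_event_paranoid");
		return 1;
	}
	if (fscanf(f, "%d", &level) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("perf_event_paranoid = %d%s\n", level,
	       level >= 3 ? " (unprivileged perf disabled)" : "");
	return 0;
}
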
61090+config GRKERNSEC_RAND_THREADSTACK
61091+ bool "Insert random gaps between thread stacks"
61092+ default y if GRKERNSEC_CONFIG_AUTO
61093+ depends on PAX_RANDMMAP && !PPC
61094+ help
61095+ If you say Y here, a random-sized gap will be enforced between allocated
61096+ thread stacks. Glibc's NPTL and other threading libraries that
61097+ pass MAP_STACK to the kernel for thread stack allocation are supported.
61098+ The implementation currently provides 8 bits of entropy for the gap.
61099+
61100+ Many distributions do not compile threaded remote services with the
61101+ -fstack-check argument to GCC, causing the variable-sized stack-based
61102+ allocator, alloca(), to not probe the stack on allocation. This
61103+ permits an unbounded alloca() to skip over any guard page and potentially
61104+ modify another thread's stack reliably. An enforced random gap
61105+ reduces the reliability of such an attack and increases the chance
61106+ that such a read/write to another thread's stack instead lands in
61107+ an unmapped area, causing a crash and triggering grsecurity's
61108+ anti-bruteforcing logic.
61109+
61110+config GRKERNSEC_PROC_MEMMAP
61111+ bool "Harden ASLR against information leaks and entropy reduction"
61112+ default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
61113+ depends on PAX_NOEXEC || PAX_ASLR
61114+ help
61115+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
61116+	  give no information about the addresses of the task's mappings if
61117+	  PaX features that rely on random addresses are enabled on the task.
61118+	  In addition to sanitizing this information and disabling other
61119+	  dangerous sources of information, this option causes reads of sensitive
61120+	  /proc/<pid> entries to be denied when the file descriptor was opened in a
61121+	  different task than the one performing the read. Such attempts are logged.
61122+ This option also limits argv/env strings for suid/sgid binaries
61123+ to 512KB to prevent a complete exhaustion of the stack entropy provided
61124+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
61125+ binaries to prevent alternative mmap layouts from being abused.
61126+
61127+ If you use PaX it is essential that you say Y here as it closes up
61128+ several holes that make full ASLR useless locally.
61129+
61130+config GRKERNSEC_BRUTE
61131+ bool "Deter exploit bruteforcing"
61132+ default y if GRKERNSEC_CONFIG_AUTO
61133+ help
61134+ If you say Y here, attempts to bruteforce exploits against forking
61135+ daemons such as apache or sshd, as well as against suid/sgid binaries
61136+ will be deterred. When a child of a forking daemon is killed by PaX
61137+ or crashes due to an illegal instruction or other suspicious signal,
61138+ the parent process will be delayed 30 seconds upon every subsequent
61139+ fork until the administrator is able to assess the situation and
61140+ restart the daemon.
61141+ In the suid/sgid case, the attempt is logged, the user has all their
61142+ existing instances of the suid/sgid binary terminated and will
61143+ be unable to execute any suid/sgid binaries for 15 minutes.
61144+
61145+ It is recommended that you also enable signal logging in the auditing
61146+ section so that logs are generated when a process triggers a suspicious
61147+ signal.
61148+ If the sysctl option is enabled, a sysctl option with name
61149+ "deter_bruteforce" is created.
61150+
61152+config GRKERNSEC_MODHARDEN
61153+ bool "Harden module auto-loading"
61154+ default y if GRKERNSEC_CONFIG_AUTO
61155+ depends on MODULES
61156+ help
61157+ If you say Y here, module auto-loading in response to use of some
61158+ feature implemented by an unloaded module will be restricted to
61159+ root users. Enabling this option helps defend against attacks
61160+ by unprivileged users who abuse the auto-loading behavior to
61161+ cause a vulnerable module to load that is then exploited.
61162+
61163+ If this option prevents a legitimate use of auto-loading for a
61164+ non-root user, the administrator can execute modprobe manually
61165+ with the exact name of the module mentioned in the alert log.
61166+ Alternatively, the administrator can add the module to the list
61167+ of modules loaded at boot by modifying init scripts.
61168+
61169+ Modification of init scripts will most likely be needed on
61170+ Ubuntu servers with encrypted home directory support enabled,
61171+ as the first non-root user logging in will cause the ecb(aes),
61172+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
61173+
61174+config GRKERNSEC_HIDESYM
61175+ bool "Hide kernel symbols"
61176+ default y if GRKERNSEC_CONFIG_AUTO
61177+ select PAX_USERCOPY_SLABS
61178+ help
61179+ If you say Y here, getting information on loaded modules, and
61180+ displaying all kernel symbols through a syscall will be restricted
61181+ to users with CAP_SYS_MODULE. For software compatibility reasons,
61182+ /proc/kallsyms will be restricted to the root user. The RBAC
61183+ system can hide that entry even from root.
61184+
61185+ This option also prevents leaking of kernel addresses through
61186+ several /proc entries.
61187+
61188+ Note that this option is only effective provided the following
61189+ conditions are met:
61190+ 1) The kernel using grsecurity is not precompiled by some distribution
61191+ 2) You have also enabled GRKERNSEC_DMESG
61192+ 3) You are using the RBAC system and hiding other files such as your
61193+ kernel image and System.map. Alternatively, enabling this option
61194+ causes the permissions on /boot, /lib/modules, and the kernel
61195+ source directory to change at compile time to prevent
61196+ reading by non-root users.
61197+ If the above conditions are met, this option will aid in providing a
61198+ useful protection against local kernel exploitation of overflows
61199+ and arbitrary read/write vulnerabilities.
61200+
61201+ It is highly recommended that you enable GRKERNSEC_PERF_HARDEN
61202+ in addition to this feature.
61203+
61204+config GRKERNSEC_KERN_LOCKOUT
61205+ bool "Active kernel exploit response"
61206+ default y if GRKERNSEC_CONFIG_AUTO
61207+ depends on X86 || ARM || PPC || SPARC
61208+ help
61209+ If you say Y here, when a PaX alert is triggered due to suspicious
61210+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
61211+ or an OOPS occurs due to bad memory accesses, instead of just
61212+ terminating the offending process (and potentially allowing
61213+ a subsequent exploit from the same user), we will take one of two
61214+ actions:
61215+ If the user was root, we will panic the system
61216+ If the user was non-root, we will log the attempt, terminate
61217+ all processes owned by the user, then prevent them from creating
61218+ any new processes until the system is restarted
61219+ This deters repeated kernel exploitation/bruteforcing attempts
61220+ and is useful for later forensics.
61221+
61222+endmenu
61223+menu "Role Based Access Control Options"
61224+depends on GRKERNSEC
61225+
61226+config GRKERNSEC_RBAC_DEBUG
61227+ bool
61228+
61229+config GRKERNSEC_NO_RBAC
61230+ bool "Disable RBAC system"
61231+ help
61232+ If you say Y here, the /dev/grsec device will be removed from the kernel,
61233+ preventing the RBAC system from being enabled. You should only say Y
61234+ here if you have no intention of using the RBAC system, so as to prevent
61235+ an attacker with root access from misusing the RBAC system to hide files
61236+ and processes when loadable module support and /dev/[k]mem have been
61237+ locked down.
61238+
61239+config GRKERNSEC_ACL_HIDEKERN
61240+ bool "Hide kernel processes"
61241+ help
61242+ If you say Y here, all kernel threads will be hidden to all
61243+ processes but those whose subject has the "view hidden processes"
61244+ flag.
61245+
61246+config GRKERNSEC_ACL_MAXTRIES
61247+ int "Maximum tries before password lockout"
61248+ default 3
61249+ help
61250+ This option enforces the maximum number of times a user can attempt
61251+ to authorize themselves with the grsecurity RBAC system before being
61252+ denied the ability to attempt authorization again for a specified time.
61253+ The lower the number, the harder it will be to brute-force a password.
61254+
61255+config GRKERNSEC_ACL_TIMEOUT
61256+ int "Time to wait after max password tries, in seconds"
61257+ default 30
61258+ help
61259+ This option specifies the time the user must wait after attempting to
61260+ authorize to the RBAC system with the maximum number of invalid
61261+ passwords. The higher the number, the harder it will be to brute-force
61262+ a password.
61263+
61264+endmenu
61265+menu "Filesystem Protections"
61266+depends on GRKERNSEC
61267+
61268+config GRKERNSEC_PROC
61269+ bool "Proc restrictions"
61270+ default y if GRKERNSEC_CONFIG_AUTO
61271+ help
61272+ If you say Y here, the permissions of the /proc filesystem
61273+ will be altered to enhance system security and privacy. You MUST
61274+	  choose either a user-only restriction or a user and group restriction.
61275+	  With the user-only restriction, users can see only the processes they
61276+	  themselves run; with the group restriction, members of a group you
61277+	  choose can additionally view all processes and files normally
61278+	  restricted to root. NOTE: If you're running identd or
61279+ ntpd as a non-root user, you will have to run it as the group you
61280+ specify here.
61281+
61282+config GRKERNSEC_PROC_USER
61283+ bool "Restrict /proc to user only"
61284+ depends on GRKERNSEC_PROC
61285+ help
61286+	  If you say Y here, non-root users will only be able to view their own
61287+	  processes, and will be restricted from viewing network-related
61288+	  information and from viewing kernel symbol and module information.
61289+
61290+config GRKERNSEC_PROC_USERGROUP
61291+ bool "Allow special group"
61292+ default y if GRKERNSEC_CONFIG_AUTO
61293+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
61294+ help
61295+ If you say Y here, you will be able to select a group that will be
61296+ able to view all processes and network-related information. If you've
61297+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
61298+ remain hidden. This option is useful if you want to run identd as
61299+ a non-root user. The group you select may also be chosen at boot time
61300+ via "grsec_proc_gid=" on the kernel commandline.
61301+
61302+config GRKERNSEC_PROC_GID
61303+ int "GID for special group"
61304+ depends on GRKERNSEC_PROC_USERGROUP
61305+ default 1001
61306+
61307+config GRKERNSEC_PROC_ADD
61308+ bool "Additional restrictions"
61309+ default y if GRKERNSEC_CONFIG_AUTO
61310+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
61311+ help
61312+ If you say Y here, additional restrictions will be placed on
61313+ /proc that keep normal users from viewing device information and
61314+ slabinfo information that could be useful for exploits.
61315+
61316+config GRKERNSEC_LINK
61317+ bool "Linking restrictions"
61318+ default y if GRKERNSEC_CONFIG_AUTO
61319+ help
61320+ If you say Y here, /tmp race exploits will be prevented, since users
61321+ will no longer be able to follow symlinks owned by other users in
61322+ world-writable +t directories (e.g. /tmp), unless the owner of the
61323+	  symlink is the owner of the directory. Users will also not be
61324+ able to hardlink to files they do not own. If the sysctl option is
61325+ enabled, a sysctl option with name "linking_restrictions" is created.
61326+
61327+config GRKERNSEC_SYMLINKOWN
61328+ bool "Kernel-enforced SymlinksIfOwnerMatch"
61329+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
61330+ help
61331+ Apache's SymlinksIfOwnerMatch option has an inherent race condition
61332+ that prevents it from being used as a security feature. As Apache
61333+ verifies the symlink by performing a stat() against the target of
61334+	  the symlink before it is followed, an attacker can set up a symlink
61335+ to point to a same-owned file, then replace the symlink with one
61336+ that targets another user's file just after Apache "validates" the
61337+ symlink -- a classic TOCTOU race. If you say Y here, a complete,
61338+ race-free replacement for Apache's "SymlinksIfOwnerMatch" option
61339+ will be in place for the group you specify. If the sysctl option
61340+ is enabled, a sysctl option with name "enforce_symlinksifowner" is
61341+ created.
61342+
61343+config GRKERNSEC_SYMLINKOWN_GID
61344+ int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
61345+ depends on GRKERNSEC_SYMLINKOWN
61346+ default 1006
61347+ help
61348+ Setting this GID determines what group kernel-enforced
61349+ SymlinksIfOwnerMatch will be enabled for. If the sysctl option
61350+ is enabled, a sysctl option with name "symlinkown_gid" is created.
61351+
61352+config GRKERNSEC_FIFO
61353+ bool "FIFO restrictions"
61354+ default y if GRKERNSEC_CONFIG_AUTO
61355+ help
61356+ If you say Y here, users will not be able to write to FIFOs they don't
61357+ own in world-writable +t directories (e.g. /tmp), unless the owner of
61358+	  the FIFO is the same as the owner of the directory it's held in. If the sysctl
61359+ option is enabled, a sysctl option with name "fifo_restrictions" is
61360+ created.
61361+
61362+config GRKERNSEC_SYSFS_RESTRICT
61363+ bool "Sysfs/debugfs restriction"
61364+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
61365+ depends on SYSFS
61366+ help
61367+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
61368+ any filesystem normally mounted under it (e.g. debugfs) will be
61369+ mostly accessible only by root. These filesystems generally provide access
61370+ to hardware and debug information that isn't appropriate for unprivileged
61371+ users of the system. Sysfs and debugfs have also become a large source
61372+ of new vulnerabilities, ranging from infoleaks to local compromise.
61373+ There has been very little oversight with an eye toward security involved
61374+ in adding new exporters of information to these filesystems, so their
61375+ use is discouraged.
61376+ For reasons of compatibility, a few directories have been whitelisted
61377+ for access by non-root users:
61378+ /sys/fs/selinux
61379+ /sys/fs/fuse
61380+ /sys/devices/system/cpu
61381+
61382+config GRKERNSEC_ROFS
61383+ bool "Runtime read-only mount protection"
61384+ help
61385+ If you say Y here, a sysctl option with name "romount_protect" will
61386+ be created. By setting this option to 1 at runtime, filesystems
61387+ will be protected in the following ways:
61388+ * No new writable mounts will be allowed
61389+ * Existing read-only mounts won't be able to be remounted read/write
61390+ * Write operations will be denied on all block devices
61391+ This option acts independently of grsec_lock: once it is set to 1,
61392+ it cannot be turned off. Therefore, please be mindful of the resulting
61393+ behavior if this option is enabled in an init script on a read-only
61394+ filesystem. This feature is mainly intended for secure embedded systems.
61395+
61396+config GRKERNSEC_DEVICE_SIDECHANNEL
61397+ bool "Eliminate stat/notify-based device sidechannels"
61398+ default y if GRKERNSEC_CONFIG_AUTO
61399+ help
61400+ If you say Y here, timing analyses on block or character
61401+ devices like /dev/ptmx using stat or inotify/dnotify/fanotify
61402+ will be thwarted for unprivileged users. If a process without
61403+ CAP_MKNOD stats such a device, the last access and last modify times
61404+ will match the device's create time. No access or modify events
61405+ will be triggered through inotify/dnotify/fanotify for such devices.
61406+ This feature will prevent attacks that may at a minimum
61407+ allow an attacker to determine the administrator's password length.
61408+
61409+config GRKERNSEC_CHROOT
61410+ bool "Chroot jail restrictions"
61411+ default y if GRKERNSEC_CONFIG_AUTO
61412+ help
61413+ If you say Y here, you will be able to choose several options that will
61414+ make breaking out of a chrooted jail much more difficult. If you
61415+ encounter no software incompatibilities with the following options, it
61416+ is recommended that you enable each one.
61417+
61418+config GRKERNSEC_CHROOT_MOUNT
61419+ bool "Deny mounts"
61420+ default y if GRKERNSEC_CONFIG_AUTO
61421+ depends on GRKERNSEC_CHROOT
61422+ help
61423+ If you say Y here, processes inside a chroot will not be able to
61424+ mount or remount filesystems. If the sysctl option is enabled, a
61425+ sysctl option with name "chroot_deny_mount" is created.
61426+
61427+config GRKERNSEC_CHROOT_DOUBLE
61428+ bool "Deny double-chroots"
61429+ default y if GRKERNSEC_CONFIG_AUTO
61430+ depends on GRKERNSEC_CHROOT
61431+ help
61432+ If you say Y here, processes inside a chroot will not be able to chroot
61433+ again outside the chroot. This is a widely used method of breaking
61434+ out of a chroot jail and should not be allowed. If the sysctl
61435+ option is enabled, a sysctl option with name
61436+ "chroot_deny_chroot" is created.
61437+
61438+config GRKERNSEC_CHROOT_PIVOT
61439+ bool "Deny pivot_root in chroot"
61440+ default y if GRKERNSEC_CONFIG_AUTO
61441+ depends on GRKERNSEC_CHROOT
61442+ help
61443+ If you say Y here, processes inside a chroot will not be able to use
61444+ a function called pivot_root() that was introduced in Linux 2.3.41. It
61445+	  works similarly to chroot in that it changes the root filesystem. This
61446+ function could be misused in a chrooted process to attempt to break out
61447+ of the chroot, and therefore should not be allowed. If the sysctl
61448+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
61449+ created.
61450+
61451+config GRKERNSEC_CHROOT_CHDIR
61452+ bool "Enforce chdir(\"/\") on all chroots"
61453+ default y if GRKERNSEC_CONFIG_AUTO
61454+ depends on GRKERNSEC_CHROOT
61455+ help
61456+ If you say Y here, the current working directory of all newly-chrooted
61457+	  applications will be set to the root directory of the chroot.
61458+ The man page on chroot(2) states:
61459+ Note that this call does not change the current working
61460+ directory, so that `.' can be outside the tree rooted at
61461+ `/'. In particular, the super-user can escape from a
61462+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
61463+
61464+ It is recommended that you say Y here, since it's not known to break
61465+ any software. If the sysctl option is enabled, a sysctl option with
61466+ name "chroot_enforce_chdir" is created.
61467+
61468+config GRKERNSEC_CHROOT_CHMOD
61469+ bool "Deny (f)chmod +s"
61470+ default y if GRKERNSEC_CONFIG_AUTO
61471+ depends on GRKERNSEC_CHROOT
61472+ help
61473+ If you say Y here, processes inside a chroot will not be able to chmod
61474+ or fchmod files to make them have suid or sgid bits. This protects
61475+ against another published method of breaking a chroot. If the sysctl
61476+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
61477+ created.
61478+
61479+config GRKERNSEC_CHROOT_FCHDIR
61480+ bool "Deny fchdir out of chroot"
61481+ default y if GRKERNSEC_CONFIG_AUTO
61482+ depends on GRKERNSEC_CHROOT
61483+ help
61484+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
61485+ to a file descriptor of the chrooting process that points to a directory
61486+ outside the filesystem will be stopped. If the sysctl option
61487+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
61488+
61489+config GRKERNSEC_CHROOT_MKNOD
61490+ bool "Deny mknod"
61491+ default y if GRKERNSEC_CONFIG_AUTO
61492+ depends on GRKERNSEC_CHROOT
61493+ help
61494+ If you say Y here, processes inside a chroot will not be allowed to
61495+ mknod. The problem with using mknod inside a chroot is that it
61496+ would allow an attacker to create a device entry that is the same
61497+	  as one on the physical root of your system, which could be anything
61498+	  from the console device to a device for your hard drive (which
61499+ they could then use to wipe the drive or steal data). It is recommended
61500+ that you say Y here, unless you run into software incompatibilities.
61501+ If the sysctl option is enabled, a sysctl option with name
61502+ "chroot_deny_mknod" is created.
61503+
61504+config GRKERNSEC_CHROOT_SHMAT
61505+ bool "Deny shmat() out of chroot"
61506+ default y if GRKERNSEC_CONFIG_AUTO
61507+ depends on GRKERNSEC_CHROOT
61508+ help
61509+ If you say Y here, processes inside a chroot will not be able to attach
61510+ to shared memory segments that were created outside of the chroot jail.
61511+ It is recommended that you say Y here. If the sysctl option is enabled,
61512+ a sysctl option with name "chroot_deny_shmat" is created.
61513+
61514+config GRKERNSEC_CHROOT_UNIX
61515+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
61516+ default y if GRKERNSEC_CONFIG_AUTO
61517+ depends on GRKERNSEC_CHROOT
61518+ help
61519+ If you say Y here, processes inside a chroot will not be able to
61520+ connect to abstract (meaning not belonging to a filesystem) Unix
61521+ domain sockets that were bound outside of a chroot. It is recommended
61522+ that you say Y here. If the sysctl option is enabled, a sysctl option
61523+ with name "chroot_deny_unix" is created.
61524+
61525+config GRKERNSEC_CHROOT_FINDTASK
61526+ bool "Protect outside processes"
61527+ default y if GRKERNSEC_CONFIG_AUTO
61528+ depends on GRKERNSEC_CHROOT
61529+ help
61530+ If you say Y here, processes inside a chroot will not be able to
61531+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
61532+ getsid, or view any process outside of the chroot. If the sysctl
61533+ option is enabled, a sysctl option with name "chroot_findtask" is
61534+ created.
61535+
61536+config GRKERNSEC_CHROOT_NICE
61537+ bool "Restrict priority changes"
61538+ default y if GRKERNSEC_CONFIG_AUTO
61539+ depends on GRKERNSEC_CHROOT
61540+ help
61541+ If you say Y here, processes inside a chroot will not be able to raise
61542+ the priority of processes in the chroot, or alter the priority of
61543+ processes outside the chroot. This provides more security than simply
61544+ removing CAP_SYS_NICE from the process' capability set. If the
61545+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
61546+ is created.
61547+
61548+config GRKERNSEC_CHROOT_SYSCTL
61549+ bool "Deny sysctl writes"
61550+ default y if GRKERNSEC_CONFIG_AUTO
61551+ depends on GRKERNSEC_CHROOT
61552+ help
61553+ If you say Y here, an attacker in a chroot will not be able to
61554+ write to sysctl entries, either by sysctl(2) or through a /proc
61555+ interface. It is strongly recommended that you say Y here. If the
61556+ sysctl option is enabled, a sysctl option with name
61557+ "chroot_deny_sysctl" is created.
61558+
61559+config GRKERNSEC_CHROOT_CAPS
61560+ bool "Capability restrictions"
61561+ default y if GRKERNSEC_CONFIG_AUTO
61562+ depends on GRKERNSEC_CHROOT
61563+ help
61564+ If you say Y here, the capabilities on all processes within a
61565+ chroot jail will be lowered to stop module insertion, raw i/o,
61566+ system and net admin tasks, rebooting the system, modifying immutable
61567+ files, modifying IPC owned by another, and changing the system time.
61568+	  This is left as an option because it can break some apps. Disable this
61569+ if your chrooted apps are having problems performing those kinds of
61570+ tasks. If the sysctl option is enabled, a sysctl option with
61571+ name "chroot_caps" is created.
61572+
61573+config GRKERNSEC_CHROOT_INITRD
61574+ bool "Exempt initrd tasks from restrictions"
61575+ default y if GRKERNSEC_CONFIG_AUTO
61576+ depends on GRKERNSEC_CHROOT && BLK_DEV_RAM
61577+ help
61578+ If you say Y here, tasks started prior to init will be exempted from
61579+ grsecurity's chroot restrictions. This option is mainly meant to
61580+ resolve Plymouth's performing privileged operations unnecessarily
61581+ in a chroot.
61582+
61583+endmenu
61584+menu "Kernel Auditing"
61585+depends on GRKERNSEC
61586+
61587+config GRKERNSEC_AUDIT_GROUP
61588+ bool "Single group for auditing"
61589+ help
61590+ If you say Y here, the exec and chdir logging features will only operate
61591+ on a group you specify. This option is recommended if you only want to
61592+ watch certain users instead of having a large amount of logs from the
61593+ entire system. If the sysctl option is enabled, a sysctl option with
61594+ name "audit_group" is created.
61595+
61596+config GRKERNSEC_AUDIT_GID
61597+ int "GID for auditing"
61598+ depends on GRKERNSEC_AUDIT_GROUP
61599+ default 1007
61600+
61601+config GRKERNSEC_EXECLOG
61602+ bool "Exec logging"
61603+ help
61604+ If you say Y here, all execve() calls will be logged (since the
61605+ other exec*() calls are frontends to execve(), all execution
61606+ will be logged). Useful for shell-servers that like to keep track
61607+ of their users. If the sysctl option is enabled, a sysctl option with
61608+ name "exec_logging" is created.
61609+ WARNING: This option when enabled will produce a LOT of logs, especially
61610+ on an active system.
61611+
61612+config GRKERNSEC_RESLOG
61613+ bool "Resource logging"
61614+ default y if GRKERNSEC_CONFIG_AUTO
61615+ help
61616+ If you say Y here, all attempts to overstep resource limits will
61617+ be logged with the resource name, the requested size, and the current
61618+ limit. It is highly recommended that you say Y here. If the sysctl
61619+ option is enabled, a sysctl option with name "resource_logging" is
61620+ created. If the RBAC system is enabled, the sysctl value is ignored.
61621+
61622+config GRKERNSEC_CHROOT_EXECLOG
61623+ bool "Log execs within chroot"
61624+ help
61625+ If you say Y here, all executions inside a chroot jail will be logged
61626+ to syslog. This can cause a large amount of logs if certain
61627+	  applications (e.g. djb's daemontools) are installed on the system, and
61628+ is therefore left as an option. If the sysctl option is enabled, a
61629+ sysctl option with name "chroot_execlog" is created.
61630+
61631+config GRKERNSEC_AUDIT_PTRACE
61632+ bool "Ptrace logging"
61633+ help
61634+ If you say Y here, all attempts to attach to a process via ptrace
61635+ will be logged. If the sysctl option is enabled, a sysctl option
61636+ with name "audit_ptrace" is created.
61637+
61638+config GRKERNSEC_AUDIT_CHDIR
61639+ bool "Chdir logging"
61640+ help
61641+ If you say Y here, all chdir() calls will be logged. If the sysctl
61642+ option is enabled, a sysctl option with name "audit_chdir" is created.
61643+
61644+config GRKERNSEC_AUDIT_MOUNT
61645+ bool "(Un)Mount logging"
61646+ help
61647+ If you say Y here, all mounts and unmounts will be logged. If the
61648+ sysctl option is enabled, a sysctl option with name "audit_mount" is
61649+ created.
61650+
61651+config GRKERNSEC_SIGNAL
61652+ bool "Signal logging"
61653+ default y if GRKERNSEC_CONFIG_AUTO
61654+ help
61655+ If you say Y here, certain important signals will be logged, such as
61656+	  SIGSEGV, which will as a result inform you when an error in a program
61657+	  occurred, which in some cases could indicate an exploit attempt.
61658+ If the sysctl option is enabled, a sysctl option with name
61659+ "signal_logging" is created.
61660+
61661+config GRKERNSEC_FORKFAIL
61662+ bool "Fork failure logging"
61663+ help
61664+ If you say Y here, all failed fork() attempts will be logged.
61665+ This could suggest a fork bomb, or someone attempting to overstep
61666+ their process limit. If the sysctl option is enabled, a sysctl option
61667+ with name "forkfail_logging" is created.
61668+
61669+config GRKERNSEC_TIME
61670+ bool "Time change logging"
61671+ default y if GRKERNSEC_CONFIG_AUTO
61672+ help
61673+ If you say Y here, any changes of the system clock will be logged.
61674+ If the sysctl option is enabled, a sysctl option with name
61675+ "timechange_logging" is created.
61676+
61677+config GRKERNSEC_PROC_IPADDR
61678+ bool "/proc/<pid>/ipaddr support"
61679+ default y if GRKERNSEC_CONFIG_AUTO
61680+ help
61681+ If you say Y here, a new entry will be added to each /proc/<pid>
61682+ directory that contains the IP address of the person using the task.
61683+ The IP is carried across local TCP and AF_UNIX stream sockets.
61684+ This information can be useful for IDS/IPSes to perform remote response
61685+ to a local attack. The entry is readable by only the owner of the
61686+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
61687+ the RBAC system), and thus does not create privacy concerns.
61688+
61689+config GRKERNSEC_RWXMAP_LOG
61690+ bool 'Denied RWX mmap/mprotect logging'
61691+ default y if GRKERNSEC_CONFIG_AUTO
61692+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
61693+ help
61694+ If you say Y here, calls to mmap() and mprotect() with explicit
61695+ usage of PROT_WRITE and PROT_EXEC together will be logged when
61696+ denied by the PAX_MPROTECT feature. This feature will also
61697+ log other problematic scenarios that can occur when PAX_MPROTECT
61698+ is enabled on a binary, like textrels and PT_GNU_STACK. If the
61699+ sysctl option is enabled, a sysctl option with name "rwxmap_logging"
61700+ is created.
61701+
61702+endmenu
61703+
61704+menu "Executable Protections"
61705+depends on GRKERNSEC
61706+
61707+config GRKERNSEC_DMESG
61708+ bool "Dmesg(8) restriction"
61709+ default y if GRKERNSEC_CONFIG_AUTO
61710+ help
61711+ If you say Y here, non-root users will not be able to use dmesg(8)
61712+ to view the contents of the kernel's circular log buffer.
61713+ The kernel's log buffer often contains kernel addresses and other
61714+ identifying information useful to an attacker in fingerprinting a
61715+ system for a targeted exploit.
61716+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
61717+ created.
61718+
61719+config GRKERNSEC_HARDEN_PTRACE
61720+ bool "Deter ptrace-based process snooping"
61721+ default y if GRKERNSEC_CONFIG_AUTO
61722+ help
61723+ If you say Y here, TTY sniffers and other malicious monitoring
61724+ programs implemented through ptrace will be defeated. If you
61725+ have been using the RBAC system, this option has already been
61726+ enabled for several years for all users, with the ability to make
61727+ fine-grained exceptions.
61728+
61729+ This option only affects the ability of non-root users to ptrace
61730+	  processes that are not a descendant of the ptracing process.
61731+ This means that strace ./binary and gdb ./binary will still work,
61732+ but attaching to arbitrary processes will not. If the sysctl
61733+ option is enabled, a sysctl option with name "harden_ptrace" is
61734+ created.
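
	  A sketch of the distinction drawn above (illustrative; the target pid
	  is arbitrary): attaching to a process that is not a descendant fails
	  for unprivileged users, while "strace ./binary" style tracing of one's
	  own children keeps working:

	    #include <stdio.h>
	    #include <stdlib.h>
	    #include <sys/ptrace.h>
	    #include <sys/types.h>

	    int main(int argc, char **argv)
	    {
	            pid_t target;

	            if (argc < 2)
	                    return 1;
	            target = (pid_t)atoi(argv[1]);  /* some unrelated pid */

	            /* denied with EPERM for non-root users under harden_ptrace */
	            if (ptrace(PTRACE_ATTACH, target, NULL, NULL) == -1)
	                    perror("PTRACE_ATTACH");
	            return 0;
	    }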
61735+
61736+config GRKERNSEC_PTRACE_READEXEC
61737+ bool "Require read access to ptrace sensitive binaries"
61738+ default y if GRKERNSEC_CONFIG_AUTO
61739+ help
61740+ If you say Y here, unprivileged users will not be able to ptrace unreadable
61741+	  binaries. This option is useful in environments that remove the read
61742+	  bits (e.g. file mode 4711) from suid binaries to prevent infoleaking
61743+	  of their contents. This option adds consistency to the use of that
61744+	  file mode, as without it an unprivileged user could still read the
61745+	  binary out simply by ptracing it as it runs.
61746+
61747+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
61748+ is created.
61749+
61750+config GRKERNSEC_SETXID
61751+ bool "Enforce consistent multithreaded privileges"
61752+ default y if GRKERNSEC_CONFIG_AUTO
61753+ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
61754+ help
61755+ If you say Y here, a change from a root uid to a non-root uid
61756+ in a multithreaded application will cause the resulting uids,
61757+ gids, supplementary groups, and capabilities in that thread
61758+ to be propagated to the other threads of the process. In most
61759+ cases this is unnecessary, as glibc will emulate this behavior
61760+ on behalf of the application. Other libcs do not act in the
61761+ same way, allowing the other threads of the process to continue
61762+ running with root privileges. If the sysctl option is enabled,
61763+ a sysctl option with name "consistent_setxid" is created.
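
	  A hypothetical illustration of the gap this closes (build with
	  -pthread and run as root): invoking the raw setuid syscall bypasses
	  glibc's cross-thread propagation and changes only the calling thread,
	  so without this option the worker thread keeps uid 0:

	    #include <pthread.h>
	    #include <stdio.h>
	    #include <sys/syscall.h>
	    #include <unistd.h>

	    static void *worker(void *arg)
	    {
	            sleep(1);
	            /* prints 0 without this option, 1000 with it */
	            printf("worker euid: %d\n", (int)geteuid());
	            return NULL;
	    }

	    int main(void)
	    {
	            pthread_t t;

	            pthread_create(&t, NULL, worker, NULL);
	            syscall(SYS_setuid, 1000);      /* raw syscall: this thread only */
	            pthread_join(t, NULL);
	            return 0;
	    }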
61764+
61765+config GRKERNSEC_TPE
61766+ bool "Trusted Path Execution (TPE)"
61767+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
61768+ help
61769+ If you say Y here, you will be able to choose a gid to add to the
61770+ supplementary groups of users you want to mark as "untrusted."
61771+ These users will not be able to execute any files that are not in
61772+ root-owned directories writable only by root. If the sysctl option
61773+ is enabled, a sysctl option with name "tpe" is created.
61774+
61775+config GRKERNSEC_TPE_ALL
61776+ bool "Partially restrict all non-root users"
61777+ depends on GRKERNSEC_TPE
61778+ help
61779+ If you say Y here, all non-root users will be covered under
61780+ a weaker TPE restriction. This is separate from, and in addition to,
61781+ the main TPE options that you have selected elsewhere. Thus, if a
61782+ "trusted" GID is chosen, this restriction applies to even that GID.
61783+ Under this restriction, all non-root users will only be allowed to
61784+ execute files in directories they own that are not group or
61785+ world-writable, or in directories owned by root and writable only by
61786+ root. If the sysctl option is enabled, a sysctl option with name
61787+ "tpe_restrict_all" is created.
61788+
61789+config GRKERNSEC_TPE_INVERT
61790+ bool "Invert GID option"
61791+ depends on GRKERNSEC_TPE
61792+ help
61793+ If you say Y here, the group you specify in the TPE configuration will
61794+ decide what group TPE restrictions will be *disabled* for. This
61795+ option is useful if you want TPE restrictions to be applied to most
61796+ users on the system. If the sysctl option is enabled, a sysctl option
61797+ with name "tpe_invert" is created. Unlike other sysctl options, this
61798+ entry will default to on for backward-compatibility.
61799+
61800+config GRKERNSEC_TPE_GID
61801+ int
61802+ default GRKERNSEC_TPE_UNTRUSTED_GID if (GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT)
61803+ default GRKERNSEC_TPE_TRUSTED_GID if (GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT)
61804+
61805+config GRKERNSEC_TPE_UNTRUSTED_GID
61806+ int "GID for TPE-untrusted users"
61807+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
61808+ default 1005
61809+ help
61810+ Setting this GID determines what group TPE restrictions will be
61811+ *enabled* for. If the sysctl option is enabled, a sysctl option
61812+ with name "tpe_gid" is created.
61813+
61814+config GRKERNSEC_TPE_TRUSTED_GID
61815+ int "GID for TPE-trusted users"
61816+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
61817+ default 1005
61818+ help
61819+ Setting this GID determines what group TPE restrictions will be
61820+ *disabled* for. If the sysctl option is enabled, a sysctl option
61821+ with name "tpe_gid" is created.
61822+
61823+endmenu
61824+menu "Network Protections"
61825+depends on GRKERNSEC
61826+
61827+config GRKERNSEC_RANDNET
61828+ bool "Larger entropy pools"
61829+ default y if GRKERNSEC_CONFIG_AUTO
61830+ help
61831+ If you say Y here, the entropy pools used for many features of Linux
61832+ and grsecurity will be doubled in size. Since several grsecurity
61833+ features use additional randomness, it is recommended that you say Y
61834+	  here. Saying Y here has a similar effect to modifying
61835+ /proc/sys/kernel/random/poolsize.
61836+
61837+config GRKERNSEC_BLACKHOLE
61838+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
61839+ default y if GRKERNSEC_CONFIG_AUTO
61840+ depends on NET
61841+ help
61842+ If you say Y here, neither TCP resets nor ICMP
61843+ destination-unreachable packets will be sent in response to packets
61844+ sent to ports for which no associated listening process exists.
61845+	  This feature supports both IPv4 and IPv6 and exempts the
61846+ loopback interface from blackholing. Enabling this feature
61847+ makes a host more resilient to DoS attacks and reduces network
61848+ visibility against scanners.
61849+
61850+ The blackhole feature as-implemented is equivalent to the FreeBSD
61851+ blackhole feature, as it prevents RST responses to all packets, not
61852+ just SYNs. Under most application behavior this causes no
61853+ problems, but applications (like haproxy) may not close certain
61854+ connections in a way that cleanly terminates them on the remote
61855+ end, leaving the remote host in LAST_ACK state. Because of this
61856+ side-effect and to prevent intentional LAST_ACK DoSes, this
61857+ feature also adds automatic mitigation against such attacks.
61858+ The mitigation drastically reduces the amount of time a socket
61859+ can spend in LAST_ACK state. If you're using haproxy and not
61860+ all servers it connects to have this option enabled, consider
61861+ disabling this feature on the haproxy host.
61862+
61863+ If the sysctl option is enabled, two sysctl options with names
61864+ "ip_blackhole" and "lastack_retries" will be created.
61865+ While "ip_blackhole" takes the standard zero/non-zero on/off
61866+ toggle, "lastack_retries" uses the same kinds of values as
61867+ "tcp_retries1" and "tcp_retries2". The default value of 4
61868+ prevents a socket from lasting more than 45 seconds in LAST_ACK
61869+ state.
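
	  A sketch of runtime tuning through the sysctl interface (assumes the
	  sysctl support below is compiled in; error handling trimmed, and the
	  values shown are the defaults discussed above):

	    #include <stdio.h>

	    static int write_sysctl(const char *path, const char *val)
	    {
	            FILE *f = fopen(path, "w");

	            if (!f)
	                    return -1;
	            fputs(val, f);
	            return fclose(f);
	    }

	    int main(void)
	    {
	            write_sysctl("/proc/sys/kernel/grsecurity/ip_blackhole", "1");
	            write_sysctl("/proc/sys/kernel/grsecurity/lastack_retries", "4");
	            return 0;
	    }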
61870+
61871+config GRKERNSEC_NO_SIMULT_CONNECT
61872+ bool "Disable TCP Simultaneous Connect"
61873+ default y if GRKERNSEC_CONFIG_AUTO
61874+ depends on NET
61875+ help
61876+ If you say Y here, a feature by Willy Tarreau will be enabled that
61877+ removes a weakness in Linux's strict implementation of TCP that
61878+ allows two clients to connect to each other without either entering
61879+ a listening state. The weakness allows an attacker to easily prevent
61880+ a client from connecting to a known server provided the source port
61881+ for the connection is guessed correctly.
61882+
61883+ As the weakness could be used to prevent an antivirus or IPS from
61884+ fetching updates, or prevent an SSL gateway from fetching a CRL,
61885+ it should be eliminated by enabling this option. Though Linux is
61886+ one of few operating systems supporting simultaneous connect, it
61887+ has no legitimate use in practice and is rarely supported by firewalls.
61888+
61889+config GRKERNSEC_SOCKET
61890+ bool "Socket restrictions"
61891+ depends on NET
61892+ help
61893+ If you say Y here, you will be able to choose from several options.
61894+ If you assign a GID on your system and add it to the supplementary
61895+ groups of users you want to restrict socket access to, this patch
61896+	  will enforce up to three restrictions, based on the option(s) you choose.
61897+
61898+config GRKERNSEC_SOCKET_ALL
61899+ bool "Deny any sockets to group"
61900+ depends on GRKERNSEC_SOCKET
61901+ help
61902+	  If you say Y here, you will be able to choose a GID whose users will
61903+ be unable to connect to other hosts from your machine or run server
61904+ applications from your machine. If the sysctl option is enabled, a
61905+ sysctl option with name "socket_all" is created.
61906+
61907+config GRKERNSEC_SOCKET_ALL_GID
61908+ int "GID to deny all sockets for"
61909+ depends on GRKERNSEC_SOCKET_ALL
61910+ default 1004
61911+ help
61912+ Here you can choose the GID to disable socket access for. Remember to
61913+ add the users you want socket access disabled for to the GID
61914+ specified here. If the sysctl option is enabled, a sysctl option
61915+ with name "socket_all_gid" is created.
61916+
61917+config GRKERNSEC_SOCKET_CLIENT
61918+ bool "Deny client sockets to group"
61919+ depends on GRKERNSEC_SOCKET
61920+ help
61921+	  If you say Y here, you will be able to choose a GID whose users will
61922+ be unable to connect to other hosts from your machine, but will be
61923+ able to run servers. If this option is enabled, all users in the group
61924+ you specify will have to use passive mode when initiating ftp transfers
61925+ from the shell on your machine. If the sysctl option is enabled, a
61926+ sysctl option with name "socket_client" is created.
61927+
61928+config GRKERNSEC_SOCKET_CLIENT_GID
61929+ int "GID to deny client sockets for"
61930+ depends on GRKERNSEC_SOCKET_CLIENT
61931+ default 1003
61932+ help
61933+ Here you can choose the GID to disable client socket access for.
61934+ Remember to add the users you want client socket access disabled for to
61935+ the GID specified here. If the sysctl option is enabled, a sysctl
61936+ option with name "socket_client_gid" is created.
61937+
61938+config GRKERNSEC_SOCKET_SERVER
61939+ bool "Deny server sockets to group"
61940+ depends on GRKERNSEC_SOCKET
61941+ help
61942+	  If you say Y here, you will be able to choose a GID whose users will
61943+ be unable to run server applications from your machine. If the sysctl
61944+ option is enabled, a sysctl option with name "socket_server" is created.
61945+
61946+config GRKERNSEC_SOCKET_SERVER_GID
61947+ int "GID to deny server sockets for"
61948+ depends on GRKERNSEC_SOCKET_SERVER
61949+ default 1002
61950+ help
61951+ Here you can choose the GID to disable server socket access for.
61952+ Remember to add the users you want server socket access disabled for to
61953+ the GID specified here. If the sysctl option is enabled, a sysctl
61954+ option with name "socket_server_gid" is created.
61955+
61956+endmenu
61957+
61958+menu "Physical Protections"
61959+depends on GRKERNSEC
61960+
61961+config GRKERNSEC_DENYUSB
61962+ bool "Deny new USB connections after toggle"
61963+ default y if GRKERNSEC_CONFIG_AUTO
61964+ help
61965+ If you say Y here, a new sysctl option with name "deny_new_usb"
61966+ will be created. Setting its value to 1 will prevent any new
61967+ USB devices from being recognized by the OS. Any attempted USB
61968+ device insertion will be logged. This option is intended to be
61969+ used against custom USB devices designed to exploit vulnerabilities
61970+ in various USB device drivers.
61971+
61972+ For greatest effectiveness, this sysctl should be set after any
61973+ relevant init scripts. Once set, it cannot be unset.
61974+
61975+endmenu
61976+
61977+menu "Sysctl Support"
61978+depends on GRKERNSEC && SYSCTL
61979+
61980+config GRKERNSEC_SYSCTL
61981+ bool "Sysctl support"
61982+ default y if GRKERNSEC_CONFIG_AUTO
61983+ help
61984+ If you say Y here, you will be able to change the options that
61985+ grsecurity runs with at bootup, without having to recompile your
61986+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
61987+ to enable (1) or disable (0) various features. All the sysctl entries
61988+ are mutable until the "grsec_lock" entry is set to a non-zero value.
61989+ All features enabled in the kernel configuration are disabled at boot
61990+ if you do not say Y to the "Turn on features by default" option.
61991+ All options should be set at startup, and the grsec_lock entry should
61992+ be set to a non-zero value after all the options are set.
61993+ *THIS IS EXTREMELY IMPORTANT*
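
	  A sketch of the init-time sequence this prescribes: set the desired
	  toggles first, then set grsec_lock last so the tree becomes immutable
	  (the deny_new_usb entry is just an example toggle):

	    #include <stdio.h>

	    static void set1(const char *path)
	    {
	            FILE *f = fopen(path, "w");

	            if (f) {
	                    fputs("1", f);
	                    fclose(f);
	            }
	    }

	    int main(void)
	    {
	            set1("/proc/sys/kernel/grsecurity/deny_new_usb");
	            set1("/proc/sys/kernel/grsecurity/grsec_lock"); /* always last */
	            return 0;
	    }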
61994+
61995+config GRKERNSEC_SYSCTL_DISTRO
61996+ bool "Extra sysctl support for distro makers (READ HELP)"
61997+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
61998+ help
61999+ If you say Y here, additional sysctl options will be created
62000+ for features that affect processes running as root. Therefore,
62001+ it is critical when using this option that the grsec_lock entry be
62002+	  enabled after boot. Only distros that ship prebuilt kernel packages
62003+	  with this option enabled and that can ensure grsec_lock is set
62004+	  after boot should use this option.
62005+ *Failure to set grsec_lock after boot makes all grsec features
62006+ this option covers useless*
62007+
62008+ Currently this option creates the following sysctl entries:
62009+ "Disable Privileged I/O": "disable_priv_io"
62010+
62011+config GRKERNSEC_SYSCTL_ON
62012+ bool "Turn on features by default"
62013+ default y if GRKERNSEC_CONFIG_AUTO
62014+ depends on GRKERNSEC_SYSCTL
62015+ help
62016+ If you say Y here, instead of having all features enabled in the
62017+ kernel configuration disabled at boot time, the features will be
62018+ enabled at boot time. It is recommended you say Y here unless
62019+ there is some reason you would want all sysctl-tunable features to
62020+ be disabled by default. As mentioned elsewhere, it is important
62021+ to enable the grsec_lock entry once you have finished modifying
62022+ the sysctl entries.
62023+
62024+endmenu
62025+menu "Logging Options"
62026+depends on GRKERNSEC
62027+
62028+config GRKERNSEC_FLOODTIME
62029+ int "Seconds in between log messages (minimum)"
62030+ default 10
62031+ help
62032+ This option allows you to enforce the number of seconds between
62033+ grsecurity log messages. The default should be suitable for most
62034+	  people; however, if you choose to change it, pick a value small enough
62035+ to allow informative logs to be produced, but large enough to
62036+ prevent flooding.
62037+
62038+config GRKERNSEC_FLOODBURST
62039+ int "Number of messages in a burst (maximum)"
62040+ default 6
62041+ help
62042+ This option allows you to choose the maximum number of messages allowed
62043+ within the flood time interval you chose in a separate option. The
62044+	  default should be suitable for most people; however, if you find that
62045+ many of your logs are being interpreted as flooding, you may want to
62046+ raise this value.
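
	  A minimal model of how the two options interact (illustrative; this is
	  not the kernel's limiter): at most FLOODBURST messages pass in any
	  FLOODTIME-second window, and the rest are suppressed:

	    #include <time.h>

	    #define FLOODTIME  10   /* seconds, as configured above */
	    #define FLOODBURST 6    /* messages allowed per window */

	    static int grsec_log_allowed(void)
	    {
	            static time_t window_start;
	            static int count;
	            time_t now = time(NULL);

	            if (now - window_start >= FLOODTIME) {
	                    window_start = now;
	                    count = 0;
	            }
	            return count++ < FLOODBURST;
	    }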
62047+
62048+endmenu
62049diff --git a/grsecurity/Makefile b/grsecurity/Makefile
62050new file mode 100644
62051index 0000000..b0b77d5
62052--- /dev/null
62053+++ b/grsecurity/Makefile
62054@@ -0,0 +1,43 @@
62055+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
62056+# during 2001-2009 it was completely redesigned by Brad Spengler
62057+# into an RBAC system
62058+#
62059+# All code in this directory and various hooks inserted throughout the kernel
62060+# are copyright Brad Spengler - Open Source Security, Inc., and released
62061+# under the GPL v2 or higher
62062+
62063+KBUILD_CFLAGS += -Werror
62064+
62065+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
62066+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
62067+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o \
62068+ grsec_usb.o
62069+
62070+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
62071+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
62072+ gracl_learn.o grsec_log.o
62073+ifdef CONFIG_COMPAT
62074+obj-$(CONFIG_GRKERNSEC) += gracl_compat.o
62075+endif
62076+
62077+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
62078+
62079+ifdef CONFIG_NET
62080+obj-y += grsec_sock.o
62081+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
62082+endif
62083+
62084+ifndef CONFIG_GRKERNSEC
62085+obj-y += grsec_disabled.o
62086+endif
62087+
62088+ifdef CONFIG_GRKERNSEC_HIDESYM
62089+extra-y := grsec_hidesym.o
62090+$(obj)/grsec_hidesym.o:
62091+ @-chmod -f 500 /boot
62092+ @-chmod -f 500 /lib/modules
62093+ @-chmod -f 500 /lib64/modules
62094+ @-chmod -f 500 /lib32/modules
62095+ @-chmod -f 700 .
62096+ @echo ' grsec: protected kernel image paths'
62097+endif
62098diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
62099new file mode 100644
62100index 0000000..c0793fd
62101--- /dev/null
62102+++ b/grsecurity/gracl.c
62103@@ -0,0 +1,4178 @@
62104+#include <linux/kernel.h>
62105+#include <linux/module.h>
62106+#include <linux/sched.h>
62107+#include <linux/mm.h>
62108+#include <linux/file.h>
62109+#include <linux/fs.h>
62110+#include <linux/namei.h>
62111+#include <linux/mount.h>
62112+#include <linux/tty.h>
62113+#include <linux/proc_fs.h>
62114+#include <linux/lglock.h>
62115+#include <linux/slab.h>
62116+#include <linux/vmalloc.h>
62117+#include <linux/types.h>
62118+#include <linux/sysctl.h>
62119+#include <linux/netdevice.h>
62120+#include <linux/ptrace.h>
62121+#include <linux/gracl.h>
62122+#include <linux/gralloc.h>
62123+#include <linux/security.h>
62124+#include <linux/grinternal.h>
62125+#include <linux/pid_namespace.h>
62126+#include <linux/stop_machine.h>
62127+#include <linux/fdtable.h>
62128+#include <linux/percpu.h>
62130+#include <linux/hugetlb.h>
62131+#include <linux/posix-timers.h>
62132+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
62133+#include <linux/magic.h>
62134+#include <linux/pagemap.h>
62135+#include "../fs/btrfs/async-thread.h"
62136+#include "../fs/btrfs/ctree.h"
62137+#include "../fs/btrfs/btrfs_inode.h"
62138+#endif
62139+#include "../fs/mount.h"
62140+
62141+#include <asm/uaccess.h>
62142+#include <asm/errno.h>
62143+#include <asm/mman.h>
62144+
62145+extern struct lglock vfsmount_lock;
62146+
62147+static struct acl_role_db acl_role_set;
62148+static struct name_db name_set;
62149+static struct inodev_db inodev_set;
62150+
62151+/* for keeping track of userspace pointers used for subjects, so we
62152+ can share references in the kernel as well
62153+*/
62154+
62155+static struct path real_root;
62156+
62157+static struct acl_subj_map_db subj_map_set;
62158+
62159+static struct acl_role_label *default_role;
62160+
62161+static struct acl_role_label *role_list;
62162+
62163+static u16 acl_sp_role_value;
62164+
62165+extern char *gr_shared_page[4];
62166+static DEFINE_MUTEX(gr_dev_mutex);
62167+DEFINE_RWLOCK(gr_inode_lock);
62168+
62169+struct gr_arg *gr_usermode;
62170+
62171+static unsigned int gr_status __read_only = GR_STATUS_INIT;
62172+
62173+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
62174+extern void gr_clear_learn_entries(void);
62175+
62176+unsigned char *gr_system_salt;
62177+unsigned char *gr_system_sum;
62178+
62179+static struct sprole_pw **acl_special_roles = NULL;
62180+static __u16 num_sprole_pws = 0;
62181+
62182+static struct acl_role_label *kernel_role = NULL;
62183+
62184+static unsigned int gr_auth_attempts = 0;
62185+static unsigned long gr_auth_expires = 0UL;
62186+
62187+#ifdef CONFIG_NET
62188+extern struct vfsmount *sock_mnt;
62189+#endif
62190+
62191+extern struct vfsmount *pipe_mnt;
62192+extern struct vfsmount *shm_mnt;
62193+
62194+#ifdef CONFIG_HUGETLBFS
62195+extern struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
62196+#endif
62197+
62198+static struct acl_object_label *fakefs_obj_rw;
62199+static struct acl_object_label *fakefs_obj_rwx;
62200+
62201+extern int gr_init_uidset(void);
62202+extern void gr_free_uidset(void);
62203+extern void gr_remove_uid(uid_t uid);
62204+extern int gr_find_uid(uid_t uid);
62205+
62206+static int copy_acl_object_label_normal(struct acl_object_label *obj, const struct acl_object_label *userp)
62207+{
62208+ if (copy_from_user(obj, userp, sizeof(struct acl_object_label)))
62209+ return -EFAULT;
62210+
62211+ return 0;
62212+}
62213+
62214+static int copy_acl_ip_label_normal(struct acl_ip_label *ip, const struct acl_ip_label *userp)
62215+{
62216+ if (copy_from_user(ip, userp, sizeof(struct acl_ip_label)))
62217+ return -EFAULT;
62218+
62219+ return 0;
62220+}
62221+
62222+static int copy_acl_subject_label_normal(struct acl_subject_label *subj, const struct acl_subject_label *userp)
62223+{
62224+ if (copy_from_user(subj, userp, sizeof(struct acl_subject_label)))
62225+ return -EFAULT;
62226+
62227+ return 0;
62228+}
62229+
62230+static int copy_acl_role_label_normal(struct acl_role_label *role, const struct acl_role_label *userp)
62231+{
62232+ if (copy_from_user(role, userp, sizeof(struct acl_role_label)))
62233+ return -EFAULT;
62234+
62235+ return 0;
62236+}
62237+
62238+static int copy_role_allowed_ip_normal(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
62239+{
62240+ if (copy_from_user(roleip, userp, sizeof(struct role_allowed_ip)))
62241+ return -EFAULT;
62242+
62243+ return 0;
62244+}
62245+
62246+static int copy_sprole_pw_normal(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
62247+{
62248+ if (copy_from_user(pw, userp + idx, sizeof(struct sprole_pw)))
62249+ return -EFAULT;
62250+
62251+ return 0;
62252+}
62253+
62254+static int copy_gr_hash_struct_normal(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
62255+{
62256+ if (copy_from_user(hash, userp, sizeof(struct gr_hash_struct)))
62257+ return -EFAULT;
62258+
62259+ return 0;
62260+}
62261+
62262+static int copy_role_transition_normal(struct role_transition *trans, const struct role_transition *userp)
62263+{
62264+ if (copy_from_user(trans, userp, sizeof(struct role_transition)))
62265+ return -EFAULT;
62266+
62267+ return 0;
62268+}
62269+
62270+int copy_pointer_from_array_normal(void *ptr, unsigned long idx, const void *userp)
62271+{
62272+ if (copy_from_user(ptr, userp + (idx * sizeof(void *)), sizeof(void *)))
62273+ return -EFAULT;
62274+
62275+ return 0;
62276+}
62277+
62278+static int copy_gr_arg_wrapper_normal(const char __user *buf, struct gr_arg_wrapper *uwrap)
62279+{
62280+ if (copy_from_user(uwrap, buf, sizeof (struct gr_arg_wrapper)))
62281+ return -EFAULT;
62282+
62283+ if ((uwrap->version != GRSECURITY_VERSION) || (uwrap->size != sizeof(struct gr_arg)))
62284+ return -EINVAL;
62285+
62286+ return 0;
62287+}
62288+
62289+static int copy_gr_arg_normal(const struct gr_arg __user *buf, struct gr_arg *arg)
62290+{
62291+ if (copy_from_user(arg, buf, sizeof (struct gr_arg)))
62292+ return -EFAULT;
62293+
62294+ return 0;
62295+}
62296+
62297+static size_t get_gr_arg_wrapper_size_normal(void)
62298+{
62299+ return sizeof(struct gr_arg_wrapper);
62300+}
62301+
62302+#ifdef CONFIG_COMPAT
62303+extern int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap);
62304+extern int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg);
62305+extern int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp);
62306+extern int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp);
62307+extern int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp);
62308+extern int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp);
62309+extern int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp);
62310+extern int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp);
62311+extern int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp);
62312+extern int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp);
62313+extern int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp);
62314+extern size_t get_gr_arg_wrapper_size_compat(void);
62315+
62316+int (* copy_gr_arg_wrapper)(const char *buf, struct gr_arg_wrapper *uwrap) __read_only;
62317+int (* copy_gr_arg)(const struct gr_arg *buf, struct gr_arg *arg) __read_only;
62318+int (* copy_acl_object_label)(struct acl_object_label *obj, const struct acl_object_label *userp) __read_only;
62319+int (* copy_acl_subject_label)(struct acl_subject_label *subj, const struct acl_subject_label *userp) __read_only;
62320+int (* copy_acl_role_label)(struct acl_role_label *role, const struct acl_role_label *userp) __read_only;
62321+int (* copy_acl_ip_label)(struct acl_ip_label *ip, const struct acl_ip_label *userp) __read_only;
62322+int (* copy_pointer_from_array)(void *ptr, unsigned long idx, const void *userp) __read_only;
62323+int (* copy_sprole_pw)(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp) __read_only;
62324+int (* copy_gr_hash_struct)(struct gr_hash_struct *hash, const struct gr_hash_struct *userp) __read_only;
62325+int (* copy_role_transition)(struct role_transition *trans, const struct role_transition *userp) __read_only;
62326+int (* copy_role_allowed_ip)(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp) __read_only;
62327+size_t (* get_gr_arg_wrapper_size)(void) __read_only;
62328+
62329+#else
62330+#define copy_gr_arg_wrapper copy_gr_arg_wrapper_normal
62331+#define copy_gr_arg copy_gr_arg_normal
62332+#define copy_gr_hash_struct copy_gr_hash_struct_normal
62333+#define copy_acl_object_label copy_acl_object_label_normal
62334+#define copy_acl_subject_label copy_acl_subject_label_normal
62335+#define copy_acl_role_label copy_acl_role_label_normal
62336+#define copy_acl_ip_label copy_acl_ip_label_normal
62337+#define copy_pointer_from_array copy_pointer_from_array_normal
62338+#define copy_sprole_pw copy_sprole_pw_normal
62339+#define copy_role_transition copy_role_transition_normal
62340+#define copy_role_allowed_ip copy_role_allowed_ip_normal
62341+#define get_gr_arg_wrapper_size get_gr_arg_wrapper_size_normal
62342+#endif
62343+
62344+__inline__ int
62345+gr_acl_is_enabled(void)
62346+{
62347+ return (gr_status & GR_READY);
62348+}
62349+
62350+static inline dev_t __get_dev(const struct dentry *dentry)
62351+{
62352+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
62353+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
62354+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
62355+ else
62356+#endif
62357+ return dentry->d_sb->s_dev;
62358+}
62359+
62360+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
62361+{
62362+ return __get_dev(dentry);
62363+}
62364+
62365+static char gr_task_roletype_to_char(struct task_struct *task)
62366+{
62367+ switch (task->role->roletype &
62368+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
62369+ GR_ROLE_SPECIAL)) {
62370+ case GR_ROLE_DEFAULT:
62371+ return 'D';
62372+ case GR_ROLE_USER:
62373+ return 'U';
62374+ case GR_ROLE_GROUP:
62375+ return 'G';
62376+ case GR_ROLE_SPECIAL:
62377+ return 'S';
62378+ }
62379+
62380+ return 'X';
62381+}
62382+
62383+char gr_roletype_to_char(void)
62384+{
62385+ return gr_task_roletype_to_char(current);
62386+}
62387+
62388+__inline__ int
62389+gr_acl_tpe_check(void)
62390+{
62391+ if (unlikely(!(gr_status & GR_READY)))
62392+ return 0;
62393+ if (current->role->roletype & GR_ROLE_TPE)
62394+ return 1;
62395+ else
62396+ return 0;
62397+}
62398+
62399+int
62400+gr_handle_rawio(const struct inode *inode)
62401+{
62402+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
62403+ if (inode && S_ISBLK(inode->i_mode) &&
62404+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
62405+ !capable(CAP_SYS_RAWIO))
62406+ return 1;
62407+#endif
62408+ return 0;
62409+}
62410+
62411+static int
62412+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
62413+{
62414+ if (likely(lena != lenb))
62415+ return 0;
62416+
62417+ return !memcmp(a, b, lena);
62418+}
62419+
62420+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
62421+{
62422+ *buflen -= namelen;
62423+ if (*buflen < 0)
62424+ return -ENAMETOOLONG;
62425+ *buffer -= namelen;
62426+ memcpy(*buffer, str, namelen);
62427+ return 0;
62428+}
62429+
62430+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
62431+{
62432+ return prepend(buffer, buflen, name->name, name->len);
62433+}
62434+
62435+static int prepend_path(const struct path *path, struct path *root,
62436+ char **buffer, int *buflen)
62437+{
62438+ struct dentry *dentry = path->dentry;
62439+ struct vfsmount *vfsmnt = path->mnt;
62440+ struct mount *mnt = real_mount(vfsmnt);
62441+ bool slash = false;
62442+ int error = 0;
62443+
62444+ while (dentry != root->dentry || vfsmnt != root->mnt) {
62445+ struct dentry * parent;
62446+
62447+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
62448+ /* Global root? */
62449+ if (!mnt_has_parent(mnt)) {
62450+ goto out;
62451+ }
62452+ dentry = mnt->mnt_mountpoint;
62453+ mnt = mnt->mnt_parent;
62454+ vfsmnt = &mnt->mnt;
62455+ continue;
62456+ }
62457+ parent = dentry->d_parent;
62458+ prefetch(parent);
62459+ spin_lock(&dentry->d_lock);
62460+ error = prepend_name(buffer, buflen, &dentry->d_name);
62461+ spin_unlock(&dentry->d_lock);
62462+ if (!error)
62463+ error = prepend(buffer, buflen, "/", 1);
62464+ if (error)
62465+ break;
62466+
62467+ slash = true;
62468+ dentry = parent;
62469+ }
62470+
62471+out:
62472+ if (!error && !slash)
62473+ error = prepend(buffer, buflen, "/", 1);
62474+
62475+ return error;
62476+}
62477+
62478+/* this must be called with vfsmount_lock and rename_lock held */
62479+
62480+static char *__our_d_path(const struct path *path, struct path *root,
62481+ char *buf, int buflen)
62482+{
62483+ char *res = buf + buflen;
62484+ int error;
62485+
62486+ prepend(&res, &buflen, "\0", 1);
62487+ error = prepend_path(path, root, &res, &buflen);
62488+ if (error)
62489+ return ERR_PTR(error);
62490+
62491+ return res;
62492+}
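
	  A detached, self-contained illustration of the right-to-left technique
	  prepend() and __our_d_path() rely on: each component is written from
	  the end of the buffer backwards, so the finished path needs no final
	  copy (the names are made up for the example):

	    #include <stdio.h>
	    #include <string.h>

	    int main(void)
	    {
	            char buf[64];
	            char *res = buf + sizeof(buf);
	            const char *parts[] = { "file", "dir", "home" }; /* leaf to root */

	            *--res = '\0';
	            for (int i = 0; i < 3; i++) {
	                    size_t n = strlen(parts[i]);

	                    res -= n;
	                    memcpy(res, parts[i], n);
	                    *--res = '/';
	            }
	            printf("%s\n", res);        /* prints /home/dir/file */
	            return 0;
	    }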
62493+
62494+static char *
62495+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
62496+{
62497+ char *retval;
62498+
62499+ retval = __our_d_path(path, root, buf, buflen);
62500+ if (unlikely(IS_ERR(retval)))
62501+ retval = strcpy(buf, "<path too long>");
62502+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
62503+ retval[1] = '\0';
62504+
62505+ return retval;
62506+}
62507+
62508+static char *
62509+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
62510+ char *buf, int buflen)
62511+{
62512+ struct path path;
62513+ char *res;
62514+
62515+ path.dentry = (struct dentry *)dentry;
62516+ path.mnt = (struct vfsmount *)vfsmnt;
62517+
62518+ /* we can use real_root.dentry, real_root.mnt, because this is only called
62519+ by the RBAC system */
62520+ res = gen_full_path(&path, &real_root, buf, buflen);
62521+
62522+ return res;
62523+}
62524+
62525+static char *
62526+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
62527+ char *buf, int buflen)
62528+{
62529+ char *res;
62530+ struct path path;
62531+ struct path root;
62532+ struct task_struct *reaper = init_pid_ns.child_reaper;
62533+
62534+ path.dentry = (struct dentry *)dentry;
62535+ path.mnt = (struct vfsmount *)vfsmnt;
62536+
62537+ /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
62538+ get_fs_root(reaper->fs, &root);
62539+
62540+ br_read_lock(&vfsmount_lock);
62541+ write_seqlock(&rename_lock);
62542+ res = gen_full_path(&path, &root, buf, buflen);
62543+ write_sequnlock(&rename_lock);
62544+ br_read_unlock(&vfsmount_lock);
62545+
62546+ path_put(&root);
62547+ return res;
62548+}
62549+
62550+static char *
62551+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
62552+{
62553+ char *ret;
62554+ br_read_lock(&vfsmount_lock);
62555+ write_seqlock(&rename_lock);
62556+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
62557+ PAGE_SIZE);
62558+ write_sequnlock(&rename_lock);
62559+ br_read_unlock(&vfsmount_lock);
62560+ return ret;
62561+}
62562+
62563+static char *
62564+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
62565+{
62566+ char *ret;
62567+ char *buf;
62568+ int buflen;
62569+
62570+ br_read_lock(&vfsmount_lock);
62571+ write_seqlock(&rename_lock);
62572+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
62573+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
62574+ buflen = (int)(ret - buf);
62575+ if (buflen >= 5)
62576+ prepend(&ret, &buflen, "/proc", 5);
62577+ else
62578+ ret = strcpy(buf, "<path too long>");
62579+ write_sequnlock(&rename_lock);
62580+ br_read_unlock(&vfsmount_lock);
62581+ return ret;
62582+}
62583+
62584+char *
62585+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
62586+{
62587+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
62588+ PAGE_SIZE);
62589+}
62590+
62591+char *
62592+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
62593+{
62594+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
62595+ PAGE_SIZE);
62596+}
62597+
62598+char *
62599+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
62600+{
62601+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
62602+ PAGE_SIZE);
62603+}
62604+
62605+char *
62606+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
62607+{
62608+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
62609+ PAGE_SIZE);
62610+}
62611+
62612+char *
62613+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
62614+{
62615+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
62616+ PAGE_SIZE);
62617+}
62618+
62619+__inline__ __u32
62620+to_gr_audit(const __u32 reqmode)
62621+{
62622+ /* masks off auditable permission flags, then shifts them to create
62623+ auditing flags, and adds the special case of append auditing if
62624+ we're requesting write */
62625+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
62626+}
62627+
62628+struct acl_subject_label *
62629+lookup_subject_map(const struct acl_subject_label *userp)
62630+{
62631+ unsigned int index = gr_shash(userp, subj_map_set.s_size);
62632+ struct subject_map *match;
62633+
62634+ match = subj_map_set.s_hash[index];
62635+
62636+ while (match && match->user != userp)
62637+ match = match->next;
62638+
62639+ if (match != NULL)
62640+ return match->kernel;
62641+ else
62642+ return NULL;
62643+}
62644+
62645+static void
62646+insert_subj_map_entry(struct subject_map *subjmap)
62647+{
62648+ unsigned int index = gr_shash(subjmap->user, subj_map_set.s_size);
62649+ struct subject_map **curr;
62650+
62651+ subjmap->prev = NULL;
62652+
62653+ curr = &subj_map_set.s_hash[index];
62654+ if (*curr != NULL)
62655+ (*curr)->prev = subjmap;
62656+
62657+ subjmap->next = *curr;
62658+ *curr = subjmap;
62659+
62660+ return;
62661+}
62662+
62663+static struct acl_role_label *
62664+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
62665+ const gid_t gid)
62666+{
62667+ unsigned int index = gr_rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
62668+ struct acl_role_label *match;
62669+ struct role_allowed_ip *ipp;
62670+ unsigned int x;
62671+ u32 curr_ip = task->signal->curr_ip;
62672+
62673+ task->signal->saved_ip = curr_ip;
62674+
62675+ match = acl_role_set.r_hash[index];
62676+
62677+ while (match) {
62678+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
62679+ for (x = 0; x < match->domain_child_num; x++) {
62680+ if (match->domain_children[x] == uid)
62681+ goto found;
62682+ }
62683+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
62684+ break;
62685+ match = match->next;
62686+ }
62687+found:
62688+ if (match == NULL) {
62689+ try_group:
62690+ index = gr_rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
62691+ match = acl_role_set.r_hash[index];
62692+
62693+ while (match) {
62694+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
62695+ for (x = 0; x < match->domain_child_num; x++) {
62696+ if (match->domain_children[x] == gid)
62697+ goto found2;
62698+ }
62699+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
62700+ break;
62701+ match = match->next;
62702+ }
62703+found2:
62704+ if (match == NULL)
62705+ match = default_role;
62706+ if (match->allowed_ips == NULL)
62707+ return match;
62708+ else {
62709+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
62710+ if (likely
62711+ ((ntohl(curr_ip) & ipp->netmask) ==
62712+ (ntohl(ipp->addr) & ipp->netmask)))
62713+ return match;
62714+ }
62715+ match = default_role;
62716+ }
62717+ } else if (match->allowed_ips == NULL) {
62718+ return match;
62719+ } else {
62720+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
62721+ if (likely
62722+ ((ntohl(curr_ip) & ipp->netmask) ==
62723+ (ntohl(ipp->addr) & ipp->netmask)))
62724+ return match;
62725+ }
62726+ goto try_group;
62727+ }
62728+
62729+ return match;
62730+}
62731+
62732+struct acl_subject_label *
62733+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
62734+ const struct acl_role_label *role)
62735+{
62736+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
62737+ struct acl_subject_label *match;
62738+
62739+ match = role->subj_hash[index];
62740+
62741+ while (match && (match->inode != ino || match->device != dev ||
62742+ (match->mode & GR_DELETED))) {
62743+ match = match->next;
62744+ }
62745+
62746+ if (match && !(match->mode & GR_DELETED))
62747+ return match;
62748+ else
62749+ return NULL;
62750+}
62751+
62752+struct acl_subject_label *
62753+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
62754+ const struct acl_role_label *role)
62755+{
62756+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
62757+ struct acl_subject_label *match;
62758+
62759+ match = role->subj_hash[index];
62760+
62761+ while (match && (match->inode != ino || match->device != dev ||
62762+ !(match->mode & GR_DELETED))) {
62763+ match = match->next;
62764+ }
62765+
62766+ if (match && (match->mode & GR_DELETED))
62767+ return match;
62768+ else
62769+ return NULL;
62770+}
62771+
62772+static struct acl_object_label *
62773+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
62774+ const struct acl_subject_label *subj)
62775+{
62776+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
62777+ struct acl_object_label *match;
62778+
62779+ match = subj->obj_hash[index];
62780+
62781+ while (match && (match->inode != ino || match->device != dev ||
62782+ (match->mode & GR_DELETED))) {
62783+ match = match->next;
62784+ }
62785+
62786+ if (match && !(match->mode & GR_DELETED))
62787+ return match;
62788+ else
62789+ return NULL;
62790+}
62791+
62792+static struct acl_object_label *
62793+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
62794+ const struct acl_subject_label *subj)
62795+{
62796+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
62797+ struct acl_object_label *match;
62798+
62799+ match = subj->obj_hash[index];
62800+
62801+ while (match && (match->inode != ino || match->device != dev ||
62802+ !(match->mode & GR_DELETED))) {
62803+ match = match->next;
62804+ }
62805+
62806+ if (match && (match->mode & GR_DELETED))
62807+ return match;
62808+
62809+ match = subj->obj_hash[index];
62810+
62811+ while (match && (match->inode != ino || match->device != dev ||
62812+ (match->mode & GR_DELETED))) {
62813+ match = match->next;
62814+ }
62815+
62816+ if (match && !(match->mode & GR_DELETED))
62817+ return match;
62818+ else
62819+ return NULL;
62820+}
62821+
62822+static struct name_entry *
62823+lookup_name_entry(const char *name)
62824+{
62825+ unsigned int len = strlen(name);
62826+ unsigned int key = full_name_hash(name, len);
62827+ unsigned int index = key % name_set.n_size;
62828+ struct name_entry *match;
62829+
62830+ match = name_set.n_hash[index];
62831+
62832+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
62833+ match = match->next;
62834+
62835+ return match;
62836+}
62837+
62838+static struct name_entry *
62839+lookup_name_entry_create(const char *name)
62840+{
62841+ unsigned int len = strlen(name);
62842+ unsigned int key = full_name_hash(name, len);
62843+ unsigned int index = key % name_set.n_size;
62844+ struct name_entry *match;
62845+
62846+ match = name_set.n_hash[index];
62847+
62848+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
62849+ !match->deleted))
62850+ match = match->next;
62851+
62852+ if (match && match->deleted)
62853+ return match;
62854+
62855+ match = name_set.n_hash[index];
62856+
62857+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
62858+ match->deleted))
62859+ match = match->next;
62860+
62861+ if (match && !match->deleted)
62862+ return match;
62863+ else
62864+ return NULL;
62865+}
62866+
62867+static struct inodev_entry *
62868+lookup_inodev_entry(const ino_t ino, const dev_t dev)
62869+{
62870+ unsigned int index = gr_fhash(ino, dev, inodev_set.i_size);
62871+ struct inodev_entry *match;
62872+
62873+ match = inodev_set.i_hash[index];
62874+
62875+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
62876+ match = match->next;
62877+
62878+ return match;
62879+}
62880+
62881+static void
62882+insert_inodev_entry(struct inodev_entry *entry)
62883+{
62884+ unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
62885+ inodev_set.i_size);
62886+ struct inodev_entry **curr;
62887+
62888+ entry->prev = NULL;
62889+
62890+ curr = &inodev_set.i_hash[index];
62891+ if (*curr != NULL)
62892+ (*curr)->prev = entry;
62893+
62894+ entry->next = *curr;
62895+ *curr = entry;
62896+
62897+ return;
62898+}
62899+
62900+static void
62901+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
62902+{
62903+ unsigned int index =
62904+ gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
62905+ struct acl_role_label **curr;
62906+ struct acl_role_label *tmp, *tmp2;
62907+
62908+ curr = &acl_role_set.r_hash[index];
62909+
62910+ /* simple case, slot is empty, just set it to our role */
62911+ if (*curr == NULL) {
62912+ *curr = role;
62913+ } else {
62914+ /* example:
62915+ 1 -> 2 -> 3 (adding 2 -> 3 to here)
62916+ 2 -> 3
62917+ */
62918+ /* first check to see if we can already be reached via this slot */
62919+ tmp = *curr;
62920+ while (tmp && tmp != role)
62921+ tmp = tmp->next;
62922+ if (tmp == role) {
62923+ /* we don't need to add ourselves to this slot's chain */
62924+ return;
62925+ }
62926+ /* we need to add ourselves to this chain, two cases */
62927+ if (role->next == NULL) {
62928+ /* simple case, append the current chain to our role */
62929+ role->next = *curr;
62930+ *curr = role;
62931+ } else {
62932+ /* 1 -> 2 -> 3 -> 4
62933+ 2 -> 3 -> 4
62934+ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
62935+ */
62936+ /* trickier case: walk our role's chain until we find
62937+ the role for the start of the current slot's chain */
62938+ tmp = role;
62939+ tmp2 = *curr;
62940+ while (tmp->next && tmp->next != tmp2)
62941+ tmp = tmp->next;
62942+ if (tmp->next == tmp2) {
62943+ /* from example above, we found 3, so just
62944+ replace this slot's chain with ours */
62945+ *curr = role;
62946+ } else {
62947+ /* we didn't find a subset of our role's chain
62948+ in the current slot's chain, so append their
62949+ chain to ours, and set us as the first role in
62950+ the slot's chain
62951+
62952+ we could fold this case with the case above,
62953+ but making it explicit for clarity
62954+ */
62955+ tmp->next = tmp2;
62956+ *curr = role;
62957+ }
62958+ }
62959+ }
62960+
62961+ return;
62962+}
62963+
62964+static void
62965+insert_acl_role_label(struct acl_role_label *role)
62966+{
62967+ int i;
62968+
62969+ if (role_list == NULL) {
62970+ role_list = role;
62971+ role->prev = NULL;
62972+ } else {
62973+ role->prev = role_list;
62974+ role_list = role;
62975+ }
62976+
62977+ /* used for hash chains */
62978+ role->next = NULL;
62979+
62980+ if (role->roletype & GR_ROLE_DOMAIN) {
62981+ for (i = 0; i < role->domain_child_num; i++)
62982+ __insert_acl_role_label(role, role->domain_children[i]);
62983+ } else
62984+ __insert_acl_role_label(role, role->uidgid);
62985+}
62986+
62987+static int
62988+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
62989+{
62990+ struct name_entry **curr, *nentry;
62991+ struct inodev_entry *ientry;
62992+ unsigned int len = strlen(name);
62993+ unsigned int key = full_name_hash(name, len);
62994+ unsigned int index = key % name_set.n_size;
62995+
62996+ curr = &name_set.n_hash[index];
62997+
62998+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
62999+ curr = &((*curr)->next);
63000+
63001+ if (*curr != NULL)
63002+ return 1;
63003+
63004+ nentry = acl_alloc(sizeof (struct name_entry));
63005+ if (nentry == NULL)
63006+ return 0;
63007+ ientry = acl_alloc(sizeof (struct inodev_entry));
63008+ if (ientry == NULL)
63009+ return 0;
63010+ ientry->nentry = nentry;
63011+
63012+ nentry->key = key;
63013+ nentry->name = name;
63014+ nentry->inode = inode;
63015+ nentry->device = device;
63016+ nentry->len = len;
63017+ nentry->deleted = deleted;
63018+
63019+ nentry->prev = NULL;
63020+ curr = &name_set.n_hash[index];
63021+ if (*curr != NULL)
63022+ (*curr)->prev = nentry;
63023+ nentry->next = *curr;
63024+ *curr = nentry;
63025+
63026+ /* insert us into the table searchable by inode/dev */
63027+ insert_inodev_entry(ientry);
63028+
63029+ return 1;
63030+}
63031+
63032+static void
63033+insert_acl_obj_label(struct acl_object_label *obj,
63034+ struct acl_subject_label *subj)
63035+{
63036+ unsigned int index =
63037+ gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
63038+ struct acl_object_label **curr;
63039+
63040+
63041+ obj->prev = NULL;
63042+
63043+ curr = &subj->obj_hash[index];
63044+ if (*curr != NULL)
63045+ (*curr)->prev = obj;
63046+
63047+ obj->next = *curr;
63048+ *curr = obj;
63049+
63050+ return;
63051+}
63052+
63053+static void
63054+insert_acl_subj_label(struct acl_subject_label *obj,
63055+ struct acl_role_label *role)
63056+{
63057+ unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
63058+ struct acl_subject_label **curr;
63059+
63060+ obj->prev = NULL;
63061+
63062+ curr = &role->subj_hash[index];
63063+ if (*curr != NULL)
63064+ (*curr)->prev = obj;
63065+
63066+ obj->next = *curr;
63067+ *curr = obj;
63068+
63069+ return;
63070+}
63071+
63072+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
63073+
63074+static void *
63075+create_table(__u32 * len, int elementsize)
63076+{
63077+ unsigned int table_sizes[] = {
63078+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
63079+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
63080+ 4194301, 8388593, 16777213, 33554393, 67108859
63081+ };
63082+ void *newtable = NULL;
63083+ unsigned int pwr = 0;
63084+
63085+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
63086+ table_sizes[pwr] <= *len)
63087+ pwr++;
63088+
63089+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
63090+ return newtable;
63091+
63092+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
63093+ newtable =
63094+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
63095+ else
63096+ newtable = vmalloc(table_sizes[pwr] * elementsize);
63097+
63098+ *len = table_sizes[pwr];
63099+
63100+ return newtable;
63101+}
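
	  A usage note for the sizing comment above (hypothetical caller):
	  create_table() rounds the request up to the next prime in table_sizes
	  and updates *len, keeping the chained hashes near a load factor of 1:

	    __u32 len = 1000;
	    void **tbl = (void **)create_table(&len, sizeof(void *));
	    /* on success len == 1021, the smallest listed prime above the
	       request; NULL is returned if the request is too large */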
63102+
63103+static int
63104+init_variables(const struct gr_arg *arg)
63105+{
63106+ struct task_struct *reaper = init_pid_ns.child_reaper;
63107+ unsigned int stacksize;
63108+
63109+ subj_map_set.s_size = arg->role_db.num_subjects;
63110+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
63111+ name_set.n_size = arg->role_db.num_objects;
63112+ inodev_set.i_size = arg->role_db.num_objects;
63113+
63114+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
63115+ !name_set.n_size || !inodev_set.i_size)
63116+ return 1;
63117+
63118+ if (!gr_init_uidset())
63119+ return 1;
63120+
63121+ /* set up the stack that holds allocation info */
63122+
63123+ stacksize = arg->role_db.num_pointers + 5;
63124+
63125+ if (!acl_alloc_stack_init(stacksize))
63126+ return 1;
63127+
63128+ /* grab reference for the real root dentry and vfsmount */
63129+ get_fs_root(reaper->fs, &real_root);
63130+
63131+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
63132+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
63133+#endif
63134+
63135+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
63136+ if (fakefs_obj_rw == NULL)
63137+ return 1;
63138+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
63139+
63140+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
63141+ if (fakefs_obj_rwx == NULL)
63142+ return 1;
63143+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
63144+
63145+ subj_map_set.s_hash =
63146+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
63147+ acl_role_set.r_hash =
63148+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
63149+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
63150+ inodev_set.i_hash =
63151+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
63152+
63153+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
63154+ !name_set.n_hash || !inodev_set.i_hash)
63155+ return 1;
63156+
63157+ memset(subj_map_set.s_hash, 0,
63158+ sizeof(struct subject_map *) * subj_map_set.s_size);
63159+ memset(acl_role_set.r_hash, 0,
63160+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
63161+ memset(name_set.n_hash, 0,
63162+ sizeof (struct name_entry *) * name_set.n_size);
63163+ memset(inodev_set.i_hash, 0,
63164+ sizeof (struct inodev_entry *) * inodev_set.i_size);
63165+
63166+ return 0;
63167+}
63168+
63169+/* free information not needed after startup
63170+ currently contains user->kernel pointer mappings for subjects
63171+*/
63172+
63173+static void
63174+free_init_variables(void)
63175+{
63176+ __u32 i;
63177+
63178+ if (subj_map_set.s_hash) {
63179+ for (i = 0; i < subj_map_set.s_size; i++) {
63180+ if (subj_map_set.s_hash[i]) {
63181+ kfree(subj_map_set.s_hash[i]);
63182+ subj_map_set.s_hash[i] = NULL;
63183+ }
63184+ }
63185+
63186+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
63187+ PAGE_SIZE)
63188+ kfree(subj_map_set.s_hash);
63189+ else
63190+ vfree(subj_map_set.s_hash);
63191+ }
63192+
63193+ return;
63194+}
63195+
63196+static void
63197+free_variables(void)
63198+{
63199+ struct acl_subject_label *s;
63200+ struct acl_role_label *r;
63201+ struct task_struct *task, *task2;
63202+ unsigned int x;
63203+
63204+ gr_clear_learn_entries();
63205+
63206+ read_lock(&tasklist_lock);
63207+ do_each_thread(task2, task) {
63208+ task->acl_sp_role = 0;
63209+ task->acl_role_id = 0;
63210+ task->acl = NULL;
63211+ task->role = NULL;
63212+ } while_each_thread(task2, task);
63213+ read_unlock(&tasklist_lock);
63214+
63215+ /* release the reference to the real root dentry and vfsmount */
63216+ path_put(&real_root);
63217+ memset(&real_root, 0, sizeof(real_root));
63218+
63219+ /* free all object hash tables */
63220+
63221+ FOR_EACH_ROLE_START(r)
63222+ if (r->subj_hash == NULL)
63223+ goto next_role;
63224+ FOR_EACH_SUBJECT_START(r, s, x)
63225+ if (s->obj_hash == NULL)
63226+ break;
63227+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
63228+ kfree(s->obj_hash);
63229+ else
63230+ vfree(s->obj_hash);
63231+ FOR_EACH_SUBJECT_END(s, x)
63232+ FOR_EACH_NESTED_SUBJECT_START(r, s)
63233+ if (s->obj_hash == NULL)
63234+ break;
63235+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
63236+ kfree(s->obj_hash);
63237+ else
63238+ vfree(s->obj_hash);
63239+ FOR_EACH_NESTED_SUBJECT_END(s)
63240+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
63241+ kfree(r->subj_hash);
63242+ else
63243+ vfree(r->subj_hash);
63244+ r->subj_hash = NULL;
63245+next_role:
63246+ FOR_EACH_ROLE_END(r)
63247+
63248+ acl_free_all();
63249+
63250+ if (acl_role_set.r_hash) {
63251+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
63252+ PAGE_SIZE)
63253+ kfree(acl_role_set.r_hash);
63254+ else
63255+ vfree(acl_role_set.r_hash);
63256+ }
63257+ if (name_set.n_hash) {
63258+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
63259+ PAGE_SIZE)
63260+ kfree(name_set.n_hash);
63261+ else
63262+ vfree(name_set.n_hash);
63263+ }
63264+
63265+ if (inodev_set.i_hash) {
63266+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
63267+ PAGE_SIZE)
63268+ kfree(inodev_set.i_hash);
63269+ else
63270+ vfree(inodev_set.i_hash);
63271+ }
63272+
63273+ gr_free_uidset();
63274+
63275+ memset(&name_set, 0, sizeof (struct name_db));
63276+ memset(&inodev_set, 0, sizeof (struct inodev_db));
63277+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
63278+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
63279+
63280+ default_role = NULL;
63281+ kernel_role = NULL;
63282+ role_list = NULL;
63283+
63284+ return;
63285+}
63286+
63287+static struct acl_subject_label *
63288+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
63289+
63290+static int alloc_and_copy_string(char **name, unsigned int maxlen)
63291+{
63292+ unsigned int len = strnlen_user(*name, maxlen);
63293+ char *tmp;
63294+
63295+ if (!len || len >= maxlen)
63296+ return -EINVAL;
63297+
63298+ if ((tmp = (char *) acl_alloc(len)) == NULL)
63299+ return -ENOMEM;
63300+
63301+ if (copy_from_user(tmp, *name, len))
63302+ return -EFAULT;
63303+
63304+ tmp[len-1] = '\0';
63305+ *name = tmp;
63306+
63307+ return 0;
63308+}
63309+
63310+static int
63311+copy_user_glob(struct acl_object_label *obj)
63312+{
63313+ struct acl_object_label *g_tmp, **guser;
63314+ int error;
63315+
63316+ if (obj->globbed == NULL)
63317+ return 0;
63318+
63319+ guser = &obj->globbed;
63320+ while (*guser) {
63321+ g_tmp = (struct acl_object_label *)
63322+ acl_alloc(sizeof (struct acl_object_label));
63323+ if (g_tmp == NULL)
63324+ return -ENOMEM;
63325+
63326+ if (copy_acl_object_label(g_tmp, *guser))
63327+ return -EFAULT;
63328+
63329+ error = alloc_and_copy_string(&g_tmp->filename, PATH_MAX);
63330+ if (error)
63331+ return error;
63332+
63333+ *guser = g_tmp;
63334+ guser = &(g_tmp->next);
63335+ }
63336+
63337+ return 0;
63338+}
63339+
63340+static int
63341+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
63342+ struct acl_role_label *role)
63343+{
63344+ struct acl_object_label *o_tmp;
63345+ int ret;
63346+
63347+ while (userp) {
63348+ if ((o_tmp = (struct acl_object_label *)
63349+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
63350+ return -ENOMEM;
63351+
63352+ if (copy_acl_object_label(o_tmp, userp))
63353+ return -EFAULT;
63354+
63355+ userp = o_tmp->prev;
63356+
63357+ ret = alloc_and_copy_string(&o_tmp->filename, PATH_MAX);
63358+ if (ret)
63359+ return ret;
63360+
63361+ insert_acl_obj_label(o_tmp, subj);
63362+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
63363+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
63364+ return -ENOMEM;
63365+
63366+ ret = copy_user_glob(o_tmp);
63367+ if (ret)
63368+ return ret;
63369+
63370+ if (o_tmp->nested) {
63371+ int already_copied;
63372+
63373+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role, &already_copied);
63374+ if (IS_ERR(o_tmp->nested))
63375+ return PTR_ERR(o_tmp->nested);
63376+
63377+ /* insert into nested subject list if we haven't copied this one yet
63378+ to prevent duplicate entries */
63379+ if (!already_copied) {
63380+ o_tmp->nested->next = role->hash->first;
63381+ role->hash->first = o_tmp->nested;
63382+ }
63383+ }
63384+ }
63385+
63386+ return 0;
63387+}
63388+
63389+static __u32
63390+count_user_subjs(struct acl_subject_label *userp)
63391+{
63392+ struct acl_subject_label s_tmp;
63393+ __u32 num = 0;
63394+
63395+ while (userp) {
63396+ if (copy_acl_subject_label(&s_tmp, userp))
63397+ break;
63398+
63399+		userp = s_tmp.prev;
+		num++;
63400+ }
63401+
63402+ return num;
63403+}
63404+
63405+static int
63406+copy_user_allowedips(struct acl_role_label *rolep)
63407+{
63408+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
63409+
63410+ ruserip = rolep->allowed_ips;
63411+
63412+ while (ruserip) {
63413+ rlast = rtmp;
63414+
63415+ if ((rtmp = (struct role_allowed_ip *)
63416+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
63417+ return -ENOMEM;
63418+
63419+ if (copy_role_allowed_ip(rtmp, ruserip))
63420+ return -EFAULT;
63421+
63422+ ruserip = rtmp->prev;
63423+
63424+ if (!rlast) {
63425+ rtmp->prev = NULL;
63426+ rolep->allowed_ips = rtmp;
63427+ } else {
63428+ rlast->next = rtmp;
63429+ rtmp->prev = rlast;
63430+ }
63431+
63432+ if (!ruserip)
63433+ rtmp->next = NULL;
63434+ }
63435+
63436+ return 0;
63437+}
63438+
63439+static int
63440+copy_user_transitions(struct acl_role_label *rolep)
63441+{
63442+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
63443+ int error;
63444+
63445+ rusertp = rolep->transitions;
63446+
63447+ while (rusertp) {
63448+ rlast = rtmp;
63449+
63450+ if ((rtmp = (struct role_transition *)
63451+ acl_alloc(sizeof (struct role_transition))) == NULL)
63452+ return -ENOMEM;
63453+
63454+ if (copy_role_transition(rtmp, rusertp))
63455+ return -EFAULT;
63456+
63457+ rusertp = rtmp->prev;
63458+
63459+ error = alloc_and_copy_string(&rtmp->rolename, GR_SPROLE_LEN);
63460+ if (error)
63461+ return error;
63462+
63463+ if (!rlast) {
63464+ rtmp->prev = NULL;
63465+ rolep->transitions = rtmp;
63466+ } else {
63467+ rlast->next = rtmp;
63468+ rtmp->prev = rlast;
63469+ }
63470+
63471+ if (!rusertp)
63472+ rtmp->next = NULL;
63473+ }
63474+
63475+ return 0;
63476+}
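
copy_user_allowedips() and copy_user_transitions() repeat one pattern: walk an untrusted, user-supplied list through its prev pointers, allocate a fresh kernel node per element, and stitch the next/prev links between the copies as they are produced, so a malicious chain can at worst terminate early or fault, never splice user pointers into kernel lists. A self-contained sketch of that stitching with a toy node type (struct node and copy_list are illustrative names):

#include <stdio.h>
#include <stdlib.h>

/* 'usrc' plays the user-supplied list (walked via ->prev); we build a
   trusted copy whose ->next links are stitched as we go, the same shape
   as copy_user_allowedips()/copy_user_transitions() */
struct node {
	int val;
	struct node *prev;
	struct node *next;
};

static struct node *copy_list(const struct node *usrc)
{
	struct node *head = NULL, *tmp = NULL, *last;

	while (usrc) {
		last = tmp;

		tmp = malloc(sizeof(*tmp));
		if (tmp == NULL)
			return NULL;	/* the kernel code unwinds via its acl allocator */

		tmp->val = usrc->val;	/* stands in for copying the node's payload */
		usrc = usrc->prev;	/* advance along the untrusted chain */

		if (last == NULL) {	/* first copy becomes the new head */
			tmp->prev = NULL;
			head = tmp;
		} else {		/* otherwise append behind the previous copy */
			last->next = tmp;
			tmp->prev = last;
		}
		if (usrc == NULL)	/* terminate the copy at end of input */
			tmp->next = NULL;
	}
	return head;
}

int main(void)
{
	struct node c = { 3, NULL, NULL }, b = { 2, &c, NULL }, a = { 1, &b, NULL };

	for (struct node *n = copy_list(&a); n; n = n->next)
		printf("%d\n", n->val);
	return 0;
}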
63477+
63478+static __u32 count_user_objs(const struct acl_object_label __user *userp)
63479+{
63480+ struct acl_object_label o_tmp;
63481+ __u32 num = 0;
63482+
63483+ while (userp) {
63484+ if (copy_acl_object_label(&o_tmp, userp))
63485+ break;
63486+
63487+ userp = o_tmp.prev;
63488+ num++;
63489+ }
63490+
63491+ return num;
63492+}
63493+
63494+static struct acl_subject_label *
63495+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied)
63496+{
63497+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
63498+ __u32 num_objs;
63499+ struct acl_ip_label **i_tmp, *i_utmp2;
63500+ struct gr_hash_struct ghash;
63501+ struct subject_map *subjmap;
63502+ unsigned int i_num;
63503+ int err;
63504+
63505+ if (already_copied != NULL)
63506+ *already_copied = 0;
63507+
63508+ s_tmp = lookup_subject_map(userp);
63509+
63510+	/* we've already copied this subject into the kernel; just return
63511+ the reference to it, and don't copy it over again
63512+ */
63513+ if (s_tmp) {
63514+ if (already_copied != NULL)
63515+ *already_copied = 1;
63516+		return s_tmp;
63517+ }
63518+
63519+ if ((s_tmp = (struct acl_subject_label *)
63520+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
63521+ return ERR_PTR(-ENOMEM);
63522+
63523+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
63524+ if (subjmap == NULL)
63525+ return ERR_PTR(-ENOMEM);
63526+
63527+ subjmap->user = userp;
63528+ subjmap->kernel = s_tmp;
63529+ insert_subj_map_entry(subjmap);
63530+
63531+ if (copy_acl_subject_label(s_tmp, userp))
63532+ return ERR_PTR(-EFAULT);
63533+
63534+ err = alloc_and_copy_string(&s_tmp->filename, PATH_MAX);
63535+ if (err)
63536+ return ERR_PTR(err);
63537+
63538+ if (!strcmp(s_tmp->filename, "/"))
63539+ role->root_label = s_tmp;
63540+
63541+ if (copy_gr_hash_struct(&ghash, s_tmp->hash))
63542+ return ERR_PTR(-EFAULT);
63543+
63544+ /* copy user and group transition tables */
63545+
63546+ if (s_tmp->user_trans_num) {
63547+ uid_t *uidlist;
63548+
63549+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
63550+ if (uidlist == NULL)
63551+ return ERR_PTR(-ENOMEM);
63552+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
63553+ return ERR_PTR(-EFAULT);
63554+
63555+ s_tmp->user_transitions = uidlist;
63556+ }
63557+
63558+ if (s_tmp->group_trans_num) {
63559+ gid_t *gidlist;
63560+
63561+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
63562+ if (gidlist == NULL)
63563+ return ERR_PTR(-ENOMEM);
63564+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
63565+ return ERR_PTR(-EFAULT);
63566+
63567+ s_tmp->group_transitions = gidlist;
63568+ }
63569+
63570+ /* set up object hash table */
63571+ num_objs = count_user_objs(ghash.first);
63572+
63573+ s_tmp->obj_hash_size = num_objs;
63574+ s_tmp->obj_hash =
63575+ (struct acl_object_label **)
63576+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
63577+
63578+ if (!s_tmp->obj_hash)
63579+ return ERR_PTR(-ENOMEM);
63580+
63581+ memset(s_tmp->obj_hash, 0,
63582+ s_tmp->obj_hash_size *
63583+ sizeof (struct acl_object_label *));
63584+
63585+ /* add in objects */
63586+ err = copy_user_objs(ghash.first, s_tmp, role);
63587+
63588+ if (err)
63589+ return ERR_PTR(err);
63590+
63591+ /* set pointer for parent subject */
63592+ if (s_tmp->parent_subject) {
63593+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role, NULL);
63594+
63595+ if (IS_ERR(s_tmp2))
63596+ return s_tmp2;
63597+
63598+ s_tmp->parent_subject = s_tmp2;
63599+ }
63600+
63601+ /* add in ip acls */
63602+
63603+ if (!s_tmp->ip_num) {
63604+ s_tmp->ips = NULL;
63605+ goto insert;
63606+ }
63607+
63608+ i_tmp =
63609+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
63610+ sizeof (struct acl_ip_label *));
63611+
63612+ if (!i_tmp)
63613+ return ERR_PTR(-ENOMEM);
63614+
63615+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
63616+ *(i_tmp + i_num) =
63617+ (struct acl_ip_label *)
63618+ acl_alloc(sizeof (struct acl_ip_label));
63619+ if (!*(i_tmp + i_num))
63620+ return ERR_PTR(-ENOMEM);
63621+
63622+ if (copy_pointer_from_array(&i_utmp2, i_num, s_tmp->ips))
63623+ return ERR_PTR(-EFAULT);
63624+
63625+ if (copy_acl_ip_label(*(i_tmp + i_num), i_utmp2))
63626+ return ERR_PTR(-EFAULT);
63627+
63628+ if ((*(i_tmp + i_num))->iface == NULL)
63629+ continue;
63630+
63631+ err = alloc_and_copy_string(&(*(i_tmp + i_num))->iface, IFNAMSIZ);
63632+ if (err)
63633+ return ERR_PTR(err);
63634+ }
63635+
63636+ s_tmp->ips = i_tmp;
63637+
63638+insert:
63639+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
63640+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
63641+ return ERR_PTR(-ENOMEM);
63642+
63643+ return s_tmp;
63644+}
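
do_copy_user_subj() returns either a valid subject pointer or an errno encoded in the pointer via ERR_PTR(), letting one return slot carry both outcomes; callers such as copy_user_objs() and copy_user_subjs() unwrap it with IS_ERR()/PTR_ERR(). A simplified, self-contained userspace copy of that <linux/err.h> idiom (make_label is an illustrative stand-in for the copy path):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* simplified userspace copies of the kernel's ERR_PTR()/IS_ERR()/PTR_ERR()
   macros: small negative errnos ride in the top 4095 addresses, which no
   valid allocation can occupy */
#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

/* toy allocator standing in for the subject-copy path */
static void *make_label(int fail)
{
	if (fail)
		return ERR_PTR(-ENOMEM);	/* the error travels in the pointer */
	return malloc(16);
}

int main(void)
{
	void *p = make_label(1);

	if (IS_ERR(p)) {
		printf("copy failed: errno %ld\n", -PTR_ERR(p));
		return 1;
	}
	free(p);
	return 0;
}

Callers like copy_user_subjs() above then propagate PTR_ERR(ret) unchanged, which is why a single int error path suffices there.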
63645+
63646+static int
63647+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
63648+{
63649+ struct acl_subject_label s_pre;
63650+ struct acl_subject_label * ret;
63651+ int err;
63652+
63653+ while (userp) {
63654+ if (copy_acl_subject_label(&s_pre, userp))
63655+ return -EFAULT;
63656+
63657+ ret = do_copy_user_subj(userp, role, NULL);
63658+
63659+ err = PTR_ERR(ret);
63660+ if (IS_ERR(ret))
63661+ return err;
63662+
63663+ insert_acl_subj_label(ret, role);
63664+
63665+ userp = s_pre.prev;
63666+ }
63667+
63668+ return 0;
63669+}
63670+
63671+static int
63672+copy_user_acl(struct gr_arg *arg)
63673+{
63674+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
63675+ struct acl_subject_label *subj_list;
63676+ struct sprole_pw *sptmp;
63677+ struct gr_hash_struct *ghash;
63678+ uid_t *domainlist;
63679+ unsigned int r_num;
63680+ int err = 0;
63681+ __u16 i;
63682+ __u32 num_subjs;
63683+
63684+ /* we need a default and kernel role */
63685+ if (arg->role_db.num_roles < 2)
63686+ return -EINVAL;
63687+
63688+ /* copy special role authentication info from userspace */
63689+
63690+ num_sprole_pws = arg->num_sprole_pws;
63691+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
63692+
63693+ if (!acl_special_roles && num_sprole_pws)
63694+ return -ENOMEM;
63695+
63696+ for (i = 0; i < num_sprole_pws; i++) {
63697+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
63698+ if (!sptmp)
63699+ return -ENOMEM;
63700+ if (copy_sprole_pw(sptmp, i, arg->sprole_pws))
63701+ return -EFAULT;
63702+
63703+ err = alloc_and_copy_string((char **)&sptmp->rolename, GR_SPROLE_LEN);
63704+ if (err)
63705+ return err;
63706+
63707+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
63708+ printk(KERN_ALERT "Copying special role %s\n", sptmp->rolename);
63709+#endif
63710+
63711+ acl_special_roles[i] = sptmp;
63712+ }
63713+
63714+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
63715+
63716+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
63717+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
63718+
63719+ if (!r_tmp)
63720+ return -ENOMEM;
63721+
63722+ if (copy_pointer_from_array(&r_utmp2, r_num, r_utmp))
63723+ return -EFAULT;
63724+
63725+ if (copy_acl_role_label(r_tmp, r_utmp2))
63726+ return -EFAULT;
63727+
63728+ err = alloc_and_copy_string(&r_tmp->rolename, GR_SPROLE_LEN);
63729+ if (err)
63730+ return err;
63731+
63732+ if (!strcmp(r_tmp->rolename, "default")
63733+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
63734+ default_role = r_tmp;
63735+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
63736+ kernel_role = r_tmp;
63737+ }
63738+
63739+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
63740+ return -ENOMEM;
63741+
63742+ if (copy_gr_hash_struct(ghash, r_tmp->hash))
63743+ return -EFAULT;
63744+
63745+ r_tmp->hash = ghash;
63746+
63747+ num_subjs = count_user_subjs(r_tmp->hash->first);
63748+
63749+ r_tmp->subj_hash_size = num_subjs;
63750+ r_tmp->subj_hash =
63751+ (struct acl_subject_label **)
63752+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
63753+
63754+ if (!r_tmp->subj_hash)
63755+ return -ENOMEM;
63756+
63757+ err = copy_user_allowedips(r_tmp);
63758+ if (err)
63759+ return err;
63760+
63761+ /* copy domain info */
63762+ if (r_tmp->domain_children != NULL) {
63763+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
63764+ if (domainlist == NULL)
63765+ return -ENOMEM;
63766+
63767+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
63768+ return -EFAULT;
63769+
63770+ r_tmp->domain_children = domainlist;
63771+ }
63772+
63773+ err = copy_user_transitions(r_tmp);
63774+ if (err)
63775+ return err;
63776+
63777+ memset(r_tmp->subj_hash, 0,
63778+ r_tmp->subj_hash_size *
63779+ sizeof (struct acl_subject_label *));
63780+
63781+ /* acquire the list of subjects, then NULL out
63782+ the list prior to parsing the subjects for this role,
63783+ as during this parsing the list is replaced with a list
63784+ of *nested* subjects for the role
63785+ */
63786+ subj_list = r_tmp->hash->first;
63787+
63788+ /* set nested subject list to null */
63789+ r_tmp->hash->first = NULL;
63790+
63791+ err = copy_user_subjs(subj_list, r_tmp);
63792+
63793+ if (err)
63794+ return err;
63795+
63796+ insert_acl_role_label(r_tmp);
63797+ }
63798+
63799+ if (default_role == NULL || kernel_role == NULL)
63800+ return -EINVAL;
63801+
63802+ return err;
63803+}
63804+
63805+static int
63806+gracl_init(struct gr_arg *args)
63807+{
63808+ int error = 0;
63809+
63810+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
63811+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
63812+
63813+ if (init_variables(args)) {
63814+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
63815+ error = -ENOMEM;
63816+ free_variables();
63817+ goto out;
63818+ }
63819+
63820+ error = copy_user_acl(args);
63821+ free_init_variables();
63822+ if (error) {
63823+ free_variables();
63824+ goto out;
63825+ }
63826+
63827+ if ((error = gr_set_acls(0))) {
63828+ free_variables();
63829+ goto out;
63830+ }
63831+
63832+ pax_open_kernel();
63833+ gr_status |= GR_READY;
63834+ pax_close_kernel();
63835+
63836+ out:
63837+ return error;
63838+}
63839+
63840+/* derived from glibc fnmatch(); returns 0 on match, 1 on no match */
63841+
63842+static int
63843+glob_match(const char *p, const char *n)
63844+{
63845+ char c;
63846+
63847+ while ((c = *p++) != '\0') {
63848+ switch (c) {
63849+ case '?':
63850+ if (*n == '\0')
63851+ return 1;
63852+ else if (*n == '/')
63853+ return 1;
63854+ break;
63855+ case '\\':
63856+ if (*n != c)
63857+ return 1;
63858+ break;
63859+ case '*':
63860+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
63861+ if (*n == '/')
63862+ return 1;
63863+ else if (c == '?') {
63864+ if (*n == '\0')
63865+ return 1;
63866+ else
63867+ ++n;
63868+ }
63869+ }
63870+ if (c == '\0') {
63871+ return 0;
63872+ } else {
63873+ const char *endp;
63874+
63875+ if ((endp = strchr(n, '/')) == NULL)
63876+ endp = n + strlen(n);
63877+
63878+ if (c == '[') {
63879+ for (--p; n < endp; ++n)
63880+ if (!glob_match(p, n))
63881+ return 0;
63882+ } else if (c == '/') {
63883+ while (*n != '\0' && *n != '/')
63884+ ++n;
63885+ if (*n == '/' && !glob_match(p, n + 1))
63886+ return 0;
63887+ } else {
63888+ for (--p; n < endp; ++n)
63889+ if (*n == c && !glob_match(p, n))
63890+ return 0;
63891+ }
63892+
63893+ return 1;
63894+ }
63895+ case '[':
63896+ {
63897+ int not;
63898+ char cold;
63899+
63900+ if (*n == '\0' || *n == '/')
63901+ return 1;
63902+
63903+ not = (*p == '!' || *p == '^');
63904+ if (not)
63905+ ++p;
63906+
63907+ c = *p++;
63908+ for (;;) {
63909+ unsigned char fn = (unsigned char)*n;
63910+
63911+ if (c == '\0')
63912+ return 1;
63913+ else {
63914+ if (c == fn)
63915+ goto matched;
63916+ cold = c;
63917+ c = *p++;
63918+
63919+ if (c == '-' && *p != ']') {
63920+ unsigned char cend = *p++;
63921+
63922+ if (cend == '\0')
63923+ return 1;
63924+
63925+ if (cold <= fn && fn <= cend)
63926+ goto matched;
63927+
63928+ c = *p++;
63929+ }
63930+ }
63931+
63932+ if (c == ']')
63933+ break;
63934+ }
63935+ if (!not)
63936+ return 1;
63937+ break;
63938+ matched:
63939+ while (c != ']') {
63940+ if (c == '\0')
63941+ return 1;
63942+
63943+ c = *p++;
63944+ }
63945+ if (not)
63946+ return 1;
63947+ }
63948+ break;
63949+ default:
63950+ if (c != *n)
63951+ return 1;
63952+ }
63953+
63954+ ++n;
63955+ }
63956+
63957+ if (*n == '\0')
63958+ return 0;
63959+
63960+ if (*n == '/')
63961+ return 0;
63962+
63963+ return 1;
63964+}
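
As the comment above notes, glob_match() is derived from glibc fnmatch(), with pathname semantics: '?', '*' and bracket sets never match across a '/', so patterns apply per path component. That behavior can be cross-checked in userspace against fnmatch(3) with FNM_PATHNAME, which uses the same 0-on-match convention; the sample patterns below are illustrative:

#include <fnmatch.h>
#include <stdio.h>

/* cross-check of the FNM_PATHNAME semantics glob_match() implements:
   wildcards stop at '/', so "/home/*" covers one path component only */
int main(void)
{
	const struct { const char *pat, *str; } cases[] = {
		{ "/home/*",	 "/home/user" },	/* match */
		{ "/home/*",	 "/home/user/.ssh" },	/* no match: '*' won't cross '/' */
		{ "/tmp/??",	 "/tmp/ab" },		/* match */
		{ "/tmp/[a-c]x", "/tmp/bx" },		/* match: bracket range */
	};

	for (unsigned i = 0; i < sizeof(cases) / sizeof(cases[0]); i++)
		printf("%-14s vs %-16s -> %s\n", cases[i].pat, cases[i].str,
		       fnmatch(cases[i].pat, cases[i].str, FNM_PATHNAME) == 0
		       ? "match" : "no match");
	return 0;
}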
63965+
63966+static struct acl_object_label *
63967+chk_glob_label(struct acl_object_label *globbed,
63968+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
63969+{
63970+ struct acl_object_label *tmp;
63971+
63972+ if (*path == NULL)
63973+ *path = gr_to_filename_nolock(dentry, mnt);
63974+
63975+ tmp = globbed;
63976+
63977+ while (tmp) {
63978+ if (!glob_match(tmp->filename, *path))
63979+ return tmp;
63980+ tmp = tmp->next;
63981+ }
63982+
63983+ return NULL;
63984+}
63985+
63986+static struct acl_object_label *
63987+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
63988+ const ino_t curr_ino, const dev_t curr_dev,
63989+ const struct acl_subject_label *subj, char **path, const int checkglob)
63990+{
63991+ struct acl_subject_label *tmpsubj;
63992+ struct acl_object_label *retval;
63993+ struct acl_object_label *retval2;
63994+
63995+ tmpsubj = (struct acl_subject_label *) subj;
63996+ read_lock(&gr_inode_lock);
63997+ do {
63998+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
63999+ if (retval) {
64000+ if (checkglob && retval->globbed) {
64001+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
64002+ if (retval2)
64003+ retval = retval2;
64004+ }
64005+ break;
64006+ }
64007+ } while ((tmpsubj = tmpsubj->parent_subject));
64008+ read_unlock(&gr_inode_lock);
64009+
64010+ return retval;
64011+}
64012+
64013+static __inline__ struct acl_object_label *
64014+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
64015+ struct dentry *curr_dentry,
64016+ const struct acl_subject_label *subj, char **path, const int checkglob)
64017+{
64018+ int newglob = checkglob;
64019+ ino_t inode;
64020+ dev_t device;
64021+
64022+	/* if we aren't checking a subdirectory of the original path yet, don't do glob checking,
64023+	   as we don't want a "/ *" rule to match instead of the "/" object.
64024+	   don't do this for create lookups that call this function, though, since they're looking
64025+	   up on the parent and thus need globbing checks on all paths
64026+	   */
64027+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
64028+ newglob = GR_NO_GLOB;
64029+
64030+ spin_lock(&curr_dentry->d_lock);
64031+ inode = curr_dentry->d_inode->i_ino;
64032+ device = __get_dev(curr_dentry);
64033+ spin_unlock(&curr_dentry->d_lock);
64034+
64035+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
64036+}
64037+
64038+#ifdef CONFIG_HUGETLBFS
64039+static inline bool
64040+is_hugetlbfs_mnt(const struct vfsmount *mnt)
64041+{
64042+ int i;
64043+ for (i = 0; i < HUGE_MAX_HSTATE; i++) {
64044+ if (unlikely(hugetlbfs_vfsmount[i] == mnt))
64045+ return true;
64046+ }
64047+
64048+ return false;
64049+}
64050+#endif
64051+
64052+static struct acl_object_label *
64053+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
64054+ const struct acl_subject_label *subj, char *path, const int checkglob)
64055+{
64056+ struct dentry *dentry = (struct dentry *) l_dentry;
64057+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
64058+ struct mount *real_mnt = real_mount(mnt);
64059+ struct acl_object_label *retval;
64060+ struct dentry *parent;
64061+
64062+ br_read_lock(&vfsmount_lock);
64063+ write_seqlock(&rename_lock);
64064+
64065+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
64066+#ifdef CONFIG_NET
64067+ mnt == sock_mnt ||
64068+#endif
64069+#ifdef CONFIG_HUGETLBFS
64070+ (is_hugetlbfs_mnt(mnt) && dentry->d_inode->i_nlink == 0) ||
64071+#endif
64072+ /* ignore Eric Biederman */
64073+ IS_PRIVATE(l_dentry->d_inode))) {
64074+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
64075+ goto out;
64076+ }
64077+
64078+ for (;;) {
64079+ if (dentry == real_root.dentry && mnt == real_root.mnt)
64080+ break;
64081+
64082+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
64083+ if (!mnt_has_parent(real_mnt))
64084+ break;
64085+
64086+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
64087+ if (retval != NULL)
64088+ goto out;
64089+
64090+ dentry = real_mnt->mnt_mountpoint;
64091+ real_mnt = real_mnt->mnt_parent;
64092+ mnt = &real_mnt->mnt;
64093+ continue;
64094+ }
64095+
64096+ parent = dentry->d_parent;
64097+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
64098+ if (retval != NULL)
64099+ goto out;
64100+
64101+ dentry = parent;
64102+ }
64103+
64104+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
64105+
64106+ /* real_root is pinned so we don't have to hold a reference */
64107+ if (retval == NULL)
64108+ retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
64109+out:
64110+ write_sequnlock(&rename_lock);
64111+ br_read_unlock(&vfsmount_lock);
64112+
64113+ BUG_ON(retval == NULL);
64114+
64115+ return retval;
64116+}
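
__chk_obj_label() resolves the most specific object label by looking the target up first and, on a miss, walking dentry by dentry (and across mountpoints) toward the root, so a label on /home/user beats one on / for anything underneath it; the final BUG_ON() holds because a loaded policy always labels /. A string-based sketch of the same closest-match walk, assuming absolute paths; the labels table and the chk_label/lookup helpers are illustrative:

#include <stdio.h>
#include <string.h>

/* toy policy: each entry labels one absolute path with a mode string */
static const struct { const char *path, *mode; } labels[] = {
	{ "/",			"h" },
	{ "/home",		"r" },
	{ "/home/user/secret",	""  },
};

static const char *lookup(const char *path)
{
	for (unsigned i = 0; i < sizeof(labels) / sizeof(labels[0]); i++)
		if (strcmp(labels[i].path, path) == 0)
			return labels[i].mode;
	return NULL;
}

/* walk toward "/" until a label matches -- the most specific entry wins,
   and "/" acts as the guaranteed backstop (hence the kernel's BUG_ON) */
static const char *chk_label(const char *path)	/* path must be absolute */
{
	char buf[256];

	strncpy(buf, path, sizeof(buf) - 1);
	buf[sizeof(buf) - 1] = '\0';

	for (;;) {
		const char *mode = lookup(buf);
		char *slash;

		if (mode)
			return mode;
		slash = strrchr(buf, '/');
		if (slash == buf)		/* next parent is the root */
			return lookup("/");
		*slash = '\0';			/* trim the last component */
	}
}

int main(void)
{
	printf("/home/user/secret -> \"%s\"\n", chk_label("/home/user/secret"));
	printf("/home/user/notes  -> \"%s\"\n", chk_label("/home/user/notes"));
	printf("/etc/passwd       -> \"%s\"\n", chk_label("/etc/passwd"));
	return 0;
}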
64117+
64118+static __inline__ struct acl_object_label *
64119+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
64120+ const struct acl_subject_label *subj)
64121+{
64122+ char *path = NULL;
64123+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
64124+}
64125+
64126+static __inline__ struct acl_object_label *
64127+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
64128+ const struct acl_subject_label *subj)
64129+{
64130+ char *path = NULL;
64131+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
64132+}
64133+
64134+static __inline__ struct acl_object_label *
64135+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
64136+ const struct acl_subject_label *subj, char *path)
64137+{
64138+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
64139+}
64140+
64141+static struct acl_subject_label *
64142+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
64143+ const struct acl_role_label *role)
64144+{
64145+ struct dentry *dentry = (struct dentry *) l_dentry;
64146+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
64147+ struct mount *real_mnt = real_mount(mnt);
64148+ struct acl_subject_label *retval;
64149+ struct dentry *parent;
64150+
64151+ br_read_lock(&vfsmount_lock);
64152+ write_seqlock(&rename_lock);
64153+
64154+ for (;;) {
64155+ if (dentry == real_root.dentry && mnt == real_root.mnt)
64156+ break;
64157+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
64158+ if (!mnt_has_parent(real_mnt))
64159+ break;
64160+
64161+ spin_lock(&dentry->d_lock);
64162+ read_lock(&gr_inode_lock);
64163+ retval =
64164+ lookup_acl_subj_label(dentry->d_inode->i_ino,
64165+ __get_dev(dentry), role);
64166+ read_unlock(&gr_inode_lock);
64167+ spin_unlock(&dentry->d_lock);
64168+ if (retval != NULL)
64169+ goto out;
64170+
64171+ dentry = real_mnt->mnt_mountpoint;
64172+ real_mnt = real_mnt->mnt_parent;
64173+ mnt = &real_mnt->mnt;
64174+ continue;
64175+ }
64176+
64177+ spin_lock(&dentry->d_lock);
64178+ read_lock(&gr_inode_lock);
64179+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
64180+ __get_dev(dentry), role);
64181+ read_unlock(&gr_inode_lock);
64182+ parent = dentry->d_parent;
64183+ spin_unlock(&dentry->d_lock);
64184+
64185+ if (retval != NULL)
64186+ goto out;
64187+
64188+ dentry = parent;
64189+ }
64190+
64191+ spin_lock(&dentry->d_lock);
64192+ read_lock(&gr_inode_lock);
64193+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
64194+ __get_dev(dentry), role);
64195+ read_unlock(&gr_inode_lock);
64196+ spin_unlock(&dentry->d_lock);
64197+
64198+ if (unlikely(retval == NULL)) {
64199+ /* real_root is pinned, we don't need to hold a reference */
64200+ read_lock(&gr_inode_lock);
64201+ retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
64202+ __get_dev(real_root.dentry), role);
64203+ read_unlock(&gr_inode_lock);
64204+ }
64205+out:
64206+ write_sequnlock(&rename_lock);
64207+ br_read_unlock(&vfsmount_lock);
64208+
64209+ BUG_ON(retval == NULL);
64210+
64211+ return retval;
64212+}
64213+
64214+static void
64215+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
64216+{
64217+ struct task_struct *task = current;
64218+ const struct cred *cred = current_cred();
64219+
64220+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
64221+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
64222+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
64223+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
64224+
64225+ return;
64226+}
64227+
64228+static void
64229+gr_log_learn_uid_change(const kuid_t real, const kuid_t effective, const kuid_t fs)
64230+{
64231+ struct task_struct *task = current;
64232+ const struct cred *cred = current_cred();
64233+
64234+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
64235+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
64236+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
64237+ 'u', GR_GLOBAL_UID(real), GR_GLOBAL_UID(effective), GR_GLOBAL_UID(fs), &task->signal->saved_ip);
64238+
64239+ return;
64240+}
64241+
64242+static void
64243+gr_log_learn_gid_change(const kgid_t real, const kgid_t effective, const kgid_t fs)
64244+{
64245+ struct task_struct *task = current;
64246+ const struct cred *cred = current_cred();
64247+
64248+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
64249+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
64250+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
64251+ 'g', GR_GLOBAL_GID(real), GR_GLOBAL_GID(effective), GR_GLOBAL_GID(fs), &task->signal->saved_ip);
64252+
64253+ return;
64254+}
64255+
64256+__u32
64257+gr_search_file(const struct dentry * dentry, const __u32 mode,
64258+ const struct vfsmount * mnt)
64259+{
64260+ __u32 retval = mode;
64261+ struct acl_subject_label *curracl;
64262+ struct acl_object_label *currobj;
64263+
64264+ if (unlikely(!(gr_status & GR_READY)))
64265+ return (mode & ~GR_AUDITS);
64266+
64267+ curracl = current->acl;
64268+
64269+ currobj = chk_obj_label(dentry, mnt, curracl);
64270+ retval = currobj->mode & mode;
64271+
64272+ /* if we're opening a specified transfer file for writing
64273+ (e.g. /dev/initctl), then transfer our role to init
64274+ */
64275+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
64276+ current->role->roletype & GR_ROLE_PERSIST)) {
64277+ struct task_struct *task = init_pid_ns.child_reaper;
64278+
64279+ if (task->role != current->role) {
64280+ task->acl_sp_role = 0;
64281+ task->acl_role_id = current->acl_role_id;
64282+ task->role = current->role;
64283+ rcu_read_lock();
64284+ read_lock(&grsec_exec_file_lock);
64285+ gr_apply_subject_to_task(task);
64286+ read_unlock(&grsec_exec_file_lock);
64287+ rcu_read_unlock();
64288+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
64289+ }
64290+ }
64291+
64292+ if (unlikely
64293+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
64294+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
64295+ __u32 new_mode = mode;
64296+
64297+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
64298+
64299+ retval = new_mode;
64300+
64301+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
64302+ new_mode |= GR_INHERIT;
64303+
64304+ if (!(mode & GR_NOLEARN))
64305+ gr_log_learn(dentry, mnt, new_mode);
64306+ }
64307+
64308+ return retval;
64309+}
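
The core of gr_search_file() is mask arithmetic: the request is a bitmask of wanted accesses, the object label a bitmask of granted ones, and a single AND decides; the request succeeds only if every requested non-audit bit survives. A compact sketch with illustrative flag values (the real GR_* constants differ):

#include <stdio.h>

/* illustrative flag values; the real GR_* constants differ */
#define GR_READ		0x01
#define GR_WRITE	0x02
#define GR_EXEC		0x04
#define GR_AUDIT_READ	0x10		/* audit bits aren't access rights */
#define GR_AUDITS	(GR_AUDIT_READ)

/* grant iff every requested non-audit bit survives the AND with the
   object's mode -- the comparison at the heart of gr_search_file() */
static int allowed(unsigned obj_mode, unsigned req_mode)
{
	unsigned retval = obj_mode & req_mode;

	return retval == (req_mode & ~GR_AUDITS);
}

int main(void)
{
	unsigned obj = GR_READ | GR_EXEC;	/* the label grants r and x */

	printf("read:         %s\n", allowed(obj, GR_READ) ? "ok" : "denied");
	printf("read+write:   %s\n", allowed(obj, GR_READ | GR_WRITE) ? "ok" : "denied");
	printf("audited read: %s\n", allowed(obj, GR_READ | GR_AUDIT_READ) ? "ok" : "denied");
	return 0;
}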
64310+
64311+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
64312+ const struct dentry *parent,
64313+ const struct vfsmount *mnt)
64314+{
64315+ struct name_entry *match;
64316+ struct acl_object_label *matchpo;
64317+ struct acl_subject_label *curracl;
64318+ char *path;
64319+
64320+ if (unlikely(!(gr_status & GR_READY)))
64321+ return NULL;
64322+
64323+ preempt_disable();
64324+ path = gr_to_filename_rbac(new_dentry, mnt);
64325+ match = lookup_name_entry_create(path);
64326+
64327+ curracl = current->acl;
64328+
64329+ if (match) {
64330+ read_lock(&gr_inode_lock);
64331+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
64332+ read_unlock(&gr_inode_lock);
64333+
64334+ if (matchpo) {
64335+ preempt_enable();
64336+ return matchpo;
64337+ }
64338+ }
64339+
64340+ // lookup parent
64341+
64342+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
64343+
64344+ preempt_enable();
64345+ return matchpo;
64346+}
64347+
64348+__u32
64349+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
64350+ const struct vfsmount * mnt, const __u32 mode)
64351+{
64352+ struct acl_object_label *matchpo;
64353+ __u32 retval;
64354+
64355+ if (unlikely(!(gr_status & GR_READY)))
64356+ return (mode & ~GR_AUDITS);
64357+
64358+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
64359+
64360+ retval = matchpo->mode & mode;
64361+
64362+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
64363+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
64364+ __u32 new_mode = mode;
64365+
64366+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
64367+
64368+ gr_log_learn(new_dentry, mnt, new_mode);
64369+ return new_mode;
64370+ }
64371+
64372+ return retval;
64373+}
64374+
64375+__u32
64376+gr_check_link(const struct dentry * new_dentry,
64377+ const struct dentry * parent_dentry,
64378+ const struct vfsmount * parent_mnt,
64379+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
64380+{
64381+ struct acl_object_label *obj;
64382+ __u32 oldmode, newmode;
64383+ __u32 needmode;
64384+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
64385+ GR_DELETE | GR_INHERIT;
64386+
64387+ if (unlikely(!(gr_status & GR_READY)))
64388+ return (GR_CREATE | GR_LINK);
64389+
64390+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
64391+ oldmode = obj->mode;
64392+
64393+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
64394+ newmode = obj->mode;
64395+
64396+ needmode = newmode & checkmodes;
64397+
64398+ // old name for hardlink must have at least the permissions of the new name
64399+ if ((oldmode & needmode) != needmode)
64400+ goto bad;
64401+
64402+ // if old name had restrictions/auditing, make sure the new name does as well
64403+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
64404+
64405+ // don't allow hardlinking of suid/sgid/fcapped files without permission
64406+ if (is_privileged_binary(old_dentry))
64407+ needmode |= GR_SETID;
64408+
64409+ if ((newmode & needmode) != needmode)
64410+ goto bad;
64411+
64412+ // enforce minimum permissions
64413+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
64414+ return newmode;
64415+bad:
64416+ needmode = oldmode;
64417+ if (is_privileged_binary(old_dentry))
64418+ needmode |= GR_SETID;
64419+
64420+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
64421+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
64422+ return (GR_CREATE | GR_LINK);
64423+ } else if (newmode & GR_SUPPRESS)
64424+ return GR_SUPPRESS;
64425+ else
64426+ return 0;
64427+}
64428+
64429+int
64430+gr_check_hidden_task(const struct task_struct *task)
64431+{
64432+ if (unlikely(!(gr_status & GR_READY)))
64433+ return 0;
64434+
64435+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
64436+ return 1;
64437+
64438+ return 0;
64439+}
64440+
64441+int
64442+gr_check_protected_task(const struct task_struct *task)
64443+{
64444+ if (unlikely(!(gr_status & GR_READY) || !task))
64445+ return 0;
64446+
64447+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
64448+ task->acl != current->acl)
64449+ return 1;
64450+
64451+ return 0;
64452+}
64453+
64454+int
64455+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
64456+{
64457+ struct task_struct *p;
64458+ int ret = 0;
64459+
64460+ if (unlikely(!(gr_status & GR_READY) || !pid))
64461+ return ret;
64462+
64463+ read_lock(&tasklist_lock);
64464+ do_each_pid_task(pid, type, p) {
64465+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
64466+ p->acl != current->acl) {
64467+ ret = 1;
64468+ goto out;
64469+ }
64470+ } while_each_pid_task(pid, type, p);
64471+out:
64472+ read_unlock(&tasklist_lock);
64473+
64474+ return ret;
64475+}
64476+
64477+void
64478+gr_copy_label(struct task_struct *tsk)
64479+{
64480+ tsk->signal->used_accept = 0;
64481+ tsk->acl_sp_role = 0;
64482+ tsk->acl_role_id = current->acl_role_id;
64483+ tsk->acl = current->acl;
64484+ tsk->role = current->role;
64485+ tsk->signal->curr_ip = current->signal->curr_ip;
64486+ tsk->signal->saved_ip = current->signal->saved_ip;
64487+ if (current->exec_file)
64488+ get_file(current->exec_file);
64489+ tsk->exec_file = current->exec_file;
64490+ tsk->is_writable = current->is_writable;
64491+ if (unlikely(current->signal->used_accept)) {
64492+ current->signal->curr_ip = 0;
64493+ current->signal->saved_ip = 0;
64494+ }
64495+
64496+ return;
64497+}
64498+
64499+static void
64500+gr_set_proc_res(struct task_struct *task)
64501+{
64502+ struct acl_subject_label *proc;
64503+ unsigned short i;
64504+
64505+ proc = task->acl;
64506+
64507+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
64508+ return;
64509+
64510+ for (i = 0; i < RLIM_NLIMITS; i++) {
64511+ if (!(proc->resmask & (1U << i)))
64512+ continue;
64513+
64514+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
64515+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
64516+
64517+ if (i == RLIMIT_CPU)
64518+ update_rlimit_cpu(task, proc->res[i].rlim_cur);
64519+ }
64520+
64521+ return;
64522+}
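
gr_set_proc_res() applies per-subject resource limits: resmask says which of the RLIM_NLIMITS slots the policy overrides, and only those are written into the task. A userspace sketch of the same mask-and-table shape using setrlimit(2); struct policy and apply_limits are illustrative, and the kernel version writes task->signal->rlim[] directly instead:

#include <stdio.h>
#include <sys/resource.h>

/* illustrative policy object: bit i of resmask set means "override
   resource i with res[i]", mirroring the subject's resmask/res[] pair */
struct policy {
	unsigned resmask;
	struct rlimit res[RLIM_NLIMITS];
};

static void apply_limits(const struct policy *p)
{
	for (unsigned i = 0; i < RLIM_NLIMITS; i++) {
		if (!(p->resmask & (1U << i)))
			continue;	/* the policy doesn't override this one */
		if (setrlimit((int)i, &p->res[i]) != 0)
			perror("setrlimit");
	}
}

int main(void)
{
	struct policy p = { 0 };
	struct rlimit r;

	p.resmask = 1U << RLIMIT_NOFILE;
	p.res[RLIMIT_NOFILE].rlim_cur = 256;
	p.res[RLIMIT_NOFILE].rlim_max = 256;
	apply_limits(&p);

	getrlimit(RLIMIT_NOFILE, &r);
	printf("RLIMIT_NOFILE now %llu/%llu\n",
	       (unsigned long long)r.rlim_cur, (unsigned long long)r.rlim_max);
	return 0;
}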
64523+
64524+extern int gr_process_kernel_setuid_ban(struct user_struct *user);
64525+
64526+int
64527+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
64528+{
64529+ unsigned int i;
64530+ __u16 num;
64531+ uid_t *uidlist;
64532+ uid_t curuid;
64533+ int realok = 0;
64534+ int effectiveok = 0;
64535+ int fsok = 0;
64536+ uid_t globalreal, globaleffective, globalfs;
64537+
64538+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT)
64539+ struct user_struct *user;
64540+
64541+ if (!uid_valid(real))
64542+ goto skipit;
64543+
64544+ /* find user based on global namespace */
64545+
64546+ globalreal = GR_GLOBAL_UID(real);
64547+
64548+ user = find_user(make_kuid(&init_user_ns, globalreal));
64549+ if (user == NULL)
64550+ goto skipit;
64551+
64552+ if (gr_process_kernel_setuid_ban(user)) {
64553+ /* for find_user */
64554+ free_uid(user);
64555+ return 1;
64556+ }
64557+
64558+ /* for find_user */
64559+ free_uid(user);
64560+
64561+skipit:
64562+#endif
64563+
64564+ if (unlikely(!(gr_status & GR_READY)))
64565+ return 0;
64566+
64567+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
64568+ gr_log_learn_uid_change(real, effective, fs);
64569+
64570+ num = current->acl->user_trans_num;
64571+ uidlist = current->acl->user_transitions;
64572+
64573+ if (uidlist == NULL)
64574+ return 0;
64575+
64576+ if (!uid_valid(real)) {
64577+ realok = 1;
64578+ globalreal = (uid_t)-1;
64579+ } else {
64580+ globalreal = GR_GLOBAL_UID(real);
64581+ }
64582+ if (!uid_valid(effective)) {
64583+ effectiveok = 1;
64584+ globaleffective = (uid_t)-1;
64585+ } else {
64586+ globaleffective = GR_GLOBAL_UID(effective);
64587+ }
64588+ if (!uid_valid(fs)) {
64589+ fsok = 1;
64590+ globalfs = (uid_t)-1;
64591+ } else {
64592+ globalfs = GR_GLOBAL_UID(fs);
64593+ }
64594+
64595+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
64596+ for (i = 0; i < num; i++) {
64597+ curuid = uidlist[i];
64598+ if (globalreal == curuid)
64599+ realok = 1;
64600+ if (globaleffective == curuid)
64601+ effectiveok = 1;
64602+ if (globalfs == curuid)
64603+ fsok = 1;
64604+ }
64605+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
64606+ for (i = 0; i < num; i++) {
64607+ curuid = uidlist[i];
64608+ if (globalreal == curuid)
64609+ break;
64610+ if (globaleffective == curuid)
64611+ break;
64612+ if (globalfs == curuid)
64613+ break;
64614+ }
64615+ /* not in deny list */
64616+ if (i == num) {
64617+ realok = 1;
64618+ effectiveok = 1;
64619+ fsok = 1;
64620+ }
64621+ }
64622+
64623+ if (realok && effectiveok && fsok)
64624+ return 0;
64625+ else {
64626+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
64627+ return 1;
64628+ }
64629+}
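
gr_check_user_change() (and its gid twin below) evaluate transition lists with two polarities: a GR_ID_ALLOW list permits the change only if each target id appears in it, while a GR_ID_DENY list rejects if any target id appears; invalid (unchanged, -1) ids are pre-approved in the kernel version, a nuance omitted here. A reduced sketch of that decision (transition_ok and the flag values are illustrative):

#include <stdio.h>

#define GR_ID_ALLOW	1
#define GR_ID_DENY	2

static int transition_ok(int type, const unsigned *list, unsigned n,
			 unsigned real, unsigned effective, unsigned fs)
{
	int realok = 0, effectiveok = 0, fsok = 0;
	unsigned i;

	if (type == GR_ID_ALLOW) {
		/* every target id must be present in the allow list */
		for (i = 0; i < n; i++) {
			if (list[i] == real)		realok = 1;
			if (list[i] == effective)	effectiveok = 1;
			if (list[i] == fs)		fsok = 1;
		}
	} else if (type == GR_ID_DENY) {
		/* any target id present in the deny list sinks the change */
		for (i = 0; i < n; i++)
			if (list[i] == real || list[i] == effective || list[i] == fs)
				break;
		if (i == n)	/* nothing matched the deny list */
			realok = effectiveok = fsok = 1;
	}
	return realok && effectiveok && fsok;
}

int main(void)
{
	unsigned allow[] = { 0, 33 };	/* may become root or uid 33 only */

	printf("0/33/33:     %d\n", transition_ok(GR_ID_ALLOW, allow, 2, 0, 33, 33));
	printf("0/1000/1000: %d\n", transition_ok(GR_ID_ALLOW, allow, 2, 0, 1000, 1000));
	return 0;
}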
64630+
64631+int
64632+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
64633+{
64634+ unsigned int i;
64635+ __u16 num;
64636+ gid_t *gidlist;
64637+ gid_t curgid;
64638+ int realok = 0;
64639+ int effectiveok = 0;
64640+ int fsok = 0;
64641+ gid_t globalreal, globaleffective, globalfs;
64642+
64643+ if (unlikely(!(gr_status & GR_READY)))
64644+ return 0;
64645+
64646+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
64647+ gr_log_learn_gid_change(real, effective, fs);
64648+
64649+ num = current->acl->group_trans_num;
64650+ gidlist = current->acl->group_transitions;
64651+
64652+ if (gidlist == NULL)
64653+ return 0;
64654+
64655+ if (!gid_valid(real)) {
64656+ realok = 1;
64657+ globalreal = (gid_t)-1;
64658+ } else {
64659+ globalreal = GR_GLOBAL_GID(real);
64660+ }
64661+ if (!gid_valid(effective)) {
64662+ effectiveok = 1;
64663+ globaleffective = (gid_t)-1;
64664+ } else {
64665+ globaleffective = GR_GLOBAL_GID(effective);
64666+ }
64667+ if (!gid_valid(fs)) {
64668+ fsok = 1;
64669+ globalfs = (gid_t)-1;
64670+ } else {
64671+ globalfs = GR_GLOBAL_GID(fs);
64672+ }
64673+
64674+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
64675+ for (i = 0; i < num; i++) {
64676+ curgid = gidlist[i];
64677+ if (globalreal == curgid)
64678+ realok = 1;
64679+ if (globaleffective == curgid)
64680+ effectiveok = 1;
64681+ if (globalfs == curgid)
64682+ fsok = 1;
64683+ }
64684+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
64685+ for (i = 0; i < num; i++) {
64686+ curgid = gidlist[i];
64687+ if (globalreal == curgid)
64688+ break;
64689+ if (globaleffective == curgid)
64690+ break;
64691+ if (globalfs == curgid)
64692+ break;
64693+ }
64694+ /* not in deny list */
64695+ if (i == num) {
64696+ realok = 1;
64697+ effectiveok = 1;
64698+ fsok = 1;
64699+ }
64700+ }
64701+
64702+ if (realok && effectiveok && fsok)
64703+ return 0;
64704+ else {
64705+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
64706+ return 1;
64707+ }
64708+}
64709+
64710+extern int gr_acl_is_capable(const int cap);
64711+
64712+void
64713+gr_set_role_label(struct task_struct *task, const kuid_t kuid, const kgid_t kgid)
64714+{
64715+ struct acl_role_label *role = task->role;
64716+ struct acl_subject_label *subj = NULL;
64717+ struct acl_object_label *obj;
64718+ struct file *filp;
64719+ uid_t uid;
64720+ gid_t gid;
64721+
64722+ if (unlikely(!(gr_status & GR_READY)))
64723+ return;
64724+
64725+ uid = GR_GLOBAL_UID(kuid);
64726+ gid = GR_GLOBAL_GID(kgid);
64727+
64728+ filp = task->exec_file;
64729+
64730+ /* kernel process, we'll give them the kernel role */
64731+ if (unlikely(!filp)) {
64732+ task->role = kernel_role;
64733+ task->acl = kernel_role->root_label;
64734+ return;
64735+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
64736+ role = lookup_acl_role_label(task, uid, gid);
64737+
64738+ /* don't change the role if we're not a privileged process */
64739+ if (role && task->role != role &&
64740+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
64741+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
64742+ return;
64743+
64744+	/* perform subject lookup in the possibly new role;
64745+	   we can use this result below in the case where role == task->role
64746+	   */
64747+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
64748+
64749+	/* if we changed uid/gid, but ended up in the same role
64750+	   and are using inheritance, don't lose the inherited subject:
64751+	   if the current subject differs from what a normal lookup
64752+	   would produce, we arrived at it via inheritance, so don't
64753+	   lose it
64754+	*/
64755+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
64756+ (subj == task->acl)))
64757+ task->acl = subj;
64758+
64759+ task->role = role;
64760+
64761+ task->is_writable = 0;
64762+
64763+ /* ignore additional mmap checks for processes that are writable
64764+ by the default ACL */
64765+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
64766+ if (unlikely(obj->mode & GR_WRITE))
64767+ task->is_writable = 1;
64768+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
64769+ if (unlikely(obj->mode & GR_WRITE))
64770+ task->is_writable = 1;
64771+
64772+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
64773+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
64774+#endif
64775+
64776+ gr_set_proc_res(task);
64777+
64778+ return;
64779+}
64780+
64781+int
64782+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
64783+ const int unsafe_flags)
64784+{
64785+ struct task_struct *task = current;
64786+ struct acl_subject_label *newacl;
64787+ struct acl_object_label *obj;
64788+ __u32 retmode;
64789+
64790+ if (unlikely(!(gr_status & GR_READY)))
64791+ return 0;
64792+
64793+ newacl = chk_subj_label(dentry, mnt, task->role);
64794+
64795+	/* special handling for the case where we did an strace -f -p <pid> from an
64796+	   admin role and the pid then did an exec
64797+	*/
64798+ rcu_read_lock();
64799+ read_lock(&tasklist_lock);
64800+ if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
64801+ (task->parent->acl->mode & GR_POVERRIDE))) {
64802+ read_unlock(&tasklist_lock);
64803+ rcu_read_unlock();
64804+ goto skip_check;
64805+ }
64806+ read_unlock(&tasklist_lock);
64807+ rcu_read_unlock();
64808+
64809+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
64810+ !(task->role->roletype & GR_ROLE_GOD) &&
64811+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
64812+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
64813+ if (unsafe_flags & LSM_UNSAFE_SHARE)
64814+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
64815+ else
64816+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
64817+ return -EACCES;
64818+ }
64819+
64820+skip_check:
64821+
64822+ obj = chk_obj_label(dentry, mnt, task->acl);
64823+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
64824+
64825+ if (!(task->acl->mode & GR_INHERITLEARN) &&
64826+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
64827+ if (obj->nested)
64828+ task->acl = obj->nested;
64829+ else
64830+ task->acl = newacl;
64831+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
64832+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
64833+
64834+ task->is_writable = 0;
64835+
64836+ /* ignore additional mmap checks for processes that are writable
64837+ by the default ACL */
64838+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
64839+ if (unlikely(obj->mode & GR_WRITE))
64840+ task->is_writable = 1;
64841+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
64842+ if (unlikely(obj->mode & GR_WRITE))
64843+ task->is_writable = 1;
64844+
64845+ gr_set_proc_res(task);
64846+
64847+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
64848+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
64849+#endif
64850+ return 0;
64851+}
64852+
64853+/* always called with valid inodev ptr */
64854+static void
64855+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
64856+{
64857+ struct acl_object_label *matchpo;
64858+ struct acl_subject_label *matchps;
64859+ struct acl_subject_label *subj;
64860+ struct acl_role_label *role;
64861+ unsigned int x;
64862+
64863+ FOR_EACH_ROLE_START(role)
64864+ FOR_EACH_SUBJECT_START(role, subj, x)
64865+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
64866+ matchpo->mode |= GR_DELETED;
64867+ FOR_EACH_SUBJECT_END(subj,x)
64868+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
64869+ /* nested subjects aren't in the role's subj_hash table */
64870+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
64871+ matchpo->mode |= GR_DELETED;
64872+ FOR_EACH_NESTED_SUBJECT_END(subj)
64873+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
64874+ matchps->mode |= GR_DELETED;
64875+ FOR_EACH_ROLE_END(role)
64876+
64877+ inodev->nentry->deleted = 1;
64878+
64879+ return;
64880+}
64881+
64882+void
64883+gr_handle_delete(const ino_t ino, const dev_t dev)
64884+{
64885+ struct inodev_entry *inodev;
64886+
64887+ if (unlikely(!(gr_status & GR_READY)))
64888+ return;
64889+
64890+ write_lock(&gr_inode_lock);
64891+ inodev = lookup_inodev_entry(ino, dev);
64892+ if (inodev != NULL)
64893+ do_handle_delete(inodev, ino, dev);
64894+ write_unlock(&gr_inode_lock);
64895+
64896+ return;
64897+}
64898+
64899+static void
64900+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
64901+ const ino_t newinode, const dev_t newdevice,
64902+ struct acl_subject_label *subj)
64903+{
64904+ unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
64905+ struct acl_object_label *match;
64906+
64907+ match = subj->obj_hash[index];
64908+
64909+ while (match && (match->inode != oldinode ||
64910+ match->device != olddevice ||
64911+ !(match->mode & GR_DELETED)))
64912+ match = match->next;
64913+
64914+ if (match && (match->inode == oldinode)
64915+ && (match->device == olddevice)
64916+ && (match->mode & GR_DELETED)) {
64917+ if (match->prev == NULL) {
64918+ subj->obj_hash[index] = match->next;
64919+ if (match->next != NULL)
64920+ match->next->prev = NULL;
64921+ } else {
64922+ match->prev->next = match->next;
64923+ if (match->next != NULL)
64924+ match->next->prev = match->prev;
64925+ }
64926+ match->prev = NULL;
64927+ match->next = NULL;
64928+ match->inode = newinode;
64929+ match->device = newdevice;
64930+ match->mode &= ~GR_DELETED;
64931+
64932+ insert_acl_obj_label(match, subj);
64933+ }
64934+
64935+ return;
64936+}
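
update_acl_obj_label(), update_acl_subj_label() and update_inodev_entry() below all perform the same rekeying dance on a chained hash table: find the deleted entry under its old inode:device key, unhook it from its bucket's doubly-linked chain, rewrite the key, and reinsert at the new bucket. A self-contained sketch of that maneuver (the bucket count and the hashfn/rekey helpers are illustrative):

#include <stdio.h>

#define NBUCKETS 8

struct entry {
	unsigned long key;		/* stands in for inode:device */
	struct entry *prev, *next;
};

static struct entry *buckets[NBUCKETS];

static unsigned hashfn(unsigned long key) { return key % NBUCKETS; }

static void insert(struct entry *e)
{
	unsigned b = hashfn(e->key);

	e->prev = NULL;
	e->next = buckets[b];
	if (buckets[b])
		buckets[b]->prev = e;
	buckets[b] = e;
}

static void rekey(struct entry *e, unsigned long newkey)
{
	unsigned b = hashfn(e->key);

	/* unhook from the old chain first -- the same prev/next surgery
	   as in the update_* functions above */
	if (e->prev == NULL) {
		buckets[b] = e->next;
		if (e->next)
			e->next->prev = NULL;
	} else {
		e->prev->next = e->next;
		if (e->next)
			e->next->prev = e->prev;
	}
	e->prev = e->next = NULL;
	e->key = newkey;	/* only now is it safe to rewrite the key */
	insert(e);		/* reinsert at the bucket the new key selects */
}

int main(void)
{
	struct entry e = { 42, NULL, NULL };

	insert(&e);
	rekey(&e, 43);
	printf("entry now in bucket %u\n", hashfn(e.key));
	return 0;
}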
64937+
64938+static void
64939+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
64940+ const ino_t newinode, const dev_t newdevice,
64941+ struct acl_role_label *role)
64942+{
64943+ unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
64944+ struct acl_subject_label *match;
64945+
64946+ match = role->subj_hash[index];
64947+
64948+ while (match && (match->inode != oldinode ||
64949+ match->device != olddevice ||
64950+ !(match->mode & GR_DELETED)))
64951+ match = match->next;
64952+
64953+ if (match && (match->inode == oldinode)
64954+ && (match->device == olddevice)
64955+ && (match->mode & GR_DELETED)) {
64956+ if (match->prev == NULL) {
64957+ role->subj_hash[index] = match->next;
64958+ if (match->next != NULL)
64959+ match->next->prev = NULL;
64960+ } else {
64961+ match->prev->next = match->next;
64962+ if (match->next != NULL)
64963+ match->next->prev = match->prev;
64964+ }
64965+ match->prev = NULL;
64966+ match->next = NULL;
64967+ match->inode = newinode;
64968+ match->device = newdevice;
64969+ match->mode &= ~GR_DELETED;
64970+
64971+ insert_acl_subj_label(match, role);
64972+ }
64973+
64974+ return;
64975+}
64976+
64977+static void
64978+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
64979+ const ino_t newinode, const dev_t newdevice)
64980+{
64981+ unsigned int index = gr_fhash(oldinode, olddevice, inodev_set.i_size);
64982+ struct inodev_entry *match;
64983+
64984+ match = inodev_set.i_hash[index];
64985+
64986+ while (match && (match->nentry->inode != oldinode ||
64987+ match->nentry->device != olddevice || !match->nentry->deleted))
64988+ match = match->next;
64989+
64990+ if (match && (match->nentry->inode == oldinode)
64991+ && (match->nentry->device == olddevice) &&
64992+ match->nentry->deleted) {
64993+ if (match->prev == NULL) {
64994+ inodev_set.i_hash[index] = match->next;
64995+ if (match->next != NULL)
64996+ match->next->prev = NULL;
64997+ } else {
64998+ match->prev->next = match->next;
64999+ if (match->next != NULL)
65000+ match->next->prev = match->prev;
65001+ }
65002+ match->prev = NULL;
65003+ match->next = NULL;
65004+ match->nentry->inode = newinode;
65005+ match->nentry->device = newdevice;
65006+ match->nentry->deleted = 0;
65007+
65008+ insert_inodev_entry(match);
65009+ }
65010+
65011+ return;
65012+}
65013+
65014+static void
65015+__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
65016+{
65017+ struct acl_subject_label *subj;
65018+ struct acl_role_label *role;
65019+ unsigned int x;
65020+
65021+ FOR_EACH_ROLE_START(role)
65022+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
65023+
65024+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
65025+			if ((subj->inode == matchn->inode) && (subj->device == matchn->device)) {
65026+ subj->inode = ino;
65027+ subj->device = dev;
65028+ }
65029+ /* nested subjects aren't in the role's subj_hash table */
65030+ update_acl_obj_label(matchn->inode, matchn->device,
65031+ ino, dev, subj);
65032+ FOR_EACH_NESTED_SUBJECT_END(subj)
65033+ FOR_EACH_SUBJECT_START(role, subj, x)
65034+ update_acl_obj_label(matchn->inode, matchn->device,
65035+ ino, dev, subj);
65036+ FOR_EACH_SUBJECT_END(subj,x)
65037+ FOR_EACH_ROLE_END(role)
65038+
65039+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
65040+
65041+ return;
65042+}
65043+
65044+static void
65045+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
65046+ const struct vfsmount *mnt)
65047+{
65048+ ino_t ino = dentry->d_inode->i_ino;
65049+ dev_t dev = __get_dev(dentry);
65050+
65051+ __do_handle_create(matchn, ino, dev);
65052+
65053+ return;
65054+}
65055+
65056+void
65057+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
65058+{
65059+ struct name_entry *matchn;
65060+
65061+ if (unlikely(!(gr_status & GR_READY)))
65062+ return;
65063+
65064+ preempt_disable();
65065+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
65066+
65067+ if (unlikely((unsigned long)matchn)) {
65068+ write_lock(&gr_inode_lock);
65069+ do_handle_create(matchn, dentry, mnt);
65070+ write_unlock(&gr_inode_lock);
65071+ }
65072+ preempt_enable();
65073+
65074+ return;
65075+}
65076+
65077+void
65078+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
65079+{
65080+ struct name_entry *matchn;
65081+
65082+ if (unlikely(!(gr_status & GR_READY)))
65083+ return;
65084+
65085+ preempt_disable();
65086+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
65087+
65088+ if (unlikely((unsigned long)matchn)) {
65089+ write_lock(&gr_inode_lock);
65090+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
65091+ write_unlock(&gr_inode_lock);
65092+ }
65093+ preempt_enable();
65094+
65095+ return;
65096+}
65097+
65098+void
65099+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
65100+ struct dentry *old_dentry,
65101+ struct dentry *new_dentry,
65102+ struct vfsmount *mnt, const __u8 replace)
65103+{
65104+ struct name_entry *matchn;
65105+ struct inodev_entry *inodev;
65106+ struct inode *inode = new_dentry->d_inode;
65107+ ino_t old_ino = old_dentry->d_inode->i_ino;
65108+ dev_t old_dev = __get_dev(old_dentry);
65109+
65110+	/* vfs_rename swaps the name and parent link for old_dentry and
65111+	   new_dentry.
65112+	   at this point, old_dentry has the new name, parent link, and inode
65113+	   for the renamed file.
65114+	   if a file is being replaced by a rename, new_dentry has the inode
65115+	   and name for the replaced file.
65116+	*/
65117+
65118+ if (unlikely(!(gr_status & GR_READY)))
65119+ return;
65120+
65121+ preempt_disable();
65122+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
65123+
65124+ /* we wouldn't have to check d_inode if it weren't for
65125+ NFS silly-renaming
65126+ */
65127+
65128+ write_lock(&gr_inode_lock);
65129+ if (unlikely(replace && inode)) {
65130+ ino_t new_ino = inode->i_ino;
65131+ dev_t new_dev = __get_dev(new_dentry);
65132+
65133+ inodev = lookup_inodev_entry(new_ino, new_dev);
65134+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
65135+ do_handle_delete(inodev, new_ino, new_dev);
65136+ }
65137+
65138+ inodev = lookup_inodev_entry(old_ino, old_dev);
65139+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
65140+ do_handle_delete(inodev, old_ino, old_dev);
65141+
65142+ if (unlikely((unsigned long)matchn))
65143+ do_handle_create(matchn, old_dentry, mnt);
65144+
65145+ write_unlock(&gr_inode_lock);
65146+ preempt_enable();
65147+
65148+ return;
65149+}
65150+
65151+static int
65152+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
65153+ unsigned char **sum)
65154+{
65155+ struct acl_role_label *r;
65156+ struct role_allowed_ip *ipp;
65157+ struct role_transition *trans;
65158+ unsigned int i;
65159+ int found = 0;
65160+ u32 curr_ip = current->signal->curr_ip;
65161+
65162+ current->signal->saved_ip = curr_ip;
65163+
65164+ /* check transition table */
65165+
65166+ for (trans = current->role->transitions; trans; trans = trans->next) {
65167+ if (!strcmp(rolename, trans->rolename)) {
65168+ found = 1;
65169+ break;
65170+ }
65171+ }
65172+
65173+ if (!found)
65174+ return 0;
65175+
65176+ /* handle special roles that do not require authentication
65177+ and check ip */
65178+
65179+ FOR_EACH_ROLE_START(r)
65180+ if (!strcmp(rolename, r->rolename) &&
65181+ (r->roletype & GR_ROLE_SPECIAL)) {
65182+ found = 0;
65183+ if (r->allowed_ips != NULL) {
65184+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
65185+ if ((ntohl(curr_ip) & ipp->netmask) ==
65186+ (ntohl(ipp->addr) & ipp->netmask))
65187+ found = 1;
65188+ }
65189+ } else
65190+ found = 2;
65191+ if (!found)
65192+ return 0;
65193+
65194+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
65195+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
65196+ *salt = NULL;
65197+ *sum = NULL;
65198+ return 1;
65199+ }
65200+ }
65201+ FOR_EACH_ROLE_END(r)
65202+
65203+ for (i = 0; i < num_sprole_pws; i++) {
65204+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
65205+ *salt = acl_special_roles[i]->salt;
65206+ *sum = acl_special_roles[i]->sum;
65207+ return 1;
65208+ }
65209+ }
65210+
65211+ return 0;
65212+}
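
The allowed-ip test above accepts a client when its address agrees with the role's address under the netmask, comparing in host byte order: (ntohl(curr_ip) & netmask) == (ntohl(addr) & netmask). A small userspace check of the same expression using inet_pton(3); ip_allowed and the sample addresses are illustrative:

#include <arpa/inet.h>
#include <stdio.h>

static int ip_allowed(const char *ip_s, const char *net_s, unsigned mask)
{
	struct in_addr ip, net;

	if (inet_pton(AF_INET, ip_s, &ip) != 1 ||
	    inet_pton(AF_INET, net_s, &net) != 1)
		return 0;

	/* compare in host byte order, as the code above does */
	return (ntohl(ip.s_addr) & mask) == (ntohl(net.s_addr) & mask);
}

int main(void)
{
	unsigned mask24 = 0xffffff00;	/* a /24 netmask */

	printf("10.0.0.42 in 10.0.0.0/24: %d\n",
	       ip_allowed("10.0.0.42", "10.0.0.0", mask24));
	printf("10.0.1.42 in 10.0.0.0/24: %d\n",
	       ip_allowed("10.0.1.42", "10.0.0.0", mask24));
	return 0;
}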
65213+
65214+static void
65215+assign_special_role(char *rolename)
65216+{
65217+ struct acl_object_label *obj;
65218+ struct acl_role_label *r;
65219+ struct acl_role_label *assigned = NULL;
65220+ struct task_struct *tsk;
65221+ struct file *filp;
65222+
65223+ FOR_EACH_ROLE_START(r)
65224+ if (!strcmp(rolename, r->rolename) &&
65225+ (r->roletype & GR_ROLE_SPECIAL)) {
65226+ assigned = r;
65227+ break;
65228+ }
65229+ FOR_EACH_ROLE_END(r)
65230+
65231+ if (!assigned)
65232+ return;
65233+
65234+ read_lock(&tasklist_lock);
65235+ read_lock(&grsec_exec_file_lock);
65236+
65237+ tsk = current->real_parent;
65238+ if (tsk == NULL)
65239+ goto out_unlock;
65240+
65241+ filp = tsk->exec_file;
65242+ if (filp == NULL)
65243+ goto out_unlock;
65244+
65245+ tsk->is_writable = 0;
65246+
65247+ tsk->acl_sp_role = 1;
65248+ tsk->acl_role_id = ++acl_sp_role_value;
65249+ tsk->role = assigned;
65250+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
65251+
65252+ /* ignore additional mmap checks for processes that are writable
65253+ by the default ACL */
65254+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
65255+ if (unlikely(obj->mode & GR_WRITE))
65256+ tsk->is_writable = 1;
65257+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
65258+ if (unlikely(obj->mode & GR_WRITE))
65259+ tsk->is_writable = 1;
65260+
65261+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
65262+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, task_pid_nr(tsk));
65263+#endif
65264+
65265+out_unlock:
65266+ read_unlock(&grsec_exec_file_lock);
65267+ read_unlock(&tasklist_lock);
65268+ return;
65269+}
65270+
65271+int gr_check_secure_terminal(struct task_struct *task)
65272+{
65273+ struct task_struct *p, *p2, *p3;
65274+ struct files_struct *files;
65275+ struct fdtable *fdt;
65276+ struct file *our_file = NULL, *file;
65277+ int i;
65278+
65279+ if (task->signal->tty == NULL)
65280+ return 1;
65281+
65282+ files = get_files_struct(task);
65283+ if (files != NULL) {
65284+ rcu_read_lock();
65285+ fdt = files_fdtable(files);
65286+ for (i=0; i < fdt->max_fds; i++) {
65287+ file = fcheck_files(files, i);
65288+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
65289+ get_file(file);
65290+ our_file = file;
65291+ }
65292+ }
65293+ rcu_read_unlock();
65294+ put_files_struct(files);
65295+ }
65296+
65297+ if (our_file == NULL)
65298+ return 1;
65299+
65300+ read_lock(&tasklist_lock);
65301+ do_each_thread(p2, p) {
65302+ files = get_files_struct(p);
65303+ if (files == NULL ||
65304+ (p->signal && p->signal->tty == task->signal->tty)) {
65305+ if (files != NULL)
65306+ put_files_struct(files);
65307+ continue;
65308+ }
65309+ rcu_read_lock();
65310+ fdt = files_fdtable(files);
65311+ for (i=0; i < fdt->max_fds; i++) {
65312+ file = fcheck_files(files, i);
65313+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
65314+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
65315+ p3 = task;
65316+ while (task_pid_nr(p3) > 0) {
65317+ if (p3 == p)
65318+ break;
65319+ p3 = p3->real_parent;
65320+ }
65321+ if (p3 == p)
65322+ break;
65323+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
65324+ gr_handle_alertkill(p);
65325+ rcu_read_unlock();
65326+ put_files_struct(files);
65327+ read_unlock(&tasklist_lock);
65328+ fput(our_file);
65329+ return 0;
65330+ }
65331+ }
65332+ rcu_read_unlock();
65333+ put_files_struct(files);
65334+ } while_each_thread(p2, p);
65335+ read_unlock(&tasklist_lock);
65336+
65337+ fput(our_file);
65338+ return 1;
65339+}
65340+
65341+static int gr_rbac_disable(void *unused)
65342+{
65343+ pax_open_kernel();
65344+ gr_status &= ~GR_READY;
65345+ pax_close_kernel();
65346+
65347+ return 0;
65348+}
65349+
65350+ssize_t
65351+write_grsec_handler(struct file *file, const char __user * buf, size_t count, loff_t *ppos)
65352+{
65353+ struct gr_arg_wrapper uwrap;
65354+ unsigned char *sprole_salt = NULL;
65355+ unsigned char *sprole_sum = NULL;
65356+ int error = 0;
65357+ int error2 = 0;
65358+ size_t req_count = 0;
65359+
65360+ mutex_lock(&gr_dev_mutex);
65361+
65362+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
65363+ error = -EPERM;
65364+ goto out;
65365+ }
65366+
65367+#ifdef CONFIG_COMPAT
65368+ pax_open_kernel();
65369+ if (is_compat_task()) {
65370+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_compat;
65371+ copy_gr_arg = &copy_gr_arg_compat;
65372+ copy_acl_object_label = &copy_acl_object_label_compat;
65373+ copy_acl_subject_label = &copy_acl_subject_label_compat;
65374+ copy_acl_role_label = &copy_acl_role_label_compat;
65375+ copy_acl_ip_label = &copy_acl_ip_label_compat;
65376+ copy_role_allowed_ip = &copy_role_allowed_ip_compat;
65377+ copy_role_transition = &copy_role_transition_compat;
65378+ copy_sprole_pw = &copy_sprole_pw_compat;
65379+ copy_gr_hash_struct = &copy_gr_hash_struct_compat;
65380+ copy_pointer_from_array = &copy_pointer_from_array_compat;
65381+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_compat;
65382+ } else {
65383+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_normal;
65384+ copy_gr_arg = &copy_gr_arg_normal;
65385+ copy_acl_object_label = &copy_acl_object_label_normal;
65386+ copy_acl_subject_label = &copy_acl_subject_label_normal;
65387+ copy_acl_role_label = &copy_acl_role_label_normal;
65388+ copy_acl_ip_label = &copy_acl_ip_label_normal;
65389+ copy_role_allowed_ip = &copy_role_allowed_ip_normal;
65390+ copy_role_transition = &copy_role_transition_normal;
65391+ copy_sprole_pw = &copy_sprole_pw_normal;
65392+ copy_gr_hash_struct = &copy_gr_hash_struct_normal;
65393+ copy_pointer_from_array = &copy_pointer_from_array_normal;
65394+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_normal;
65395+ }
65396+ pax_close_kernel();
65397+#endif
65398+
65399+ req_count = get_gr_arg_wrapper_size();
65400+
65401+ if (count != req_count) {
65402+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)req_count);
65403+ error = -EINVAL;
65404+ goto out;
65405+ }
65406+
65408+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
65409+ gr_auth_expires = 0;
65410+ gr_auth_attempts = 0;
65411+ }
65412+
65413+ error = copy_gr_arg_wrapper(buf, &uwrap);
65414+ if (error)
65415+ goto out;
65416+
65417+ error = copy_gr_arg(uwrap.arg, gr_usermode);
65418+ if (error)
65419+ goto out;
65420+
65421+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
65422+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
65423+ time_after(gr_auth_expires, get_seconds())) {
65424+ error = -EBUSY;
65425+ goto out;
65426+ }
65427+
65428+	/* if a non-root user is trying to do anything other than use a
65429+	   special role, do not attempt authentication and do not count it
65430+	   towards authentication lockout
65431+	*/
65432+
65433+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
65434+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
65435+ gr_is_global_nonroot(current_uid())) {
65436+ error = -EPERM;
65437+ goto out;
65438+ }
65439+
65440+ /* ensure pw and special role name are null terminated */
65441+
65442+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
65443+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
65444+
65445+	/* Okay.
65446+	 * We have enough of the argument structure (we have yet
65447+	 * to copy_from_user the tables themselves).  Copy the tables
65448+	 * only if we need them, i.e. for loading operations. */
65449+
65450+ switch (gr_usermode->mode) {
65451+ case GR_STATUS:
65452+ if (gr_status & GR_READY) {
65453+ error = 1;
65454+ if (!gr_check_secure_terminal(current))
65455+ error = 3;
65456+ } else
65457+ error = 2;
65458+ goto out;
65459+ case GR_SHUTDOWN:
65460+ if ((gr_status & GR_READY)
65461+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
65462+ stop_machine(gr_rbac_disable, NULL, NULL);
65463+ free_variables();
65464+ memset(gr_usermode, 0, sizeof (struct gr_arg));
65465+ memset(gr_system_salt, 0, GR_SALT_LEN);
65466+ memset(gr_system_sum, 0, GR_SHA_LEN);
65467+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
65468+ } else if (gr_status & GR_READY) {
65469+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
65470+ error = -EPERM;
65471+ } else {
65472+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
65473+ error = -EAGAIN;
65474+ }
65475+ break;
65476+ case GR_ENABLE:
65477+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
65478+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
65479+ else {
65480+ if (gr_status & GR_READY)
65481+ error = -EAGAIN;
65482+ else
65483+ error = error2;
65484+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
65485+ }
65486+ break;
65487+ case GR_RELOAD:
65488+ if (!(gr_status & GR_READY)) {
65489+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
65490+ error = -EAGAIN;
65491+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
65492+ stop_machine(gr_rbac_disable, NULL, NULL);
65493+ free_variables();
65494+ error2 = gracl_init(gr_usermode);
65495+ if (!error2)
65496+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
65497+ else {
65498+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
65499+ error = error2;
65500+ }
65501+ } else {
65502+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
65503+ error = -EPERM;
65504+ }
65505+ break;
65506+ case GR_SEGVMOD:
65507+ if (unlikely(!(gr_status & GR_READY))) {
65508+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
65509+ error = -EAGAIN;
65510+ break;
65511+ }
65512+
65513+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
65514+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
65515+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
65516+ struct acl_subject_label *segvacl;
65517+ segvacl =
65518+ lookup_acl_subj_label(gr_usermode->segv_inode,
65519+ gr_usermode->segv_device,
65520+ current->role);
65521+ if (segvacl) {
65522+ segvacl->crashes = 0;
65523+ segvacl->expires = 0;
65524+ }
65525+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
65526+ gr_remove_uid(gr_usermode->segv_uid);
65527+ }
65528+ } else {
65529+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
65530+ error = -EPERM;
65531+ }
65532+ break;
65533+ case GR_SPROLE:
65534+ case GR_SPROLEPAM:
65535+ if (unlikely(!(gr_status & GR_READY))) {
65536+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
65537+ error = -EAGAIN;
65538+ break;
65539+ }
65540+
65541+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
65542+ current->role->expires = 0;
65543+ current->role->auth_attempts = 0;
65544+ }
65545+
65546+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
65547+ time_after(current->role->expires, get_seconds())) {
65548+ error = -EBUSY;
65549+ goto out;
65550+ }
65551+
65552+ if (lookup_special_role_auth
65553+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
65554+ && ((!sprole_salt && !sprole_sum)
65555+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
65556+ char *p = "";
65557+ assign_special_role(gr_usermode->sp_role);
65558+ read_lock(&tasklist_lock);
65559+ if (current->real_parent)
65560+ p = current->real_parent->role->rolename;
65561+ read_unlock(&tasklist_lock);
65562+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
65563+ p, acl_sp_role_value);
65564+ } else {
65565+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
65566+ error = -EPERM;
65567+ if(!(current->role->auth_attempts++))
65568+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
65569+
65570+ goto out;
65571+ }
65572+ break;
65573+ case GR_UNSPROLE:
65574+ if (unlikely(!(gr_status & GR_READY))) {
65575+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
65576+ error = -EAGAIN;
65577+ break;
65578+ }
65579+
65580+ if (current->role->roletype & GR_ROLE_SPECIAL) {
65581+ char *p = "";
65582+ int i = 0;
65583+
65584+ read_lock(&tasklist_lock);
65585+ if (current->real_parent) {
65586+ p = current->real_parent->role->rolename;
65587+ i = current->real_parent->acl_role_id;
65588+ }
65589+ read_unlock(&tasklist_lock);
65590+
65591+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
65592+ gr_set_acls(1);
65593+ } else {
65594+ error = -EPERM;
65595+ goto out;
65596+ }
65597+ break;
65598+ default:
65599+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
65600+ error = -EINVAL;
65601+ break;
65602+ }
65603+
65604+ if (error != -EPERM)
65605+ goto out;
65606+
65607+ if(!(gr_auth_attempts++))
65608+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
65609+
65610+ out:
65611+ mutex_unlock(&gr_dev_mutex);
65612+
65613+ if (!error)
65614+ error = req_count;
65615+
65616+ return error;
65617+}
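
/*
 * Editor's note: a self-contained sketch of the lockout scheme used by
 * write_grsec_handler() above. The first failed attempt in a window arms
 * an expiry time; once the attempt count reaches the maximum, requests
 * fail with -EBUSY until the window elapses, after which both counters
 * reset. MAXTRIES and TIMEOUT below are illustrative stand-ins for
 * CONFIG_GRKERNSEC_ACL_MAXTRIES and CONFIG_GRKERNSEC_ACL_TIMEOUT.
 */
#include <errno.h>
#include <time.h>

#define MAXTRIES 3
#define TIMEOUT  30 /* seconds */

static unsigned long auth_attempts;
static time_t auth_expires;

static int auth_gate(time_t now, int auth_ok)
{
	if (auth_expires && now >= auth_expires) {
		auth_expires = 0;	/* window elapsed: forget old failures */
		auth_attempts = 0;
	}
	if (auth_attempts >= MAXTRIES && now < auth_expires)
		return -EBUSY;		/* locked out */
	if (auth_ok)
		return 0;
	if (!(auth_attempts++))		/* first failure arms the timer */
		auth_expires = now + TIMEOUT;
	return -EPERM;
}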
65618+
65619+/* must be called with
65620+ rcu_read_lock();
65621+ read_lock(&tasklist_lock);
65622+ read_lock(&grsec_exec_file_lock);
65623+*/
65624+int gr_apply_subject_to_task(struct task_struct *task)
65625+{
65626+ struct acl_object_label *obj;
65627+ char *tmpname;
65628+ struct acl_subject_label *tmpsubj;
65629+ struct file *filp;
65630+ struct name_entry *nmatch;
65631+
65632+ filp = task->exec_file;
65633+ if (filp == NULL)
65634+ return 0;
65635+
65636+	/* the following applies the correct subject
65637+	   to binaries that were already running when the
65638+	   RBAC system was enabled and that have been
65639+	   replaced or deleted since their execution
65640+	   -----
65641+	   when the RBAC system starts, the inode/dev
65642+	   from exec_file will be one that the RBAC system
65643+	   is unaware of.  It only knows the inode/dev
65644+	   of the file currently present on disk, or the
65645+	   absence of it.
65646+	*/
65647+ preempt_disable();
65648+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
65649+
65650+ nmatch = lookup_name_entry(tmpname);
65651+ preempt_enable();
65652+ tmpsubj = NULL;
65653+ if (nmatch) {
65654+ if (nmatch->deleted)
65655+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
65656+ else
65657+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
65658+ if (tmpsubj != NULL)
65659+ task->acl = tmpsubj;
65660+ }
65661+ if (tmpsubj == NULL)
65662+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
65663+ task->role);
65664+ if (task->acl) {
65665+ task->is_writable = 0;
65666+ /* ignore additional mmap checks for processes that are writable
65667+ by the default ACL */
65668+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
65669+ if (unlikely(obj->mode & GR_WRITE))
65670+ task->is_writable = 1;
65671+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
65672+ if (unlikely(obj->mode & GR_WRITE))
65673+ task->is_writable = 1;
65674+
65675+ gr_set_proc_res(task);
65676+
65677+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
65678+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
65679+#endif
65680+ } else {
65681+ return 1;
65682+ }
65683+
65684+ return 0;
65685+}
65686+
65687+int
65688+gr_set_acls(const int type)
65689+{
65690+ struct task_struct *task, *task2;
65691+ struct acl_role_label *role = current->role;
65692+ __u16 acl_role_id = current->acl_role_id;
65693+ const struct cred *cred;
65694+ int ret;
65695+
65696+ rcu_read_lock();
65697+ read_lock(&tasklist_lock);
65698+ read_lock(&grsec_exec_file_lock);
65699+ do_each_thread(task2, task) {
65700+ /* check to see if we're called from the exit handler,
65701+ if so, only replace ACLs that have inherited the admin
65702+ ACL */
65703+
65704+ if (type && (task->role != role ||
65705+ task->acl_role_id != acl_role_id))
65706+ continue;
65707+
65708+ task->acl_role_id = 0;
65709+ task->acl_sp_role = 0;
65710+
65711+ if (task->exec_file) {
65712+ cred = __task_cred(task);
65713+ task->role = lookup_acl_role_label(task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
65714+ ret = gr_apply_subject_to_task(task);
65715+ if (ret) {
65716+ read_unlock(&grsec_exec_file_lock);
65717+ read_unlock(&tasklist_lock);
65718+ rcu_read_unlock();
65719+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task_pid_nr(task));
65720+ return ret;
65721+ }
65722+ } else {
65723+ // it's a kernel process
65724+ task->role = kernel_role;
65725+ task->acl = kernel_role->root_label;
65726+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
65727+ task->acl->mode &= ~GR_PROCFIND;
65728+#endif
65729+ }
65730+ } while_each_thread(task2, task);
65731+ read_unlock(&grsec_exec_file_lock);
65732+ read_unlock(&tasklist_lock);
65733+ rcu_read_unlock();
65734+
65735+ return 0;
65736+}
65737+
65738+#if defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC)
65739+static const unsigned long res_learn_bumps[GR_NLIMITS] = {
65740+ [RLIMIT_CPU] = GR_RLIM_CPU_BUMP,
65741+ [RLIMIT_FSIZE] = GR_RLIM_FSIZE_BUMP,
65742+ [RLIMIT_DATA] = GR_RLIM_DATA_BUMP,
65743+ [RLIMIT_STACK] = GR_RLIM_STACK_BUMP,
65744+ [RLIMIT_CORE] = GR_RLIM_CORE_BUMP,
65745+ [RLIMIT_RSS] = GR_RLIM_RSS_BUMP,
65746+ [RLIMIT_NPROC] = GR_RLIM_NPROC_BUMP,
65747+ [RLIMIT_NOFILE] = GR_RLIM_NOFILE_BUMP,
65748+ [RLIMIT_MEMLOCK] = GR_RLIM_MEMLOCK_BUMP,
65749+ [RLIMIT_AS] = GR_RLIM_AS_BUMP,
65750+ [RLIMIT_LOCKS] = GR_RLIM_LOCKS_BUMP,
65751+ [RLIMIT_SIGPENDING] = GR_RLIM_SIGPENDING_BUMP,
65752+ [RLIMIT_MSGQUEUE] = GR_RLIM_MSGQUEUE_BUMP,
65753+ [RLIMIT_NICE] = GR_RLIM_NICE_BUMP,
65754+ [RLIMIT_RTPRIO] = GR_RLIM_RTPRIO_BUMP,
65755+ [RLIMIT_RTTIME] = GR_RLIM_RTTIME_BUMP
65756+};
65757+
65758+void
65759+gr_learn_resource(const struct task_struct *task,
65760+ const int res, const unsigned long wanted, const int gt)
65761+{
65762+ struct acl_subject_label *acl;
65763+ const struct cred *cred;
65764+
65765+ if (unlikely((gr_status & GR_READY) &&
65766+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
65767+ goto skip_reslog;
65768+
65769+ gr_log_resource(task, res, wanted, gt);
65770+skip_reslog:
65771+
65772+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
65773+ return;
65774+
65775+ acl = task->acl;
65776+
65777+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
65778+ !(acl->resmask & (1U << (unsigned short) res))))
65779+ return;
65780+
65781+ if (wanted >= acl->res[res].rlim_cur) {
65782+ unsigned long res_add;
65783+
65784+ res_add = wanted + res_learn_bumps[res];
65785+
65786+ acl->res[res].rlim_cur = res_add;
65787+
65788+ if (wanted > acl->res[res].rlim_max)
65789+ acl->res[res].rlim_max = res_add;
65790+
65791+ /* only log the subject filename, since resource logging is supported for
65792+ single-subject learning only */
65793+ rcu_read_lock();
65794+ cred = __task_cred(task);
65795+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
65796+ task->role->roletype, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), acl->filename,
65797+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
65798+ "", (unsigned long) res, &task->signal->saved_ip);
65799+ rcu_read_unlock();
65800+ }
65801+
65802+ return;
65803+}
65804+EXPORT_SYMBOL(gr_learn_resource);
65805+#endif
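
/*
 * Editor's note: the learning update in gr_learn_resource() above,
 * isolated. When a request exceeds the currently-learned soft limit, the
 * soft limit is raised to the request plus a per-resource headroom
 * ("bump", the GR_RLIM_*_BUMP table values); the hard limit follows only
 * when the request exceeds it as well. Hypothetical standalone form:
 */
struct learned_rlim { unsigned long cur, max; };

static void learn_bump(struct learned_rlim *r, unsigned long wanted,
		       unsigned long bump)
{
	if (wanted >= r->cur) {
		unsigned long raised = wanted + bump;

		r->cur = raised;
		if (wanted > r->max)
			r->max = raised;
	}
}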
65806+
65807+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
65808+void
65809+pax_set_initial_flags(struct linux_binprm *bprm)
65810+{
65811+ struct task_struct *task = current;
65812+ struct acl_subject_label *proc;
65813+ unsigned long flags;
65814+
65815+ if (unlikely(!(gr_status & GR_READY)))
65816+ return;
65817+
65818+ flags = pax_get_flags(task);
65819+
65820+ proc = task->acl;
65821+
65822+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
65823+ flags &= ~MF_PAX_PAGEEXEC;
65824+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
65825+ flags &= ~MF_PAX_SEGMEXEC;
65826+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
65827+ flags &= ~MF_PAX_RANDMMAP;
65828+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
65829+ flags &= ~MF_PAX_EMUTRAMP;
65830+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
65831+ flags &= ~MF_PAX_MPROTECT;
65832+
65833+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
65834+ flags |= MF_PAX_PAGEEXEC;
65835+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
65836+ flags |= MF_PAX_SEGMEXEC;
65837+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
65838+ flags |= MF_PAX_RANDMMAP;
65839+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
65840+ flags |= MF_PAX_EMUTRAMP;
65841+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
65842+ flags |= MF_PAX_MPROTECT;
65843+
65844+ pax_set_flags(task, flags);
65845+
65846+ return;
65847+}
65848+#endif
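
/*
 * Editor's sketch of the flag policy in pax_set_initial_flags() above:
 * per-subject GR_PAX_DISABLE_* bits clear the matching MF_PAX_* flags,
 * GR_PAX_ENABLE_* bits set them, and flags the subject does not mention
 * keep their inherited values. A generic helper showing the ordering
 * (enables are applied after disables, as in the original; the mask
 * names are hypothetical):
 */
static unsigned long apply_subject_flags(unsigned long flags,
					 unsigned long disable_mask,
					 unsigned long enable_mask)
{
	flags &= ~disable_mask;	/* explicit disables first */
	flags |= enable_mask;	/* explicit enables win if both are set */
	return flags;
}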
65849+
65850+int
65851+gr_handle_proc_ptrace(struct task_struct *task)
65852+{
65853+ struct file *filp;
65854+ struct task_struct *tmp = task;
65855+ struct task_struct *curtemp = current;
65856+ __u32 retmode;
65857+
65858+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
65859+ if (unlikely(!(gr_status & GR_READY)))
65860+ return 0;
65861+#endif
65862+
65863+ read_lock(&tasklist_lock);
65864+ read_lock(&grsec_exec_file_lock);
65865+ filp = task->exec_file;
65866+
65867+ while (task_pid_nr(tmp) > 0) {
65868+ if (tmp == curtemp)
65869+ break;
65870+ tmp = tmp->real_parent;
65871+ }
65872+
65873+ if (!filp || (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
65874+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
65875+ read_unlock(&grsec_exec_file_lock);
65876+ read_unlock(&tasklist_lock);
65877+ return 1;
65878+ }
65879+
65880+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
65881+ if (!(gr_status & GR_READY)) {
65882+ read_unlock(&grsec_exec_file_lock);
65883+ read_unlock(&tasklist_lock);
65884+ return 0;
65885+ }
65886+#endif
65887+
65888+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
65889+ read_unlock(&grsec_exec_file_lock);
65890+ read_unlock(&tasklist_lock);
65891+
65892+ if (retmode & GR_NOPTRACE)
65893+ return 1;
65894+
65895+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
65896+ && (current->acl != task->acl || (current->acl != current->role->root_label
65897+ && task_pid_nr(current) != task_pid_nr(task))))
65898+ return 1;
65899+
65900+ return 0;
65901+}
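
/*
 * Editor's note: the loop above (and its twin in gr_handle_ptrace()
 * below) walks real_parent links from the target up to init to decide
 * whether the current task is an ancestor of the target. Toy standalone
 * version (toy_task is hypothetical, standing in for task_struct):
 */
struct toy_task { int pid; struct toy_task *real_parent; };

static int is_ancestor(const struct toy_task *tracer,
		       const struct toy_task *target)
{
	const struct toy_task *t = target;

	while (t->pid > 0) {		/* stop at init/swapper (pid 0) */
		if (t == tracer)
			return 1;
		t = t->real_parent;
	}
	return 0;
}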
65902+
65903+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
65904+{
65905+ if (unlikely(!(gr_status & GR_READY)))
65906+ return;
65907+
65908+ if (!(current->role->roletype & GR_ROLE_GOD))
65909+ return;
65910+
65911+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
65912+ p->role->rolename, gr_task_roletype_to_char(p),
65913+ p->acl->filename);
65914+}
65915+
65916+int
65917+gr_handle_ptrace(struct task_struct *task, const long request)
65918+{
65919+ struct task_struct *tmp = task;
65920+ struct task_struct *curtemp = current;
65921+ __u32 retmode;
65922+
65923+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
65924+ if (unlikely(!(gr_status & GR_READY)))
65925+ return 0;
65926+#endif
65927+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
65928+ read_lock(&tasklist_lock);
65929+ while (task_pid_nr(tmp) > 0) {
65930+ if (tmp == curtemp)
65931+ break;
65932+ tmp = tmp->real_parent;
65933+ }
65934+
65935+ if (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
65936+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
65937+ read_unlock(&tasklist_lock);
65938+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
65939+ return 1;
65940+ }
65941+ read_unlock(&tasklist_lock);
65942+ }
65943+
65944+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
65945+ if (!(gr_status & GR_READY))
65946+ return 0;
65947+#endif
65948+
65949+ read_lock(&grsec_exec_file_lock);
65950+ if (unlikely(!task->exec_file)) {
65951+ read_unlock(&grsec_exec_file_lock);
65952+ return 0;
65953+ }
65954+
65955+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
65956+ read_unlock(&grsec_exec_file_lock);
65957+
65958+ if (retmode & GR_NOPTRACE) {
65959+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
65960+ return 1;
65961+ }
65962+
65963+ if (retmode & GR_PTRACERD) {
65964+ switch (request) {
65965+ case PTRACE_SEIZE:
65966+ case PTRACE_POKETEXT:
65967+ case PTRACE_POKEDATA:
65968+ case PTRACE_POKEUSR:
65969+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
65970+ case PTRACE_SETREGS:
65971+ case PTRACE_SETFPREGS:
65972+#endif
65973+#ifdef CONFIG_X86
65974+ case PTRACE_SETFPXREGS:
65975+#endif
65976+#ifdef CONFIG_ALTIVEC
65977+ case PTRACE_SETVRREGS:
65978+#endif
65979+ return 1;
65980+ default:
65981+ return 0;
65982+ }
65983+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
65984+ !(current->role->roletype & GR_ROLE_GOD) &&
65985+ (current->acl != task->acl)) {
65986+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
65987+ return 1;
65988+ }
65989+
65990+ return 0;
65991+}
65992+
65993+static int is_writable_mmap(const struct file *filp)
65994+{
65995+ struct task_struct *task = current;
65996+ struct acl_object_label *obj, *obj2;
65997+
65998+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
65999+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
66000+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
66001+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
66002+ task->role->root_label);
66003+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
66004+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
66005+ return 1;
66006+ }
66007+ }
66008+ return 0;
66009+}
66010+
66011+int
66012+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
66013+{
66014+ __u32 mode;
66015+
66016+ if (unlikely(!file || !(prot & PROT_EXEC)))
66017+ return 1;
66018+
66019+ if (is_writable_mmap(file))
66020+ return 0;
66021+
66022+ mode =
66023+ gr_search_file(file->f_path.dentry,
66024+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
66025+ file->f_path.mnt);
66026+
66027+ if (!gr_tpe_allow(file))
66028+ return 0;
66029+
66030+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
66031+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
66032+ return 0;
66033+ } else if (unlikely(!(mode & GR_EXEC))) {
66034+ return 0;
66035+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
66036+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
66037+ return 1;
66038+ }
66039+
66040+ return 1;
66041+}
66042+
66043+int
66044+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
66045+{
66046+ __u32 mode;
66047+
66048+ if (unlikely(!file || !(prot & PROT_EXEC)))
66049+ return 1;
66050+
66051+ if (is_writable_mmap(file))
66052+ return 0;
66053+
66054+ mode =
66055+ gr_search_file(file->f_path.dentry,
66056+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
66057+ file->f_path.mnt);
66058+
66059+ if (!gr_tpe_allow(file))
66060+ return 0;
66061+
66062+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
66063+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
66064+ return 0;
66065+ } else if (unlikely(!(mode & GR_EXEC))) {
66066+ return 0;
66067+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
66068+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
66069+ return 1;
66070+ }
66071+
66072+ return 1;
66073+}
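
/*
 * Editor's note: gr_acl_handle_mmap() and gr_acl_handle_mprotect() above
 * share one decision pattern that recurs throughout this file: deny and
 * log when the required bits are missing and GR_SUPPRESS is clear, deny
 * silently when GR_SUPPRESS is set, allow and log when an audit bit
 * accompanies the grant, and allow silently otherwise. Distilled with
 * hypothetical flag values:
 */
enum { REQ = 1, AUDIT = 2, SUPPRESS = 4 };

static int decide(unsigned int mode,
		  void (*log_deny)(void), void (*log_grant)(void))
{
	if (!(mode & REQ) && !(mode & SUPPRESS)) {
		log_deny();		/* denied, loudly */
		return 0;
	}
	if (!(mode & REQ))
		return 0;		/* denied, suppressed */
	if (mode & AUDIT)
		log_grant();		/* granted, audited */
	return 1;
}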
66074+
66075+void
66076+gr_acl_handle_psacct(struct task_struct *task, const long code)
66077+{
66078+ unsigned long runtime;
66079+ unsigned long cputime;
66080+ unsigned int wday, cday;
66081+ __u8 whr, chr;
66082+ __u8 wmin, cmin;
66083+ __u8 wsec, csec;
66084+ struct timespec timeval;
66085+
66086+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
66087+ !(task->acl->mode & GR_PROCACCT)))
66088+ return;
66089+
66090+ do_posix_clock_monotonic_gettime(&timeval);
66091+ runtime = timeval.tv_sec - task->start_time.tv_sec;
66092+ wday = runtime / (3600 * 24);
66093+ runtime -= wday * (3600 * 24);
66094+ whr = runtime / 3600;
66095+ runtime -= whr * 3600;
66096+ wmin = runtime / 60;
66097+ runtime -= wmin * 60;
66098+ wsec = runtime;
66099+
66100+ cputime = (task->utime + task->stime) / HZ;
66101+ cday = cputime / (3600 * 24);
66102+ cputime -= cday * (3600 * 24);
66103+ chr = cputime / 3600;
66104+ cputime -= chr * 3600;
66105+ cmin = cputime / 60;
66106+ cputime -= cmin * 60;
66107+ csec = cputime;
66108+
66109+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
66110+
66111+ return;
66112+}
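
/*
 * Editor's note: the wallclock/cputime breakdown above is a plain radix
 * decomposition of a second count into days, hours, minutes, and
 * seconds. Standalone form of the same arithmetic:
 */
static void split_seconds(unsigned long t, unsigned int *days,
			  unsigned char *hrs, unsigned char *mins,
			  unsigned char *secs)
{
	*days = t / (3600 * 24);
	t -= *days * (3600UL * 24);
	*hrs = t / 3600;
	t -= *hrs * 3600UL;
	*mins = t / 60;
	t -= *mins * 60;
	*secs = t;
}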
66113+
66114+void gr_set_kernel_label(struct task_struct *task)
66115+{
66116+ if (gr_status & GR_READY) {
66117+ task->role = kernel_role;
66118+ task->acl = kernel_role->root_label;
66119+ }
66120+ return;
66121+}
66122+
66123+#ifdef CONFIG_TASKSTATS
66124+int gr_is_taskstats_denied(int pid)
66125+{
66126+ struct task_struct *task;
66127+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66128+ const struct cred *cred;
66129+#endif
66130+ int ret = 0;
66131+
66132+ /* restrict taskstats viewing to un-chrooted root users
66133+ who have the 'view' subject flag if the RBAC system is enabled
66134+ */
66135+
66136+ rcu_read_lock();
66137+ read_lock(&tasklist_lock);
66138+ task = find_task_by_vpid(pid);
66139+ if (task) {
66140+#ifdef CONFIG_GRKERNSEC_CHROOT
66141+ if (proc_is_chrooted(task))
66142+ ret = -EACCES;
66143+#endif
66144+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66145+ cred = __task_cred(task);
66146+#ifdef CONFIG_GRKERNSEC_PROC_USER
66147+ if (gr_is_global_nonroot(cred->uid))
66148+ ret = -EACCES;
66149+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66150+ if (gr_is_global_nonroot(cred->uid) && !groups_search(cred->group_info, grsec_proc_gid))
66151+ ret = -EACCES;
66152+#endif
66153+#endif
66154+ if (gr_status & GR_READY) {
66155+ if (!(task->acl->mode & GR_VIEW))
66156+ ret = -EACCES;
66157+ }
66158+ } else
66159+ ret = -ENOENT;
66160+
66161+ read_unlock(&tasklist_lock);
66162+ rcu_read_unlock();
66163+
66164+ return ret;
66165+}
66166+#endif
66167+
66168+/* AUXV entries are filled via a descendant of search_binary_handler
66169+ after we've already applied the subject for the target
66170+*/
66171+int gr_acl_enable_at_secure(void)
66172+{
66173+ if (unlikely(!(gr_status & GR_READY)))
66174+ return 0;
66175+
66176+ if (current->acl->mode & GR_ATSECURE)
66177+ return 1;
66178+
66179+ return 0;
66180+}
66181+
66182+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
66183+{
66184+ struct task_struct *task = current;
66185+ struct dentry *dentry = file->f_path.dentry;
66186+ struct vfsmount *mnt = file->f_path.mnt;
66187+ struct acl_object_label *obj, *tmp;
66188+ struct acl_subject_label *subj;
66189+ unsigned int bufsize;
66190+ int is_not_root;
66191+ char *path;
66192+ dev_t dev = __get_dev(dentry);
66193+
66194+ if (unlikely(!(gr_status & GR_READY)))
66195+ return 1;
66196+
66197+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
66198+ return 1;
66199+
66200+	/* private (IS_PRIVATE) inodes are exempt from these checks ("ignore Eric Biederman") */
66201+ if (IS_PRIVATE(dentry->d_inode))
66202+ return 1;
66203+
66204+ subj = task->acl;
66205+ read_lock(&gr_inode_lock);
66206+ do {
66207+ obj = lookup_acl_obj_label(ino, dev, subj);
66208+ if (obj != NULL) {
66209+ read_unlock(&gr_inode_lock);
66210+ return (obj->mode & GR_FIND) ? 1 : 0;
66211+ }
66212+ } while ((subj = subj->parent_subject));
66213+ read_unlock(&gr_inode_lock);
66214+
66215+	/* this is purely an optimization, since we're looking for an object
66216+	   for the directory we're doing a readdir on.
66217+	   if it's possible for any globbed object to match the entry we're
66218+	   filling into the directory, then the object we find here will be
66219+	   an anchor point with attached globbed objects
66220+	*/
66221+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
66222+ if (obj->globbed == NULL)
66223+ return (obj->mode & GR_FIND) ? 1 : 0;
66224+
66225+ is_not_root = ((obj->filename[0] == '/') &&
66226+ (obj->filename[1] == '\0')) ? 0 : 1;
66227+ bufsize = PAGE_SIZE - namelen - is_not_root;
66228+
66229+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
66230+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
66231+ return 1;
66232+
66233+ preempt_disable();
66234+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
66235+ bufsize);
66236+
66237+ bufsize = strlen(path);
66238+
66239+ /* if base is "/", don't append an additional slash */
66240+ if (is_not_root)
66241+ *(path + bufsize) = '/';
66242+ memcpy(path + bufsize + is_not_root, name, namelen);
66243+ *(path + bufsize + namelen + is_not_root) = '\0';
66244+
66245+ tmp = obj->globbed;
66246+ while (tmp) {
66247+ if (!glob_match(tmp->filename, path)) {
66248+ preempt_enable();
66249+ return (tmp->mode & GR_FIND) ? 1 : 0;
66250+ }
66251+ tmp = tmp->next;
66252+ }
66253+ preempt_enable();
66254+ return (obj->mode & GR_FIND) ? 1 : 0;
66255+}
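
/*
 * Editor's note: the "(bufsize - 1) > (PAGE_SIZE - 1)" test above folds
 * two range checks into a single unsigned comparison: when bufsize is 0
 * the subtraction wraps to the type's maximum, which also exceeds
 * PAGE_SIZE - 1. Explicit equivalent:
 */
static int bufsize_invalid(unsigned int bufsize, unsigned int page_size)
{
	/* same result as: (bufsize - 1) > (page_size - 1) */
	return bufsize == 0 || bufsize > page_size;
}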
66256+
66257+void gr_put_exec_file(struct task_struct *task)
66258+{
66259+ struct file *filp;
66260+
66261+ write_lock(&grsec_exec_file_lock);
66262+ filp = task->exec_file;
66263+ task->exec_file = NULL;
66264+ write_unlock(&grsec_exec_file_lock);
66265+
66266+ if (filp)
66267+ fput(filp);
66268+
66269+ return;
66270+}
66271+
66272+
66273+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
66274+EXPORT_SYMBOL(gr_acl_is_enabled);
66275+#endif
66276+EXPORT_SYMBOL(gr_set_kernel_label);
66277+#ifdef CONFIG_SECURITY
66278+EXPORT_SYMBOL(gr_check_user_change);
66279+EXPORT_SYMBOL(gr_check_group_change);
66280+#endif
66281+
66282diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
66283new file mode 100644
66284index 0000000..34fefda
66285--- /dev/null
66286+++ b/grsecurity/gracl_alloc.c
66287@@ -0,0 +1,105 @@
66288+#include <linux/kernel.h>
66289+#include <linux/mm.h>
66290+#include <linux/slab.h>
66291+#include <linux/vmalloc.h>
66292+#include <linux/gracl.h>
66293+#include <linux/grsecurity.h>
66294+
66295+static unsigned long alloc_stack_next = 1;
66296+static unsigned long alloc_stack_size = 1;
66297+static void **alloc_stack;
66298+
66299+static __inline__ int
66300+alloc_pop(void)
66301+{
66302+ if (alloc_stack_next == 1)
66303+ return 0;
66304+
66305+ kfree(alloc_stack[alloc_stack_next - 2]);
66306+
66307+ alloc_stack_next--;
66308+
66309+ return 1;
66310+}
66311+
66312+static __inline__ int
66313+alloc_push(void *buf)
66314+{
66315+ if (alloc_stack_next >= alloc_stack_size)
66316+ return 1;
66317+
66318+ alloc_stack[alloc_stack_next - 1] = buf;
66319+
66320+ alloc_stack_next++;
66321+
66322+ return 0;
66323+}
66324+
66325+void *
66326+acl_alloc(unsigned long len)
66327+{
66328+ void *ret = NULL;
66329+
66330+ if (!len || len > PAGE_SIZE)
66331+ goto out;
66332+
66333+ ret = kmalloc(len, GFP_KERNEL);
66334+
66335+ if (ret) {
66336+ if (alloc_push(ret)) {
66337+ kfree(ret);
66338+ ret = NULL;
66339+ }
66340+ }
66341+
66342+out:
66343+ return ret;
66344+}
66345+
66346+void *
66347+acl_alloc_num(unsigned long num, unsigned long len)
66348+{
66349+ if (!len || (num > (PAGE_SIZE / len)))
66350+ return NULL;
66351+
66352+ return acl_alloc(num * len);
66353+}
66354+
66355+void
66356+acl_free_all(void)
66357+{
66358+ if (gr_acl_is_enabled() || !alloc_stack)
66359+ return;
66360+
66361+ while (alloc_pop()) ;
66362+
66363+ if (alloc_stack) {
66364+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
66365+ kfree(alloc_stack);
66366+ else
66367+ vfree(alloc_stack);
66368+ }
66369+
66370+ alloc_stack = NULL;
66371+ alloc_stack_size = 1;
66372+ alloc_stack_next = 1;
66373+
66374+ return;
66375+}
66376+
66377+int
66378+acl_alloc_stack_init(unsigned long size)
66379+{
66380+ if ((size * sizeof (void *)) <= PAGE_SIZE)
66381+ alloc_stack =
66382+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
66383+ else
66384+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
66385+
66386+ alloc_stack_size = size;
66387+
66388+ if (!alloc_stack)
66389+ return 0;
66390+ else
66391+ return 1;
66392+}
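
/*
 * Editor's note: this file implements a simple arena allocator -- every
 * acl_alloc() result is recorded on a pre-sized pointer stack so that
 * acl_free_all() can release an entire policy in one sweep. A hedged
 * usage sketch against the helpers defined above, assuming the RBAC
 * system is not yet enabled (acl_free_all() is a no-op while it is):
 */
static int build_policy_example(void)
{
	void *a, *b;

	if (!acl_alloc_stack_init(128))	/* reserve a 128-slot tracking stack */
		return -1;

	a = acl_alloc(64);		/* recorded on the stack */
	b = acl_alloc_num(16, 8);	/* 16 elements of 8 bytes, overflow-checked */
	if (!a || !b) {
		acl_free_all();		/* frees everything recorded so far */
		return -1;
	}
	/* ... hand a and b off to the policy structures ... */
	return 0;
}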
66393diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
66394new file mode 100644
66395index 0000000..bdd51ea
66396--- /dev/null
66397+++ b/grsecurity/gracl_cap.c
66398@@ -0,0 +1,110 @@
66399+#include <linux/kernel.h>
66400+#include <linux/module.h>
66401+#include <linux/sched.h>
66402+#include <linux/gracl.h>
66403+#include <linux/grsecurity.h>
66404+#include <linux/grinternal.h>
66405+
66406+extern const char *captab_log[];
66407+extern int captab_log_entries;
66408+
66409+int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
66410+{
66411+ struct acl_subject_label *curracl;
66412+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
66413+ kernel_cap_t cap_audit = __cap_empty_set;
66414+
66415+ if (!gr_acl_is_enabled())
66416+ return 1;
66417+
66418+ curracl = task->acl;
66419+
66420+ cap_drop = curracl->cap_lower;
66421+ cap_mask = curracl->cap_mask;
66422+ cap_audit = curracl->cap_invert_audit;
66423+
66424+ while ((curracl = curracl->parent_subject)) {
66425+		/* if the cap isn't specified in the current computed mask but is
66426+		   specified in the current level subject, add the subject's setting
66427+		   to the computed mask; if that subject also lowers the cap, add it
66428+		   to the set of dropped capabilities
66429+		*/
66430+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
66431+ cap_raise(cap_mask, cap);
66432+ if (cap_raised(curracl->cap_lower, cap))
66433+ cap_raise(cap_drop, cap);
66434+ if (cap_raised(curracl->cap_invert_audit, cap))
66435+ cap_raise(cap_audit, cap);
66436+ }
66437+ }
66438+
66439+ if (!cap_raised(cap_drop, cap)) {
66440+ if (cap_raised(cap_audit, cap))
66441+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
66442+ return 1;
66443+ }
66444+
66445+ curracl = task->acl;
66446+
66447+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
66448+ && cap_raised(cred->cap_effective, cap)) {
66449+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
66450+ task->role->roletype, GR_GLOBAL_UID(cred->uid),
66451+ GR_GLOBAL_GID(cred->gid), task->exec_file ?
66452+ gr_to_filename(task->exec_file->f_path.dentry,
66453+ task->exec_file->f_path.mnt) : curracl->filename,
66454+ curracl->filename, 0UL,
66455+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
66456+ return 1;
66457+ }
66458+
66459+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
66460+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
66461+
66462+ return 0;
66463+}
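
/*
 * Editor's sketch of the inheritance walk above, with plain bitmasks in
 * place of kernel_cap_t (toy types, not the kernel API). Parents are
 * consulted only for capabilities the computed mask does not yet cover,
 * so the nearest subject in the chain to mention a capability decides
 * whether it is dropped:
 */
struct toy_subj {
	unsigned long cap_mask;		/* caps this subject has an opinion on */
	unsigned long cap_lower;	/* caps this subject drops */
	struct toy_subj *parent_subject;
};

static int toy_cap_allowed(const struct toy_subj *s, int cap)
{
	unsigned long bit = 1UL << cap;
	unsigned long mask = s->cap_mask;
	unsigned long drop = s->cap_lower;

	while ((s = s->parent_subject)) {
		if (!(mask & bit) && (s->cap_mask & bit)) {
			mask |= bit;		/* nearest opinion wins */
			if (s->cap_lower & bit)
				drop |= bit;
		}
	}
	return !(drop & bit);
}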
66464+
66465+int
66466+gr_acl_is_capable(const int cap)
66467+{
66468+ return gr_task_acl_is_capable(current, current_cred(), cap);
66469+}
66470+
66471+int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
66472+{
66473+ struct acl_subject_label *curracl;
66474+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
66475+
66476+ if (!gr_acl_is_enabled())
66477+ return 1;
66478+
66479+ curracl = task->acl;
66480+
66481+ cap_drop = curracl->cap_lower;
66482+ cap_mask = curracl->cap_mask;
66483+
66484+ while ((curracl = curracl->parent_subject)) {
66485+		/* if the cap isn't specified in the current computed mask but is
66486+		   specified in the current level subject, add the subject's setting
66487+		   to the computed mask; if that subject also lowers the cap, add it
66488+		   to the set of dropped capabilities
66489+		*/
66490+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
66491+ cap_raise(cap_mask, cap);
66492+ if (cap_raised(curracl->cap_lower, cap))
66493+ cap_raise(cap_drop, cap);
66494+ }
66495+ }
66496+
66497+ if (!cap_raised(cap_drop, cap))
66498+ return 1;
66499+
66500+ return 0;
66501+}
66502+
66503+int
66504+gr_acl_is_capable_nolog(const int cap)
66505+{
66506+ return gr_task_acl_is_capable_nolog(current, cap);
66507+}
66508+
66509diff --git a/grsecurity/gracl_compat.c b/grsecurity/gracl_compat.c
66510new file mode 100644
66511index 0000000..a43dd06
66512--- /dev/null
66513+++ b/grsecurity/gracl_compat.c
66514@@ -0,0 +1,269 @@
66515+#include <linux/kernel.h>
66516+#include <linux/gracl.h>
66517+#include <linux/compat.h>
66518+#include <linux/gracl_compat.h>
66519+
66520+#include <asm/uaccess.h>
66521+
66522+int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap)
66523+{
66524+ struct gr_arg_wrapper_compat uwrapcompat;
66525+
66526+ if (copy_from_user(&uwrapcompat, buf, sizeof(uwrapcompat)))
66527+ return -EFAULT;
66528+
66529+ if ((uwrapcompat.version != GRSECURITY_VERSION) ||
66530+ (uwrapcompat.size != sizeof(struct gr_arg_compat)))
66531+ return -EINVAL;
66532+
66533+ uwrap->arg = compat_ptr(uwrapcompat.arg);
66534+ uwrap->version = uwrapcompat.version;
66535+ uwrap->size = sizeof(struct gr_arg);
66536+
66537+ return 0;
66538+}
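
/*
 * Editor's note: every copy_*_compat() helper in this file follows the
 * shape below: copy the packed 32-bit userspace layout with a single
 * copy_from_user(), then widen it field by field, converting
 * compat_uptr_t members back into kernel pointers with compat_ptr().
 * A template with a hypothetical struct pair (foo/foo_compat are
 * illustrative, not part of the patch):
 */
struct foo { void *ptr; unsigned long len; };
struct foo_compat { compat_uptr_t ptr; compat_ulong_t len; };

static int copy_foo_compat(struct foo *dst, const void __user *userp)
{
	struct foo_compat tmp;

	if (copy_from_user(&tmp, userp, sizeof(tmp)))
		return -EFAULT;

	dst->ptr = compat_ptr(tmp.ptr);	/* widen the 32-bit user pointer */
	dst->len = tmp.len;
	return 0;
}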
66539+
66540+int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg)
66541+{
66542+ struct gr_arg_compat argcompat;
66543+
66544+ if (copy_from_user(&argcompat, buf, sizeof(argcompat)))
66545+ return -EFAULT;
66546+
66547+ arg->role_db.r_table = compat_ptr(argcompat.role_db.r_table);
66548+ arg->role_db.num_pointers = argcompat.role_db.num_pointers;
66549+ arg->role_db.num_roles = argcompat.role_db.num_roles;
66550+ arg->role_db.num_domain_children = argcompat.role_db.num_domain_children;
66551+ arg->role_db.num_subjects = argcompat.role_db.num_subjects;
66552+ arg->role_db.num_objects = argcompat.role_db.num_objects;
66553+
66554+ memcpy(&arg->pw, &argcompat.pw, sizeof(arg->pw));
66555+ memcpy(&arg->salt, &argcompat.salt, sizeof(arg->salt));
66556+ memcpy(&arg->sum, &argcompat.sum, sizeof(arg->sum));
66557+ memcpy(&arg->sp_role, &argcompat.sp_role, sizeof(arg->sp_role));
66558+ arg->sprole_pws = compat_ptr(argcompat.sprole_pws);
66559+ arg->segv_device = argcompat.segv_device;
66560+ arg->segv_inode = argcompat.segv_inode;
66561+ arg->segv_uid = argcompat.segv_uid;
66562+ arg->num_sprole_pws = argcompat.num_sprole_pws;
66563+ arg->mode = argcompat.mode;
66564+
66565+ return 0;
66566+}
66567+
66568+int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp)
66569+{
66570+ struct acl_object_label_compat objcompat;
66571+
66572+ if (copy_from_user(&objcompat, userp, sizeof(objcompat)))
66573+ return -EFAULT;
66574+
66575+ obj->filename = compat_ptr(objcompat.filename);
66576+ obj->inode = objcompat.inode;
66577+ obj->device = objcompat.device;
66578+ obj->mode = objcompat.mode;
66579+
66580+ obj->nested = compat_ptr(objcompat.nested);
66581+ obj->globbed = compat_ptr(objcompat.globbed);
66582+
66583+ obj->prev = compat_ptr(objcompat.prev);
66584+ obj->next = compat_ptr(objcompat.next);
66585+
66586+ return 0;
66587+}
66588+
66589+int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp)
66590+{
66591+ unsigned int i;
66592+ struct acl_subject_label_compat subjcompat;
66593+
66594+ if (copy_from_user(&subjcompat, userp, sizeof(subjcompat)))
66595+ return -EFAULT;
66596+
66597+ subj->filename = compat_ptr(subjcompat.filename);
66598+ subj->inode = subjcompat.inode;
66599+ subj->device = subjcompat.device;
66600+ subj->mode = subjcompat.mode;
66601+ subj->cap_mask = subjcompat.cap_mask;
66602+ subj->cap_lower = subjcompat.cap_lower;
66603+ subj->cap_invert_audit = subjcompat.cap_invert_audit;
66604+
66605+ for (i = 0; i < GR_NLIMITS; i++) {
66606+ if (subjcompat.res[i].rlim_cur == COMPAT_RLIM_INFINITY)
66607+ subj->res[i].rlim_cur = RLIM_INFINITY;
66608+ else
66609+ subj->res[i].rlim_cur = subjcompat.res[i].rlim_cur;
66610+ if (subjcompat.res[i].rlim_max == COMPAT_RLIM_INFINITY)
66611+ subj->res[i].rlim_max = RLIM_INFINITY;
66612+ else
66613+ subj->res[i].rlim_max = subjcompat.res[i].rlim_max;
66614+ }
66615+ subj->resmask = subjcompat.resmask;
66616+
66617+ subj->user_trans_type = subjcompat.user_trans_type;
66618+ subj->group_trans_type = subjcompat.group_trans_type;
66619+ subj->user_transitions = compat_ptr(subjcompat.user_transitions);
66620+ subj->group_transitions = compat_ptr(subjcompat.group_transitions);
66621+ subj->user_trans_num = subjcompat.user_trans_num;
66622+ subj->group_trans_num = subjcompat.group_trans_num;
66623+
66624+ memcpy(&subj->sock_families, &subjcompat.sock_families, sizeof(subj->sock_families));
66625+ memcpy(&subj->ip_proto, &subjcompat.ip_proto, sizeof(subj->ip_proto));
66626+ subj->ip_type = subjcompat.ip_type;
66627+ subj->ips = compat_ptr(subjcompat.ips);
66628+ subj->ip_num = subjcompat.ip_num;
66629+ subj->inaddr_any_override = subjcompat.inaddr_any_override;
66630+
66631+ subj->crashes = subjcompat.crashes;
66632+ subj->expires = subjcompat.expires;
66633+
66634+ subj->parent_subject = compat_ptr(subjcompat.parent_subject);
66635+ subj->hash = compat_ptr(subjcompat.hash);
66636+ subj->prev = compat_ptr(subjcompat.prev);
66637+ subj->next = compat_ptr(subjcompat.next);
66638+
66639+ subj->obj_hash = compat_ptr(subjcompat.obj_hash);
66640+ subj->obj_hash_size = subjcompat.obj_hash_size;
66641+ subj->pax_flags = subjcompat.pax_flags;
66642+
66643+ return 0;
66644+}
66645+
66646+int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp)
66647+{
66648+ struct acl_role_label_compat rolecompat;
66649+
66650+ if (copy_from_user(&rolecompat, userp, sizeof(rolecompat)))
66651+ return -EFAULT;
66652+
66653+ role->rolename = compat_ptr(rolecompat.rolename);
66654+ role->uidgid = rolecompat.uidgid;
66655+ role->roletype = rolecompat.roletype;
66656+
66657+ role->auth_attempts = rolecompat.auth_attempts;
66658+ role->expires = rolecompat.expires;
66659+
66660+ role->root_label = compat_ptr(rolecompat.root_label);
66661+ role->hash = compat_ptr(rolecompat.hash);
66662+
66663+ role->prev = compat_ptr(rolecompat.prev);
66664+ role->next = compat_ptr(rolecompat.next);
66665+
66666+ role->transitions = compat_ptr(rolecompat.transitions);
66667+ role->allowed_ips = compat_ptr(rolecompat.allowed_ips);
66668+ role->domain_children = compat_ptr(rolecompat.domain_children);
66669+ role->domain_child_num = rolecompat.domain_child_num;
66670+
66671+ role->umask = rolecompat.umask;
66672+
66673+ role->subj_hash = compat_ptr(rolecompat.subj_hash);
66674+ role->subj_hash_size = rolecompat.subj_hash_size;
66675+
66676+ return 0;
66677+}
66678+
66679+int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
66680+{
66681+ struct role_allowed_ip_compat roleip_compat;
66682+
66683+ if (copy_from_user(&roleip_compat, userp, sizeof(roleip_compat)))
66684+ return -EFAULT;
66685+
66686+ roleip->addr = roleip_compat.addr;
66687+ roleip->netmask = roleip_compat.netmask;
66688+
66689+ roleip->prev = compat_ptr(roleip_compat.prev);
66690+ roleip->next = compat_ptr(roleip_compat.next);
66691+
66692+ return 0;
66693+}
66694+
66695+int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp)
66696+{
66697+ struct role_transition_compat trans_compat;
66698+
66699+ if (copy_from_user(&trans_compat, userp, sizeof(trans_compat)))
66700+ return -EFAULT;
66701+
66702+ trans->rolename = compat_ptr(trans_compat.rolename);
66703+
66704+ trans->prev = compat_ptr(trans_compat.prev);
66705+ trans->next = compat_ptr(trans_compat.next);
66706+
66707+ return 0;
66709+}
66710+
66711+int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
66712+{
66713+ struct gr_hash_struct_compat hash_compat;
66714+
66715+ if (copy_from_user(&hash_compat, userp, sizeof(hash_compat)))
66716+ return -EFAULT;
66717+
66718+ hash->table = compat_ptr(hash_compat.table);
66719+ hash->nametable = compat_ptr(hash_compat.nametable);
66720+ hash->first = compat_ptr(hash_compat.first);
66721+
66722+ hash->table_size = hash_compat.table_size;
66723+ hash->used_size = hash_compat.used_size;
66724+
66725+ hash->type = hash_compat.type;
66726+
66727+ return 0;
66728+}
66729+
66730+int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp)
66731+{
66732+ compat_uptr_t ptrcompat;
66733+
66734+ if (copy_from_user(&ptrcompat, userp + (idx * sizeof(ptrcompat)), sizeof(ptrcompat)))
66735+ return -EFAULT;
66736+
66737+ *(void **)ptr = compat_ptr(ptrcompat);
66738+
66739+ return 0;
66740+}
66741+
66742+int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp)
66743+{
66744+ struct acl_ip_label_compat ip_compat;
66745+
66746+ if (copy_from_user(&ip_compat, userp, sizeof(ip_compat)))
66747+ return -EFAULT;
66748+
66749+ ip->iface = compat_ptr(ip_compat.iface);
66750+ ip->addr = ip_compat.addr;
66751+ ip->netmask = ip_compat.netmask;
66752+ ip->low = ip_compat.low;
66753+ ip->high = ip_compat.high;
66754+ ip->mode = ip_compat.mode;
66755+ ip->type = ip_compat.type;
66756+
66757+ memcpy(&ip->proto, &ip_compat.proto, sizeof(ip->proto));
66758+
66759+ ip->prev = compat_ptr(ip_compat.prev);
66760+ ip->next = compat_ptr(ip_compat.next);
66761+
66762+ return 0;
66763+}
66764+
66765+int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
66766+{
66767+ struct sprole_pw_compat pw_compat;
66768+
66769+ if (copy_from_user(&pw_compat, (const void *)userp + (sizeof(pw_compat) * idx), sizeof(pw_compat)))
66770+ return -EFAULT;
66771+
66772+ pw->rolename = compat_ptr(pw_compat.rolename);
66773+ memcpy(&pw->salt, pw_compat.salt, sizeof(pw->salt));
66774+ memcpy(&pw->sum, pw_compat.sum, sizeof(pw->sum));
66775+
66776+ return 0;
66777+}
66778+
66779+size_t get_gr_arg_wrapper_size_compat(void)
66780+{
66781+ return sizeof(struct gr_arg_wrapper_compat);
66782+}
66783+
66784diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
66785new file mode 100644
66786index 0000000..a340c17
66787--- /dev/null
66788+++ b/grsecurity/gracl_fs.c
66789@@ -0,0 +1,431 @@
66790+#include <linux/kernel.h>
66791+#include <linux/sched.h>
66792+#include <linux/types.h>
66793+#include <linux/fs.h>
66794+#include <linux/file.h>
66795+#include <linux/stat.h>
66796+#include <linux/grsecurity.h>
66797+#include <linux/grinternal.h>
66798+#include <linux/gracl.h>
66799+
66800+umode_t
66801+gr_acl_umask(void)
66802+{
66803+ if (unlikely(!gr_acl_is_enabled()))
66804+ return 0;
66805+
66806+ return current->role->umask;
66807+}
66808+
66809+__u32
66810+gr_acl_handle_hidden_file(const struct dentry * dentry,
66811+ const struct vfsmount * mnt)
66812+{
66813+ __u32 mode;
66814+
66815+ if (unlikely(!dentry->d_inode))
66816+ return GR_FIND;
66817+
66818+ mode =
66819+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
66820+
66821+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
66822+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
66823+ return mode;
66824+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
66825+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
66826+ return 0;
66827+ } else if (unlikely(!(mode & GR_FIND)))
66828+ return 0;
66829+
66830+ return GR_FIND;
66831+}
66832+
66833+__u32
66834+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
66835+ int acc_mode)
66836+{
66837+ __u32 reqmode = GR_FIND;
66838+ __u32 mode;
66839+
66840+ if (unlikely(!dentry->d_inode))
66841+ return reqmode;
66842+
66843+ if (acc_mode & MAY_APPEND)
66844+ reqmode |= GR_APPEND;
66845+ else if (acc_mode & MAY_WRITE)
66846+ reqmode |= GR_WRITE;
66847+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
66848+ reqmode |= GR_READ;
66849+
66850+ mode =
66851+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
66852+ mnt);
66853+
66854+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
66855+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
66856+ reqmode & GR_READ ? " reading" : "",
66857+ reqmode & GR_WRITE ? " writing" : reqmode &
66858+ GR_APPEND ? " appending" : "");
66859+ return reqmode;
66860+ } else
66861+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
66862+ {
66863+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
66864+ reqmode & GR_READ ? " reading" : "",
66865+ reqmode & GR_WRITE ? " writing" : reqmode &
66866+ GR_APPEND ? " appending" : "");
66867+ return 0;
66868+ } else if (unlikely((mode & reqmode) != reqmode))
66869+ return 0;
66870+
66871+ return reqmode;
66872+}
66873+
66874+__u32
66875+gr_acl_handle_creat(const struct dentry * dentry,
66876+ const struct dentry * p_dentry,
66877+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
66878+ const int imode)
66879+{
66880+ __u32 reqmode = GR_WRITE | GR_CREATE;
66881+ __u32 mode;
66882+
66883+ if (acc_mode & MAY_APPEND)
66884+ reqmode |= GR_APPEND;
66885+ // if a directory was required or the directory already exists, then
66886+ // don't count this open as a read
66887+ if ((acc_mode & MAY_READ) &&
66888+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
66889+ reqmode |= GR_READ;
66890+ if ((open_flags & O_CREAT) &&
66891+ ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
66892+ reqmode |= GR_SETID;
66893+
66894+ mode =
66895+ gr_check_create(dentry, p_dentry, p_mnt,
66896+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
66897+
66898+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
66899+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
66900+ reqmode & GR_READ ? " reading" : "",
66901+ reqmode & GR_WRITE ? " writing" : reqmode &
66902+ GR_APPEND ? " appending" : "");
66903+ return reqmode;
66904+ } else
66905+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
66906+ {
66907+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
66908+ reqmode & GR_READ ? " reading" : "",
66909+ reqmode & GR_WRITE ? " writing" : reqmode &
66910+ GR_APPEND ? " appending" : "");
66911+ return 0;
66912+ } else if (unlikely((mode & reqmode) != reqmode))
66913+ return 0;
66914+
66915+ return reqmode;
66916+}
66917+
66918+__u32
66919+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
66920+ const int fmode)
66921+{
66922+ __u32 mode, reqmode = GR_FIND;
66923+
66924+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
66925+ reqmode |= GR_EXEC;
66926+ if (fmode & S_IWOTH)
66927+ reqmode |= GR_WRITE;
66928+ if (fmode & S_IROTH)
66929+ reqmode |= GR_READ;
66930+
66931+ mode =
66932+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
66933+ mnt);
66934+
66935+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
66936+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
66937+ reqmode & GR_READ ? " reading" : "",
66938+ reqmode & GR_WRITE ? " writing" : "",
66939+ reqmode & GR_EXEC ? " executing" : "");
66940+ return reqmode;
66941+ } else
66942+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
66943+ {
66944+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
66945+ reqmode & GR_READ ? " reading" : "",
66946+ reqmode & GR_WRITE ? " writing" : "",
66947+ reqmode & GR_EXEC ? " executing" : "");
66948+ return 0;
66949+ } else if (unlikely((mode & reqmode) != reqmode))
66950+ return 0;
66951+
66952+ return reqmode;
66953+}
66954+
66955+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
66956+{
66957+ __u32 mode;
66958+
66959+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
66960+
66961+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
66962+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
66963+ return mode;
66964+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
66965+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
66966+ return 0;
66967+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
66968+ return 0;
66969+
66970+ return (reqmode);
66971+}
66972+
66973+__u32
66974+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
66975+{
66976+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
66977+}
66978+
66979+__u32
66980+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
66981+{
66982+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
66983+}
66984+
66985+__u32
66986+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
66987+{
66988+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
66989+}
66990+
66991+__u32
66992+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
66993+{
66994+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
66995+}
66996+
66997+__u32
66998+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
66999+ umode_t *modeptr)
67000+{
67001+ umode_t mode;
67002+
67003+ *modeptr &= ~gr_acl_umask();
67004+ mode = *modeptr;
67005+
67006+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
67007+ return 1;
67008+
67009+ if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) &&
67010+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
67011+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
67012+ GR_CHMOD_ACL_MSG);
67013+ } else {
67014+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
67015+ }
67016+}
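
/*
 * Editor's note: the chmod handler above (and the creat/mknod handlers
 * in this file) treat a mode as privilege-granting when it sets setuid,
 * or sets setgid together with group-execute; setgid without S_IXGRP
 * historically denotes mandatory locking rather than setgid-on-exec.
 * The predicate in isolation (userspace headers for self-containment):
 */
#include <sys/stat.h>

static int mode_grants_id(mode_t mode)
{
	return (mode & S_ISUID) ||
	       ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP));
}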
67017+
67018+__u32
67019+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
67020+{
67021+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
67022+}
67023+
67024+__u32
67025+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
67026+{
67027+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
67028+}
67029+
67030+__u32
67031+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
67032+{
67033+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
67034+}
67035+
67036+__u32
67037+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
67038+{
67039+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
67040+ GR_UNIXCONNECT_ACL_MSG);
67041+}
67042+
67043+/* hardlinks require at minimum create and link permission;
67044+   any additional privilege required is based on the
67045+   privilege of the file being linked to
67046+*/
67047+__u32
67048+gr_acl_handle_link(const struct dentry * new_dentry,
67049+ const struct dentry * parent_dentry,
67050+ const struct vfsmount * parent_mnt,
67051+ const struct dentry * old_dentry,
67052+ const struct vfsmount * old_mnt, const struct filename *to)
67053+{
67054+ __u32 mode;
67055+ __u32 needmode = GR_CREATE | GR_LINK;
67056+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
67057+
67058+ mode =
67059+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
67060+ old_mnt);
67061+
67062+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
67063+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
67064+ return mode;
67065+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
67066+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
67067+ return 0;
67068+ } else if (unlikely((mode & needmode) != needmode))
67069+ return 0;
67070+
67071+ return 1;
67072+}
67073+
67074+__u32
67075+gr_acl_handle_symlink(const struct dentry * new_dentry,
67076+ const struct dentry * parent_dentry,
67077+ const struct vfsmount * parent_mnt, const struct filename *from)
67078+{
67079+ __u32 needmode = GR_WRITE | GR_CREATE;
67080+ __u32 mode;
67081+
67082+ mode =
67083+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
67084+ GR_CREATE | GR_AUDIT_CREATE |
67085+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
67086+
67087+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
67088+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
67089+ return mode;
67090+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
67091+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
67092+ return 0;
67093+ } else if (unlikely((mode & needmode) != needmode))
67094+ return 0;
67095+
67096+ return (GR_WRITE | GR_CREATE);
67097+}
67098+
67099+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
67100+{
67101+ __u32 mode;
67102+
67103+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
67104+
67105+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
67106+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
67107+ return mode;
67108+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
67109+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
67110+ return 0;
67111+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
67112+ return 0;
67113+
67114+ return (reqmode);
67115+}
67116+
67117+__u32
67118+gr_acl_handle_mknod(const struct dentry * new_dentry,
67119+ const struct dentry * parent_dentry,
67120+ const struct vfsmount * parent_mnt,
67121+ const int mode)
67122+{
67123+ __u32 reqmode = GR_WRITE | GR_CREATE;
67124+ if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
67125+ reqmode |= GR_SETID;
67126+
67127+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
67128+ reqmode, GR_MKNOD_ACL_MSG);
67129+}
67130+
67131+__u32
67132+gr_acl_handle_mkdir(const struct dentry *new_dentry,
67133+ const struct dentry *parent_dentry,
67134+ const struct vfsmount *parent_mnt)
67135+{
67136+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
67137+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
67138+}
67139+
67140+#define RENAME_CHECK_SUCCESS(old, new) \
67141+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
67142+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
67143+
67144+int
67145+gr_acl_handle_rename(struct dentry *new_dentry,
67146+ struct dentry *parent_dentry,
67147+ const struct vfsmount *parent_mnt,
67148+ struct dentry *old_dentry,
67149+ struct inode *old_parent_inode,
67150+ struct vfsmount *old_mnt, const struct filename *newname)
67151+{
67152+ __u32 comp1, comp2;
67153+ int error = 0;
67154+
67155+ if (unlikely(!gr_acl_is_enabled()))
67156+ return 0;
67157+
67158+ if (!new_dentry->d_inode) {
67159+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
67160+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
67161+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
67162+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
67163+ GR_DELETE | GR_AUDIT_DELETE |
67164+ GR_AUDIT_READ | GR_AUDIT_WRITE |
67165+ GR_SUPPRESS, old_mnt);
67166+ } else {
67167+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
67168+ GR_CREATE | GR_DELETE |
67169+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
67170+ GR_AUDIT_READ | GR_AUDIT_WRITE |
67171+ GR_SUPPRESS, parent_mnt);
67172+ comp2 =
67173+ gr_search_file(old_dentry,
67174+ GR_READ | GR_WRITE | GR_AUDIT_READ |
67175+ GR_DELETE | GR_AUDIT_DELETE |
67176+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
67177+ }
67178+
67179+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
67180+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
67181+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
67182+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
67183+ && !(comp2 & GR_SUPPRESS)) {
67184+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
67185+ error = -EACCES;
67186+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
67187+ error = -EACCES;
67188+
67189+ return error;
67190+}
67191+
67192+void
67193+gr_acl_handle_exit(void)
67194+{
67195+ u16 id;
67196+ char *rolename;
67197+
67198+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
67199+ !(current->role->roletype & GR_ROLE_PERSIST))) {
67200+ id = current->acl_role_id;
67201+ rolename = current->role->rolename;
67202+ gr_set_acls(1);
67203+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
67204+ }
67205+
67206+ gr_put_exec_file(current);
67207+ return;
67208+}
67209+
67210+int
67211+gr_acl_handle_procpidmem(const struct task_struct *task)
67212+{
67213+ if (unlikely(!gr_acl_is_enabled()))
67214+ return 0;
67215+
67216+ if (task != current && task->acl->mode & GR_PROTPROCFD)
67217+ return -EACCES;
67218+
67219+ return 0;
67220+}
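
The chmod and mknod handlers above gate the GR_SETID requirement on one mode test: setuid always counts, while setgid counts only together with group-execute (setgid without S_IXGRP denotes mandatory locking, not a privilege grant). A minimal userspace sketch of that test, assuming only the standard <sys/stat.h> mode macros and not any kernel code:

/* Standalone illustration of the setid test used by the chmod/mknod
 * handlers above: setuid always counts, setgid only together with
 * group-execute (setgid without S_IXGRP means mandatory locking). */
#include <stdio.h>
#include <sys/stat.h>

static int needs_setid_check(mode_t mode)
{
	return (mode & S_ISUID) ||
	       ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP));
}

int main(void)
{
	printf("04755 -> %d\n", needs_setid_check(04755)); /* 1: setuid */
	printf("02755 -> %d\n", needs_setid_check(02755)); /* 1: setgid + group exec */
	printf("02644 -> %d\n", needs_setid_check(02644)); /* 0: mandatory locking */
	return 0;
}
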
67221diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
67222new file mode 100644
67223index 0000000..8132048
67224--- /dev/null
67225+++ b/grsecurity/gracl_ip.c
67226@@ -0,0 +1,387 @@
67227+#include <linux/kernel.h>
67228+#include <asm/uaccess.h>
67229+#include <asm/errno.h>
67230+#include <net/sock.h>
67231+#include <linux/file.h>
67232+#include <linux/fs.h>
67233+#include <linux/net.h>
67234+#include <linux/in.h>
67235+#include <linux/skbuff.h>
67236+#include <linux/ip.h>
67237+#include <linux/udp.h>
67238+#include <linux/types.h>
67239+#include <linux/sched.h>
67240+#include <linux/netdevice.h>
67241+#include <linux/inetdevice.h>
67242+#include <linux/gracl.h>
67243+#include <linux/grsecurity.h>
67244+#include <linux/grinternal.h>
67245+
67246+#define GR_BIND 0x01
67247+#define GR_CONNECT 0x02
67248+#define GR_INVERT 0x04
67249+#define GR_BINDOVERRIDE 0x08
67250+#define GR_CONNECTOVERRIDE 0x10
67251+#define GR_SOCK_FAMILY 0x20
67252+
67253+static const char * gr_protocols[IPPROTO_MAX] = {
67254+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
67255+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
67256+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
67257+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
67258+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
67259+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
67260+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
67261+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
67262+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
67263+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
67264+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
67265+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
67266+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
67267+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
67268+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
67269+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
67270+	"sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unknown:134", "unknown:135",
67271+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
67272+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
67273+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
67274+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
67275+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
67276+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
67277+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
67278+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
67279+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
67280+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
67281+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
67282+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
67283+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
67284+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
67285+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
67286+ };
67287+
67288+static const char * gr_socktypes[SOCK_MAX] = {
67289+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
67290+ "unknown:7", "unknown:8", "unknown:9", "packet"
67291+ };
67292+
67293+static const char * gr_sockfamilies[AF_MAX+1] = {
67294+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
67295+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
67296+	"econet", "atmsvc", "rds", "sna", "irda", "pppox", "wanpipe", "llc", "fam_27", "fam_28",
67297+	"tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "caif"
67298+ };
67299+
67300+const char *
67301+gr_proto_to_name(unsigned char proto)
67302+{
67303+ return gr_protocols[proto];
67304+}
67305+
67306+const char *
67307+gr_socktype_to_name(unsigned char type)
67308+{
67309+ return gr_socktypes[type];
67310+}
67311+
67312+const char *
67313+gr_sockfamily_to_name(unsigned char family)
67314+{
67315+ return gr_sockfamilies[family];
67316+}
67317+
67318+int
67319+gr_search_socket(const int domain, const int type, const int protocol)
67320+{
67321+ struct acl_subject_label *curr;
67322+ const struct cred *cred = current_cred();
67323+
67324+ if (unlikely(!gr_acl_is_enabled()))
67325+ goto exit;
67326+
67327+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
67328+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
67329+ goto exit; // let the kernel handle it
67330+
67331+ curr = current->acl;
67332+
67333+ if (curr->sock_families[domain / 32] & (1U << (domain % 32))) {
67334+	/* the family is allowed; if this is PF_INET, allow it only if
67335+	   the extra sock type/protocol checks pass */
67336+ if (domain == PF_INET)
67337+ goto inet_check;
67338+ goto exit;
67339+ } else {
67340+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
67341+ __u32 fakeip = 0;
67342+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
67343+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
67344+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
67345+ gr_to_filename(current->exec_file->f_path.dentry,
67346+ current->exec_file->f_path.mnt) :
67347+ curr->filename, curr->filename,
67348+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
67349+ &current->signal->saved_ip);
67350+ goto exit;
67351+ }
67352+ goto exit_fail;
67353+ }
67354+
67355+inet_check:
67356+ /* the rest of this checking is for IPv4 only */
67357+ if (!curr->ips)
67358+ goto exit;
67359+
67360+ if ((curr->ip_type & (1U << type)) &&
67361+ (curr->ip_proto[protocol / 32] & (1U << (protocol % 32))))
67362+ goto exit;
67363+
67364+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
67365+	/* we don't place ACLs on raw sockets, and sometimes
67366+	   dgram/ip sockets are opened for ioctl rather than
67367+	   bind/connect, so we'll fake a bind learn log */
67368+ if (type == SOCK_RAW || type == SOCK_PACKET) {
67369+ __u32 fakeip = 0;
67370+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
67371+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
67372+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
67373+ gr_to_filename(current->exec_file->f_path.dentry,
67374+ current->exec_file->f_path.mnt) :
67375+ curr->filename, curr->filename,
67376+ &fakeip, 0, type,
67377+ protocol, GR_CONNECT, &current->signal->saved_ip);
67378+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
67379+ __u32 fakeip = 0;
67380+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
67381+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
67382+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
67383+ gr_to_filename(current->exec_file->f_path.dentry,
67384+ current->exec_file->f_path.mnt) :
67385+ curr->filename, curr->filename,
67386+ &fakeip, 0, type,
67387+ protocol, GR_BIND, &current->signal->saved_ip);
67388+ }
67389+ /* we'll log when they use connect or bind */
67390+ goto exit;
67391+ }
67392+
67393+exit_fail:
67394+ if (domain == PF_INET)
67395+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
67396+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
67397+ else
67398+#ifndef CONFIG_IPV6
67399+ if (domain != PF_INET6)
67400+#endif
67401+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
67402+ gr_socktype_to_name(type), protocol);
67403+
67404+ return 0;
67405+exit:
67406+ return 1;
67407+}
67408+
67409+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
67410+{
67411+ if ((ip->mode & mode) &&
67412+ (ip_port >= ip->low) &&
67413+ (ip_port <= ip->high) &&
67414+ ((ntohl(ip_addr) & our_netmask) ==
67415+ (ntohl(our_addr) & our_netmask))
67416+ && (ip->proto[protocol / 32] & (1U << (protocol % 32)))
67417+ && (ip->type & (1U << type))) {
67418+ if (ip->mode & GR_INVERT)
67419+ return 2; // specifically denied
67420+ else
67421+ return 1; // allowed
67422+ }
67423+
67424+ return 0; // not specifically allowed, may continue parsing
67425+}
67426+
67427+static int
67428+gr_search_connectbind(const int full_mode, struct sock *sk,
67429+ struct sockaddr_in *addr, const int type)
67430+{
67431+ char iface[IFNAMSIZ] = {0};
67432+ struct acl_subject_label *curr;
67433+ struct acl_ip_label *ip;
67434+ struct inet_sock *isk;
67435+ struct net_device *dev;
67436+ struct in_device *idev;
67437+ unsigned long i;
67438+ int ret;
67439+ int mode = full_mode & (GR_BIND | GR_CONNECT);
67440+ __u32 ip_addr = 0;
67441+ __u32 our_addr;
67442+ __u32 our_netmask;
67443+ char *p;
67444+ __u16 ip_port = 0;
67445+ const struct cred *cred = current_cred();
67446+
67447+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
67448+ return 0;
67449+
67450+ curr = current->acl;
67451+ isk = inet_sk(sk);
67452+
67453+	/* INADDR_ANY override for binds; inaddr_any_override is already in network order */
67454+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
67455+ addr->sin_addr.s_addr = curr->inaddr_any_override;
67456+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
67457+ struct sockaddr_in saddr;
67458+ int err;
67459+
67460+ saddr.sin_family = AF_INET;
67461+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
67462+ saddr.sin_port = isk->inet_sport;
67463+
67464+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
67465+ if (err)
67466+ return err;
67467+
67468+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
67469+ if (err)
67470+ return err;
67471+ }
67472+
67473+ if (!curr->ips)
67474+ return 0;
67475+
67476+ ip_addr = addr->sin_addr.s_addr;
67477+ ip_port = ntohs(addr->sin_port);
67478+
67479+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
67480+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
67481+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
67482+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
67483+ gr_to_filename(current->exec_file->f_path.dentry,
67484+ current->exec_file->f_path.mnt) :
67485+ curr->filename, curr->filename,
67486+ &ip_addr, ip_port, type,
67487+ sk->sk_protocol, mode, &current->signal->saved_ip);
67488+ return 0;
67489+ }
67490+
67491+ for (i = 0; i < curr->ip_num; i++) {
67492+ ip = *(curr->ips + i);
67493+ if (ip->iface != NULL) {
67494+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
67495+ p = strchr(iface, ':');
67496+ if (p != NULL)
67497+ *p = '\0';
67498+ dev = dev_get_by_name(sock_net(sk), iface);
67499+ if (dev == NULL)
67500+ continue;
67501+ idev = in_dev_get(dev);
67502+ if (idev == NULL) {
67503+ dev_put(dev);
67504+ continue;
67505+ }
67506+ rcu_read_lock();
67507+ for_ifa(idev) {
67508+ if (!strcmp(ip->iface, ifa->ifa_label)) {
67509+ our_addr = ifa->ifa_address;
67510+ our_netmask = 0xffffffff;
67511+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
67512+ if (ret == 1) {
67513+ rcu_read_unlock();
67514+ in_dev_put(idev);
67515+ dev_put(dev);
67516+ return 0;
67517+ } else if (ret == 2) {
67518+ rcu_read_unlock();
67519+ in_dev_put(idev);
67520+ dev_put(dev);
67521+ goto denied;
67522+ }
67523+ }
67524+ } endfor_ifa(idev);
67525+ rcu_read_unlock();
67526+ in_dev_put(idev);
67527+ dev_put(dev);
67528+ } else {
67529+ our_addr = ip->addr;
67530+ our_netmask = ip->netmask;
67531+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
67532+ if (ret == 1)
67533+ return 0;
67534+ else if (ret == 2)
67535+ goto denied;
67536+ }
67537+ }
67538+
67539+denied:
67540+ if (mode == GR_BIND)
67541+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
67542+ else if (mode == GR_CONNECT)
67543+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
67544+
67545+ return -EACCES;
67546+}
67547+
67548+int
67549+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
67550+{
67551+ /* always allow disconnection of dgram sockets with connect */
67552+ if (addr->sin_family == AF_UNSPEC)
67553+ return 0;
67554+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
67555+}
67556+
67557+int
67558+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
67559+{
67560+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
67561+}
67562+
67563+int gr_search_listen(struct socket *sock)
67564+{
67565+ struct sock *sk = sock->sk;
67566+ struct sockaddr_in addr;
67567+
67568+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
67569+ addr.sin_port = inet_sk(sk)->inet_sport;
67570+
67571+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
67572+}
67573+
67574+int gr_search_accept(struct socket *sock)
67575+{
67576+ struct sock *sk = sock->sk;
67577+ struct sockaddr_in addr;
67578+
67579+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
67580+ addr.sin_port = inet_sk(sk)->inet_sport;
67581+
67582+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
67583+}
67584+
67585+int
67586+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
67587+{
67588+ if (addr)
67589+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
67590+ else {
67591+ struct sockaddr_in sin;
67592+ const struct inet_sock *inet = inet_sk(sk);
67593+
67594+ sin.sin_addr.s_addr = inet->inet_daddr;
67595+ sin.sin_port = inet->inet_dport;
67596+
67597+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
67598+ }
67599+}
67600+
67601+int
67602+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
67603+{
67604+ struct sockaddr_in sin;
67605+
67606+ if (unlikely(skb->len < sizeof (struct udphdr)))
67607+ return 0; // skip this packet
67608+
67609+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
67610+ sin.sin_port = udp_hdr(skb)->source;
67611+
67612+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
67613+}
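
gr_search_socket() and check_ip_policy() above test set membership with per-32-bit-word bitmaps (sock_families[], ip_proto[], proto[]): bit N lives in word N/32 at position N%32. A standalone sketch of that scheme, with hypothetical helper names:

/* Sketch of the 32-bit-word bitmap scheme used above: bit N of the
 * set lives in word N/32 at bit position N%32. */
#include <stdio.h>

#define BITMAP_WORDS(n) (((n) + 31) / 32)

static void set_bit32(unsigned int *map, int n)
{
	map[n / 32] |= 1U << (n % 32);
}

static int test_bit32(const unsigned int *map, int n)
{
	return !!(map[n / 32] & (1U << (n % 32)));
}

int main(void)
{
	unsigned int proto_map[BITMAP_WORDS(256)] = {0};

	set_bit32(proto_map, 6);   /* allow tcp  (IPPROTO_TCP == 6)  */
	set_bit32(proto_map, 17);  /* allow udp  (IPPROTO_UDP == 17) */
	printf("tcp allowed:  %d\n", test_bit32(proto_map, 6)); /* 1 */
	printf("icmp allowed: %d\n", test_bit32(proto_map, 1)); /* 0 */
	return 0;
}
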
67614diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
67615new file mode 100644
67616index 0000000..25f54ef
67617--- /dev/null
67618+++ b/grsecurity/gracl_learn.c
67619@@ -0,0 +1,207 @@
67620+#include <linux/kernel.h>
67621+#include <linux/mm.h>
67622+#include <linux/sched.h>
67623+#include <linux/poll.h>
67624+#include <linux/string.h>
67625+#include <linux/file.h>
67626+#include <linux/types.h>
67627+#include <linux/vmalloc.h>
67628+#include <linux/grinternal.h>
67629+
67630+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
67631+ size_t count, loff_t *ppos);
67632+extern int gr_acl_is_enabled(void);
67633+
67634+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
67635+static int gr_learn_attached;
67636+
67637+/* use a 512k buffer */
67638+#define LEARN_BUFFER_SIZE (512 * 1024)
67639+
67640+static DEFINE_SPINLOCK(gr_learn_lock);
67641+static DEFINE_MUTEX(gr_learn_user_mutex);
67642+
67643+/* we need to maintain two buffers: the grlearn reader context takes a
67644+   mutex around the copy to userspace, while the other kernel contexts
67645+   take a spinlock when copying into the buffer, since they cannot sleep
67646+*/
67647+static char *learn_buffer;
67648+static char *learn_buffer_user;
67649+static int learn_buffer_len;
67650+static int learn_buffer_user_len;
67651+
67652+static ssize_t
67653+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
67654+{
67655+ DECLARE_WAITQUEUE(wait, current);
67656+ ssize_t retval = 0;
67657+
67658+ add_wait_queue(&learn_wait, &wait);
67659+ set_current_state(TASK_INTERRUPTIBLE);
67660+ do {
67661+ mutex_lock(&gr_learn_user_mutex);
67662+ spin_lock(&gr_learn_lock);
67663+ if (learn_buffer_len)
67664+ break;
67665+ spin_unlock(&gr_learn_lock);
67666+ mutex_unlock(&gr_learn_user_mutex);
67667+ if (file->f_flags & O_NONBLOCK) {
67668+ retval = -EAGAIN;
67669+ goto out;
67670+ }
67671+ if (signal_pending(current)) {
67672+ retval = -ERESTARTSYS;
67673+ goto out;
67674+ }
67675+
67676+ schedule();
67677+ } while (1);
67678+
67679+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
67680+ learn_buffer_user_len = learn_buffer_len;
67681+ retval = learn_buffer_len;
67682+ learn_buffer_len = 0;
67683+
67684+ spin_unlock(&gr_learn_lock);
67685+
67686+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
67687+ retval = -EFAULT;
67688+
67689+ mutex_unlock(&gr_learn_user_mutex);
67690+out:
67691+ set_current_state(TASK_RUNNING);
67692+ remove_wait_queue(&learn_wait, &wait);
67693+ return retval;
67694+}
67695+
67696+static unsigned int
67697+poll_learn(struct file * file, poll_table * wait)
67698+{
67699+ poll_wait(file, &learn_wait, wait);
67700+
67701+ if (learn_buffer_len)
67702+ return (POLLIN | POLLRDNORM);
67703+
67704+ return 0;
67705+}
67706+
67707+void
67708+gr_clear_learn_entries(void)
67709+{
67710+ char *tmp;
67711+
67712+ mutex_lock(&gr_learn_user_mutex);
67713+ spin_lock(&gr_learn_lock);
67714+ tmp = learn_buffer;
67715+ learn_buffer = NULL;
67716+ spin_unlock(&gr_learn_lock);
67717+ if (tmp)
67718+ vfree(tmp);
67719+ if (learn_buffer_user != NULL) {
67720+ vfree(learn_buffer_user);
67721+ learn_buffer_user = NULL;
67722+ }
67723+ learn_buffer_len = 0;
67724+ mutex_unlock(&gr_learn_user_mutex);
67725+
67726+ return;
67727+}
67728+
67729+void
67730+gr_add_learn_entry(const char *fmt, ...)
67731+{
67732+ va_list args;
67733+ unsigned int len;
67734+
67735+ if (!gr_learn_attached)
67736+ return;
67737+
67738+ spin_lock(&gr_learn_lock);
67739+
67740+ /* leave a gap at the end so we know when it's "full" but don't have to
67741+ compute the exact length of the string we're trying to append
67742+ */
67743+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
67744+ spin_unlock(&gr_learn_lock);
67745+ wake_up_interruptible(&learn_wait);
67746+ return;
67747+ }
67748+ if (learn_buffer == NULL) {
67749+ spin_unlock(&gr_learn_lock);
67750+ return;
67751+ }
67752+
67753+ va_start(args, fmt);
67754+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
67755+ va_end(args);
67756+
67757+ learn_buffer_len += len + 1;
67758+
67759+ spin_unlock(&gr_learn_lock);
67760+ wake_up_interruptible(&learn_wait);
67761+
67762+ return;
67763+}
67764+
67765+static int
67766+open_learn(struct inode *inode, struct file *file)
67767+{
67768+ if (file->f_mode & FMODE_READ && gr_learn_attached)
67769+ return -EBUSY;
67770+ if (file->f_mode & FMODE_READ) {
67771+ int retval = 0;
67772+ mutex_lock(&gr_learn_user_mutex);
67773+ if (learn_buffer == NULL)
67774+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
67775+ if (learn_buffer_user == NULL)
67776+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
67777+ if (learn_buffer == NULL) {
67778+ retval = -ENOMEM;
67779+ goto out_error;
67780+ }
67781+ if (learn_buffer_user == NULL) {
67782+ retval = -ENOMEM;
67783+ goto out_error;
67784+ }
67785+ learn_buffer_len = 0;
67786+ learn_buffer_user_len = 0;
67787+ gr_learn_attached = 1;
67788+out_error:
67789+ mutex_unlock(&gr_learn_user_mutex);
67790+ return retval;
67791+ }
67792+ return 0;
67793+}
67794+
67795+static int
67796+close_learn(struct inode *inode, struct file *file)
67797+{
67798+ if (file->f_mode & FMODE_READ) {
67799+ char *tmp = NULL;
67800+ mutex_lock(&gr_learn_user_mutex);
67801+ spin_lock(&gr_learn_lock);
67802+ tmp = learn_buffer;
67803+ learn_buffer = NULL;
67804+ spin_unlock(&gr_learn_lock);
67805+ if (tmp)
67806+ vfree(tmp);
67807+ if (learn_buffer_user != NULL) {
67808+ vfree(learn_buffer_user);
67809+ learn_buffer_user = NULL;
67810+ }
67811+ learn_buffer_len = 0;
67812+ learn_buffer_user_len = 0;
67813+ gr_learn_attached = 0;
67814+ mutex_unlock(&gr_learn_user_mutex);
67815+ }
67816+
67817+ return 0;
67818+}
67819+
67820+const struct file_operations grsec_fops = {
67821+ .read = read_learn,
67822+ .write = write_grsec_handler,
67823+ .open = open_learn,
67824+ .release = close_learn,
67825+ .poll = poll_learn,
67826+};
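
The learn device above pairs a spinlock-guarded kernel buffer (writers may be in atomic context) with a second buffer copied out under a mutex, so the slow copy_to_user() never runs while the spinlock is held. A userspace analog of that double-buffer handoff, a sketch using POSIX spinlocks rather than the kernel primitives:

/* Userspace analog of the double-buffer handoff above: producers
 * append under a spinlock (never sleeping), the single consumer
 * performs only a fast memcpy under that lock and does its slow
 * work outside it.  Hypothetical sketch, not the kernel code. */
#include <pthread.h>
#include <string.h>
#include <stdio.h>

#define BUF_SZ 4096
static pthread_spinlock_t lock;
static char buf[BUF_SZ];
static int buf_len;

static void producer_append(const char *msg)
{
	pthread_spin_lock(&lock);
	int room = BUF_SZ - buf_len;
	int n = snprintf(buf + buf_len, room, "%s", msg);
	if (n >= 0 && n < room)
		buf_len += n + 1;  /* keep the NUL, as the kernel side does */
	pthread_spin_unlock(&lock);
}

static int consumer_drain(char *out, int out_sz)
{
	pthread_spin_lock(&lock);
	int len = buf_len < out_sz ? buf_len : out_sz;
	memcpy(out, buf, len);     /* fast copy under the lock */
	buf_len = 0;
	pthread_spin_unlock(&lock);
	return len;                /* slow consumer work happens out here */
}

int main(void)
{
	char out[BUF_SZ];

	pthread_spin_init(&lock, 0);
	producer_append("learn entry 1");
	printf("drained %d bytes\n", consumer_drain(out, sizeof(out)));
	return 0;
}
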
67827diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
67828new file mode 100644
67829index 0000000..39645c9
67830--- /dev/null
67831+++ b/grsecurity/gracl_res.c
67832@@ -0,0 +1,68 @@
67833+#include <linux/kernel.h>
67834+#include <linux/sched.h>
67835+#include <linux/gracl.h>
67836+#include <linux/grinternal.h>
67837+
67838+static const char *restab_log[] = {
67839+ [RLIMIT_CPU] = "RLIMIT_CPU",
67840+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
67841+ [RLIMIT_DATA] = "RLIMIT_DATA",
67842+ [RLIMIT_STACK] = "RLIMIT_STACK",
67843+ [RLIMIT_CORE] = "RLIMIT_CORE",
67844+ [RLIMIT_RSS] = "RLIMIT_RSS",
67845+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
67846+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
67847+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
67848+ [RLIMIT_AS] = "RLIMIT_AS",
67849+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
67850+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
67851+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
67852+ [RLIMIT_NICE] = "RLIMIT_NICE",
67853+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
67854+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
67855+ [GR_CRASH_RES] = "RLIMIT_CRASH"
67856+};
67857+
67858+void
67859+gr_log_resource(const struct task_struct *task,
67860+ const int res, const unsigned long wanted, const int gt)
67861+{
67862+ const struct cred *cred;
67863+ unsigned long rlim;
67864+
67865+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
67866+ return;
67867+
67868+	// resource not yet supported
67869+ if (unlikely(!restab_log[res]))
67870+ return;
67871+
67872+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
67873+ rlim = task_rlimit_max(task, res);
67874+ else
67875+ rlim = task_rlimit(task, res);
67876+
67877+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
67878+ return;
67879+
67880+ rcu_read_lock();
67881+ cred = __task_cred(task);
67882+
67883+ if (res == RLIMIT_NPROC &&
67884+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
67885+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
67886+ goto out_rcu_unlock;
67887+ else if (res == RLIMIT_MEMLOCK &&
67888+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
67889+ goto out_rcu_unlock;
67890+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
67891+ goto out_rcu_unlock;
67892+ rcu_read_unlock();
67893+
67894+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
67895+
67896+ return;
67897+out_rcu_unlock:
67898+ rcu_read_unlock();
67899+ return;
67900+}
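
The gt flag above selects strict versus inclusive comparison against the rlimit: with gt set, only requests strictly above the limit are logged; without it, reaching the limit already triggers. A worked sketch of just that predicate:

/* Sketch of the comparison in gr_log_resource(): with gt set the
 * request must strictly exceed the limit to be logged; without gt,
 * hitting the limit exactly already counts. */
#include <stdio.h>

static int over_limit(unsigned long wanted, unsigned long rlim, int gt)
{
	return !(gt ? wanted <= rlim : wanted < rlim);
}

int main(void)
{
	printf("%d\n", over_limit(100, 100, 1)); /* 0: at the limit, gt mode */
	printf("%d\n", over_limit(100, 100, 0)); /* 1: reaching it triggers  */
	return 0;
}
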
67901diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
67902new file mode 100644
67903index 0000000..3c38bfe
67904--- /dev/null
67905+++ b/grsecurity/gracl_segv.c
67906@@ -0,0 +1,305 @@
67907+#include <linux/kernel.h>
67908+#include <linux/mm.h>
67909+#include <asm/uaccess.h>
67910+#include <asm/errno.h>
67911+#include <asm/mman.h>
67912+#include <net/sock.h>
67913+#include <linux/file.h>
67914+#include <linux/fs.h>
67915+#include <linux/net.h>
67916+#include <linux/in.h>
67917+#include <linux/slab.h>
67918+#include <linux/types.h>
67919+#include <linux/sched.h>
67920+#include <linux/timer.h>
67921+#include <linux/gracl.h>
67922+#include <linux/grsecurity.h>
67923+#include <linux/grinternal.h>
67924+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
67925+#include <linux/magic.h>
67926+#include <linux/pagemap.h>
67927+#include "../fs/btrfs/async-thread.h"
67928+#include "../fs/btrfs/ctree.h"
67929+#include "../fs/btrfs/btrfs_inode.h"
67930+#endif
67931+
67932+static struct crash_uid *uid_set;
67933+static unsigned short uid_used;
67934+static DEFINE_SPINLOCK(gr_uid_lock);
67935+extern rwlock_t gr_inode_lock;
67936+extern struct acl_subject_label *
67937+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
67938+ struct acl_role_label *role);
67939+
67940+static inline dev_t __get_dev(const struct dentry *dentry)
67941+{
67942+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
67943+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
67944+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
67945+ else
67946+#endif
67947+ return dentry->d_sb->s_dev;
67948+}
67949+
67950+int
67951+gr_init_uidset(void)
67952+{
67953+ uid_set =
67954+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
67955+ uid_used = 0;
67956+
67957+ return uid_set ? 1 : 0;
67958+}
67959+
67960+void
67961+gr_free_uidset(void)
67962+{
67963+ if (uid_set)
67964+ kfree(uid_set);
67965+
67966+ return;
67967+}
67968+
67969+int
67970+gr_find_uid(const uid_t uid)
67971+{
67972+ struct crash_uid *tmp = uid_set;
67973+ uid_t buid;
67974+ int low = 0, high = uid_used - 1, mid;
67975+
67976+ while (high >= low) {
67977+ mid = (low + high) >> 1;
67978+ buid = tmp[mid].uid;
67979+ if (buid == uid)
67980+ return mid;
67981+ if (buid > uid)
67982+ high = mid - 1;
67983+ if (buid < uid)
67984+ low = mid + 1;
67985+ }
67986+
67987+ return -1;
67988+}
67989+
67990+static __inline__ void
67991+gr_insertsort(void)
67992+{
67993+ unsigned short i, j;
67994+ struct crash_uid index;
67995+
67996+ for (i = 1; i < uid_used; i++) {
67997+ index = uid_set[i];
67998+ j = i;
67999+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
68000+ uid_set[j] = uid_set[j - 1];
68001+ j--;
68002+ }
68003+ uid_set[j] = index;
68004+ }
68005+
68006+ return;
68007+}
68008+
68009+static __inline__ void
68010+gr_insert_uid(const kuid_t kuid, const unsigned long expires)
68011+{
68012+ int loc;
68013+ uid_t uid = GR_GLOBAL_UID(kuid);
68014+
68015+ if (uid_used == GR_UIDTABLE_MAX)
68016+ return;
68017+
68018+ loc = gr_find_uid(uid);
68019+
68020+ if (loc >= 0) {
68021+ uid_set[loc].expires = expires;
68022+ return;
68023+ }
68024+
68025+ uid_set[uid_used].uid = uid;
68026+ uid_set[uid_used].expires = expires;
68027+ uid_used++;
68028+
68029+ gr_insertsort();
68030+
68031+ return;
68032+}
68033+
68034+void
68035+gr_remove_uid(const unsigned short loc)
68036+{
68037+ unsigned short i;
68038+
68039+ for (i = loc + 1; i < uid_used; i++)
68040+ uid_set[i - 1] = uid_set[i];
68041+
68042+ uid_used--;
68043+
68044+ return;
68045+}
68046+
68047+int
68048+gr_check_crash_uid(const kuid_t kuid)
68049+{
68050+ int loc;
68051+ int ret = 0;
68052+ uid_t uid;
68053+
68054+ if (unlikely(!gr_acl_is_enabled()))
68055+ return 0;
68056+
68057+ uid = GR_GLOBAL_UID(kuid);
68058+
68059+ spin_lock(&gr_uid_lock);
68060+ loc = gr_find_uid(uid);
68061+
68062+ if (loc < 0)
68063+ goto out_unlock;
68064+
68065+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
68066+ gr_remove_uid(loc);
68067+ else
68068+ ret = 1;
68069+
68070+out_unlock:
68071+ spin_unlock(&gr_uid_lock);
68072+ return ret;
68073+}
68074+
68075+static __inline__ int
68076+proc_is_setxid(const struct cred *cred)
68077+{
68078+ if (!uid_eq(cred->uid, cred->euid) || !uid_eq(cred->uid, cred->suid) ||
68079+ !uid_eq(cred->uid, cred->fsuid))
68080+ return 1;
68081+ if (!gid_eq(cred->gid, cred->egid) || !gid_eq(cred->gid, cred->sgid) ||
68082+ !gid_eq(cred->gid, cred->fsgid))
68083+ return 1;
68084+
68085+ return 0;
68086+}
68087+
68088+extern int gr_fake_force_sig(int sig, struct task_struct *t);
68089+
68090+void
68091+gr_handle_crash(struct task_struct *task, const int sig)
68092+{
68093+ struct acl_subject_label *curr;
68094+ struct task_struct *tsk, *tsk2;
68095+ const struct cred *cred;
68096+ const struct cred *cred2;
68097+
68098+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
68099+ return;
68100+
68101+ if (unlikely(!gr_acl_is_enabled()))
68102+ return;
68103+
68104+ curr = task->acl;
68105+
68106+ if (!(curr->resmask & (1U << GR_CRASH_RES)))
68107+ return;
68108+
68109+ if (time_before_eq(curr->expires, get_seconds())) {
68110+ curr->expires = 0;
68111+ curr->crashes = 0;
68112+ }
68113+
68114+ curr->crashes++;
68115+
68116+ if (!curr->expires)
68117+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
68118+
68119+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
68120+ time_after(curr->expires, get_seconds())) {
68121+ rcu_read_lock();
68122+ cred = __task_cred(task);
68123+ if (gr_is_global_nonroot(cred->uid) && proc_is_setxid(cred)) {
68124+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
68125+ spin_lock(&gr_uid_lock);
68126+ gr_insert_uid(cred->uid, curr->expires);
68127+ spin_unlock(&gr_uid_lock);
68128+ curr->expires = 0;
68129+ curr->crashes = 0;
68130+ read_lock(&tasklist_lock);
68131+ do_each_thread(tsk2, tsk) {
68132+ cred2 = __task_cred(tsk);
68133+ if (tsk != task && uid_eq(cred2->uid, cred->uid))
68134+ gr_fake_force_sig(SIGKILL, tsk);
68135+ } while_each_thread(tsk2, tsk);
68136+ read_unlock(&tasklist_lock);
68137+ } else {
68138+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
68139+ read_lock(&tasklist_lock);
68140+ read_lock(&grsec_exec_file_lock);
68141+ do_each_thread(tsk2, tsk) {
68142+ if (likely(tsk != task)) {
68143+ // if this thread has the same subject as the one that triggered
68144+ // RES_CRASH and it's the same binary, kill it
68145+ if (tsk->acl == task->acl && gr_is_same_file(tsk->exec_file, task->exec_file))
68146+ gr_fake_force_sig(SIGKILL, tsk);
68147+ }
68148+ } while_each_thread(tsk2, tsk);
68149+ read_unlock(&grsec_exec_file_lock);
68150+ read_unlock(&tasklist_lock);
68151+ }
68152+ rcu_read_unlock();
68153+ }
68154+
68155+ return;
68156+}
68157+
68158+int
68159+gr_check_crash_exec(const struct file *filp)
68160+{
68161+ struct acl_subject_label *curr;
68162+
68163+ if (unlikely(!gr_acl_is_enabled()))
68164+ return 0;
68165+
68166+ read_lock(&gr_inode_lock);
68167+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
68168+ __get_dev(filp->f_path.dentry),
68169+ current->role);
68170+ read_unlock(&gr_inode_lock);
68171+
68172+ if (!curr || !(curr->resmask & (1U << GR_CRASH_RES)) ||
68173+ (!curr->crashes && !curr->expires))
68174+ return 0;
68175+
68176+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
68177+ time_after(curr->expires, get_seconds()))
68178+ return 1;
68179+ else if (time_before_eq(curr->expires, get_seconds())) {
68180+ curr->crashes = 0;
68181+ curr->expires = 0;
68182+ }
68183+
68184+ return 0;
68185+}
68186+
68187+void
68188+gr_handle_alertkill(struct task_struct *task)
68189+{
68190+ struct acl_subject_label *curracl;
68191+ __u32 curr_ip;
68192+ struct task_struct *p, *p2;
68193+
68194+ if (unlikely(!gr_acl_is_enabled()))
68195+ return;
68196+
68197+ curracl = task->acl;
68198+ curr_ip = task->signal->curr_ip;
68199+
68200+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
68201+ read_lock(&tasklist_lock);
68202+ do_each_thread(p2, p) {
68203+ if (p->signal->curr_ip == curr_ip)
68204+ gr_fake_force_sig(SIGKILL, p);
68205+ } while_each_thread(p2, p);
68206+ read_unlock(&tasklist_lock);
68207+ } else if (curracl->mode & GR_KILLPROC)
68208+ gr_fake_force_sig(SIGKILL, task);
68209+
68210+ return;
68211+}
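
The crash-uid table above is kept sorted so gr_find_uid() can binary-search it, and gr_insertsort() restores order after each append. A self-contained userspace sketch of the same pair, with a small fixed-size table standing in for the GR_UIDTABLE_MAX allocation:

/* Sorted-array scheme from gracl_segv.c in miniature: binary search
 * for lookups plus one insertion-sort pass after each append. */
#include <stdio.h>

static unsigned int set[16];
static int used;

static int find_uid(unsigned int uid)
{
	int low = 0, high = used - 1;

	while (high >= low) {
		int mid = (low + high) / 2;

		if (set[mid] == uid)
			return mid;
		if (set[mid] > uid)
			high = mid - 1;
		else
			low = mid + 1;
	}
	return -1;
}

static void insert_uid(unsigned int uid)
{
	int j;

	if (used == 16)
		return;            /* table full, mirrors the MAX check */
	j = used++;
	set[j] = uid;
	while (j > 0 && set[j - 1] > set[j]) { /* bubble into place */
		unsigned int t = set[j];

		set[j] = set[j - 1];
		set[--j] = t;
	}
}

int main(void)
{
	insert_uid(1000); insert_uid(33); insert_uid(500);
	printf("index of 500: %d\n", find_uid(500)); /* 1  */
	printf("index of 42:  %d\n", find_uid(42));  /* -1 */
	return 0;
}
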
68212diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
68213new file mode 100644
68214index 0000000..98011b0
68215--- /dev/null
68216+++ b/grsecurity/gracl_shm.c
68217@@ -0,0 +1,40 @@
68218+#include <linux/kernel.h>
68219+#include <linux/mm.h>
68220+#include <linux/sched.h>
68221+#include <linux/file.h>
68222+#include <linux/ipc.h>
68223+#include <linux/gracl.h>
68224+#include <linux/grsecurity.h>
68225+#include <linux/grinternal.h>
68226+
68227+int
68228+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
68229+ const time_t shm_createtime, const kuid_t cuid, const int shmid)
68230+{
68231+ struct task_struct *task;
68232+
68233+ if (!gr_acl_is_enabled())
68234+ return 1;
68235+
68236+ rcu_read_lock();
68237+ read_lock(&tasklist_lock);
68238+
68239+ task = find_task_by_vpid(shm_cprid);
68240+
68241+ if (unlikely(!task))
68242+ task = find_task_by_vpid(shm_lapid);
68243+
68244+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
68245+ (task_pid_nr(task) == shm_lapid)) &&
68246+ (task->acl->mode & GR_PROTSHM) &&
68247+ (task->acl != current->acl))) {
68248+ read_unlock(&tasklist_lock);
68249+ rcu_read_unlock();
68250+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, GR_GLOBAL_UID(cuid), shm_cprid, shmid);
68251+ return 0;
68252+ }
68253+ read_unlock(&tasklist_lock);
68254+ rcu_read_unlock();
68255+
68256+ return 1;
68257+}
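
The shmat check above guards against pid reuse: the task found by shm_cprid can only be the segment's creator if it was already running when the segment was created. A trivial sketch of that heuristic, using plain time_t instead of the kernel's wrap-safe time_before_eq():

/* Pid-reuse heuristic from gr_handle_shmat(): a task younger than
 * the shm segment cannot be its creator. */
#include <stdio.h>
#include <time.h>

static int could_be_creator(time_t task_start, time_t shm_createtime)
{
	return task_start <= shm_createtime;
}

int main(void)
{
	printf("%d\n", could_be_creator(100, 200)); /* 1: plausible creator */
	printf("%d\n", could_be_creator(300, 200)); /* 0: pid was reused    */
	return 0;
}
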
68258diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
68259new file mode 100644
68260index 0000000..bc0be01
68261--- /dev/null
68262+++ b/grsecurity/grsec_chdir.c
68263@@ -0,0 +1,19 @@
68264+#include <linux/kernel.h>
68265+#include <linux/sched.h>
68266+#include <linux/fs.h>
68267+#include <linux/file.h>
68268+#include <linux/grsecurity.h>
68269+#include <linux/grinternal.h>
68270+
68271+void
68272+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
68273+{
68274+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
68275+ if ((grsec_enable_chdir && grsec_enable_group &&
68276+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
68277+ !grsec_enable_group)) {
68278+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
68279+ }
68280+#endif
68281+ return;
68282+}
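
The audit condition above reduces to grsec_enable_chdir && (!grsec_enable_group || in_group_p(grsec_audit_gid)): log every chdir, or only those made by members of the audit group. A truth-table sketch with hypothetical flag parameters:

/* Simplified form of the gr_log_chdir() condition above. */
#include <stdio.h>

static int should_log(int chdir_on, int group_on, int in_group)
{
	return chdir_on && (!group_on || in_group);
}

int main(void)
{
	printf("%d\n", should_log(1, 0, 0)); /* 1: audit all chdirs      */
	printf("%d\n", should_log(1, 1, 0)); /* 0: not in the audit group */
	printf("%d\n", should_log(1, 1, 1)); /* 1: in the audit group     */
	return 0;
}
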
68283diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
68284new file mode 100644
68285index 0000000..bd6e105
68286--- /dev/null
68287+++ b/grsecurity/grsec_chroot.c
68288@@ -0,0 +1,370 @@
68289+#include <linux/kernel.h>
68290+#include <linux/module.h>
68291+#include <linux/sched.h>
68292+#include <linux/file.h>
68293+#include <linux/fs.h>
68294+#include <linux/mount.h>
68295+#include <linux/types.h>
68296+#include "../fs/mount.h"
68297+#include <linux/grsecurity.h>
68298+#include <linux/grinternal.h>
68299+
68300+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
68301+static int gr_init_ran;
68302+#endif
68303+
68304+void gr_set_chroot_entries(struct task_struct *task, const struct path *path)
68305+{
68306+#ifdef CONFIG_GRKERNSEC
68307+ if (task_pid_nr(task) > 1 && path->dentry != init_task.fs->root.dentry &&
68308+ path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root
68309+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
68310+ && gr_init_ran
68311+#endif
68312+ )
68313+ task->gr_is_chrooted = 1;
68314+ else {
68315+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
68316+ if (task_pid_nr(task) == 1 && !gr_init_ran)
68317+ gr_init_ran = 1;
68318+#endif
68319+ task->gr_is_chrooted = 0;
68320+ }
68321+
68322+ task->gr_chroot_dentry = path->dentry;
68323+#endif
68324+ return;
68325+}
68326+
68327+void gr_clear_chroot_entries(struct task_struct *task)
68328+{
68329+#ifdef CONFIG_GRKERNSEC
68330+ task->gr_is_chrooted = 0;
68331+ task->gr_chroot_dentry = NULL;
68332+#endif
68333+ return;
68334+}
68335+
68336+int
68337+gr_handle_chroot_unix(const pid_t pid)
68338+{
68339+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
68340+ struct task_struct *p;
68341+
68342+ if (unlikely(!grsec_enable_chroot_unix))
68343+ return 1;
68344+
68345+ if (likely(!proc_is_chrooted(current)))
68346+ return 1;
68347+
68348+ rcu_read_lock();
68349+ read_lock(&tasklist_lock);
68350+ p = find_task_by_vpid_unrestricted(pid);
68351+ if (unlikely(p && !have_same_root(current, p))) {
68352+ read_unlock(&tasklist_lock);
68353+ rcu_read_unlock();
68354+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
68355+ return 0;
68356+ }
68357+ read_unlock(&tasklist_lock);
68358+ rcu_read_unlock();
68359+#endif
68360+ return 1;
68361+}
68362+
68363+int
68364+gr_handle_chroot_nice(void)
68365+{
68366+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
68367+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
68368+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
68369+ return -EPERM;
68370+ }
68371+#endif
68372+ return 0;
68373+}
68374+
68375+int
68376+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
68377+{
68378+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
68379+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
68380+ && proc_is_chrooted(current)) {
68381+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, task_pid_nr(p));
68382+ return -EACCES;
68383+ }
68384+#endif
68385+ return 0;
68386+}
68387+
68388+int
68389+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
68390+{
68391+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
68392+ struct task_struct *p;
68393+ int ret = 0;
68394+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
68395+ return ret;
68396+
68397+ read_lock(&tasklist_lock);
68398+ do_each_pid_task(pid, type, p) {
68399+ if (!have_same_root(current, p)) {
68400+ ret = 1;
68401+ goto out;
68402+ }
68403+ } while_each_pid_task(pid, type, p);
68404+out:
68405+ read_unlock(&tasklist_lock);
68406+ return ret;
68407+#endif
68408+ return 0;
68409+}
68410+
68411+int
68412+gr_pid_is_chrooted(struct task_struct *p)
68413+{
68414+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
68415+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
68416+ return 0;
68417+
68418+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
68419+ !have_same_root(current, p)) {
68420+ return 1;
68421+ }
68422+#endif
68423+ return 0;
68424+}
68425+
68426+EXPORT_SYMBOL(gr_pid_is_chrooted);
68427+
68428+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
68429+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
68430+{
68431+ struct path path, currentroot;
68432+ int ret = 0;
68433+
68434+ path.dentry = (struct dentry *)u_dentry;
68435+ path.mnt = (struct vfsmount *)u_mnt;
68436+ get_fs_root(current->fs, &currentroot);
68437+ if (path_is_under(&path, &currentroot))
68438+ ret = 1;
68439+ path_put(&currentroot);
68440+
68441+ return ret;
68442+}
68443+#endif
68444+
68445+int
68446+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
68447+{
68448+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
68449+ if (!grsec_enable_chroot_fchdir)
68450+ return 1;
68451+
68452+ if (!proc_is_chrooted(current))
68453+ return 1;
68454+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
68455+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
68456+ return 0;
68457+ }
68458+#endif
68459+ return 1;
68460+}
68461+
68462+int
68463+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
68464+ const time_t shm_createtime)
68465+{
68466+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
68467+ struct task_struct *p;
68468+ time_t starttime;
68469+
68470+ if (unlikely(!grsec_enable_chroot_shmat))
68471+ return 1;
68472+
68473+ if (likely(!proc_is_chrooted(current)))
68474+ return 1;
68475+
68476+ rcu_read_lock();
68477+ read_lock(&tasklist_lock);
68478+
68479+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
68480+ starttime = p->start_time.tv_sec;
68481+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
68482+ if (have_same_root(current, p)) {
68483+ goto allow;
68484+ } else {
68485+ read_unlock(&tasklist_lock);
68486+ rcu_read_unlock();
68487+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
68488+ return 0;
68489+ }
68490+ }
68491+		/* creator exited or pid was reused; fall through to the next check */
68492+ }
68493+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
68494+ if (unlikely(!have_same_root(current, p))) {
68495+ read_unlock(&tasklist_lock);
68496+ rcu_read_unlock();
68497+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
68498+ return 0;
68499+ }
68500+ }
68501+
68502+allow:
68503+ read_unlock(&tasklist_lock);
68504+ rcu_read_unlock();
68505+#endif
68506+ return 1;
68507+}
68508+
68509+void
68510+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
68511+{
68512+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
68513+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
68514+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
68515+#endif
68516+ return;
68517+}
68518+
68519+int
68520+gr_handle_chroot_mknod(const struct dentry *dentry,
68521+ const struct vfsmount *mnt, const int mode)
68522+{
68523+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
68524+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
68525+ proc_is_chrooted(current)) {
68526+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
68527+ return -EPERM;
68528+ }
68529+#endif
68530+ return 0;
68531+}
68532+
68533+int
68534+gr_handle_chroot_mount(const struct dentry *dentry,
68535+ const struct vfsmount *mnt, const char *dev_name)
68536+{
68537+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
68538+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
68539+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
68540+ return -EPERM;
68541+ }
68542+#endif
68543+ return 0;
68544+}
68545+
68546+int
68547+gr_handle_chroot_pivot(void)
68548+{
68549+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
68550+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
68551+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
68552+ return -EPERM;
68553+ }
68554+#endif
68555+ return 0;
68556+}
68557+
68558+int
68559+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
68560+{
68561+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
68562+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
68563+ !gr_is_outside_chroot(dentry, mnt)) {
68564+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
68565+ return -EPERM;
68566+ }
68567+#endif
68568+ return 0;
68569+}
68570+
68571+extern const char *captab_log[];
68572+extern int captab_log_entries;
68573+
68574+int
68575+gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
68576+{
68577+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
68578+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
68579+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
68580+ if (cap_raised(chroot_caps, cap)) {
68581+ if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
68582+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
68583+ }
68584+ return 0;
68585+ }
68586+ }
68587+#endif
68588+ return 1;
68589+}
68590+
68591+int
68592+gr_chroot_is_capable(const int cap)
68593+{
68594+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
68595+ return gr_task_chroot_is_capable(current, current_cred(), cap);
68596+#endif
68597+ return 1;
68598+}
68599+
68600+int
68601+gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
68602+{
68603+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
68604+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
68605+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
68606+ if (cap_raised(chroot_caps, cap)) {
68607+ return 0;
68608+ }
68609+ }
68610+#endif
68611+ return 1;
68612+}
68613+
68614+int
68615+gr_chroot_is_capable_nolog(const int cap)
68616+{
68617+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
68618+ return gr_task_chroot_is_capable_nolog(current, cap);
68619+#endif
68620+ return 1;
68621+}
68622+
68623+int
68624+gr_handle_chroot_sysctl(const int op)
68625+{
68626+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
68627+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
68628+ proc_is_chrooted(current))
68629+ return -EACCES;
68630+#endif
68631+ return 0;
68632+}
68633+
68634+void
68635+gr_handle_chroot_chdir(const struct path *path)
68636+{
68637+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
68638+ if (grsec_enable_chroot_chdir)
68639+ set_fs_pwd(current->fs, path);
68640+#endif
68641+ return;
68642+}
68643+
68644+int
68645+gr_handle_chroot_chmod(const struct dentry *dentry,
68646+ const struct vfsmount *mnt, const int mode)
68647+{
68648+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
68649+ /* allow chmod +s on directories, but not files */
68650+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
68651+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
68652+ proc_is_chrooted(current)) {
68653+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
68654+ return -EPERM;
68655+ }
68656+#endif
68657+ return 0;
68658+}
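
gr_task_chroot_is_capable() above consults a fixed mask of capabilities considered dangerous inside a chroot (GR_CHROOT_CAPS) and denies any raised bit. A standalone sketch of that mask test; the two capability bit numbers match mainline Linux but are only illustrative here:

/* Sketch of the chroot capability filtering above: any bit raised
 * in the fixed "denied in chroot" mask fails the check. */
#include <stdio.h>

#define CAP_SYS_ADMIN_BIT  21  /* illustrative; CAP_SYS_ADMIN in Linux  */
#define CAP_SYS_MODULE_BIT 16  /* illustrative; CAP_SYS_MODULE in Linux */

static unsigned long long chroot_denied_caps =
	(1ULL << CAP_SYS_ADMIN_BIT) | (1ULL << CAP_SYS_MODULE_BIT);

static int chroot_is_capable(int cap)
{
	return !(chroot_denied_caps & (1ULL << cap)); /* denied if masked */
}

int main(void)
{
	printf("%d\n", chroot_is_capable(CAP_SYS_ADMIN_BIT)); /* 0: denied  */
	printf("%d\n", chroot_is_capable(7));                 /* 1: allowed */
	return 0;
}
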
68659diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
68660new file mode 100644
68661index 0000000..ce65ceb
68662--- /dev/null
68663+++ b/grsecurity/grsec_disabled.c
68664@@ -0,0 +1,434 @@
68665+#include <linux/kernel.h>
68666+#include <linux/module.h>
68667+#include <linux/sched.h>
68668+#include <linux/file.h>
68669+#include <linux/fs.h>
68670+#include <linux/kdev_t.h>
68671+#include <linux/net.h>
68672+#include <linux/in.h>
68673+#include <linux/ip.h>
68674+#include <linux/skbuff.h>
68675+#include <linux/sysctl.h>
68676+
68677+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
68678+void
68679+pax_set_initial_flags(struct linux_binprm *bprm)
68680+{
68681+ return;
68682+}
68683+#endif
68684+
68685+#ifdef CONFIG_SYSCTL
68686+__u32
68687+gr_handle_sysctl(const struct ctl_table * table, const int op)
68688+{
68689+ return 0;
68690+}
68691+#endif
68692+
68693+#ifdef CONFIG_TASKSTATS
68694+int gr_is_taskstats_denied(int pid)
68695+{
68696+ return 0;
68697+}
68698+#endif
68699+
68700+int
68701+gr_acl_is_enabled(void)
68702+{
68703+ return 0;
68704+}
68705+
68706+void
68707+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
68708+{
68709+ return;
68710+}
68711+
68712+int
68713+gr_handle_rawio(const struct inode *inode)
68714+{
68715+ return 0;
68716+}
68717+
68718+void
68719+gr_acl_handle_psacct(struct task_struct *task, const long code)
68720+{
68721+ return;
68722+}
68723+
68724+int
68725+gr_handle_ptrace(struct task_struct *task, const long request)
68726+{
68727+ return 0;
68728+}
68729+
68730+int
68731+gr_handle_proc_ptrace(struct task_struct *task)
68732+{
68733+ return 0;
68734+}
68735+
68736+int
68737+gr_set_acls(const int type)
68738+{
68739+ return 0;
68740+}
68741+
68742+int
68743+gr_check_hidden_task(const struct task_struct *tsk)
68744+{
68745+ return 0;
68746+}
68747+
68748+int
68749+gr_check_protected_task(const struct task_struct *task)
68750+{
68751+ return 0;
68752+}
68753+
68754+int
68755+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
68756+{
68757+ return 0;
68758+}
68759+
68760+void
68761+gr_copy_label(struct task_struct *tsk)
68762+{
68763+ return;
68764+}
68765+
68766+void
68767+gr_set_pax_flags(struct task_struct *task)
68768+{
68769+ return;
68770+}
68771+
68772+int
68773+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
68774+ const int unsafe_share)
68775+{
68776+ return 0;
68777+}
68778+
68779+void
68780+gr_handle_delete(const ino_t ino, const dev_t dev)
68781+{
68782+ return;
68783+}
68784+
68785+void
68786+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
68787+{
68788+ return;
68789+}
68790+
68791+void
68792+gr_handle_crash(struct task_struct *task, const int sig)
68793+{
68794+ return;
68795+}
68796+
68797+int
68798+gr_check_crash_exec(const struct file *filp)
68799+{
68800+ return 0;
68801+}
68802+
68803+int
68804+gr_check_crash_uid(const kuid_t uid)
68805+{
68806+ return 0;
68807+}
68808+
68809+void
68810+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
68811+ struct dentry *old_dentry,
68812+ struct dentry *new_dentry,
68813+ struct vfsmount *mnt, const __u8 replace)
68814+{
68815+ return;
68816+}
68817+
68818+int
68819+gr_search_socket(const int family, const int type, const int protocol)
68820+{
68821+ return 1;
68822+}
68823+
68824+int
68825+gr_search_connectbind(const int mode, const struct socket *sock,
68826+ const struct sockaddr_in *addr)
68827+{
68828+ return 0;
68829+}
68830+
68831+void
68832+gr_handle_alertkill(struct task_struct *task)
68833+{
68834+ return;
68835+}
68836+
68837+__u32
68838+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
68839+{
68840+ return 1;
68841+}
68842+
68843+__u32
68844+gr_acl_handle_hidden_file(const struct dentry * dentry,
68845+ const struct vfsmount * mnt)
68846+{
68847+ return 1;
68848+}
68849+
68850+__u32
68851+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
68852+ int acc_mode)
68853+{
68854+ return 1;
68855+}
68856+
68857+__u32
68858+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
68859+{
68860+ return 1;
68861+}
68862+
68863+__u32
68864+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
68865+{
68866+ return 1;
68867+}
68868+
68869+int
68870+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
68871+ unsigned int *vm_flags)
68872+{
68873+ return 1;
68874+}
68875+
68876+__u32
68877+gr_acl_handle_truncate(const struct dentry * dentry,
68878+ const struct vfsmount * mnt)
68879+{
68880+ return 1;
68881+}
68882+
68883+__u32
68884+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
68885+{
68886+ return 1;
68887+}
68888+
68889+__u32
68890+gr_acl_handle_access(const struct dentry * dentry,
68891+ const struct vfsmount * mnt, const int fmode)
68892+{
68893+ return 1;
68894+}
68895+
68896+__u32
68897+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
68898+ umode_t *mode)
68899+{
68900+ return 1;
68901+}
68902+
68903+__u32
68904+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
68905+{
68906+ return 1;
68907+}
68908+
68909+__u32
68910+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
68911+{
68912+ return 1;
68913+}
68914+
68915+void
68916+grsecurity_init(void)
68917+{
68918+ return;
68919+}
68920+
68921+umode_t gr_acl_umask(void)
68922+{
68923+ return 0;
68924+}
68925+
68926+__u32
68927+gr_acl_handle_mknod(const struct dentry * new_dentry,
68928+ const struct dentry * parent_dentry,
68929+ const struct vfsmount * parent_mnt,
68930+ const int mode)
68931+{
68932+ return 1;
68933+}
68934+
68935+__u32
68936+gr_acl_handle_mkdir(const struct dentry * new_dentry,
68937+ const struct dentry * parent_dentry,
68938+ const struct vfsmount * parent_mnt)
68939+{
68940+ return 1;
68941+}
68942+
68943+__u32
68944+gr_acl_handle_symlink(const struct dentry * new_dentry,
68945+ const struct dentry * parent_dentry,
68946+ const struct vfsmount * parent_mnt, const struct filename *from)
68947+{
68948+ return 1;
68949+}
68950+
68951+__u32
68952+gr_acl_handle_link(const struct dentry * new_dentry,
68953+ const struct dentry * parent_dentry,
68954+ const struct vfsmount * parent_mnt,
68955+ const struct dentry * old_dentry,
68956+ const struct vfsmount * old_mnt, const struct filename *to)
68957+{
68958+ return 1;
68959+}
68960+
68961+int
68962+gr_acl_handle_rename(const struct dentry *new_dentry,
68963+ const struct dentry *parent_dentry,
68964+ const struct vfsmount *parent_mnt,
68965+ const struct dentry *old_dentry,
68966+ const struct inode *old_parent_inode,
68967+ const struct vfsmount *old_mnt, const struct filename *newname)
68968+{
68969+ return 0;
68970+}
68971+
68972+int
68973+gr_acl_handle_filldir(const struct file *file, const char *name,
68974+ const int namelen, const ino_t ino)
68975+{
68976+ return 1;
68977+}
68978+
68979+int
68980+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
68981+ const time_t shm_createtime, const kuid_t cuid, const int shmid)
68982+{
68983+ return 1;
68984+}
68985+
68986+int
68987+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
68988+{
68989+ return 0;
68990+}
68991+
68992+int
68993+gr_search_accept(const struct socket *sock)
68994+{
68995+ return 0;
68996+}
68997+
68998+int
68999+gr_search_listen(const struct socket *sock)
69000+{
69001+ return 0;
69002+}
69003+
69004+int
69005+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
69006+{
69007+ return 0;
69008+}
69009+
69010+__u32
69011+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
69012+{
69013+ return 1;
69014+}
69015+
69016+__u32
69017+gr_acl_handle_creat(const struct dentry * dentry,
69018+ const struct dentry * p_dentry,
69019+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
69020+ const int imode)
69021+{
69022+ return 1;
69023+}
69024+
69025+void
69026+gr_acl_handle_exit(void)
69027+{
69028+ return;
69029+}
69030+
69031+int
69032+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
69033+{
69034+ return 1;
69035+}
69036+
69037+void
69038+gr_set_role_label(const kuid_t uid, const kgid_t gid)
69039+{
69040+ return;
69041+}
69042+
69043+int
69044+gr_acl_handle_procpidmem(const struct task_struct *task)
69045+{
69046+ return 0;
69047+}
69048+
69049+int
69050+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
69051+{
69052+ return 0;
69053+}
69054+
69055+int
69056+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
69057+{
69058+ return 0;
69059+}
69060+
69061+void
69062+gr_set_kernel_label(struct task_struct *task)
69063+{
69064+ return;
69065+}
69066+
69067+int
69068+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
69069+{
69070+ return 0;
69071+}
69072+
69073+int
69074+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
69075+{
69076+ return 0;
69077+}
69078+
69079+int gr_acl_enable_at_secure(void)
69080+{
69081+ return 0;
69082+}
69083+
69084+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
69085+{
69086+ return dentry->d_sb->s_dev;
69087+}
69088+
69089+void gr_put_exec_file(struct task_struct *task)
69090+{
69091+ return;
69092+}
69093+
69094+EXPORT_SYMBOL(gr_set_kernel_label);
69095+#ifdef CONFIG_SECURITY
69096+EXPORT_SYMBOL(gr_check_user_change);
69097+EXPORT_SYMBOL(gr_check_group_change);
69098+#endif
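
The file above is the CONFIG_GRKERNSEC=n stub layer for the RBAC hooks: every __u32 handler returns 1 ("allowed") and every int handler returns 0 ("no error" / "no restriction"), so call sites elsewhere in the kernel compile and run with no #ifdef of their own. A minimal userspace sketch of the same pattern follows; the names are illustrative, not the kernel's.

#include <stdio.h>

/* Build with -DFEATURE_ENABLED to get the "real" hook; without it, the
 * stub makes every check a constant, exactly like the file above. */
#ifdef FEATURE_ENABLED
int feature_check_open(const char *path)
{
        return path[0] != '\0';   /* stand-in for a real policy lookup */
}
#else
static inline int feature_check_open(const char *path)
{
        (void)path;
        return 1;                 /* always allow when compiled out */
}
#endif

int main(void)
{
        /* the caller is identical whether or not the feature is built in */
        printf("%s\n", feature_check_open("/etc/passwd") ? "allowed" : "denied");
        return 0;
}
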
69099diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
69100new file mode 100644
69101index 0000000..387032b
69102--- /dev/null
69103+++ b/grsecurity/grsec_exec.c
69104@@ -0,0 +1,187 @@
69105+#include <linux/kernel.h>
69106+#include <linux/sched.h>
69107+#include <linux/file.h>
69108+#include <linux/binfmts.h>
69109+#include <linux/fs.h>
69110+#include <linux/types.h>
69111+#include <linux/grdefs.h>
69112+#include <linux/grsecurity.h>
69113+#include <linux/grinternal.h>
69114+#include <linux/capability.h>
69115+#include <linux/module.h>
69116+#include <linux/compat.h>
69117+
69118+#include <asm/uaccess.h>
69119+
69120+#ifdef CONFIG_GRKERNSEC_EXECLOG
69121+static char gr_exec_arg_buf[132];
69122+static DEFINE_MUTEX(gr_exec_arg_mutex);
69123+#endif
69124+
69125+struct user_arg_ptr {
69126+#ifdef CONFIG_COMPAT
69127+ bool is_compat;
69128+#endif
69129+ union {
69130+ const char __user *const __user *native;
69131+#ifdef CONFIG_COMPAT
69132+ const compat_uptr_t __user *compat;
69133+#endif
69134+ } ptr;
69135+};
69136+
69137+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
69138+
69139+void
69140+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
69141+{
69142+#ifdef CONFIG_GRKERNSEC_EXECLOG
69143+ char *grarg = gr_exec_arg_buf;
69144+ unsigned int i, x, execlen = 0;
69145+ char c;
69146+
69147+ if (!((grsec_enable_execlog && grsec_enable_group &&
69148+ in_group_p(grsec_audit_gid))
69149+ || (grsec_enable_execlog && !grsec_enable_group)))
69150+ return;
69151+
69152+ mutex_lock(&gr_exec_arg_mutex);
69153+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
69154+
69155+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
69156+ const char __user *p;
69157+ unsigned int len;
69158+
69159+ p = get_user_arg_ptr(argv, i);
69160+ if (IS_ERR(p))
69161+ goto log;
69162+
69163+ len = strnlen_user(p, 128 - execlen);
69164+ if (len > 128 - execlen)
69165+ len = 128 - execlen;
69166+ else if (len > 0)
69167+ len--;
69168+ if (copy_from_user(grarg + execlen, p, len))
69169+ goto log;
69170+
69171+ /* rewrite unprintable characters */
69172+ for (x = 0; x < len; x++) {
69173+ c = *(grarg + execlen + x);
69174+ if (c < 32 || c > 126)
69175+ *(grarg + execlen + x) = ' ';
69176+ }
69177+
69178+ execlen += len;
69179+ *(grarg + execlen) = ' ';
69180+ *(grarg + execlen + 1) = '\0';
69181+ execlen++;
69182+ }
69183+
69184+ log:
69185+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
69186+ bprm->file->f_path.mnt, grarg);
69187+ mutex_unlock(&gr_exec_arg_mutex);
69188+#endif
69189+ return;
69190+}
69191+
69192+#ifdef CONFIG_GRKERNSEC
69193+extern int gr_acl_is_capable(const int cap);
69194+extern int gr_acl_is_capable_nolog(const int cap);
69195+extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
69196+extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
69197+extern int gr_chroot_is_capable(const int cap);
69198+extern int gr_chroot_is_capable_nolog(const int cap);
69199+extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
69200+extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
69201+#endif
69202+
69203+const char *captab_log[] = {
69204+ "CAP_CHOWN",
69205+ "CAP_DAC_OVERRIDE",
69206+ "CAP_DAC_READ_SEARCH",
69207+ "CAP_FOWNER",
69208+ "CAP_FSETID",
69209+ "CAP_KILL",
69210+ "CAP_SETGID",
69211+ "CAP_SETUID",
69212+ "CAP_SETPCAP",
69213+ "CAP_LINUX_IMMUTABLE",
69214+ "CAP_NET_BIND_SERVICE",
69215+ "CAP_NET_BROADCAST",
69216+ "CAP_NET_ADMIN",
69217+ "CAP_NET_RAW",
69218+ "CAP_IPC_LOCK",
69219+ "CAP_IPC_OWNER",
69220+ "CAP_SYS_MODULE",
69221+ "CAP_SYS_RAWIO",
69222+ "CAP_SYS_CHROOT",
69223+ "CAP_SYS_PTRACE",
69224+ "CAP_SYS_PACCT",
69225+ "CAP_SYS_ADMIN",
69226+ "CAP_SYS_BOOT",
69227+ "CAP_SYS_NICE",
69228+ "CAP_SYS_RESOURCE",
69229+ "CAP_SYS_TIME",
69230+ "CAP_SYS_TTY_CONFIG",
69231+ "CAP_MKNOD",
69232+ "CAP_LEASE",
69233+ "CAP_AUDIT_WRITE",
69234+ "CAP_AUDIT_CONTROL",
69235+ "CAP_SETFCAP",
69236+ "CAP_MAC_OVERRIDE",
69237+ "CAP_MAC_ADMIN",
69238+ "CAP_SYSLOG",
69239+ "CAP_WAKE_ALARM"
69240+};
69241+
69242+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
69243+
69244+int gr_is_capable(const int cap)
69245+{
69246+#ifdef CONFIG_GRKERNSEC
69247+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
69248+ return 1;
69249+ return 0;
69250+#else
69251+ return 1;
69252+#endif
69253+}
69254+
69255+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
69256+{
69257+#ifdef CONFIG_GRKERNSEC
69258+ if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
69259+ return 1;
69260+ return 0;
69261+#else
69262+ return 1;
69263+#endif
69264+}
69265+
69266+int gr_is_capable_nolog(const int cap)
69267+{
69268+#ifdef CONFIG_GRKERNSEC
69269+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
69270+ return 1;
69271+ return 0;
69272+#else
69273+ return 1;
69274+#endif
69275+}
69276+
69277+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
69278+{
69279+#ifdef CONFIG_GRKERNSEC
69280+ if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
69281+ return 1;
69282+ return 0;
69283+#else
69284+ return 1;
69285+#endif
69286+}
69287+
69288+EXPORT_SYMBOL(gr_is_capable);
69289+EXPORT_SYMBOL(gr_is_capable_nolog);
69290+EXPORT_SYMBOL(gr_task_is_capable);
69291+EXPORT_SYMBOL(gr_task_is_capable_nolog);
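
gr_handle_exec_args() above flattens up to 128 bytes of the exec'd argv into the fixed 132-byte gr_exec_arg_buf, rewriting unprintable bytes to spaces and space-separating the arguments before the line is logged. A userspace sketch of the same flattening, with strnlen()/memcpy() standing in for strnlen_user()/copy_from_user() (an assumption: the kernel variants count the trailing NUL and can fault partway, hence the extra len-- adjustment and the goto log paths above).

#include <stdio.h>
#include <string.h>

#define LOGBUF 132 /* same size as gr_exec_arg_buf */

static void flatten_args(char *out, int argc, char **argv)
{
        unsigned int execlen = 0;

        memset(out, 0, LOGBUF);
        for (int i = 0; i < argc && execlen < 128; i++) {
                /* strnlen() already excludes the NUL, so the kernel's
                 * len-- adjustment for strnlen_user() is not needed here */
                unsigned int len = strnlen(argv[i], 128 - execlen);

                memcpy(out + execlen, argv[i], len);
                for (unsigned int x = 0; x < len; x++) {  /* rewrite unprintables */
                        char c = out[execlen + x];
                        if (c < 32 || c > 126)
                                out[execlen + x] = ' ';
                }
                execlen += len;
                out[execlen] = ' ';        /* space-separate the arguments */
                out[execlen + 1] = '\0';
                execlen++;
        }
}

int main(int argc, char **argv)
{
        char buf[LOGBUF];

        flatten_args(buf, argc, argv);
        printf("exec log line: %s\n", buf);
        return 0;
}
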
69292diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
69293new file mode 100644
69294index 0000000..06cc6ea
69295--- /dev/null
69296+++ b/grsecurity/grsec_fifo.c
69297@@ -0,0 +1,24 @@
69298+#include <linux/kernel.h>
69299+#include <linux/sched.h>
69300+#include <linux/fs.h>
69301+#include <linux/file.h>
69302+#include <linux/grinternal.h>
69303+
69304+int
69305+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
69306+ const struct dentry *dir, const int flag, const int acc_mode)
69307+{
69308+#ifdef CONFIG_GRKERNSEC_FIFO
69309+ const struct cred *cred = current_cred();
69310+
69311+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
69312+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
69313+ !uid_eq(dentry->d_inode->i_uid, dir->d_inode->i_uid) &&
69314+ !uid_eq(cred->fsuid, dentry->d_inode->i_uid)) {
69315+ if (!inode_permission(dentry->d_inode, acc_mode))
69316+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, GR_GLOBAL_UID(dentry->d_inode->i_uid), GR_GLOBAL_GID(dentry->d_inode->i_gid));
69317+ return -EACCES;
69318+ }
69319+#endif
69320+ return 0;
69321+}
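
gr_handle_fifo() above denies opening a FIFO that sits in a sticky directory when the FIFO is owned by neither the directory owner nor the opener. Note the asymmetry: the denial is unconditional once the predicate matches, but the log line fires only if inode_permission() would otherwise have allowed the access, so denials DAC would have produced anyway are not reported twice. A userspace model of the predicate over stat(2) data (the O_EXCL exemption is elided, and the path in main() is hypothetical):

#include <stdbool.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

static bool fifo_open_denied(const struct stat *fifo, const struct stat *dir,
                             uid_t fsuid)
{
        return S_ISFIFO(fifo->st_mode) &&       /* target is a FIFO */
               (dir->st_mode & S_ISVTX) &&      /* parent directory is sticky */
               fifo->st_uid != dir->st_uid &&   /* FIFO owner != dir owner */
               fsuid != fifo->st_uid;           /* FIFO owner != opener */
}

int main(void)
{
        struct stat fifo_st, dir_st;

        /* /tmp/some.fifo is a hypothetical example path */
        if (stat("/tmp/some.fifo", &fifo_st) || stat("/tmp", &dir_st))
                return 1;
        printf("open would be %s\n",
               fifo_open_denied(&fifo_st, &dir_st, getuid()) ? "denied" : "allowed");
        return 0;
}
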
69322diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
69323new file mode 100644
69324index 0000000..8ca18bf
69325--- /dev/null
69326+++ b/grsecurity/grsec_fork.c
69327@@ -0,0 +1,23 @@
69328+#include <linux/kernel.h>
69329+#include <linux/sched.h>
69330+#include <linux/grsecurity.h>
69331+#include <linux/grinternal.h>
69332+#include <linux/errno.h>
69333+
69334+void
69335+gr_log_forkfail(const int retval)
69336+{
69337+#ifdef CONFIG_GRKERNSEC_FORKFAIL
69338+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
69339+ switch (retval) {
69340+ case -EAGAIN:
69341+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
69342+ break;
69343+ case -ENOMEM:
69344+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
69345+ break;
69346+ }
69347+ }
69348+#endif
69349+ return;
69350+}
69351diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
69352new file mode 100644
69353index 0000000..836f38f
69354--- /dev/null
69355+++ b/grsecurity/grsec_init.c
69356@@ -0,0 +1,280 @@
69357+#include <linux/kernel.h>
69358+#include <linux/sched.h>
69359+#include <linux/mm.h>
69360+#include <linux/gracl.h>
69361+#include <linux/slab.h>
69362+#include <linux/vmalloc.h>
69363+#include <linux/percpu.h>
69364+#include <linux/module.h>
69365+
69366+int grsec_enable_ptrace_readexec;
69367+int grsec_enable_setxid;
69368+int grsec_enable_symlinkown;
69369+kgid_t grsec_symlinkown_gid;
69370+int grsec_enable_brute;
69371+int grsec_enable_link;
69372+int grsec_enable_dmesg;
69373+int grsec_enable_harden_ptrace;
69374+int grsec_enable_fifo;
69375+int grsec_enable_execlog;
69376+int grsec_enable_signal;
69377+int grsec_enable_forkfail;
69378+int grsec_enable_audit_ptrace;
69379+int grsec_enable_time;
69380+int grsec_enable_group;
69381+kgid_t grsec_audit_gid;
69382+int grsec_enable_chdir;
69383+int grsec_enable_mount;
69384+int grsec_enable_rofs;
69385+int grsec_deny_new_usb;
69386+int grsec_enable_chroot_findtask;
69387+int grsec_enable_chroot_mount;
69388+int grsec_enable_chroot_shmat;
69389+int grsec_enable_chroot_fchdir;
69390+int grsec_enable_chroot_double;
69391+int grsec_enable_chroot_pivot;
69392+int grsec_enable_chroot_chdir;
69393+int grsec_enable_chroot_chmod;
69394+int grsec_enable_chroot_mknod;
69395+int grsec_enable_chroot_nice;
69396+int grsec_enable_chroot_execlog;
69397+int grsec_enable_chroot_caps;
69398+int grsec_enable_chroot_sysctl;
69399+int grsec_enable_chroot_unix;
69400+int grsec_enable_tpe;
69401+kgid_t grsec_tpe_gid;
69402+int grsec_enable_blackhole;
69403+#ifdef CONFIG_IPV6_MODULE
69404+EXPORT_SYMBOL(grsec_enable_blackhole);
69405+#endif
69406+int grsec_lastack_retries;
69407+int grsec_enable_tpe_all;
69408+int grsec_enable_tpe_invert;
69409+int grsec_enable_socket_all;
69410+kgid_t grsec_socket_all_gid;
69411+int grsec_enable_socket_client;
69412+kgid_t grsec_socket_client_gid;
69413+int grsec_enable_socket_server;
69414+kgid_t grsec_socket_server_gid;
69415+int grsec_resource_logging;
69416+int grsec_disable_privio;
69417+int grsec_enable_log_rwxmaps;
69418+int grsec_lock;
69419+
69420+DEFINE_SPINLOCK(grsec_alert_lock);
69421+unsigned long grsec_alert_wtime = 0;
69422+unsigned long grsec_alert_fyet = 0;
69423+
69424+DEFINE_SPINLOCK(grsec_audit_lock);
69425+
69426+DEFINE_RWLOCK(grsec_exec_file_lock);
69427+
69428+char *gr_shared_page[4];
69429+
69430+char *gr_alert_log_fmt;
69431+char *gr_audit_log_fmt;
69432+char *gr_alert_log_buf;
69433+char *gr_audit_log_buf;
69434+
69435+extern struct gr_arg *gr_usermode;
69436+extern unsigned char *gr_system_salt;
69437+extern unsigned char *gr_system_sum;
69438+
69439+void __init
69440+grsecurity_init(void)
69441+{
69442+ int j;
69443+ /* create the per-cpu shared pages */
69444+
69445+#ifdef CONFIG_X86
69446+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
69447+#endif
69448+
69449+ for (j = 0; j < 4; j++) {
69450+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
69451+ if (gr_shared_page[j] == NULL) {
69452+ panic("Unable to allocate grsecurity shared page");
69453+ return;
69454+ }
69455+ }
69456+
69457+ /* allocate log buffers */
69458+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
69459+ if (!gr_alert_log_fmt) {
69460+ panic("Unable to allocate grsecurity alert log format buffer");
69461+ return;
69462+ }
69463+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
69464+ if (!gr_audit_log_fmt) {
69465+ panic("Unable to allocate grsecurity audit log format buffer");
69466+ return;
69467+ }
69468+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
69469+ if (!gr_alert_log_buf) {
69470+ panic("Unable to allocate grsecurity alert log buffer");
69471+ return;
69472+ }
69473+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
69474+ if (!gr_audit_log_buf) {
69475+ panic("Unable to allocate grsecurity audit log buffer");
69476+ return;
69477+ }
69478+
69479+ /* allocate memory for authentication structure */
69480+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
69481+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
69482+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
69483+
69484+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
69485+ panic("Unable to allocate grsecurity authentication structure");
69486+ return;
69487+ }
69488+
69489+
69490+#ifdef CONFIG_GRKERNSEC_IO
69491+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
69492+ grsec_disable_privio = 1;
69493+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
69494+ grsec_disable_privio = 1;
69495+#else
69496+ grsec_disable_privio = 0;
69497+#endif
69498+#endif
69499+
69500+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
69501+ /* for backward compatibility, tpe_invert always defaults to on if
69502+ enabled in the kernel
69503+ */
69504+ grsec_enable_tpe_invert = 1;
69505+#endif
69506+
69507+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
69508+#ifndef CONFIG_GRKERNSEC_SYSCTL
69509+ grsec_lock = 1;
69510+#endif
69511+
69512+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
69513+ grsec_enable_log_rwxmaps = 1;
69514+#endif
69515+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
69516+ grsec_enable_group = 1;
69517+ grsec_audit_gid = KGIDT_INIT(CONFIG_GRKERNSEC_AUDIT_GID);
69518+#endif
69519+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
69520+ grsec_enable_ptrace_readexec = 1;
69521+#endif
69522+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
69523+ grsec_enable_chdir = 1;
69524+#endif
69525+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
69526+ grsec_enable_harden_ptrace = 1;
69527+#endif
69528+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
69529+ grsec_enable_mount = 1;
69530+#endif
69531+#ifdef CONFIG_GRKERNSEC_LINK
69532+ grsec_enable_link = 1;
69533+#endif
69534+#ifdef CONFIG_GRKERNSEC_BRUTE
69535+ grsec_enable_brute = 1;
69536+#endif
69537+#ifdef CONFIG_GRKERNSEC_DMESG
69538+ grsec_enable_dmesg = 1;
69539+#endif
69540+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69541+ grsec_enable_blackhole = 1;
69542+ grsec_lastack_retries = 4;
69543+#endif
69544+#ifdef CONFIG_GRKERNSEC_FIFO
69545+ grsec_enable_fifo = 1;
69546+#endif
69547+#ifdef CONFIG_GRKERNSEC_EXECLOG
69548+ grsec_enable_execlog = 1;
69549+#endif
69550+#ifdef CONFIG_GRKERNSEC_SETXID
69551+ grsec_enable_setxid = 1;
69552+#endif
69553+#ifdef CONFIG_GRKERNSEC_SIGNAL
69554+ grsec_enable_signal = 1;
69555+#endif
69556+#ifdef CONFIG_GRKERNSEC_FORKFAIL
69557+ grsec_enable_forkfail = 1;
69558+#endif
69559+#ifdef CONFIG_GRKERNSEC_TIME
69560+ grsec_enable_time = 1;
69561+#endif
69562+#ifdef CONFIG_GRKERNSEC_RESLOG
69563+ grsec_resource_logging = 1;
69564+#endif
69565+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
69566+ grsec_enable_chroot_findtask = 1;
69567+#endif
69568+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
69569+ grsec_enable_chroot_unix = 1;
69570+#endif
69571+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
69572+ grsec_enable_chroot_mount = 1;
69573+#endif
69574+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
69575+ grsec_enable_chroot_fchdir = 1;
69576+#endif
69577+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
69578+ grsec_enable_chroot_shmat = 1;
69579+#endif
69580+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
69581+ grsec_enable_audit_ptrace = 1;
69582+#endif
69583+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
69584+ grsec_enable_chroot_double = 1;
69585+#endif
69586+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
69587+ grsec_enable_chroot_pivot = 1;
69588+#endif
69589+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
69590+ grsec_enable_chroot_chdir = 1;
69591+#endif
69592+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
69593+ grsec_enable_chroot_chmod = 1;
69594+#endif
69595+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
69596+ grsec_enable_chroot_mknod = 1;
69597+#endif
69598+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
69599+ grsec_enable_chroot_nice = 1;
69600+#endif
69601+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
69602+ grsec_enable_chroot_execlog = 1;
69603+#endif
69604+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
69605+ grsec_enable_chroot_caps = 1;
69606+#endif
69607+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
69608+ grsec_enable_chroot_sysctl = 1;
69609+#endif
69610+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
69611+ grsec_enable_symlinkown = 1;
69612+ grsec_symlinkown_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SYMLINKOWN_GID);
69613+#endif
69614+#ifdef CONFIG_GRKERNSEC_TPE
69615+ grsec_enable_tpe = 1;
69616+ grsec_tpe_gid = KGIDT_INIT(CONFIG_GRKERNSEC_TPE_GID);
69617+#ifdef CONFIG_GRKERNSEC_TPE_ALL
69618+ grsec_enable_tpe_all = 1;
69619+#endif
69620+#endif
69621+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
69622+ grsec_enable_socket_all = 1;
69623+ grsec_socket_all_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_ALL_GID);
69624+#endif
69625+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
69626+ grsec_enable_socket_client = 1;
69627+ grsec_socket_client_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_CLIENT_GID);
69628+#endif
69629+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
69630+ grsec_enable_socket_server = 1;
69631+ grsec_socket_server_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_SERVER_GID);
69632+#endif
69633+#endif
69634+
69635+ return;
69636+}
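
grsecurity_init() above wires every CONFIG_GRKERNSEC_* option into a runtime integer, and when the sysctl interface is absent (or forced on at build time) it also sets grsec_lock so the compiled-in defaults cannot be changed later. The shape of that pattern, reduced to a userspace sketch with illustrative macro names:

#include <stdio.h>

static int feature_enabled;
static int tunables_locked;

#define FEATURE_DEFAULT_ON 1        /* stands in for a CONFIG_* symbol */
/* FEATURE_HAS_SYSCTL deliberately left undefined: no runtime tunables */

static void feature_init(void)
{
#if FEATURE_DEFAULT_ON
        feature_enabled = 1;        /* compiled-in default */
#endif
#ifndef FEATURE_HAS_SYSCTL
        tunables_locked = 1;        /* nothing can flip the defaults later */
#endif
}

int main(void)
{
        feature_init();
        printf("enabled=%d locked=%d\n", feature_enabled, tunables_locked);
        return 0;
}
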
69637diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
69638new file mode 100644
69639index 0000000..5e05e20
69640--- /dev/null
69641+++ b/grsecurity/grsec_link.c
69642@@ -0,0 +1,58 @@
69643+#include <linux/kernel.h>
69644+#include <linux/sched.h>
69645+#include <linux/fs.h>
69646+#include <linux/file.h>
69647+#include <linux/grinternal.h>
69648+
69649+int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
69650+{
69651+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
69652+ const struct inode *link_inode = link->dentry->d_inode;
69653+
69654+ if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
69655+ /* ignore root-owned links, e.g. /proc/self */
69656+ gr_is_global_nonroot(link_inode->i_uid) && target &&
69657+ !uid_eq(link_inode->i_uid, target->i_uid)) {
69658+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
69659+ return 1;
69660+ }
69661+#endif
69662+ return 0;
69663+}
69664+
69665+int
69666+gr_handle_follow_link(const struct inode *parent,
69667+ const struct inode *inode,
69668+ const struct dentry *dentry, const struct vfsmount *mnt)
69669+{
69670+#ifdef CONFIG_GRKERNSEC_LINK
69671+ const struct cred *cred = current_cred();
69672+
69673+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
69674+ (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) &&
69675+ (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) {
69676+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
69677+ return -EACCES;
69678+ }
69679+#endif
69680+ return 0;
69681+}
69682+
69683+int
69684+gr_handle_hardlink(const struct dentry *dentry,
69685+ const struct vfsmount *mnt,
69686+ struct inode *inode, const int mode, const struct filename *to)
69687+{
69688+#ifdef CONFIG_GRKERNSEC_LINK
69689+ const struct cred *cred = current_cred();
69690+
69691+ if (grsec_enable_link && !uid_eq(cred->fsuid, inode->i_uid) &&
69692+ (!S_ISREG(mode) || is_privileged_binary(dentry) ||
69693+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
69694+ !capable(CAP_FOWNER) && gr_is_global_nonroot(cred->uid)) {
69695+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to->name);
69696+ return -EPERM;
69697+ }
69698+#endif
69699+ return 0;
69700+}
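
gr_handle_follow_link() above refuses to follow a symlink that lives in a sticky, world-writable directory unless the link's owner matches the directory owner or the follower's fsuid; this is essentially the policy mainline Linux later adopted as fs.protected_symlinks. A self-contained model of the predicate, using synthetic struct stat values so the example is deterministic:

#include <stdbool.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>

static bool follow_link_denied(const struct stat *dir, const struct stat *lnk,
                               uid_t fsuid)
{
        return S_ISLNK(lnk->st_mode) &&
               (dir->st_mode & S_ISVTX) &&      /* sticky parent */
               (dir->st_mode & S_IWOTH) &&      /* world-writable parent */
               dir->st_uid != lnk->st_uid &&    /* link owner != dir owner */
               fsuid != lnk->st_uid;            /* link owner != follower */
}

int main(void)
{
        /* a /tmp-like directory (root-owned, mode 1777) holding a link
         * planted by uid 1000, followed by root: the classic attack shape */
        struct stat dir = { .st_mode = S_IFDIR | 01777, .st_uid = 0 };
        struct stat lnk = { .st_mode = S_IFLNK | 0777,  .st_uid = 1000 };

        printf("denied=%d\n", follow_link_denied(&dir, &lnk, 0));
        return 0;
}
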
69701diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
69702new file mode 100644
69703index 0000000..dbe0a6b
69704--- /dev/null
69705+++ b/grsecurity/grsec_log.c
69706@@ -0,0 +1,341 @@
69707+#include <linux/kernel.h>
69708+#include <linux/sched.h>
69709+#include <linux/file.h>
69710+#include <linux/tty.h>
69711+#include <linux/fs.h>
69712+#include <linux/mm.h>
69713+#include <linux/grinternal.h>
69714+
69715+#ifdef CONFIG_TREE_PREEMPT_RCU
69716+#define DISABLE_PREEMPT() preempt_disable()
69717+#define ENABLE_PREEMPT() preempt_enable()
69718+#else
69719+#define DISABLE_PREEMPT()
69720+#define ENABLE_PREEMPT()
69721+#endif
69722+
69723+#define BEGIN_LOCKS(x) \
69724+ DISABLE_PREEMPT(); \
69725+ rcu_read_lock(); \
69726+ read_lock(&tasklist_lock); \
69727+ read_lock(&grsec_exec_file_lock); \
69728+ if (x != GR_DO_AUDIT) \
69729+ spin_lock(&grsec_alert_lock); \
69730+ else \
69731+ spin_lock(&grsec_audit_lock)
69732+
69733+#define END_LOCKS(x) \
69734+ if (x != GR_DO_AUDIT) \
69735+ spin_unlock(&grsec_alert_lock); \
69736+ else \
69737+ spin_unlock(&grsec_audit_lock); \
69738+ read_unlock(&grsec_exec_file_lock); \
69739+ read_unlock(&tasklist_lock); \
69740+ rcu_read_unlock(); \
69741+ ENABLE_PREEMPT(); \
69742+ if (x == GR_DONT_AUDIT) \
69743+ gr_handle_alertkill(current)
69744+
69745+enum {
69746+ FLOODING,
69747+ NO_FLOODING
69748+};
69749+
69750+extern char *gr_alert_log_fmt;
69751+extern char *gr_audit_log_fmt;
69752+extern char *gr_alert_log_buf;
69753+extern char *gr_audit_log_buf;
69754+
69755+static int gr_log_start(int audit)
69756+{
69757+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
69758+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
69759+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
69760+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
69761+ unsigned long curr_secs = get_seconds();
69762+
69763+ if (audit == GR_DO_AUDIT)
69764+ goto set_fmt;
69765+
69766+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
69767+ grsec_alert_wtime = curr_secs;
69768+ grsec_alert_fyet = 0;
69769+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
69770+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
69771+ grsec_alert_fyet++;
69772+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
69773+ grsec_alert_wtime = curr_secs;
69774+ grsec_alert_fyet++;
69775+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
69776+ return FLOODING;
69777+ }
69778+ else return FLOODING;
69779+
69780+set_fmt:
69781+#endif
69782+ memset(buf, 0, PAGE_SIZE);
69783+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
69784+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
69785+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
69786+ } else if (current->signal->curr_ip) {
69787+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
69788+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
69789+ } else if (gr_acl_is_enabled()) {
69790+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
69791+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
69792+ } else {
69793+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
69794+ strcpy(buf, fmt);
69795+ }
69796+
69797+ return NO_FLOODING;
69798+}
69799+
69800+static void gr_log_middle(int audit, const char *msg, va_list ap)
69801+ __attribute__ ((format (printf, 2, 0)));
69802+
69803+static void gr_log_middle(int audit, const char *msg, va_list ap)
69804+{
69805+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
69806+ unsigned int len = strlen(buf);
69807+
69808+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
69809+
69810+ return;
69811+}
69812+
69813+static void gr_log_middle_varargs(int audit, const char *msg, ...)
69814+ __attribute__ ((format (printf, 2, 3)));
69815+
69816+static void gr_log_middle_varargs(int audit, const char *msg, ...)
69817+{
69818+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
69819+ unsigned int len = strlen(buf);
69820+ va_list ap;
69821+
69822+ va_start(ap, msg);
69823+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
69824+ va_end(ap);
69825+
69826+ return;
69827+}
69828+
69829+static void gr_log_end(int audit, int append_default)
69830+{
69831+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
69832+ if (append_default) {
69833+ struct task_struct *task = current;
69834+ struct task_struct *parent = task->real_parent;
69835+ const struct cred *cred = __task_cred(task);
69836+ const struct cred *pcred = __task_cred(parent);
69837+ unsigned int len = strlen(buf);
69838+
69839+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
69840+ }
69841+
69842+ printk("%s\n", buf);
69843+
69844+ return;
69845+}
69846+
69847+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
69848+{
69849+ int logtype;
69850+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
69851+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
69852+ void *voidptr = NULL;
69853+ int num1 = 0, num2 = 0;
69854+ unsigned long ulong1 = 0, ulong2 = 0;
69855+ struct dentry *dentry = NULL;
69856+ struct vfsmount *mnt = NULL;
69857+ struct file *file = NULL;
69858+ struct task_struct *task = NULL;
69859+ struct vm_area_struct *vma = NULL;
69860+ const struct cred *cred, *pcred;
69861+ va_list ap;
69862+
69863+ BEGIN_LOCKS(audit);
69864+ logtype = gr_log_start(audit);
69865+ if (logtype == FLOODING) {
69866+ END_LOCKS(audit);
69867+ return;
69868+ }
69869+ va_start(ap, argtypes);
69870+ switch (argtypes) {
69871+ case GR_TTYSNIFF:
69872+ task = va_arg(ap, struct task_struct *);
69873+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task_pid_nr(task), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent));
69874+ break;
69875+ case GR_SYSCTL_HIDDEN:
69876+ str1 = va_arg(ap, char *);
69877+ gr_log_middle_varargs(audit, msg, result, str1);
69878+ break;
69879+ case GR_RBAC:
69880+ dentry = va_arg(ap, struct dentry *);
69881+ mnt = va_arg(ap, struct vfsmount *);
69882+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
69883+ break;
69884+ case GR_RBAC_STR:
69885+ dentry = va_arg(ap, struct dentry *);
69886+ mnt = va_arg(ap, struct vfsmount *);
69887+ str1 = va_arg(ap, char *);
69888+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
69889+ break;
69890+ case GR_STR_RBAC:
69891+ str1 = va_arg(ap, char *);
69892+ dentry = va_arg(ap, struct dentry *);
69893+ mnt = va_arg(ap, struct vfsmount *);
69894+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
69895+ break;
69896+ case GR_RBAC_MODE2:
69897+ dentry = va_arg(ap, struct dentry *);
69898+ mnt = va_arg(ap, struct vfsmount *);
69899+ str1 = va_arg(ap, char *);
69900+ str2 = va_arg(ap, char *);
69901+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
69902+ break;
69903+ case GR_RBAC_MODE3:
69904+ dentry = va_arg(ap, struct dentry *);
69905+ mnt = va_arg(ap, struct vfsmount *);
69906+ str1 = va_arg(ap, char *);
69907+ str2 = va_arg(ap, char *);
69908+ str3 = va_arg(ap, char *);
69909+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
69910+ break;
69911+ case GR_FILENAME:
69912+ dentry = va_arg(ap, struct dentry *);
69913+ mnt = va_arg(ap, struct vfsmount *);
69914+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
69915+ break;
69916+ case GR_STR_FILENAME:
69917+ str1 = va_arg(ap, char *);
69918+ dentry = va_arg(ap, struct dentry *);
69919+ mnt = va_arg(ap, struct vfsmount *);
69920+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
69921+ break;
69922+ case GR_FILENAME_STR:
69923+ dentry = va_arg(ap, struct dentry *);
69924+ mnt = va_arg(ap, struct vfsmount *);
69925+ str1 = va_arg(ap, char *);
69926+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
69927+ break;
69928+ case GR_FILENAME_TWO_INT:
69929+ dentry = va_arg(ap, struct dentry *);
69930+ mnt = va_arg(ap, struct vfsmount *);
69931+ num1 = va_arg(ap, int);
69932+ num2 = va_arg(ap, int);
69933+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
69934+ break;
69935+ case GR_FILENAME_TWO_INT_STR:
69936+ dentry = va_arg(ap, struct dentry *);
69937+ mnt = va_arg(ap, struct vfsmount *);
69938+ num1 = va_arg(ap, int);
69939+ num2 = va_arg(ap, int);
69940+ str1 = va_arg(ap, char *);
69941+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
69942+ break;
69943+ case GR_TEXTREL:
69944+ file = va_arg(ap, struct file *);
69945+ ulong1 = va_arg(ap, unsigned long);
69946+ ulong2 = va_arg(ap, unsigned long);
69947+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
69948+ break;
69949+ case GR_PTRACE:
69950+ task = va_arg(ap, struct task_struct *);
69951+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task_pid_nr(task));
69952+ break;
69953+ case GR_RESOURCE:
69954+ task = va_arg(ap, struct task_struct *);
69955+ cred = __task_cred(task);
69956+ pcred = __task_cred(task->real_parent);
69957+ ulong1 = va_arg(ap, unsigned long);
69958+ str1 = va_arg(ap, char *);
69959+ ulong2 = va_arg(ap, unsigned long);
69960+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
69961+ break;
69962+ case GR_CAP:
69963+ task = va_arg(ap, struct task_struct *);
69964+ cred = __task_cred(task);
69965+ pcred = __task_cred(task->real_parent);
69966+ str1 = va_arg(ap, char *);
69967+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
69968+ break;
69969+ case GR_SIG:
69970+ str1 = va_arg(ap, char *);
69971+ voidptr = va_arg(ap, void *);
69972+ gr_log_middle_varargs(audit, msg, str1, voidptr);
69973+ break;
69974+ case GR_SIG2:
69975+ task = va_arg(ap, struct task_struct *);
69976+ cred = __task_cred(task);
69977+ pcred = __task_cred(task->real_parent);
69978+ num1 = va_arg(ap, int);
69979+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
69980+ break;
69981+ case GR_CRASH1:
69982+ task = va_arg(ap, struct task_struct *);
69983+ cred = __task_cred(task);
69984+ pcred = __task_cred(task->real_parent);
69985+ ulong1 = va_arg(ap, unsigned long);
69986+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), GR_GLOBAL_UID(cred->uid), ulong1);
69987+ break;
69988+ case GR_CRASH2:
69989+ task = va_arg(ap, struct task_struct *);
69990+ cred = __task_cred(task);
69991+ pcred = __task_cred(task->real_parent);
69992+ ulong1 = va_arg(ap, unsigned long);
69993+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), ulong1);
69994+ break;
69995+ case GR_RWXMAP:
69996+ file = va_arg(ap, struct file *);
69997+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
69998+ break;
69999+ case GR_RWXMAPVMA:
70000+ vma = va_arg(ap, struct vm_area_struct *);
70001+ if (vma->vm_file)
70002+ str1 = gr_to_filename(vma->vm_file->f_path.dentry, vma->vm_file->f_path.mnt);
70003+ else if (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
70004+ str1 = "<stack>";
70005+ else if (vma->vm_start <= current->mm->brk &&
70006+ vma->vm_end >= current->mm->start_brk)
70007+ str1 = "<heap>";
70008+ else
70009+ str1 = "<anonymous mapping>";
70010+ gr_log_middle_varargs(audit, msg, str1);
70011+ break;
70012+ case GR_PSACCT:
70013+ {
70014+ unsigned int wday, cday;
70015+ __u8 whr, chr;
70016+ __u8 wmin, cmin;
70017+ __u8 wsec, csec;
70018+ char cur_tty[64] = { 0 };
70019+ char parent_tty[64] = { 0 };
70020+
70021+ task = va_arg(ap, struct task_struct *);
70022+ wday = va_arg(ap, unsigned int);
70023+ cday = va_arg(ap, unsigned int);
70024+ whr = va_arg(ap, int);
70025+ chr = va_arg(ap, int);
70026+ wmin = va_arg(ap, int);
70027+ cmin = va_arg(ap, int);
70028+ wsec = va_arg(ap, int);
70029+ csec = va_arg(ap, int);
70030+ ulong1 = va_arg(ap, unsigned long);
70031+ cred = __task_cred(task);
70032+ pcred = __task_cred(task->real_parent);
70033+
70034+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
70035+ }
70036+ break;
70037+ default:
70038+ gr_log_middle(audit, msg, ap);
70039+ }
70040+ va_end(ap);
70041+ // these don't need DEFAULTSECARGS printed on the end
70042+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
70043+ gr_log_end(audit, 0);
70044+ else
70045+ gr_log_end(audit, 1);
70046+ END_LOCKS(audit);
70047+}
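
gr_log_start() above implements a burst-based rate limiter: within each CONFIG_GRKERNSEC_FLOODTIME window at most CONFIG_GRKERNSEC_FLOODBURST alerts pass, the first suppressed alert prints a one-shot "logging disabled" notice, and audit-class messages bypass the limiter entirely. The same state machine isolated into a userspace sketch, with time(2) standing in for get_seconds() and illustrative WINDOW/BURST values:

#include <stdio.h>
#include <time.h>

#define WINDOW 10   /* stands in for CONFIG_GRKERNSEC_FLOODTIME  */
#define BURST   6   /* stands in for CONFIG_GRKERNSEC_FLOODBURST */

static unsigned long wtime;  /* start of the current window */
static unsigned long fyet;   /* alerts seen in this window */

static int flooding(void)
{
        unsigned long now = (unsigned long)time(NULL);

        if (!wtime || now > wtime + WINDOW) {    /* window expired: reset */
                wtime = now;
                fyet = 0;
        } else if (fyet < BURST) {               /* still under the burst */
                fyet++;
        } else if (fyet == BURST) {              /* first overflow: one-shot notice */
                wtime = now;
                fyet++;
                fprintf(stderr, "more alerts, logging disabled for %d seconds\n",
                        WINDOW);
                return 1;
        } else {
                return 1;                        /* already suppressed */
        }
        return 0;
}

int main(void)
{
        for (int i = 0; i < 10; i++)
                printf("alert %d: %s\n", i, flooding() ? "dropped" : "logged");
        return 0;
}
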
70048diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
70049new file mode 100644
70050index 0000000..f536303
70051--- /dev/null
70052+++ b/grsecurity/grsec_mem.c
70053@@ -0,0 +1,40 @@
70054+#include <linux/kernel.h>
70055+#include <linux/sched.h>
70056+#include <linux/mm.h>
70057+#include <linux/mman.h>
70058+#include <linux/grinternal.h>
70059+
70060+void
70061+gr_handle_ioperm(void)
70062+{
70063+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
70064+ return;
70065+}
70066+
70067+void
70068+gr_handle_iopl(void)
70069+{
70070+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
70071+ return;
70072+}
70073+
70074+void
70075+gr_handle_mem_readwrite(u64 from, u64 to)
70076+{
70077+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
70078+ return;
70079+}
70080+
70081+void
70082+gr_handle_vm86(void)
70083+{
70084+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
70085+ return;
70086+}
70087+
70088+void
70089+gr_log_badprocpid(const char *entry)
70090+{
70091+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
70092+ return;
70093+}
70094diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
70095new file mode 100644
70096index 0000000..2131422
70097--- /dev/null
70098+++ b/grsecurity/grsec_mount.c
70099@@ -0,0 +1,62 @@
70100+#include <linux/kernel.h>
70101+#include <linux/sched.h>
70102+#include <linux/mount.h>
70103+#include <linux/grsecurity.h>
70104+#include <linux/grinternal.h>
70105+
70106+void
70107+gr_log_remount(const char *devname, const int retval)
70108+{
70109+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
70110+ if (grsec_enable_mount && (retval >= 0))
70111+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
70112+#endif
70113+ return;
70114+}
70115+
70116+void
70117+gr_log_unmount(const char *devname, const int retval)
70118+{
70119+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
70120+ if (grsec_enable_mount && (retval >= 0))
70121+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
70122+#endif
70123+ return;
70124+}
70125+
70126+void
70127+gr_log_mount(const char *from, const char *to, const int retval)
70128+{
70129+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
70130+ if (grsec_enable_mount && (retval >= 0))
70131+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
70132+#endif
70133+ return;
70134+}
70135+
70136+int
70137+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
70138+{
70139+#ifdef CONFIG_GRKERNSEC_ROFS
70140+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
70141+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
70142+ return -EPERM;
70143+ } else
70144+ return 0;
70145+#endif
70146+ return 0;
70147+}
70148+
70149+int
70150+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
70151+{
70152+#ifdef CONFIG_GRKERNSEC_ROFS
70153+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
70154+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
70155+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
70156+ return -EPERM;
70157+ } else
70158+ return 0;
70159+#endif
70160+ return 0;
70161+}
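
The two gr_handle_rofs_* hooks above enforce the runtime read-only-filesystem mode: once the rofs toggle is set, new mounts must be read-only and opening block device nodes for write is refused. A compact sketch of both checks, with MNT_READONLY/MAY_WRITE modeled as plain flags:

#include <errno.h>
#include <stdio.h>
#include <sys/stat.h>

#define MNT_READONLY 0x1
#define MAY_WRITE    0x2

static int rofs_enabled = 1;

static int rofs_mount_check(int mnt_flags)
{
        if (rofs_enabled && !(mnt_flags & MNT_READONLY))
                return -EPERM;    /* only read-only mounts while locked down */
        return 0;
}

static int rofs_blockwrite_check(mode_t mode, int acc_mode)
{
        if (rofs_enabled && (acc_mode & MAY_WRITE) && S_ISBLK(mode))
                return -EPERM;    /* no writing to block devices either */
        return 0;
}

int main(void)
{
        printf("rw mount: %d, blockdev write: %d\n",
               rofs_mount_check(0), rofs_blockwrite_check(S_IFBLK, MAY_WRITE));
        return 0;
}
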
70162diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
70163new file mode 100644
70164index 0000000..6ee9d50
70165--- /dev/null
70166+++ b/grsecurity/grsec_pax.c
70167@@ -0,0 +1,45 @@
70168+#include <linux/kernel.h>
70169+#include <linux/sched.h>
70170+#include <linux/mm.h>
70171+#include <linux/file.h>
70172+#include <linux/grinternal.h>
70173+#include <linux/grsecurity.h>
70174+
70175+void
70176+gr_log_textrel(struct vm_area_struct * vma)
70177+{
70178+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
70179+ if (grsec_enable_log_rwxmaps)
70180+ gr_log_textrel_ulong_ulong(GR_DONT_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
70181+#endif
70182+ return;
70183+}
70184+
70185+void gr_log_ptgnustack(struct file *file)
70186+{
70187+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
70188+ if (grsec_enable_log_rwxmaps)
70189+ gr_log_rwxmap(GR_DONT_AUDIT, GR_PTGNUSTACK_MSG, file);
70190+#endif
70191+ return;
70192+}
70193+
70194+void
70195+gr_log_rwxmmap(struct file *file)
70196+{
70197+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
70198+ if (grsec_enable_log_rwxmaps)
70199+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
70200+#endif
70201+ return;
70202+}
70203+
70204+void
70205+gr_log_rwxmprotect(struct vm_area_struct *vma)
70206+{
70207+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
70208+ if (grsec_enable_log_rwxmaps)
70209+ gr_log_rwxmap_vma(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, vma);
70210+#endif
70211+ return;
70212+}
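
The RWX logging helpers above hand either a file or a whole VMA to grsec_log.c, whose GR_RWXMAPVMA case labels the mapping for the log line: file-backed mappings print their path, stack and heap are recognized from growth flags and the brk range, and everything else is reported as an anonymous mapping. The classification in isolation, with plain structs standing in for vm_area_struct/mm_struct:

#include <stdio.h>

#define VM_GROWSDOWN 0x1
#define VM_GROWSUP   0x2

struct vma { const char *file; unsigned long flags, start, end; };
struct mm  { unsigned long start_brk, brk; };

static const char *classify(const struct vma *v, const struct mm *m)
{
        if (v->file)
                return v->file;                  /* file-backed: log the path */
        if (v->flags & (VM_GROWSDOWN | VM_GROWSUP))
                return "<stack>";
        if (v->start <= m->brk && v->end >= m->start_brk)
                return "<heap>";                 /* overlaps the brk range */
        return "<anonymous mapping>";
}

int main(void)
{
        struct mm m = { .start_brk = 0x1000, .brk = 0x2000 };
        struct vma heap = { .file = NULL, .flags = 0,
                            .start = 0x1000, .end = 0x2000 };

        printf("%s\n", classify(&heap, &m));
        return 0;
}
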
70213diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
70214new file mode 100644
70215index 0000000..f7f29aa
70216--- /dev/null
70217+++ b/grsecurity/grsec_ptrace.c
70218@@ -0,0 +1,30 @@
70219+#include <linux/kernel.h>
70220+#include <linux/sched.h>
70221+#include <linux/grinternal.h>
70222+#include <linux/security.h>
70223+
70224+void
70225+gr_audit_ptrace(struct task_struct *task)
70226+{
70227+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
70228+ if (grsec_enable_audit_ptrace)
70229+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
70230+#endif
70231+ return;
70232+}
70233+
70234+int
70235+gr_ptrace_readexec(struct file *file, int unsafe_flags)
70236+{
70237+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
70238+ const struct dentry *dentry = file->f_path.dentry;
70239+ const struct vfsmount *mnt = file->f_path.mnt;
70240+
70241+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
70242+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
70243+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
70244+ return -EACCES;
70245+ }
70246+#endif
70247+ return 0;
70248+}
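
gr_ptrace_readexec() above closes a read-versus-exec gap: a process launched with an attached tracer (LSM_UNSAFE_PTRACE) may not exec a binary its credentials cannot read, since the tracer could otherwise dump an execute-only binary out of the traced image. A rough userspace analogue, using access(2) in place of inode_permission() and the RBAC open check; the target path in main() is just an example:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

static int readexec_check(const char *path, int traced)
{
        if (traced && access(path, R_OK) != 0)
                return -EACCES;  /* unreadable binary + active tracer: deny */
        return 0;
}

int main(int argc, char **argv)
{
        const char *target = argc > 1 ? argv[1] : "/usr/bin/passwd";

        printf("%s: %s\n", target,
               readexec_check(target, 1) ? "denied under ptrace" : "ok");
        return 0;
}
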
70249diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
70250new file mode 100644
70251index 0000000..4e29cc7
70252--- /dev/null
70253+++ b/grsecurity/grsec_sig.c
70254@@ -0,0 +1,246 @@
70255+#include <linux/kernel.h>
70256+#include <linux/sched.h>
70257+#include <linux/fs.h>
70258+#include <linux/delay.h>
70259+#include <linux/grsecurity.h>
70260+#include <linux/grinternal.h>
70261+#include <linux/hardirq.h>
70262+
70263+char *signames[] = {
70264+ [SIGSEGV] = "Segmentation fault",
70265+ [SIGILL] = "Illegal instruction",
70266+ [SIGABRT] = "Abort",
70267+ [SIGBUS] = "Invalid alignment/Bus error"
70268+};
70269+
70270+void
70271+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
70272+{
70273+#ifdef CONFIG_GRKERNSEC_SIGNAL
70274+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
70275+ (sig == SIGABRT) || (sig == SIGBUS))) {
70276+ if (task_pid_nr(t) == task_pid_nr(current)) {
70277+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
70278+ } else {
70279+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
70280+ }
70281+ }
70282+#endif
70283+ return;
70284+}
70285+
70286+int
70287+gr_handle_signal(const struct task_struct *p, const int sig)
70288+{
70289+#ifdef CONFIG_GRKERNSEC
70290+ /* ignore the 0 signal for protected task checks */
70291+ if (task_pid_nr(current) > 1 && sig && gr_check_protected_task(p)) {
70292+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
70293+ return -EPERM;
70294+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
70295+ return -EPERM;
70296+ }
70297+#endif
70298+ return 0;
70299+}
70300+
70301+#ifdef CONFIG_GRKERNSEC
70302+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
70303+
70304+int gr_fake_force_sig(int sig, struct task_struct *t)
70305+{
70306+ unsigned long int flags;
70307+ int ret, blocked, ignored;
70308+ struct k_sigaction *action;
70309+
70310+ spin_lock_irqsave(&t->sighand->siglock, flags);
70311+ action = &t->sighand->action[sig-1];
70312+ ignored = action->sa.sa_handler == SIG_IGN;
70313+ blocked = sigismember(&t->blocked, sig);
70314+ if (blocked || ignored) {
70315+ action->sa.sa_handler = SIG_DFL;
70316+ if (blocked) {
70317+ sigdelset(&t->blocked, sig);
70318+ recalc_sigpending_and_wake(t);
70319+ }
70320+ }
70321+ if (action->sa.sa_handler == SIG_DFL)
70322+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
70323+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
70324+
70325+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
70326+
70327+ return ret;
70328+}
70329+#endif
70330+
70331+#ifdef CONFIG_GRKERNSEC_BRUTE
70332+#define GR_USER_BAN_TIME (15 * 60)
70333+#define GR_DAEMON_BRUTE_TIME (30 * 60)
70334+
70335+static int __get_dumpable(unsigned long mm_flags)
70336+{
70337+ int ret;
70338+
70339+ ret = mm_flags & MMF_DUMPABLE_MASK;
70340+ return (ret >= 2) ? 2 : ret;
70341+}
70342+#endif
70343+
70344+void gr_handle_brute_attach(unsigned long mm_flags)
70345+{
70346+#ifdef CONFIG_GRKERNSEC_BRUTE
70347+ struct task_struct *p = current;
70348+ kuid_t uid = GLOBAL_ROOT_UID;
70349+ int daemon = 0;
70350+
70351+ if (!grsec_enable_brute)
70352+ return;
70353+
70354+ rcu_read_lock();
70355+ read_lock(&tasklist_lock);
70356+ read_lock(&grsec_exec_file_lock);
70357+ if (p->real_parent && gr_is_same_file(p->real_parent->exec_file, p->exec_file)) {
70358+ p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME;
70359+ p->real_parent->brute = 1;
70360+ daemon = 1;
70361+ } else {
70362+ const struct cred *cred = __task_cred(p), *cred2;
70363+ struct task_struct *tsk, *tsk2;
70364+
70365+ if (!__get_dumpable(mm_flags) && gr_is_global_nonroot(cred->uid)) {
70366+ struct user_struct *user;
70367+
70368+ uid = cred->uid;
70369+
70370+ /* this is put upon execution past expiration */
70371+ user = find_user(uid);
70372+ if (user == NULL)
70373+ goto unlock;
70374+ user->suid_banned = 1;
70375+ user->suid_ban_expires = get_seconds() + GR_USER_BAN_TIME;
70376+ if (user->suid_ban_expires == ~0UL)
70377+ user->suid_ban_expires--;
70378+
70379+ /* only kill other threads of the same binary, from the same user */
70380+ do_each_thread(tsk2, tsk) {
70381+ cred2 = __task_cred(tsk);
70382+ if (tsk != p && uid_eq(cred2->uid, uid) && gr_is_same_file(tsk->exec_file, p->exec_file))
70383+ gr_fake_force_sig(SIGKILL, tsk);
70384+ } while_each_thread(tsk2, tsk);
70385+ }
70386+ }
70387+unlock:
70388+ read_unlock(&grsec_exec_file_lock);
70389+ read_unlock(&tasklist_lock);
70390+ rcu_read_unlock();
70391+
70392+ if (gr_is_global_nonroot(uid))
70393+ gr_log_fs_int2(GR_DONT_AUDIT, GR_BRUTE_SUID_MSG, p->exec_file->f_path.dentry, p->exec_file->f_path.mnt, GR_GLOBAL_UID(uid), GR_USER_BAN_TIME / 60);
70394+ else if (daemon)
70395+ gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG);
70396+
70397+#endif
70398+ return;
70399+}
70400+
70401+void gr_handle_brute_check(void)
70402+{
70403+#ifdef CONFIG_GRKERNSEC_BRUTE
70404+ struct task_struct *p = current;
70405+
70406+ if (unlikely(p->brute)) {
70407+ if (!grsec_enable_brute)
70408+ p->brute = 0;
70409+ else if (time_before(get_seconds(), p->brute_expires))
70410+ msleep(30 * 1000);
70411+ }
70412+#endif
70413+ return;
70414+}
70415+
70416+void gr_handle_kernel_exploit(void)
70417+{
70418+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
70419+ const struct cred *cred;
70420+ struct task_struct *tsk, *tsk2;
70421+ struct user_struct *user;
70422+ kuid_t uid;
70423+
70424+ if (in_irq() || in_serving_softirq() || in_nmi())
70425+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
70426+
70427+ uid = current_uid();
70428+
70429+ if (gr_is_global_root(uid))
70430+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
70431+ else {
70432+ /* kill all the processes of this user, hold a reference
70433+ to their creds struct, and prevent them from creating
70434+ another process until system reset
70435+ */
70436+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n",
70437+ GR_GLOBAL_UID(uid));
70438+ /* we intentionally leak this ref */
70439+ user = get_uid(current->cred->user);
70440+ if (user)
70441+ user->kernel_banned = 1;
70442+
70443+ /* kill all processes of this user */
70444+ read_lock(&tasklist_lock);
70445+ do_each_thread(tsk2, tsk) {
70446+ cred = __task_cred(tsk);
70447+ if (uid_eq(cred->uid, uid))
70448+ gr_fake_force_sig(SIGKILL, tsk);
70449+ } while_each_thread(tsk2, tsk);
70450+ read_unlock(&tasklist_lock);
70451+ }
70452+#endif
70453+}
70454+
70455+#ifdef CONFIG_GRKERNSEC_BRUTE
70456+static bool suid_ban_expired(struct user_struct *user)
70457+{
70458+ if (user->suid_ban_expires != ~0UL && time_after_eq(get_seconds(), user->suid_ban_expires)) {
70459+ user->suid_banned = 0;
70460+ user->suid_ban_expires = 0;
70461+ free_uid(user);
70462+ return true;
70463+ }
70464+
70465+ return false;
70466+}
70467+#endif
70468+
70469+int gr_process_kernel_exec_ban(void)
70470+{
70471+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
70472+ if (unlikely(current->cred->user->kernel_banned))
70473+ return -EPERM;
70474+#endif
70475+ return 0;
70476+}
70477+
70478+int gr_process_kernel_setuid_ban(struct user_struct *user)
70479+{
70480+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
70481+ if (unlikely(user->kernel_banned))
70482+ gr_fake_force_sig(SIGKILL, current);
70483+#endif
70484+ return 0;
70485+}
70486+
70487+int gr_process_suid_exec_ban(const struct linux_binprm *bprm)
70488+{
70489+#ifdef CONFIG_GRKERNSEC_BRUTE
70490+ struct user_struct *user = current->cred->user;
70491+ if (unlikely(user->suid_banned)) {
70492+ if (suid_ban_expired(user))
70493+ return 0;
70494+ /* disallow execution of suid binaries only */
70495+ else if (!uid_eq(bprm->cred->euid, current->cred->uid))
70496+ return -EPERM;
70497+ }
70498+#endif
70499+ return 0;
70500+}
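
gr_handle_brute_attach() above bans a user from suid execution for GR_USER_BAN_TIME seconds after a non-dumpable suid binary of theirs crashes, and suid_ban_expired() lifts the ban lazily on the next exec attempt. ~0UL is reserved to mean "banned until reboot", which is why the expiry is decremented if the clock arithmetic happens to land on it. The ban bookkeeping in isolation (the find_user()/free_uid() refcounting and the SIGKILL sweep are omitted):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define BAN_TIME (15 * 60)   /* same as GR_USER_BAN_TIME */

struct user {
        int banned;
        unsigned long ban_expires;
};

static void ban_user(struct user *u)
{
        u->banned = 1;
        u->ban_expires = (unsigned long)time(NULL) + BAN_TIME;
        if (u->ban_expires == ~0UL)   /* avoid the permanent-ban sentinel */
                u->ban_expires--;
}

static bool ban_expired(struct user *u)
{
        if (u->ban_expires != ~0UL &&
            (unsigned long)time(NULL) >= u->ban_expires) {
                u->banned = 0;
                u->ban_expires = 0;
                return true;
        }
        return false;
}

int main(void)
{
        struct user u = { 0 };

        ban_user(&u);
        printf("banned=%d expired=%d\n", u.banned, ban_expired(&u));
        return 0;
}
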
70501diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
70502new file mode 100644
70503index 0000000..4030d57
70504--- /dev/null
70505+++ b/grsecurity/grsec_sock.c
70506@@ -0,0 +1,244 @@
70507+#include <linux/kernel.h>
70508+#include <linux/module.h>
70509+#include <linux/sched.h>
70510+#include <linux/file.h>
70511+#include <linux/net.h>
70512+#include <linux/in.h>
70513+#include <linux/ip.h>
70514+#include <net/sock.h>
70515+#include <net/inet_sock.h>
70516+#include <linux/grsecurity.h>
70517+#include <linux/grinternal.h>
70518+#include <linux/gracl.h>
70519+
70520+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
70521+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
70522+
70523+EXPORT_SYMBOL(gr_search_udp_recvmsg);
70524+EXPORT_SYMBOL(gr_search_udp_sendmsg);
70525+
70526+#ifdef CONFIG_UNIX_MODULE
70527+EXPORT_SYMBOL(gr_acl_handle_unix);
70528+EXPORT_SYMBOL(gr_acl_handle_mknod);
70529+EXPORT_SYMBOL(gr_handle_chroot_unix);
70530+EXPORT_SYMBOL(gr_handle_create);
70531+#endif
70532+
70533+#ifdef CONFIG_GRKERNSEC
70534+#define gr_conn_table_size 32749
70535+struct conn_table_entry {
70536+ struct conn_table_entry *next;
70537+ struct signal_struct *sig;
70538+};
70539+
70540+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
70541+DEFINE_SPINLOCK(gr_conn_table_lock);
70542+
70543+extern const char * gr_socktype_to_name(unsigned char type);
70544+extern const char * gr_proto_to_name(unsigned char proto);
70545+extern const char * gr_sockfamily_to_name(unsigned char family);
70546+
70547+static __inline__ int
70548+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
70549+{
70550+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
70551+}
70552+
70553+static __inline__ int
70554+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
70555+ __u16 sport, __u16 dport)
70556+{
70557+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
70558+ sig->gr_sport == sport && sig->gr_dport == dport))
70559+ return 1;
70560+ else
70561+ return 0;
70562+}
70563+
70564+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
70565+{
70566+ struct conn_table_entry **match;
70567+ unsigned int index;
70568+
70569+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
70570+ sig->gr_sport, sig->gr_dport,
70571+ gr_conn_table_size);
70572+
70573+ newent->sig = sig;
70574+
70575+ match = &gr_conn_table[index];
70576+ newent->next = *match;
70577+ *match = newent;
70578+
70579+ return;
70580+}
70581+
70582+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
70583+{
70584+ struct conn_table_entry *match, *last = NULL;
70585+ unsigned int index;
70586+
70587+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
70588+ sig->gr_sport, sig->gr_dport,
70589+ gr_conn_table_size);
70590+
70591+ match = gr_conn_table[index];
70592+ while (match && !conn_match(match->sig,
70593+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
70594+ sig->gr_dport)) {
70595+ last = match;
70596+ match = match->next;
70597+ }
70598+
70599+ if (match) {
70600+ if (last)
70601+ last->next = match->next;
70602+ else
70603+ gr_conn_table[index] = NULL;
70604+ kfree(match);
70605+ }
70606+
70607+ return;
70608+}
70609+
70610+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
70611+ __u16 sport, __u16 dport)
70612+{
70613+ struct conn_table_entry *match;
70614+ unsigned int index;
70615+
70616+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
70617+
70618+ match = gr_conn_table[index];
70619+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
70620+ match = match->next;
70621+
70622+ if (match)
70623+ return match->sig;
70624+ else
70625+ return NULL;
70626+}
70627+
70628+#endif
70629+
70630+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
70631+{
70632+#ifdef CONFIG_GRKERNSEC
70633+ struct signal_struct *sig = task->signal;
70634+ struct conn_table_entry *newent;
70635+
70636+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
70637+ if (newent == NULL)
70638+ return;
70639+ /* no bh lock needed since we are called with bh disabled */
70640+ spin_lock(&gr_conn_table_lock);
70641+ gr_del_task_from_ip_table_nolock(sig);
70642+ sig->gr_saddr = inet->inet_rcv_saddr;
70643+ sig->gr_daddr = inet->inet_daddr;
70644+ sig->gr_sport = inet->inet_sport;
70645+ sig->gr_dport = inet->inet_dport;
70646+ gr_add_to_task_ip_table_nolock(sig, newent);
70647+ spin_unlock(&gr_conn_table_lock);
70648+#endif
70649+ return;
70650+}
70651+
70652+void gr_del_task_from_ip_table(struct task_struct *task)
70653+{
70654+#ifdef CONFIG_GRKERNSEC
70655+ spin_lock_bh(&gr_conn_table_lock);
70656+ gr_del_task_from_ip_table_nolock(task->signal);
70657+ spin_unlock_bh(&gr_conn_table_lock);
70658+#endif
70659+ return;
70660+}
70661+
70662+void
70663+gr_attach_curr_ip(const struct sock *sk)
70664+{
70665+#ifdef CONFIG_GRKERNSEC
70666+ struct signal_struct *p, *set;
70667+ const struct inet_sock *inet = inet_sk(sk);
70668+
70669+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
70670+ return;
70671+
70672+ set = current->signal;
70673+
70674+ spin_lock_bh(&gr_conn_table_lock);
70675+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
70676+ inet->inet_dport, inet->inet_sport);
70677+ if (unlikely(p != NULL)) {
70678+ set->curr_ip = p->curr_ip;
70679+ set->used_accept = 1;
70680+ gr_del_task_from_ip_table_nolock(p);
70681+ spin_unlock_bh(&gr_conn_table_lock);
70682+ return;
70683+ }
70684+ spin_unlock_bh(&gr_conn_table_lock);
70685+
70686+ set->curr_ip = inet->inet_daddr;
70687+ set->used_accept = 1;
70688+#endif
70689+ return;
70690+}
70691+
70692+int
70693+gr_handle_sock_all(const int family, const int type, const int protocol)
70694+{
70695+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
70696+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
70697+ (family != AF_UNIX)) {
70698+ if (family == AF_INET)
70699+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
70700+ else
70701+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
70702+ return -EACCES;
70703+ }
70704+#endif
70705+ return 0;
70706+}
70707+
70708+int
70709+gr_handle_sock_server(const struct sockaddr *sck)
70710+{
70711+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
70712+ if (grsec_enable_socket_server &&
70713+ in_group_p(grsec_socket_server_gid) &&
70714+ sck && (sck->sa_family != AF_UNIX) &&
70715+ (sck->sa_family != AF_LOCAL)) {
70716+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
70717+ return -EACCES;
70718+ }
70719+#endif
70720+ return 0;
70721+}
70722+
70723+int
70724+gr_handle_sock_server_other(const struct sock *sck)
70725+{
70726+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
70727+ if (grsec_enable_socket_server &&
70728+ in_group_p(grsec_socket_server_gid) &&
70729+ sck && (sck->sk_family != AF_UNIX) &&
70730+ (sck->sk_family != AF_LOCAL)) {
70731+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
70732+ return -EACCES;
70733+ }
70734+#endif
70735+ return 0;
70736+}
70737+
70738+int
70739+gr_handle_sock_client(const struct sockaddr *sck)
70740+{
70741+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
70742+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
70743+ sck && (sck->sa_family != AF_UNIX) &&
70744+ (sck->sa_family != AF_LOCAL)) {
70745+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
70746+ return -EACCES;
70747+ }
70748+#endif
70749+ return 0;
70750+}
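
The grsec_sock.c code above is a fixed-size hash table keyed by the TCP 4-tuple (saddr, daddr, sport, dport) with collisions chained through ->next: it lets an accepting task inherit the curr_ip of the connection's creator, while the gr_handle_sock_* helpers gate socket creation on supplementary group membership. A minimal user-space sketch of the same chained insert/lookup/unlink pattern (all names here are illustrative, not from the patch):

	#include <stdio.h>
	#include <stdlib.h>

	#define NBUCKETS 16

	struct entry {
		unsigned int key;	/* stands in for the hashed 4-tuple */
		struct entry *next;
	};

	static struct entry *table[NBUCKETS];

	static void insert(unsigned int key)
	{
		struct entry *e = malloc(sizeof(*e));

		if (!e)
			return;
		e->key = key;
		e->next = table[key % NBUCKETS];	/* push onto the chain head */
		table[key % NBUCKETS] = e;
	}

	static void unlink_key(unsigned int key)
	{
		struct entry *m = table[key % NBUCKETS], *last = NULL;

		while (m && m->key != key) {
			last = m;
			m = m->next;
		}
		if (m) {
			if (last)
				last->next = m->next;
			else
				table[key % NBUCKETS] = m->next;	/* keep the rest of the chain */
			free(m);
		}
	}

	int main(void)
	{
		insert(1); insert(17); unlink_key(17);	/* 1 and 17 collide in bucket 1 */
		printf("%u\n", table[1]->key);		/* 1 survives the unlink */
		return 0;
	}

The head branch is the subtle case: the bucket must be repointed at the survivor (m->next) rather than cleared, otherwise any entries chained behind the head are leaked.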
70751diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
70752new file mode 100644
70753index 0000000..301c665
70754--- /dev/null
70755+++ b/grsecurity/grsec_sysctl.c
70756@@ -0,0 +1,471 @@
70757+#include <linux/kernel.h>
70758+#include <linux/sched.h>
70759+#include <linux/sysctl.h>
70760+#include <linux/grsecurity.h>
70761+#include <linux/grinternal.h>
70762+
70763+int
70764+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
70765+{
70766+#ifdef CONFIG_GRKERNSEC_SYSCTL
70767+ if (dirname == NULL || name == NULL)
70768+ return 0;
70769+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
70770+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
70771+ return -EACCES;
70772+ }
70773+#endif
70774+ return 0;
70775+}
70776+
70777+#ifdef CONFIG_GRKERNSEC_ROFS
70778+static int __maybe_unused one = 1;
70779+#endif
70780+
70781+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
70782+struct ctl_table grsecurity_table[] = {
70783+#ifdef CONFIG_GRKERNSEC_SYSCTL
70784+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
70785+#ifdef CONFIG_GRKERNSEC_IO
70786+ {
70787+ .procname = "disable_priv_io",
70788+ .data = &grsec_disable_privio,
70789+ .maxlen = sizeof(int),
70790+ .mode = 0600,
70791+ .proc_handler = &proc_dointvec,
70792+ },
70793+#endif
70794+#endif
70795+#ifdef CONFIG_GRKERNSEC_LINK
70796+ {
70797+ .procname = "linking_restrictions",
70798+ .data = &grsec_enable_link,
70799+ .maxlen = sizeof(int),
70800+ .mode = 0600,
70801+ .proc_handler = &proc_dointvec,
70802+ },
70803+#endif
70804+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
70805+ {
70806+ .procname = "enforce_symlinksifowner",
70807+ .data = &grsec_enable_symlinkown,
70808+ .maxlen = sizeof(int),
70809+ .mode = 0600,
70810+ .proc_handler = &proc_dointvec,
70811+ },
70812+ {
70813+ .procname = "symlinkown_gid",
70814+ .data = &grsec_symlinkown_gid,
70815+ .maxlen = sizeof(int),
70816+ .mode = 0600,
70817+ .proc_handler = &proc_dointvec,
70818+ },
70819+#endif
70820+#ifdef CONFIG_GRKERNSEC_BRUTE
70821+ {
70822+ .procname = "deter_bruteforce",
70823+ .data = &grsec_enable_brute,
70824+ .maxlen = sizeof(int),
70825+ .mode = 0600,
70826+ .proc_handler = &proc_dointvec,
70827+ },
70828+#endif
70829+#ifdef CONFIG_GRKERNSEC_FIFO
70830+ {
70831+ .procname = "fifo_restrictions",
70832+ .data = &grsec_enable_fifo,
70833+ .maxlen = sizeof(int),
70834+ .mode = 0600,
70835+ .proc_handler = &proc_dointvec,
70836+ },
70837+#endif
70838+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
70839+ {
70840+ .procname = "ptrace_readexec",
70841+ .data = &grsec_enable_ptrace_readexec,
70842+ .maxlen = sizeof(int),
70843+ .mode = 0600,
70844+ .proc_handler = &proc_dointvec,
70845+ },
70846+#endif
70847+#ifdef CONFIG_GRKERNSEC_SETXID
70848+ {
70849+ .procname = "consistent_setxid",
70850+ .data = &grsec_enable_setxid,
70851+ .maxlen = sizeof(int),
70852+ .mode = 0600,
70853+ .proc_handler = &proc_dointvec,
70854+ },
70855+#endif
70856+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
70857+ {
70858+ .procname = "ip_blackhole",
70859+ .data = &grsec_enable_blackhole,
70860+ .maxlen = sizeof(int),
70861+ .mode = 0600,
70862+ .proc_handler = &proc_dointvec,
70863+ },
70864+ {
70865+ .procname = "lastack_retries",
70866+ .data = &grsec_lastack_retries,
70867+ .maxlen = sizeof(int),
70868+ .mode = 0600,
70869+ .proc_handler = &proc_dointvec,
70870+ },
70871+#endif
70872+#ifdef CONFIG_GRKERNSEC_EXECLOG
70873+ {
70874+ .procname = "exec_logging",
70875+ .data = &grsec_enable_execlog,
70876+ .maxlen = sizeof(int),
70877+ .mode = 0600,
70878+ .proc_handler = &proc_dointvec,
70879+ },
70880+#endif
70881+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
70882+ {
70883+ .procname = "rwxmap_logging",
70884+ .data = &grsec_enable_log_rwxmaps,
70885+ .maxlen = sizeof(int),
70886+ .mode = 0600,
70887+ .proc_handler = &proc_dointvec,
70888+ },
70889+#endif
70890+#ifdef CONFIG_GRKERNSEC_SIGNAL
70891+ {
70892+ .procname = "signal_logging",
70893+ .data = &grsec_enable_signal,
70894+ .maxlen = sizeof(int),
70895+ .mode = 0600,
70896+ .proc_handler = &proc_dointvec,
70897+ },
70898+#endif
70899+#ifdef CONFIG_GRKERNSEC_FORKFAIL
70900+ {
70901+ .procname = "forkfail_logging",
70902+ .data = &grsec_enable_forkfail,
70903+ .maxlen = sizeof(int),
70904+ .mode = 0600,
70905+ .proc_handler = &proc_dointvec,
70906+ },
70907+#endif
70908+#ifdef CONFIG_GRKERNSEC_TIME
70909+ {
70910+ .procname = "timechange_logging",
70911+ .data = &grsec_enable_time,
70912+ .maxlen = sizeof(int),
70913+ .mode = 0600,
70914+ .proc_handler = &proc_dointvec,
70915+ },
70916+#endif
70917+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
70918+ {
70919+ .procname = "chroot_deny_shmat",
70920+ .data = &grsec_enable_chroot_shmat,
70921+ .maxlen = sizeof(int),
70922+ .mode = 0600,
70923+ .proc_handler = &proc_dointvec,
70924+ },
70925+#endif
70926+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
70927+ {
70928+ .procname = "chroot_deny_unix",
70929+ .data = &grsec_enable_chroot_unix,
70930+ .maxlen = sizeof(int),
70931+ .mode = 0600,
70932+ .proc_handler = &proc_dointvec,
70933+ },
70934+#endif
70935+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
70936+ {
70937+ .procname = "chroot_deny_mount",
70938+ .data = &grsec_enable_chroot_mount,
70939+ .maxlen = sizeof(int),
70940+ .mode = 0600,
70941+ .proc_handler = &proc_dointvec,
70942+ },
70943+#endif
70944+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
70945+ {
70946+ .procname = "chroot_deny_fchdir",
70947+ .data = &grsec_enable_chroot_fchdir,
70948+ .maxlen = sizeof(int),
70949+ .mode = 0600,
70950+ .proc_handler = &proc_dointvec,
70951+ },
70952+#endif
70953+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
70954+ {
70955+ .procname = "chroot_deny_chroot",
70956+ .data = &grsec_enable_chroot_double,
70957+ .maxlen = sizeof(int),
70958+ .mode = 0600,
70959+ .proc_handler = &proc_dointvec,
70960+ },
70961+#endif
70962+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
70963+ {
70964+ .procname = "chroot_deny_pivot",
70965+ .data = &grsec_enable_chroot_pivot,
70966+ .maxlen = sizeof(int),
70967+ .mode = 0600,
70968+ .proc_handler = &proc_dointvec,
70969+ },
70970+#endif
70971+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
70972+ {
70973+ .procname = "chroot_enforce_chdir",
70974+ .data = &grsec_enable_chroot_chdir,
70975+ .maxlen = sizeof(int),
70976+ .mode = 0600,
70977+ .proc_handler = &proc_dointvec,
70978+ },
70979+#endif
70980+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
70981+ {
70982+ .procname = "chroot_deny_chmod",
70983+ .data = &grsec_enable_chroot_chmod,
70984+ .maxlen = sizeof(int),
70985+ .mode = 0600,
70986+ .proc_handler = &proc_dointvec,
70987+ },
70988+#endif
70989+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
70990+ {
70991+ .procname = "chroot_deny_mknod",
70992+ .data = &grsec_enable_chroot_mknod,
70993+ .maxlen = sizeof(int),
70994+ .mode = 0600,
70995+ .proc_handler = &proc_dointvec,
70996+ },
70997+#endif
70998+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
70999+ {
71000+ .procname = "chroot_restrict_nice",
71001+ .data = &grsec_enable_chroot_nice,
71002+ .maxlen = sizeof(int),
71003+ .mode = 0600,
71004+ .proc_handler = &proc_dointvec,
71005+ },
71006+#endif
71007+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
71008+ {
71009+ .procname = "chroot_execlog",
71010+ .data = &grsec_enable_chroot_execlog,
71011+ .maxlen = sizeof(int),
71012+ .mode = 0600,
71013+ .proc_handler = &proc_dointvec,
71014+ },
71015+#endif
71016+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
71017+ {
71018+ .procname = "chroot_caps",
71019+ .data = &grsec_enable_chroot_caps,
71020+ .maxlen = sizeof(int),
71021+ .mode = 0600,
71022+ .proc_handler = &proc_dointvec,
71023+ },
71024+#endif
71025+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
71026+ {
71027+ .procname = "chroot_deny_sysctl",
71028+ .data = &grsec_enable_chroot_sysctl,
71029+ .maxlen = sizeof(int),
71030+ .mode = 0600,
71031+ .proc_handler = &proc_dointvec,
71032+ },
71033+#endif
71034+#ifdef CONFIG_GRKERNSEC_TPE
71035+ {
71036+ .procname = "tpe",
71037+ .data = &grsec_enable_tpe,
71038+ .maxlen = sizeof(int),
71039+ .mode = 0600,
71040+ .proc_handler = &proc_dointvec,
71041+ },
71042+ {
71043+ .procname = "tpe_gid",
71044+ .data = &grsec_tpe_gid,
71045+ .maxlen = sizeof(int),
71046+ .mode = 0600,
71047+ .proc_handler = &proc_dointvec,
71048+ },
71049+#endif
71050+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
71051+ {
71052+ .procname = "tpe_invert",
71053+ .data = &grsec_enable_tpe_invert,
71054+ .maxlen = sizeof(int),
71055+ .mode = 0600,
71056+ .proc_handler = &proc_dointvec,
71057+ },
71058+#endif
71059+#ifdef CONFIG_GRKERNSEC_TPE_ALL
71060+ {
71061+ .procname = "tpe_restrict_all",
71062+ .data = &grsec_enable_tpe_all,
71063+ .maxlen = sizeof(int),
71064+ .mode = 0600,
71065+ .proc_handler = &proc_dointvec,
71066+ },
71067+#endif
71068+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
71069+ {
71070+ .procname = "socket_all",
71071+ .data = &grsec_enable_socket_all,
71072+ .maxlen = sizeof(int),
71073+ .mode = 0600,
71074+ .proc_handler = &proc_dointvec,
71075+ },
71076+ {
71077+ .procname = "socket_all_gid",
71078+ .data = &grsec_socket_all_gid,
71079+ .maxlen = sizeof(int),
71080+ .mode = 0600,
71081+ .proc_handler = &proc_dointvec,
71082+ },
71083+#endif
71084+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
71085+ {
71086+ .procname = "socket_client",
71087+ .data = &grsec_enable_socket_client,
71088+ .maxlen = sizeof(int),
71089+ .mode = 0600,
71090+ .proc_handler = &proc_dointvec,
71091+ },
71092+ {
71093+ .procname = "socket_client_gid",
71094+ .data = &grsec_socket_client_gid,
71095+ .maxlen = sizeof(int),
71096+ .mode = 0600,
71097+ .proc_handler = &proc_dointvec,
71098+ },
71099+#endif
71100+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
71101+ {
71102+ .procname = "socket_server",
71103+ .data = &grsec_enable_socket_server,
71104+ .maxlen = sizeof(int),
71105+ .mode = 0600,
71106+ .proc_handler = &proc_dointvec,
71107+ },
71108+ {
71109+ .procname = "socket_server_gid",
71110+ .data = &grsec_socket_server_gid,
71111+ .maxlen = sizeof(int),
71112+ .mode = 0600,
71113+ .proc_handler = &proc_dointvec,
71114+ },
71115+#endif
71116+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
71117+ {
71118+ .procname = "audit_group",
71119+ .data = &grsec_enable_group,
71120+ .maxlen = sizeof(int),
71121+ .mode = 0600,
71122+ .proc_handler = &proc_dointvec,
71123+ },
71124+ {
71125+ .procname = "audit_gid",
71126+ .data = &grsec_audit_gid,
71127+ .maxlen = sizeof(int),
71128+ .mode = 0600,
71129+ .proc_handler = &proc_dointvec,
71130+ },
71131+#endif
71132+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
71133+ {
71134+ .procname = "audit_chdir",
71135+ .data = &grsec_enable_chdir,
71136+ .maxlen = sizeof(int),
71137+ .mode = 0600,
71138+ .proc_handler = &proc_dointvec,
71139+ },
71140+#endif
71141+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
71142+ {
71143+ .procname = "audit_mount",
71144+ .data = &grsec_enable_mount,
71145+ .maxlen = sizeof(int),
71146+ .mode = 0600,
71147+ .proc_handler = &proc_dointvec,
71148+ },
71149+#endif
71150+#ifdef CONFIG_GRKERNSEC_DMESG
71151+ {
71152+ .procname = "dmesg",
71153+ .data = &grsec_enable_dmesg,
71154+ .maxlen = sizeof(int),
71155+ .mode = 0600,
71156+ .proc_handler = &proc_dointvec,
71157+ },
71158+#endif
71159+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
71160+ {
71161+ .procname = "chroot_findtask",
71162+ .data = &grsec_enable_chroot_findtask,
71163+ .maxlen = sizeof(int),
71164+ .mode = 0600,
71165+ .proc_handler = &proc_dointvec,
71166+ },
71167+#endif
71168+#ifdef CONFIG_GRKERNSEC_RESLOG
71169+ {
71170+ .procname = "resource_logging",
71171+ .data = &grsec_resource_logging,
71172+ .maxlen = sizeof(int),
71173+ .mode = 0600,
71174+ .proc_handler = &proc_dointvec,
71175+ },
71176+#endif
71177+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
71178+ {
71179+ .procname = "audit_ptrace",
71180+ .data = &grsec_enable_audit_ptrace,
71181+ .maxlen = sizeof(int),
71182+ .mode = 0600,
71183+ .proc_handler = &proc_dointvec,
71184+ },
71185+#endif
71186+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
71187+ {
71188+ .procname = "harden_ptrace",
71189+ .data = &grsec_enable_harden_ptrace,
71190+ .maxlen = sizeof(int),
71191+ .mode = 0600,
71192+ .proc_handler = &proc_dointvec,
71193+ },
71194+#endif
71195+ {
71196+ .procname = "grsec_lock",
71197+ .data = &grsec_lock,
71198+ .maxlen = sizeof(int),
71199+ .mode = 0600,
71200+ .proc_handler = &proc_dointvec,
71201+ },
71202+#endif
71203+#ifdef CONFIG_GRKERNSEC_ROFS
71204+ {
71205+ .procname = "romount_protect",
71206+ .data = &grsec_enable_rofs,
71207+ .maxlen = sizeof(int),
71208+ .mode = 0600,
71209+ .proc_handler = &proc_dointvec_minmax,
71210+ .extra1 = &one,
71211+ .extra2 = &one,
71212+ },
71213+#endif
71214+#ifdef CONFIG_GRKERNSEC_DENYUSB
71215+ {
71216+ .procname = "deny_new_usb",
71217+ .data = &grsec_deny_new_usb,
71218+ .maxlen = sizeof(int),
71219+ .mode = 0600,
71220+ .proc_handler = &proc_dointvec_minmax,
71221+ .extra1 = &one,
71222+ .extra2 = &one,
71223+ },
71224+#endif
71225+ { }
71226+};
71227+#endif
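
Two details of the table above are easy to miss: every entry is mode 0600 (root-only), and the romount_protect/deny_new_usb entries use proc_dointvec_minmax with extra1 and extra2 both pointing at one, which makes them one-way switches that can only ever be written to 1. For reference on the ctl_table plumbing itself, a minimal out-of-tree sketch against the ~3.10 sysctl API (module and names are hypothetical, not from this patch; grsecurity itself wires grsecurity_table into kernel/sysctl.c instead):

	#include <linux/module.h>
	#include <linux/sysctl.h>

	static int demo_value;
	static struct ctl_table_header *hdr;

	static struct ctl_table demo_table[] = {
		{
			.procname	= "demo_value",
			.data		= &demo_value,
			.maxlen		= sizeof(int),
			.mode		= 0600,
			.proc_handler	= proc_dointvec,
		},
		{ }	/* zeroed sentinel terminates the table */
	};

	static int __init demo_init(void)
	{
		hdr = register_sysctl("kernel", demo_table);	/* /proc/sys/kernel/demo_value */
		return hdr ? 0 : -ENOMEM;
	}

	static void __exit demo_exit(void)
	{
		unregister_sysctl_table(hdr);
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");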
71228diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
71229new file mode 100644
71230index 0000000..0dc13c3
71231--- /dev/null
71232+++ b/grsecurity/grsec_time.c
71233@@ -0,0 +1,16 @@
71234+#include <linux/kernel.h>
71235+#include <linux/sched.h>
71236+#include <linux/grinternal.h>
71237+#include <linux/module.h>
71238+
71239+void
71240+gr_log_timechange(void)
71241+{
71242+#ifdef CONFIG_GRKERNSEC_TIME
71243+ if (grsec_enable_time)
71244+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
71245+#endif
71246+ return;
71247+}
71248+
71249+EXPORT_SYMBOL(gr_log_timechange);
71250diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
71251new file mode 100644
71252index 0000000..ee57dcf
71253--- /dev/null
71254+++ b/grsecurity/grsec_tpe.c
71255@@ -0,0 +1,73 @@
71256+#include <linux/kernel.h>
71257+#include <linux/sched.h>
71258+#include <linux/file.h>
71259+#include <linux/fs.h>
71260+#include <linux/grinternal.h>
71261+
71262+extern int gr_acl_tpe_check(void);
71263+
71264+int
71265+gr_tpe_allow(const struct file *file)
71266+{
71267+#ifdef CONFIG_GRKERNSEC
71268+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
71269+ const struct cred *cred = current_cred();
71270+ char *msg = NULL;
71271+ char *msg2 = NULL;
71272+
71273+ // never restrict root
71274+ if (gr_is_global_root(cred->uid))
71275+ return 1;
71276+
71277+ if (grsec_enable_tpe) {
71278+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
71279+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
71280+ msg = "not being in trusted group";
71281+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
71282+ msg = "being in untrusted group";
71283+#else
71284+ if (in_group_p(grsec_tpe_gid))
71285+ msg = "being in untrusted group";
71286+#endif
71287+ }
71288+ if (!msg && gr_acl_tpe_check())
71289+ msg = "being in untrusted role";
71290+
71291+ // not in any affected group/role
71292+ if (!msg)
71293+ goto next_check;
71294+
71295+ if (gr_is_global_nonroot(inode->i_uid))
71296+ msg2 = "file in non-root-owned directory";
71297+ else if (inode->i_mode & S_IWOTH)
71298+ msg2 = "file in world-writable directory";
71299+ else if (inode->i_mode & S_IWGRP)
71300+ msg2 = "file in group-writable directory";
71301+
71302+ if (msg && msg2) {
71303+ char fullmsg[70] = {0};
71304+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
71305+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
71306+ return 0;
71307+ }
71308+ msg = NULL;
71309+next_check:
71310+#ifdef CONFIG_GRKERNSEC_TPE_ALL
71311+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
71312+ return 1;
71313+
71314+ if (gr_is_global_nonroot(inode->i_uid) && !uid_eq(inode->i_uid, cred->uid))
71315+ msg = "directory not owned by user";
71316+ else if (inode->i_mode & S_IWOTH)
71317+ msg = "file in world-writable directory";
71318+ else if (inode->i_mode & S_IWGRP)
71319+ msg = "file in group-writable directory";
71320+
71321+ if (msg) {
71322+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
71323+ return 0;
71324+ }
71325+#endif
71326+#endif
71327+ return 1;
71328+}
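
Note that gr_tpe_allow() never inspects the binary itself; both passes look only at the containing directory (via d_parent): its owner and its group/other write bits. A rough user-space analog of just those directory checks, with no GID or role logic (stat-based sketch, not grsecurity code):

	#include <stdio.h>
	#include <unistd.h>
	#include <libgen.h>
	#include <sys/stat.h>

	static const char *tpe_dir_problem(const char *path)
	{
		char buf[4096];
		struct stat st;

		snprintf(buf, sizeof(buf), "%s", path);
		if (stat(dirname(buf), &st))	/* dirname may modify buf: use a copy */
			return "cannot stat directory";
		if (st.st_uid != 0 && st.st_uid != getuid())
			return "directory not owned by root or the user";
		if (st.st_mode & S_IWOTH)
			return "file in world-writable directory";
		if (st.st_mode & S_IWGRP)
			return "file in group-writable directory";
		return NULL;			/* directory looks trusted */
	}

	int main(int argc, char **argv)
	{
		const char *msg = tpe_dir_problem(argc > 1 ? argv[1] : "/bin/true");

		puts(msg ? msg : "ok");
		return 0;
	}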
71329diff --git a/grsecurity/grsec_usb.c b/grsecurity/grsec_usb.c
71330new file mode 100644
71331index 0000000..ae02d8e
71332--- /dev/null
71333+++ b/grsecurity/grsec_usb.c
71334@@ -0,0 +1,15 @@
71335+#include <linux/kernel.h>
71336+#include <linux/grinternal.h>
71337+#include <linux/module.h>
71338+
71339+int gr_handle_new_usb(void)
71340+{
71341+#ifdef CONFIG_GRKERNSEC_DENYUSB
71342+ if (grsec_deny_new_usb) {
71343+ printk(KERN_ALERT "grsec: denied insert of new USB device\n");
71344+ return 1;
71345+ }
71346+#endif
71347+ return 0;
71348+}
71349+EXPORT_SYMBOL_GPL(gr_handle_new_usb);
71350diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
71351new file mode 100644
71352index 0000000..9f7b1ac
71353--- /dev/null
71354+++ b/grsecurity/grsum.c
71355@@ -0,0 +1,61 @@
71356+#include <linux/err.h>
71357+#include <linux/kernel.h>
71358+#include <linux/sched.h>
71359+#include <linux/mm.h>
71360+#include <linux/scatterlist.h>
71361+#include <linux/crypto.h>
71362+#include <linux/gracl.h>
71363+
71364+
71365+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
71366+#error "crypto and sha256 must be built into the kernel"
71367+#endif
71368+
71369+int
71370+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
71371+{
71372+ char *p;
71373+ struct crypto_hash *tfm;
71374+ struct hash_desc desc;
71375+ struct scatterlist sg;
71376+ unsigned char temp_sum[GR_SHA_LEN];
71377+ volatile int retval = 0;
71378+ volatile int dummy = 0;
71379+ unsigned int i;
71380+
71381+ sg_init_table(&sg, 1);
71382+
71383+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
71384+ if (IS_ERR(tfm)) {
71385+ /* should never happen, since sha256 should be built in */
71386+ return 1;
71387+ }
71388+
71389+ desc.tfm = tfm;
71390+ desc.flags = 0;
71391+
71392+ crypto_hash_init(&desc);
71393+
71394+ p = salt;
71395+ sg_set_buf(&sg, p, GR_SALT_LEN);
71396+ crypto_hash_update(&desc, &sg, sg.length);
71397+
71398+ p = entry->pw;
71399+ sg_set_buf(&sg, p, strlen(p));
71400+
71401+ crypto_hash_update(&desc, &sg, sg.length);
71402+
71403+ crypto_hash_final(&desc, temp_sum);
71404+
71405+ memset(entry->pw, 0, GR_PW_LEN);
71406+
71407+ for (i = 0; i < GR_SHA_LEN; i++)
71408+ if (sum[i] != temp_sum[i])
71409+ retval = 1;
71410+ else
71411+ dummy = 1; // waste a cycle
71412+
71413+ crypto_free_hash(tfm);
71414+
71415+ return retval;
71416+}
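
The volatile retval/dummy loop in chkpw() is a timing-safe comparison: it always walks all GR_SHA_LEN bytes and does comparable work on match and mismatch, so the response time leaks nothing about how many leading bytes of a password guess were correct. The same property is more commonly written branch-free; a small sketch:

	#include <stddef.h>

	/* returns 1 on mismatch, 0 on match, like chkpw(); the running time
	 * depends only on n, never on where (or whether) the buffers differ */
	static int ct_compare(const unsigned char *a, const unsigned char *b, size_t n)
	{
		unsigned char diff = 0;
		size_t i;

		for (i = 0; i < n; i++)
			diff |= a[i] ^ b[i];
		return diff != 0;
	}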
71417diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
71418index 77ff547..181834f 100644
71419--- a/include/asm-generic/4level-fixup.h
71420+++ b/include/asm-generic/4level-fixup.h
71421@@ -13,8 +13,10 @@
71422 #define pmd_alloc(mm, pud, address) \
71423 ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
71424 NULL: pmd_offset(pud, address))
71425+#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
71426
71427 #define pud_alloc(mm, pgd, address) (pgd)
71428+#define pud_alloc_kernel(mm, pgd, address) pud_alloc((mm), (pgd), (address))
71429 #define pud_offset(pgd, start) (pgd)
71430 #define pud_none(pud) 0
71431 #define pud_bad(pud) 0
71432diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
71433index b7babf0..04ad282 100644
71434--- a/include/asm-generic/atomic-long.h
71435+++ b/include/asm-generic/atomic-long.h
71436@@ -22,6 +22,12 @@
71437
71438 typedef atomic64_t atomic_long_t;
71439
71440+#ifdef CONFIG_PAX_REFCOUNT
71441+typedef atomic64_unchecked_t atomic_long_unchecked_t;
71442+#else
71443+typedef atomic64_t atomic_long_unchecked_t;
71444+#endif
71445+
71446 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
71447
71448 static inline long atomic_long_read(atomic_long_t *l)
71449@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
71450 return (long)atomic64_read(v);
71451 }
71452
71453+#ifdef CONFIG_PAX_REFCOUNT
71454+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
71455+{
71456+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
71457+
71458+ return (long)atomic64_read_unchecked(v);
71459+}
71460+#endif
71461+
71462 static inline void atomic_long_set(atomic_long_t *l, long i)
71463 {
71464 atomic64_t *v = (atomic64_t *)l;
71465@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
71466 atomic64_set(v, i);
71467 }
71468
71469+#ifdef CONFIG_PAX_REFCOUNT
71470+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
71471+{
71472+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
71473+
71474+ atomic64_set_unchecked(v, i);
71475+}
71476+#endif
71477+
71478 static inline void atomic_long_inc(atomic_long_t *l)
71479 {
71480 atomic64_t *v = (atomic64_t *)l;
71481@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
71482 atomic64_inc(v);
71483 }
71484
71485+#ifdef CONFIG_PAX_REFCOUNT
71486+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
71487+{
71488+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
71489+
71490+ atomic64_inc_unchecked(v);
71491+}
71492+#endif
71493+
71494 static inline void atomic_long_dec(atomic_long_t *l)
71495 {
71496 atomic64_t *v = (atomic64_t *)l;
71497@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
71498 atomic64_dec(v);
71499 }
71500
71501+#ifdef CONFIG_PAX_REFCOUNT
71502+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
71503+{
71504+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
71505+
71506+ atomic64_dec_unchecked(v);
71507+}
71508+#endif
71509+
71510 static inline void atomic_long_add(long i, atomic_long_t *l)
71511 {
71512 atomic64_t *v = (atomic64_t *)l;
71513@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
71514 atomic64_add(i, v);
71515 }
71516
71517+#ifdef CONFIG_PAX_REFCOUNT
71518+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
71519+{
71520+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
71521+
71522+ atomic64_add_unchecked(i, v);
71523+}
71524+#endif
71525+
71526 static inline void atomic_long_sub(long i, atomic_long_t *l)
71527 {
71528 atomic64_t *v = (atomic64_t *)l;
71529@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
71530 atomic64_sub(i, v);
71531 }
71532
71533+#ifdef CONFIG_PAX_REFCOUNT
71534+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
71535+{
71536+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
71537+
71538+ atomic64_sub_unchecked(i, v);
71539+}
71540+#endif
71541+
71542 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
71543 {
71544 atomic64_t *v = (atomic64_t *)l;
71545@@ -101,6 +161,15 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
71546 return (long)atomic64_add_return(i, v);
71547 }
71548
71549+#ifdef CONFIG_PAX_REFCOUNT
71550+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
71551+{
71552+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
71553+
71554+ return (long)atomic64_add_return_unchecked(i, v);
71555+}
71556+#endif
71557+
71558 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
71559 {
71560 atomic64_t *v = (atomic64_t *)l;
71561@@ -115,6 +184,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
71562 return (long)atomic64_inc_return(v);
71563 }
71564
71565+#ifdef CONFIG_PAX_REFCOUNT
71566+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
71567+{
71568+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
71569+
71570+ return (long)atomic64_inc_return_unchecked(v);
71571+}
71572+#endif
71573+
71574 static inline long atomic_long_dec_return(atomic_long_t *l)
71575 {
71576 atomic64_t *v = (atomic64_t *)l;
71577@@ -140,6 +218,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
71578
71579 typedef atomic_t atomic_long_t;
71580
71581+#ifdef CONFIG_PAX_REFCOUNT
71582+typedef atomic_unchecked_t atomic_long_unchecked_t;
71583+#else
71584+typedef atomic_t atomic_long_unchecked_t;
71585+#endif
71586+
71587 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
71588 static inline long atomic_long_read(atomic_long_t *l)
71589 {
71590@@ -148,6 +232,15 @@ static inline long atomic_long_read(atomic_long_t *l)
71591 return (long)atomic_read(v);
71592 }
71593
71594+#ifdef CONFIG_PAX_REFCOUNT
71595+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
71596+{
71597+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
71598+
71599+ return (long)atomic_read_unchecked(v);
71600+}
71601+#endif
71602+
71603 static inline void atomic_long_set(atomic_long_t *l, long i)
71604 {
71605 atomic_t *v = (atomic_t *)l;
71606@@ -155,6 +248,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
71607 atomic_set(v, i);
71608 }
71609
71610+#ifdef CONFIG_PAX_REFCOUNT
71611+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
71612+{
71613+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
71614+
71615+ atomic_set_unchecked(v, i);
71616+}
71617+#endif
71618+
71619 static inline void atomic_long_inc(atomic_long_t *l)
71620 {
71621 atomic_t *v = (atomic_t *)l;
71622@@ -162,6 +264,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
71623 atomic_inc(v);
71624 }
71625
71626+#ifdef CONFIG_PAX_REFCOUNT
71627+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
71628+{
71629+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
71630+
71631+ atomic_inc_unchecked(v);
71632+}
71633+#endif
71634+
71635 static inline void atomic_long_dec(atomic_long_t *l)
71636 {
71637 atomic_t *v = (atomic_t *)l;
71638@@ -169,6 +280,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
71639 atomic_dec(v);
71640 }
71641
71642+#ifdef CONFIG_PAX_REFCOUNT
71643+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
71644+{
71645+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
71646+
71647+ atomic_dec_unchecked(v);
71648+}
71649+#endif
71650+
71651 static inline void atomic_long_add(long i, atomic_long_t *l)
71652 {
71653 atomic_t *v = (atomic_t *)l;
71654@@ -176,6 +296,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
71655 atomic_add(i, v);
71656 }
71657
71658+#ifdef CONFIG_PAX_REFCOUNT
71659+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
71660+{
71661+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
71662+
71663+ atomic_add_unchecked(i, v);
71664+}
71665+#endif
71666+
71667 static inline void atomic_long_sub(long i, atomic_long_t *l)
71668 {
71669 atomic_t *v = (atomic_t *)l;
71670@@ -183,6 +312,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
71671 atomic_sub(i, v);
71672 }
71673
71674+#ifdef CONFIG_PAX_REFCOUNT
71675+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
71676+{
71677+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
71678+
71679+ atomic_sub_unchecked(i, v);
71680+}
71681+#endif
71682+
71683 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
71684 {
71685 atomic_t *v = (atomic_t *)l;
71686@@ -218,6 +356,16 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
71687 return (long)atomic_add_return(i, v);
71688 }
71689
71690+#ifdef CONFIG_PAX_REFCOUNT
71691+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
71692+{
71693+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
71694+
71695+ return (long)atomic_add_return_unchecked(i, v);
71696+}
71697+
71698+#endif
71699+
71700 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
71701 {
71702 atomic_t *v = (atomic_t *)l;
71703@@ -232,6 +380,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
71704 return (long)atomic_inc_return(v);
71705 }
71706
71707+#ifdef CONFIG_PAX_REFCOUNT
71708+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
71709+{
71710+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
71711+
71712+ return (long)atomic_inc_return_unchecked(v);
71713+}
71714+#endif
71715+
71716 static inline long atomic_long_dec_return(atomic_long_t *l)
71717 {
71718 atomic_t *v = (atomic_t *)l;
71719@@ -255,4 +412,57 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
71720
71721 #endif /* BITS_PER_LONG == 64 */
71722
71723+#ifdef CONFIG_PAX_REFCOUNT
71724+static inline void pax_refcount_needs_these_functions(void)
71725+{
71726+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
71727+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
71728+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
71729+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
71730+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
71731+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
71732+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
71733+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
71734+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
71735+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
71736+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
71737+#ifdef CONFIG_X86
71738+ atomic_clear_mask_unchecked(0, NULL);
71739+ atomic_set_mask_unchecked(0, NULL);
71740+#endif
71741+
71742+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
71743+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
71744+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
71745+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
71746+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
71747+ atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
71748+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
71749+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
71750+}
71751+#else
71752+#define atomic_read_unchecked(v) atomic_read(v)
71753+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
71754+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
71755+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
71756+#define atomic_inc_unchecked(v) atomic_inc(v)
71757+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
71758+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
71759+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
71760+#define atomic_dec_unchecked(v) atomic_dec(v)
71761+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
71762+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
71763+#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
71764+#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
71765+
71766+#define atomic_long_read_unchecked(v) atomic_long_read(v)
71767+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
71768+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
71769+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
71770+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
71771+#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v))
71772+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
71773+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
71774+#endif
71775+
71776 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
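
The point of the *_unchecked family: under PAX_REFCOUNT, plain atomic_t/atomic_long_t operations detect and stop overflows (the classic refcount-overflow-into-use-after-free pattern), so counters where wrapping is harmless must opt out or they would trigger false positives. A kernel-side sketch of the intended split (not stand-alone; in-tree code gets these types via <linux/atomic.h>, and atomic_unchecked_t mirrors the checked layout so ATOMIC_INIT works for both):

	static atomic_t obj_refs = ATOMIC_INIT(1);		/* lifetime count: keep it checked */
	static atomic_unchecked_t rx_frames = ATOMIC_INIT(0);	/* pure statistic: wrap is harmless */

	static void take_ref(void)
	{
		atomic_inc(&obj_refs);			/* overflow is caught with PAX_REFCOUNT */
	}

	static void count_frame(void)
	{
		atomic_inc_unchecked(&rx_frames);	/* never trips the overflow check */
	}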
71777diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
71778index 33bd2de..f31bff97 100644
71779--- a/include/asm-generic/atomic.h
71780+++ b/include/asm-generic/atomic.h
71781@@ -153,7 +153,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
71782 * Atomically clears the bits set in @mask from @v
71783 */
71784 #ifndef atomic_clear_mask
71785-static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
71786+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
71787 {
71788 unsigned long flags;
71789
71790diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
71791index b18ce4f..2ee2843 100644
71792--- a/include/asm-generic/atomic64.h
71793+++ b/include/asm-generic/atomic64.h
71794@@ -16,6 +16,8 @@ typedef struct {
71795 long long counter;
71796 } atomic64_t;
71797
71798+typedef atomic64_t atomic64_unchecked_t;
71799+
71800 #define ATOMIC64_INIT(i) { (i) }
71801
71802 extern long long atomic64_read(const atomic64_t *v);
71803@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
71804 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
71805 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
71806
71807+#define atomic64_read_unchecked(v) atomic64_read(v)
71808+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
71809+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
71810+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
71811+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
71812+#define atomic64_inc_unchecked(v) atomic64_inc(v)
71813+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
71814+#define atomic64_dec_unchecked(v) atomic64_dec(v)
71815+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
71816+
71817 #endif /* _ASM_GENERIC_ATOMIC64_H */
71818diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
71819index 1bfcfe5..e04c5c9 100644
71820--- a/include/asm-generic/cache.h
71821+++ b/include/asm-generic/cache.h
71822@@ -6,7 +6,7 @@
71823 * cache lines need to provide their own cache.h.
71824 */
71825
71826-#define L1_CACHE_SHIFT 5
71827-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
71828+#define L1_CACHE_SHIFT 5UL
71829+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
71830
71831 #endif /* __ASM_GENERIC_CACHE_H */
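
Making L1_CACHE_SHIFT and L1_CACHE_BYTES unsigned long matters once the constants feed address arithmetic: a shift done in plain int is 32-bit and sign-extends (or is outright undefined) before it ever reaches a 64-bit expression. A stand-alone illustration of the general hazard on a typical LP64 target (the shift count 31 is for demonstration; the cache shift itself is only 5):

	#include <stdio.h>

	int main(void)
	{
		unsigned long a = 1 << 31;	/* int arithmetic: UB, typically sign-extends */
		unsigned long b = 1UL << 31;	/* unsigned long arithmetic: well defined */

		printf("%lx\n%lx\n", a, b);	/* commonly ffffffff80000000 vs 80000000 */
		return 0;
	}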
71832diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
71833index 0d68a1e..b74a761 100644
71834--- a/include/asm-generic/emergency-restart.h
71835+++ b/include/asm-generic/emergency-restart.h
71836@@ -1,7 +1,7 @@
71837 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
71838 #define _ASM_GENERIC_EMERGENCY_RESTART_H
71839
71840-static inline void machine_emergency_restart(void)
71841+static inline __noreturn void machine_emergency_restart(void)
71842 {
71843 machine_restart(NULL);
71844 }
71845diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
71846index 90f99c7..00ce236 100644
71847--- a/include/asm-generic/kmap_types.h
71848+++ b/include/asm-generic/kmap_types.h
71849@@ -2,9 +2,9 @@
71850 #define _ASM_GENERIC_KMAP_TYPES_H
71851
71852 #ifdef __WITH_KM_FENCE
71853-# define KM_TYPE_NR 41
71854+# define KM_TYPE_NR 42
71855 #else
71856-# define KM_TYPE_NR 20
71857+# define KM_TYPE_NR 21
71858 #endif
71859
71860 #endif
71861diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
71862index 9ceb03b..62b0b8f 100644
71863--- a/include/asm-generic/local.h
71864+++ b/include/asm-generic/local.h
71865@@ -23,24 +23,37 @@ typedef struct
71866 atomic_long_t a;
71867 } local_t;
71868
71869+typedef struct {
71870+ atomic_long_unchecked_t a;
71871+} local_unchecked_t;
71872+
71873 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
71874
71875 #define local_read(l) atomic_long_read(&(l)->a)
71876+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
71877 #define local_set(l,i) atomic_long_set((&(l)->a),(i))
71878+#define local_set_unchecked(l,i) atomic_long_set_unchecked((&(l)->a),(i))
71879 #define local_inc(l) atomic_long_inc(&(l)->a)
71880+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
71881 #define local_dec(l) atomic_long_dec(&(l)->a)
71882+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
71883 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
71884+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
71885 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
71886+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
71887
71888 #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
71889 #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
71890 #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
71891 #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
71892 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
71893+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
71894 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
71895 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
71896+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
71897
71898 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
71899+#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
71900 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
71901 #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
71902 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
71903diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
71904index 725612b..9cc513a 100644
71905--- a/include/asm-generic/pgtable-nopmd.h
71906+++ b/include/asm-generic/pgtable-nopmd.h
71907@@ -1,14 +1,19 @@
71908 #ifndef _PGTABLE_NOPMD_H
71909 #define _PGTABLE_NOPMD_H
71910
71911-#ifndef __ASSEMBLY__
71912-
71913 #include <asm-generic/pgtable-nopud.h>
71914
71915-struct mm_struct;
71916-
71917 #define __PAGETABLE_PMD_FOLDED
71918
71919+#define PMD_SHIFT PUD_SHIFT
71920+#define PTRS_PER_PMD 1
71921+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
71922+#define PMD_MASK (~(PMD_SIZE-1))
71923+
71924+#ifndef __ASSEMBLY__
71925+
71926+struct mm_struct;
71927+
71928 /*
71929 * Having the pmd type consist of a pud gets the size right, and allows
71930 * us to conceptually access the pud entry that this pmd is folded into
71931@@ -16,11 +21,6 @@ struct mm_struct;
71932 */
71933 typedef struct { pud_t pud; } pmd_t;
71934
71935-#define PMD_SHIFT PUD_SHIFT
71936-#define PTRS_PER_PMD 1
71937-#define PMD_SIZE (1UL << PMD_SHIFT)
71938-#define PMD_MASK (~(PMD_SIZE-1))
71939-
71940 /*
71941 * The "pud_xxx()" functions here are trivial for a folded two-level
71942 * setup: the pmd is never bad, and a pmd always exists (as it's folded
71943diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
71944index 810431d..0ec4804f 100644
71945--- a/include/asm-generic/pgtable-nopud.h
71946+++ b/include/asm-generic/pgtable-nopud.h
71947@@ -1,10 +1,15 @@
71948 #ifndef _PGTABLE_NOPUD_H
71949 #define _PGTABLE_NOPUD_H
71950
71951-#ifndef __ASSEMBLY__
71952-
71953 #define __PAGETABLE_PUD_FOLDED
71954
71955+#define PUD_SHIFT PGDIR_SHIFT
71956+#define PTRS_PER_PUD 1
71957+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
71958+#define PUD_MASK (~(PUD_SIZE-1))
71959+
71960+#ifndef __ASSEMBLY__
71961+
71962 /*
71963 * Having the pud type consist of a pgd gets the size right, and allows
71964 * us to conceptually access the pgd entry that this pud is folded into
71965@@ -12,11 +17,6 @@
71966 */
71967 typedef struct { pgd_t pgd; } pud_t;
71968
71969-#define PUD_SHIFT PGDIR_SHIFT
71970-#define PTRS_PER_PUD 1
71971-#define PUD_SIZE (1UL << PUD_SHIFT)
71972-#define PUD_MASK (~(PUD_SIZE-1))
71973-
71974 /*
71975 * The "pgd_xxx()" functions here are trivial for a folded two-level
71976 * setup: the pud is never bad, and a pud always exists (as it's folded
71977@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
71978 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
71979
71980 #define pgd_populate(mm, pgd, pud) do { } while (0)
71981+#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
71982 /*
71983 * (puds are folded into pgds so this doesn't get actually called,
71984 * but the define is needed for a generic inline function.)
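
The two pgtable-no* hunks above and below do the same thing: hoist PMD_SHIFT/PUD_SHIFT, PTRS_PER_*, *_SIZE and *_MASK out of the #ifndef __ASSEMBLY__ region so assembly code can use them, and switch the 1UL literal to _AC(1,UL), because a UL suffix is a syntax error in assembler. For reference, _AC comes from include/uapi/linux/const.h and expands differently per context (paraphrased):

	#ifdef __ASSEMBLY__
	#define _AC(X, Y)	X		/* the assembler sees a bare 1 */
	#else
	#define __AC(X, Y)	(X##Y)
	#define _AC(X, Y)	__AC(X, Y)	/* C sees (1UL) */
	#endif

	#define PUD_SIZE	(_AC(1, UL) << 30)	/* usable from both .c and .S files */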
71985diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
71986index a59ff51..2594a70 100644
71987--- a/include/asm-generic/pgtable.h
71988+++ b/include/asm-generic/pgtable.h
71989@@ -688,6 +688,14 @@ static inline pmd_t pmd_mknuma(pmd_t pmd)
71990 }
71991 #endif /* CONFIG_NUMA_BALANCING */
71992
71993+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
71994+static inline unsigned long pax_open_kernel(void) { return 0; }
71995+#endif
71996+
71997+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
71998+static inline unsigned long pax_close_kernel(void) { return 0; }
71999+#endif
72000+
72001 #endif /* CONFIG_MMU */
72002
72003 #endif /* !__ASSEMBLY__ */
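
pax_open_kernel()/pax_close_kernel() form the sanctioned write window for otherwise read-only kernel data: code that must patch such data briefly lifts the protection, writes, and restores it, and the no-op stubs above make the calls compile away on architectures without an implementation. A user-space analog of the pattern using mprotect (illustrative only, not the kernel mechanism):

	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>

	static char page[4096] __attribute__((aligned(4096))) = "initial";

	int main(void)
	{
		mprotect(page, sizeof(page), PROT_READ);		/* normally read-only */

		mprotect(page, sizeof(page), PROT_READ | PROT_WRITE);	/* "open" */
		strcpy(page, "patched");				/* the one sanctioned write */
		mprotect(page, sizeof(page), PROT_READ);		/* "close" */

		puts(page);
		return 0;
	}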
72004diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
72005index c184aa8..d049942 100644
72006--- a/include/asm-generic/uaccess.h
72007+++ b/include/asm-generic/uaccess.h
72008@@ -343,4 +343,12 @@ clear_user(void __user *to, unsigned long n)
72009 return __clear_user(to, n);
72010 }
72011
72012+#ifndef __HAVE_ARCH_PAX_OPEN_USERLAND
72013+//static inline unsigned long pax_open_userland(void) { return 0; }
72014+#endif
72015+
72016+#ifndef __HAVE_ARCH_PAX_CLOSE_USERLAND
72017+//static inline unsigned long pax_close_userland(void) { return 0; }
72018+#endif
72019+
72020 #endif /* __ASM_GENERIC_UACCESS_H */
72021diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
72022index eb58d2d..df131bf 100644
72023--- a/include/asm-generic/vmlinux.lds.h
72024+++ b/include/asm-generic/vmlinux.lds.h
72025@@ -239,6 +239,7 @@
72026 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
72027 VMLINUX_SYMBOL(__start_rodata) = .; \
72028 *(.rodata) *(.rodata.*) \
72029+ *(.data..read_only) \
72030 *(__vermagic) /* Kernel version magic */ \
72031 . = ALIGN(8); \
72032 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
72033@@ -749,17 +750,18 @@
72034 * section in the linker script will go there too. @phdr should have
72035 * a leading colon.
72036 *
72037- * Note that this macros defines __per_cpu_load as an absolute symbol.
72038+ * Note that this macro defines per_cpu_load as an absolute symbol.
72039 * If there is no need to put the percpu section at a predetermined
72040 * address, use PERCPU_SECTION.
72041 */
72042 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
72043- VMLINUX_SYMBOL(__per_cpu_load) = .; \
72044- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
72045+ per_cpu_load = .; \
72046+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
72047 - LOAD_OFFSET) { \
72048+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
72049 PERCPU_INPUT(cacheline) \
72050 } phdr \
72051- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
72052+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
72053
72054 /**
72055 * PERCPU_SECTION - define output section for percpu area, simple version
72056diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
72057index 418d270..bfd2794 100644
72058--- a/include/crypto/algapi.h
72059+++ b/include/crypto/algapi.h
72060@@ -34,7 +34,7 @@ struct crypto_type {
72061 unsigned int maskclear;
72062 unsigned int maskset;
72063 unsigned int tfmsize;
72064-};
72065+} __do_const;
72066
72067 struct crypto_instance {
72068 struct crypto_alg alg;
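
This __do_const marker (and the many below on ops and function-pointer tables) is consumed by the constify GCC plugin: structures consisting of function pointers are forced const so they land in .rodata and cannot be retargeted by a kernel write primitive. The effect, written out by hand in a stand-alone form:

	struct ops {
		int (*run)(void);
	};

	static int hello(void)
	{
		return 42;
	}

	/* what the plugin effectively produces: the table lives in .rodata,
	 * so my_ops.run can no longer be overwritten at run time */
	static const struct ops my_ops = {
		.run = hello,
	};

	int main(void)
	{
		return my_ops.run();
	}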
72069diff --git a/include/drm/drmP.h b/include/drm/drmP.h
72070index 63d17ee..716de2b 100644
72071--- a/include/drm/drmP.h
72072+++ b/include/drm/drmP.h
72073@@ -72,6 +72,7 @@
72074 #include <linux/workqueue.h>
72075 #include <linux/poll.h>
72076 #include <asm/pgalloc.h>
72077+#include <asm/local.h>
72078 #include <drm/drm.h>
72079 #include <drm/drm_sarea.h>
72080
72081@@ -296,10 +297,12 @@ do { \
72082 * \param cmd command.
72083 * \param arg argument.
72084 */
72085-typedef int drm_ioctl_t(struct drm_device *dev, void *data,
72086+typedef int (* const drm_ioctl_t)(struct drm_device *dev, void *data,
72087+ struct drm_file *file_priv);
72088+typedef int (* drm_ioctl_no_const_t)(struct drm_device *dev, void *data,
72089 struct drm_file *file_priv);
72090
72091-typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
72092+typedef int (* const drm_ioctl_compat_t)(struct file *filp, unsigned int cmd,
72093 unsigned long arg);
72094
72095 #define DRM_IOCTL_NR(n) _IOC_NR(n)
72096@@ -314,10 +317,10 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
72097 struct drm_ioctl_desc {
72098 unsigned int cmd;
72099 int flags;
72100- drm_ioctl_t *func;
72101+ drm_ioctl_t func;
72102 unsigned int cmd_drv;
72103 const char *name;
72104-};
72105+} __do_const;
72106
72107 /**
72108 * Creates a driver or general drm_ioctl_desc array entry for the given
72109@@ -1015,7 +1018,7 @@ struct drm_info_list {
72110 int (*show)(struct seq_file*, void*); /** show callback */
72111 u32 driver_features; /**< Required driver features for this entry */
72112 void *data;
72113-};
72114+} __do_const;
72115
72116 /**
72117 * debugfs node structure. This structure represents a debugfs file.
72118@@ -1088,7 +1091,7 @@ struct drm_device {
72119
72120 /** \name Usage Counters */
72121 /*@{ */
72122- int open_count; /**< Outstanding files open */
72123+ local_t open_count; /**< Outstanding files open */
72124 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
72125 atomic_t vma_count; /**< Outstanding vma areas open */
72126 int buf_use; /**< Buffers in use -- cannot alloc */
72127@@ -1099,7 +1102,7 @@ struct drm_device {
72128 /*@{ */
72129 unsigned long counters;
72130 enum drm_stat_type types[15];
72131- atomic_t counts[15];
72132+ atomic_unchecked_t counts[15];
72133 /*@} */
72134
72135 struct list_head filelist;
72136diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
72137index f43d556..94d9343 100644
72138--- a/include/drm/drm_crtc_helper.h
72139+++ b/include/drm/drm_crtc_helper.h
72140@@ -109,7 +109,7 @@ struct drm_encoder_helper_funcs {
72141 struct drm_connector *connector);
72142 /* disable encoder when not in use - more explicit than dpms off */
72143 void (*disable)(struct drm_encoder *encoder);
72144-};
72145+} __no_const;
72146
72147 /**
72148 * drm_connector_helper_funcs - helper operations for connectors
72149diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
72150index 72dcbe8..8db58d7 100644
72151--- a/include/drm/ttm/ttm_memory.h
72152+++ b/include/drm/ttm/ttm_memory.h
72153@@ -48,7 +48,7 @@
72154
72155 struct ttm_mem_shrink {
72156 int (*do_shrink) (struct ttm_mem_shrink *);
72157-};
72158+} __no_const;
72159
72160 /**
72161 * struct ttm_mem_global - Global memory accounting structure.
72162diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h
72163index 4b840e8..155d235 100644
72164--- a/include/keys/asymmetric-subtype.h
72165+++ b/include/keys/asymmetric-subtype.h
72166@@ -37,7 +37,7 @@ struct asymmetric_key_subtype {
72167 /* Verify the signature on a key of this subtype (optional) */
72168 int (*verify_signature)(const struct key *key,
72169 const struct public_key_signature *sig);
72170-};
72171+} __do_const;
72172
72173 /**
72174 * asymmetric_key_subtype - Get the subtype from an asymmetric key
72175diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
72176index c1da539..1dcec55 100644
72177--- a/include/linux/atmdev.h
72178+++ b/include/linux/atmdev.h
72179@@ -28,7 +28,7 @@ struct compat_atm_iobuf {
72180 #endif
72181
72182 struct k_atm_aal_stats {
72183-#define __HANDLE_ITEM(i) atomic_t i
72184+#define __HANDLE_ITEM(i) atomic_unchecked_t i
72185 __AAL_STAT_ITEMS
72186 #undef __HANDLE_ITEM
72187 };
72188@@ -200,7 +200,7 @@ struct atmdev_ops { /* only send is required */
72189 int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags);
72190 int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page);
72191 struct module *owner;
72192-};
72193+} __do_const;
72194
72195 struct atmphy_ops {
72196 int (*start)(struct atm_dev *dev);
72197diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
72198index 70cf138..0418ee2 100644
72199--- a/include/linux/binfmts.h
72200+++ b/include/linux/binfmts.h
72201@@ -73,8 +73,10 @@ struct linux_binfmt {
72202 int (*load_binary)(struct linux_binprm *);
72203 int (*load_shlib)(struct file *);
72204 int (*core_dump)(struct coredump_params *cprm);
72205+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
72206+ void (*handle_mmap)(struct file *);
72207 unsigned long min_coredump; /* minimal dump size */
72208-};
72209+} __do_const;
72210
72211 extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
72212
72213diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
72214index 2fdb4a4..54aad7e 100644
72215--- a/include/linux/blkdev.h
72216+++ b/include/linux/blkdev.h
72217@@ -1526,7 +1526,7 @@ struct block_device_operations {
72218 /* this callback is with swap_lock and sometimes page table lock held */
72219 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
72220 struct module *owner;
72221-};
72222+} __do_const;
72223
72224 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
72225 unsigned long);
72226diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
72227index 7c2e030..b72475d 100644
72228--- a/include/linux/blktrace_api.h
72229+++ b/include/linux/blktrace_api.h
72230@@ -23,7 +23,7 @@ struct blk_trace {
72231 struct dentry *dir;
72232 struct dentry *dropped_file;
72233 struct dentry *msg_file;
72234- atomic_t dropped;
72235+ atomic_unchecked_t dropped;
72236 };
72237
72238 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
72239diff --git a/include/linux/cache.h b/include/linux/cache.h
72240index 4c57065..4307975 100644
72241--- a/include/linux/cache.h
72242+++ b/include/linux/cache.h
72243@@ -16,6 +16,10 @@
72244 #define __read_mostly
72245 #endif
72246
72247+#ifndef __read_only
72248+#define __read_only __read_mostly
72249+#endif
72250+
72251 #ifndef ____cacheline_aligned
72252 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
72253 #endif
72254diff --git a/include/linux/capability.h b/include/linux/capability.h
72255index d9a4f7f4..19f77d6 100644
72256--- a/include/linux/capability.h
72257+++ b/include/linux/capability.h
72258@@ -213,8 +213,13 @@ extern bool ns_capable(struct user_namespace *ns, int cap);
72259 extern bool nsown_capable(int cap);
72260 extern bool inode_capable(const struct inode *inode, int cap);
72261 extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
72262+extern bool capable_nolog(int cap);
72263+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
72264+extern bool inode_capable_nolog(const struct inode *inode, int cap);
72265
72266 /* audit system wants to get cap info from files as well */
72267 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
72268
72269+extern int is_privileged_binary(const struct dentry *dentry);
72270+
72271 #endif /* !_LINUX_CAPABILITY_H */
72272diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
72273index 8609d57..86e4d79 100644
72274--- a/include/linux/cdrom.h
72275+++ b/include/linux/cdrom.h
72276@@ -87,7 +87,6 @@ struct cdrom_device_ops {
72277
72278 /* driver specifications */
72279 const int capability; /* capability flags */
72280- int n_minors; /* number of active minor devices */
72281 /* handle uniform packets for scsi type devices (scsi,atapi) */
72282 int (*generic_packet) (struct cdrom_device_info *,
72283 struct packet_command *);
72284diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
72285index 4ce9056..86caac6 100644
72286--- a/include/linux/cleancache.h
72287+++ b/include/linux/cleancache.h
72288@@ -31,7 +31,7 @@ struct cleancache_ops {
72289 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
72290 void (*invalidate_inode)(int, struct cleancache_filekey);
72291 void (*invalidate_fs)(int);
72292-};
72293+} __no_const;
72294
72295 extern struct cleancache_ops *
72296 cleancache_register_ops(struct cleancache_ops *ops);
72297diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
72298index 1186098..f87e53d 100644
72299--- a/include/linux/clk-provider.h
72300+++ b/include/linux/clk-provider.h
72301@@ -132,6 +132,7 @@ struct clk_ops {
72302 unsigned long);
72303 void (*init)(struct clk_hw *hw);
72304 };
72305+typedef struct clk_ops __no_const clk_ops_no_const;
72306
72307 /**
72308 * struct clk_init_data - holds init data that's common to all clocks and is
72309diff --git a/include/linux/compat.h b/include/linux/compat.h
72310index 7f0c1dd..206ac34 100644
72311--- a/include/linux/compat.h
72312+++ b/include/linux/compat.h
72313@@ -312,7 +312,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
72314 compat_size_t __user *len_ptr);
72315
72316 asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32);
72317-asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
72318+asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg) __intentional_overflow(0);
72319 asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
72320 asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
72321 compat_ssize_t msgsz, int msgflg);
72322@@ -419,7 +419,7 @@ extern int compat_ptrace_request(struct task_struct *child,
72323 extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
72324 compat_ulong_t addr, compat_ulong_t data);
72325 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
72326- compat_long_t addr, compat_long_t data);
72327+ compat_ulong_t addr, compat_ulong_t data);
72328
72329 asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, size_t);
72330 /*
72331@@ -669,6 +669,7 @@ asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr,
72332
72333 int compat_restore_altstack(const compat_stack_t __user *uss);
72334 int __compat_save_altstack(compat_stack_t __user *, unsigned long);
72335+void __compat_save_altstack_ex(compat_stack_t __user *, unsigned long);
72336
72337 asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid,
72338 struct compat_timespec __user *interval);
72339diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
72340index 842de22..7f3a41f 100644
72341--- a/include/linux/compiler-gcc4.h
72342+++ b/include/linux/compiler-gcc4.h
72343@@ -39,9 +39,29 @@
72344 # define __compiletime_warning(message) __attribute__((warning(message)))
72345 # define __compiletime_error(message) __attribute__((error(message)))
72346 #endif /* __CHECKER__ */
72347+
72348+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
72349+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
72350+#define __bos0(ptr) __bos((ptr), 0)
72351+#define __bos1(ptr) __bos((ptr), 1)
72352 #endif /* GCC_VERSION >= 40300 */
72353
72354 #if GCC_VERSION >= 40500
72355+
72356+#ifdef CONSTIFY_PLUGIN
72357+#define __no_const __attribute__((no_const))
72358+#define __do_const __attribute__((do_const))
72359+#endif
72360+
72361+#ifdef SIZE_OVERFLOW_PLUGIN
72362+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
72363+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
72364+#endif
72365+
72366+#ifdef LATENT_ENTROPY_PLUGIN
72367+#define __latent_entropy __attribute__((latent_entropy))
72368+#endif
72369+
72370 /*
72371 * Mark a position in code as unreachable. This can be used to
72372 * suppress control flow warnings after asm blocks that transfer
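
The __alloc_size/__bos* macros introduced above let FORTIFY-style checks see through custom allocators: alloc_size tells GCC which argument carries the allocation size, and __builtin_object_size can then report that size at the call site. A small demonstration (needs -O1 or higher, otherwise the builtin answers "unknown"):

	#include <stdio.h>
	#include <stdlib.h>

	static void *my_alloc(size_t n) __attribute__((alloc_size(1)));

	static void *my_alloc(size_t n)
	{
		return malloc(n);
	}

	int main(void)
	{
		char *p = my_alloc(32);

		/* (size_t)-1 means "unknown"; with alloc_size and optimization
		 * enabled this prints 32 */
		printf("%zu\n", __builtin_object_size(p, 0));
		free(p);
		return 0;
	}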
72373diff --git a/include/linux/compiler.h b/include/linux/compiler.h
72374index 92669cd..1771a15 100644
72375--- a/include/linux/compiler.h
72376+++ b/include/linux/compiler.h
72377@@ -5,11 +5,14 @@
72378
72379 #ifdef __CHECKER__
72380 # define __user __attribute__((noderef, address_space(1)))
72381+# define __force_user __force __user
72382 # define __kernel __attribute__((address_space(0)))
72383+# define __force_kernel __force __kernel
72384 # define __safe __attribute__((safe))
72385 # define __force __attribute__((force))
72386 # define __nocast __attribute__((nocast))
72387 # define __iomem __attribute__((noderef, address_space(2)))
72388+# define __force_iomem __force __iomem
72389 # define __must_hold(x) __attribute__((context(x,1,1)))
72390 # define __acquires(x) __attribute__((context(x,0,1)))
72391 # define __releases(x) __attribute__((context(x,1,0)))
72392@@ -17,20 +20,37 @@
72393 # define __release(x) __context__(x,-1)
72394 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
72395 # define __percpu __attribute__((noderef, address_space(3)))
72396+# define __force_percpu __force __percpu
72397 #ifdef CONFIG_SPARSE_RCU_POINTER
72398 # define __rcu __attribute__((noderef, address_space(4)))
72399+# define __force_rcu __force __rcu
72400 #else
72401 # define __rcu
72402+# define __force_rcu
72403 #endif
72404 extern void __chk_user_ptr(const volatile void __user *);
72405 extern void __chk_io_ptr(const volatile void __iomem *);
72406 #else
72407-# define __user
72408-# define __kernel
72409+# ifdef CHECKER_PLUGIN
72410+//# define __user
72411+//# define __force_user
72412+//# define __kernel
72413+//# define __force_kernel
72414+# else
72415+# ifdef STRUCTLEAK_PLUGIN
72416+# define __user __attribute__((user))
72417+# else
72418+# define __user
72419+# endif
72420+# define __force_user
72421+# define __kernel
72422+# define __force_kernel
72423+# endif
72424 # define __safe
72425 # define __force
72426 # define __nocast
72427 # define __iomem
72428+# define __force_iomem
72429 # define __chk_user_ptr(x) (void)0
72430 # define __chk_io_ptr(x) (void)0
72431 # define __builtin_warning(x, y...) (1)
72432@@ -41,7 +61,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
72433 # define __release(x) (void)0
72434 # define __cond_lock(x,c) (c)
72435 # define __percpu
72436+# define __force_percpu
72437 # define __rcu
72438+# define __force_rcu
72439 #endif
72440
72441 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
72442@@ -275,6 +297,26 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
72443 # define __attribute_const__ /* unimplemented */
72444 #endif
72445
72446+#ifndef __no_const
72447+# define __no_const
72448+#endif
72449+
72450+#ifndef __do_const
72451+# define __do_const
72452+#endif
72453+
72454+#ifndef __size_overflow
72455+# define __size_overflow(...)
72456+#endif
72457+
72458+#ifndef __intentional_overflow
72459+# define __intentional_overflow(...)
72460+#endif
72461+
72462+#ifndef __latent_entropy
72463+# define __latent_entropy
72464+#endif
72465+
72466 /*
72467 * Tell gcc if a function is cold. The compiler will assume any path
72468 * directly leading to the call is unlikely.
72469@@ -284,6 +326,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
72470 #define __cold
72471 #endif
72472
72473+#ifndef __alloc_size
72474+#define __alloc_size(...)
72475+#endif
72476+
72477+#ifndef __bos
72478+#define __bos(ptr, arg)
72479+#endif
72480+
72481+#ifndef __bos0
72482+#define __bos0(ptr)
72483+#endif
72484+
72485+#ifndef __bos1
72486+#define __bos1(ptr)
72487+#endif
72488+
72489 /* Simple shorthand for a section definition */
72490 #ifndef __section
72491 # define __section(S) __attribute__ ((__section__(#S)))
72492@@ -349,7 +407,8 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
72493 * use is to mediate communication between process-level code and irq/NMI
72494 * handlers, all running on the same CPU.
72495 */
72496-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
72497+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
72498+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
72499
72500 /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
72501 #ifdef CONFIG_KPROBES
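
The ACCESS_ONCE() change above adds const to the volatile cast, so plain reads keep working while any write through the macro becomes a compile error; writers must opt in explicitly via the new ACCESS_ONCE_RW(). A standalone sketch of the effect (typeof is the usual GCC extension):

#include <stdio.h>

#define ACCESS_ONCE(x)    (*(volatile const typeof(x) *)&(x))
#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))

static int shared_flag;

int main(void)
{
	ACCESS_ONCE_RW(shared_flag) = 1;	/* writes must use the RW variant */
	/* ACCESS_ONCE(shared_flag) = 1; would now fail: assignment to const */
	printf("%d\n", ACCESS_ONCE(shared_flag));	/* reads are unchanged */
	return 0;
}
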
72502diff --git a/include/linux/completion.h b/include/linux/completion.h
72503index 33f0280..35c6568 100644
72504--- a/include/linux/completion.h
72505+++ b/include/linux/completion.h
72506@@ -79,15 +79,15 @@ static inline void init_completion(struct completion *x)
72507 extern void wait_for_completion(struct completion *);
72508 extern void wait_for_completion_io(struct completion *);
72509 extern int wait_for_completion_interruptible(struct completion *x);
72510-extern int wait_for_completion_killable(struct completion *x);
72511+extern int wait_for_completion_killable(struct completion *x) __intentional_overflow(-1);
72512 extern unsigned long wait_for_completion_timeout(struct completion *x,
72513 unsigned long timeout);
72514 extern unsigned long wait_for_completion_io_timeout(struct completion *x,
72515 unsigned long timeout);
72516 extern long wait_for_completion_interruptible_timeout(
72517- struct completion *x, unsigned long timeout);
72518+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
72519 extern long wait_for_completion_killable_timeout(
72520- struct completion *x, unsigned long timeout);
72521+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
72522 extern bool try_wait_for_completion(struct completion *x);
72523 extern bool completion_done(struct completion *x);
72524
72525diff --git a/include/linux/configfs.h b/include/linux/configfs.h
72526index 34025df..d94bbbc 100644
72527--- a/include/linux/configfs.h
72528+++ b/include/linux/configfs.h
72529@@ -125,7 +125,7 @@ struct configfs_attribute {
72530 const char *ca_name;
72531 struct module *ca_owner;
72532 umode_t ca_mode;
72533-};
72534+} __do_const;
72535
72536 /*
72537 * Users often need to create attribute structures for their configurable
72538diff --git a/include/linux/cpu.h b/include/linux/cpu.h
72539index 9f3c7e8..a18c7b6 100644
72540--- a/include/linux/cpu.h
72541+++ b/include/linux/cpu.h
72542@@ -115,7 +115,7 @@ enum {
72543 /* Need to know about CPUs going up/down? */
72544 #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE)
72545 #define cpu_notifier(fn, pri) { \
72546- static struct notifier_block fn##_nb __cpuinitdata = \
72547+ static struct notifier_block fn##_nb = \
72548 { .notifier_call = fn, .priority = pri }; \
72549 register_cpu_notifier(&fn##_nb); \
72550 }
72551diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
72552index 037d36a..ca5fe6e 100644
72553--- a/include/linux/cpufreq.h
72554+++ b/include/linux/cpufreq.h
72555@@ -262,7 +262,7 @@ struct cpufreq_driver {
72556 int (*suspend) (struct cpufreq_policy *policy);
72557 int (*resume) (struct cpufreq_policy *policy);
72558 struct freq_attr **attr;
72559-};
72560+} __do_const;
72561
72562 /* flags */
72563
72564@@ -321,6 +321,7 @@ struct global_attr {
72565 ssize_t (*store)(struct kobject *a, struct attribute *b,
72566 const char *c, size_t count);
72567 };
72568+typedef struct global_attr __no_const global_attr_no_const;
72569
72570 #define define_one_global_ro(_name) \
72571 static struct global_attr _name = \
72572diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
72573index 8f04062..900239a 100644
72574--- a/include/linux/cpuidle.h
72575+++ b/include/linux/cpuidle.h
72576@@ -52,7 +52,8 @@ struct cpuidle_state {
72577 int index);
72578
72579 int (*enter_dead) (struct cpuidle_device *dev, int index);
72580-};
72581+} __do_const;
72582+typedef struct cpuidle_state __no_const cpuidle_state_no_const;
72583
72584 /* Idle State Flags */
72585 #define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */
72586@@ -191,7 +192,7 @@ struct cpuidle_governor {
72587 void (*reflect) (struct cpuidle_device *dev, int index);
72588
72589 struct module *owner;
72590-};
72591+} __do_const;
72592
72593 #ifdef CONFIG_CPU_IDLE
72594
72595diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
72596index d08e4d2..95fad61 100644
72597--- a/include/linux/cpumask.h
72598+++ b/include/linux/cpumask.h
72599@@ -118,17 +118,17 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
72600 }
72601
72602 /* Valid inputs for n are -1 and 0. */
72603-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
72604+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
72605 {
72606 return n+1;
72607 }
72608
72609-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
72610+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
72611 {
72612 return n+1;
72613 }
72614
72615-static inline unsigned int cpumask_next_and(int n,
72616+static inline unsigned int __intentional_overflow(-1) cpumask_next_and(int n,
72617 const struct cpumask *srcp,
72618 const struct cpumask *andp)
72619 {
72620@@ -167,7 +167,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
72621 *
72622 * Returns >= nr_cpu_ids if no further cpus set.
72623 */
72624-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
72625+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
72626 {
72627 /* -1 is a legal arg here. */
72628 if (n != -1)
72629@@ -182,7 +182,7 @@ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
72630 *
72631 * Returns >= nr_cpu_ids if no further cpus unset.
72632 */
72633-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
72634+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
72635 {
72636 /* -1 is a legal arg here. */
72637 if (n != -1)
72638@@ -190,7 +190,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
72639 return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
72640 }
72641
72642-int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
72643+int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *) __intentional_overflow(-1);
72644 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
72645
72646 /**
72647diff --git a/include/linux/cred.h b/include/linux/cred.h
72648index 04421e8..6bce4ef 100644
72649--- a/include/linux/cred.h
72650+++ b/include/linux/cred.h
72651@@ -194,6 +194,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
72652 static inline void validate_process_creds(void)
72653 {
72654 }
72655+static inline void validate_task_creds(struct task_struct *task)
72656+{
72657+}
72658 #endif
72659
72660 /**
72661diff --git a/include/linux/crypto.h b/include/linux/crypto.h
72662index b92eadf..b4ecdc1 100644
72663--- a/include/linux/crypto.h
72664+++ b/include/linux/crypto.h
72665@@ -373,7 +373,7 @@ struct cipher_tfm {
72666 const u8 *key, unsigned int keylen);
72667 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
72668 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
72669-};
72670+} __no_const;
72671
72672 struct hash_tfm {
72673 int (*init)(struct hash_desc *desc);
72674@@ -394,13 +394,13 @@ struct compress_tfm {
72675 int (*cot_decompress)(struct crypto_tfm *tfm,
72676 const u8 *src, unsigned int slen,
72677 u8 *dst, unsigned int *dlen);
72678-};
72679+} __no_const;
72680
72681 struct rng_tfm {
72682 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
72683 unsigned int dlen);
72684 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
72685-};
72686+} __no_const;
72687
72688 #define crt_ablkcipher crt_u.ablkcipher
72689 #define crt_aead crt_u.aead
72690diff --git a/include/linux/ctype.h b/include/linux/ctype.h
72691index 653589e..4ef254a 100644
72692--- a/include/linux/ctype.h
72693+++ b/include/linux/ctype.h
72694@@ -56,7 +56,7 @@ static inline unsigned char __toupper(unsigned char c)
72695 * Fast implementation of tolower() for internal usage. Do not use in your
72696 * code.
72697 */
72698-static inline char _tolower(const char c)
72699+static inline unsigned char _tolower(const unsigned char c)
72700 {
72701 return c | 0x20;
72702 }
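
_tolower() now takes and returns unsigned char: with plain char, any byte with the high bit set is negative on most ABIs, and c | 0x20 stays negative, surprising callers that expect a small positive code point. A quick standalone illustration:

#include <stdio.h>

static inline unsigned char _tolower(const unsigned char c)
{
	return c | 0x20;
}

int main(void)
{
	unsigned char c = 0xC4;	/* a high-bit byte, e.g. Latin-1, not ASCII */
	printf("%d\n", _tolower(c));	/* 228: never negative */
	printf("%c\n", _tolower('A'));	/* 'a' */
	return 0;
}
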
72703diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
72704index 7925bf0..d5143d2 100644
72705--- a/include/linux/decompress/mm.h
72706+++ b/include/linux/decompress/mm.h
72707@@ -77,7 +77,7 @@ static void free(void *where)
72708 * warnings when not needed (indeed large_malloc / large_free are not
72709 * needed by inflate */
72710
72711-#define malloc(a) kmalloc(a, GFP_KERNEL)
72712+#define malloc(a) kmalloc((a), GFP_KERNEL)
72713 #define free(a) kfree(a)
72714
72715 #define large_malloc(a) vmalloc(a)
72716diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
72717index fe8c447..bdc1f33 100644
72718--- a/include/linux/devfreq.h
72719+++ b/include/linux/devfreq.h
72720@@ -114,7 +114,7 @@ struct devfreq_governor {
72721 int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
72722 int (*event_handler)(struct devfreq *devfreq,
72723 unsigned int event, void *data);
72724-};
72725+} __do_const;
72726
72727 /**
72728 * struct devfreq - Device devfreq structure
72729diff --git a/include/linux/device.h b/include/linux/device.h
72730index c0a1261..dba7569 100644
72731--- a/include/linux/device.h
72732+++ b/include/linux/device.h
72733@@ -290,7 +290,7 @@ struct subsys_interface {
72734 struct list_head node;
72735 int (*add_dev)(struct device *dev, struct subsys_interface *sif);
72736 int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
72737-};
72738+} __do_const;
72739
72740 int subsys_interface_register(struct subsys_interface *sif);
72741 void subsys_interface_unregister(struct subsys_interface *sif);
72742@@ -473,7 +473,7 @@ struct device_type {
72743 void (*release)(struct device *dev);
72744
72745 const struct dev_pm_ops *pm;
72746-};
72747+} __do_const;
72748
72749 /* interface for exporting device attributes */
72750 struct device_attribute {
72751@@ -483,11 +483,12 @@ struct device_attribute {
72752 ssize_t (*store)(struct device *dev, struct device_attribute *attr,
72753 const char *buf, size_t count);
72754 };
72755+typedef struct device_attribute __no_const device_attribute_no_const;
72756
72757 struct dev_ext_attribute {
72758 struct device_attribute attr;
72759 void *var;
72760-};
72761+} __do_const;
72762
72763 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
72764 char *buf);
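
The pattern above recurs throughout this patch: structs of function pointers gain __do_const so the constify plugin can place their instances in read-only memory, and a *_no_const typedef opts out the rare instance that must stay writable (see d_attrs_muex in the extcon.h hunk below). A self-contained sketch with the plugin attributes stubbed out so it compiles standalone:

#include <stdio.h>

#define __do_const	/* plugin attribute stub: normally forces read-only */
#define __no_const	/* plugin attribute stub: normally opts back out */

struct my_attribute {
	const char *name;
	int (*show)(char *buf);
} __do_const;
typedef struct my_attribute __no_const my_attribute_no_const;

static int show_hello(char *buf) { return sprintf(buf, "hello\n"); }

/* the common case: a fixed ops table, constified under the plugin */
static struct my_attribute fixed_attr = { "fixed", show_hello };

int main(void)
{
	/* the exception: built up at runtime, hence the no_const typedef */
	my_attribute_no_const dynamic_attr;
	char buf[16];

	dynamic_attr = fixed_attr;
	dynamic_attr.name = "dynamic";
	dynamic_attr.show(buf);
	printf("%s: %s", dynamic_attr.name, buf);
	return 0;
}
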
72765diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
72766index 94af418..b1ca7a2 100644
72767--- a/include/linux/dma-mapping.h
72768+++ b/include/linux/dma-mapping.h
72769@@ -54,7 +54,7 @@ struct dma_map_ops {
72770 u64 (*get_required_mask)(struct device *dev);
72771 #endif
72772 int is_phys;
72773-};
72774+} __do_const;
72775
72776 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
72777
72778diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
72779index 96d3e4a..dc36433 100644
72780--- a/include/linux/dmaengine.h
72781+++ b/include/linux/dmaengine.h
72782@@ -1035,9 +1035,9 @@ struct dma_pinned_list {
72783 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
72784 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
72785
72786-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
72787+dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
72788 struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
72789-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
72790+dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
72791 struct dma_pinned_list *pinned_list, struct page *page,
72792 unsigned int offset, size_t len);
72793
72794diff --git a/include/linux/efi.h b/include/linux/efi.h
72795index 2bc0ad7..3f7b006 100644
72796--- a/include/linux/efi.h
72797+++ b/include/linux/efi.h
72798@@ -745,6 +745,7 @@ struct efivar_operations {
72799 efi_set_variable_t *set_variable;
72800 efi_query_variable_store_t *query_variable_store;
72801 };
72802+typedef struct efivar_operations __no_const efivar_operations_no_const;
72803
72804 struct efivars {
72805 /*
72806diff --git a/include/linux/elf.h b/include/linux/elf.h
72807index 40a3c0e..4c45a38 100644
72808--- a/include/linux/elf.h
72809+++ b/include/linux/elf.h
72810@@ -24,6 +24,7 @@ extern Elf32_Dyn _DYNAMIC [];
72811 #define elf_note elf32_note
72812 #define elf_addr_t Elf32_Off
72813 #define Elf_Half Elf32_Half
72814+#define elf_dyn Elf32_Dyn
72815
72816 #else
72817
72818@@ -34,6 +35,7 @@ extern Elf64_Dyn _DYNAMIC [];
72819 #define elf_note elf64_note
72820 #define elf_addr_t Elf64_Off
72821 #define Elf_Half Elf64_Half
72822+#define elf_dyn Elf64_Dyn
72823
72824 #endif
72825
72826diff --git a/include/linux/err.h b/include/linux/err.h
72827index f2edce2..cc2082c 100644
72828--- a/include/linux/err.h
72829+++ b/include/linux/err.h
72830@@ -19,12 +19,12 @@
72831
72832 #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
72833
72834-static inline void * __must_check ERR_PTR(long error)
72835+static inline void * __must_check __intentional_overflow(-1) ERR_PTR(long error)
72836 {
72837 return (void *) error;
72838 }
72839
72840-static inline long __must_check PTR_ERR(const void *ptr)
72841+static inline long __must_check __intentional_overflow(-1) PTR_ERR(const void *ptr)
72842 {
72843 return (long) ptr;
72844 }
72845diff --git a/include/linux/extcon.h b/include/linux/extcon.h
72846index fcb51c8..bdafcf6 100644
72847--- a/include/linux/extcon.h
72848+++ b/include/linux/extcon.h
72849@@ -134,7 +134,7 @@ struct extcon_dev {
72850 /* /sys/class/extcon/.../mutually_exclusive/... */
72851 struct attribute_group attr_g_muex;
72852 struct attribute **attrs_muex;
72853- struct device_attribute *d_attrs_muex;
72854+ device_attribute_no_const *d_attrs_muex;
72855 };
72856
72857 /**
72858diff --git a/include/linux/fb.h b/include/linux/fb.h
72859index d49c60f..2834fbe 100644
72860--- a/include/linux/fb.h
72861+++ b/include/linux/fb.h
72862@@ -304,7 +304,7 @@ struct fb_ops {
72863 /* called at KDB enter and leave time to prepare the console */
72864 int (*fb_debug_enter)(struct fb_info *info);
72865 int (*fb_debug_leave)(struct fb_info *info);
72866-};
72867+} __do_const;
72868
72869 #ifdef CONFIG_FB_TILEBLITTING
72870 #define FB_TILE_CURSOR_NONE 0
72871diff --git a/include/linux/filter.h b/include/linux/filter.h
72872index f65f5a6..2f4f93a 100644
72873--- a/include/linux/filter.h
72874+++ b/include/linux/filter.h
72875@@ -20,6 +20,7 @@ struct compat_sock_fprog {
72876
72877 struct sk_buff;
72878 struct sock;
72879+struct bpf_jit_work;
72880
72881 struct sk_filter
72882 {
72883@@ -27,6 +28,9 @@ struct sk_filter
72884 unsigned int len; /* Number of filter blocks */
72885 unsigned int (*bpf_func)(const struct sk_buff *skb,
72886 const struct sock_filter *filter);
72887+#ifdef CONFIG_BPF_JIT
72888+ struct bpf_jit_work *work;
72889+#endif
72890 struct rcu_head rcu;
72891 struct sock_filter insns[0];
72892 };
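
sk_filter above ends in the zero-length-array idiom (insns[0]), so the new JIT work pointer has to sit before the variable-sized tail. A minimal userspace sketch of that allocation layout, using the C99 flexible-array spelling:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct filter {
	unsigned int len;	/* number of entries in insns[] */
	unsigned short insns[];	/* C99 spelling of insns[0] */
};

static struct filter *filter_alloc(unsigned int n)
{
	/* header plus n tail entries, allocated in one block */
	struct filter *f = malloc(sizeof(*f) + n * sizeof(f->insns[0]));
	if (f) {
		f->len = n;
		memset(f->insns, 0, n * sizeof(f->insns[0]));
	}
	return f;
}

int main(void)
{
	struct filter *f = filter_alloc(4);
	printf("%u\n", f ? f->len : 0);
	free(f);
	return 0;
}
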
72893diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
72894index 8293262..2b3b8bd 100644
72895--- a/include/linux/frontswap.h
72896+++ b/include/linux/frontswap.h
72897@@ -11,7 +11,7 @@ struct frontswap_ops {
72898 int (*load)(unsigned, pgoff_t, struct page *);
72899 void (*invalidate_page)(unsigned, pgoff_t);
72900 void (*invalidate_area)(unsigned);
72901-};
72902+} __no_const;
72903
72904 extern bool frontswap_enabled;
72905 extern struct frontswap_ops *
72906diff --git a/include/linux/fs.h b/include/linux/fs.h
72907index 65c2be2..4c53f6e 100644
72908--- a/include/linux/fs.h
72909+++ b/include/linux/fs.h
72910@@ -1543,7 +1543,8 @@ struct file_operations {
72911 long (*fallocate)(struct file *file, int mode, loff_t offset,
72912 loff_t len);
72913 int (*show_fdinfo)(struct seq_file *m, struct file *f);
72914-};
72915+} __do_const;
72916+typedef struct file_operations __no_const file_operations_no_const;
72917
72918 struct inode_operations {
72919 struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
72920@@ -2688,4 +2689,14 @@ static inline void inode_has_no_xattr(struct inode *inode)
72921 inode->i_flags |= S_NOSEC;
72922 }
72923
72924+static inline bool is_sidechannel_device(const struct inode *inode)
72925+{
72926+#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
72927+ umode_t mode = inode->i_mode;
72928+ return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
72929+#else
72930+ return false;
72931+#endif
72932+}
72933+
72934 #endif /* _LINUX_FS_H */
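
is_sidechannel_device() above flags a char or block device node that is world-readable or world-writable; the fsnotify hunk below then skips access/modify events for such inodes so their timestamps leak nothing. The predicate, lifted into a standalone test:

#include <stdio.h>
#include <sys/stat.h>

static int is_sidechannel_mode(mode_t mode)
{
	return (S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH));
}

int main(void)
{
	printf("%d\n", is_sidechannel_mode(S_IFCHR | 0666));	/* 1: world-accessible chardev */
	printf("%d\n", is_sidechannel_mode(S_IFCHR | 0600));	/* 0: owner-only */
	printf("%d\n", is_sidechannel_mode(S_IFREG | 0666));	/* 0: not a device */
	return 0;
}
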
72935diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
72936index 2b93a9a..855d94a 100644
72937--- a/include/linux/fs_struct.h
72938+++ b/include/linux/fs_struct.h
72939@@ -6,7 +6,7 @@
72940 #include <linux/seqlock.h>
72941
72942 struct fs_struct {
72943- int users;
72944+ atomic_t users;
72945 spinlock_t lock;
72946 seqcount_t seq;
72947 int umask;
72948diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
72949index 5dfa0aa..6acf322 100644
72950--- a/include/linux/fscache-cache.h
72951+++ b/include/linux/fscache-cache.h
72952@@ -112,7 +112,7 @@ struct fscache_operation {
72953 fscache_operation_release_t release;
72954 };
72955
72956-extern atomic_t fscache_op_debug_id;
72957+extern atomic_unchecked_t fscache_op_debug_id;
72958 extern void fscache_op_work_func(struct work_struct *work);
72959
72960 extern void fscache_enqueue_operation(struct fscache_operation *);
72961@@ -134,7 +134,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
72962 INIT_WORK(&op->work, fscache_op_work_func);
72963 atomic_set(&op->usage, 1);
72964 op->state = FSCACHE_OP_ST_INITIALISED;
72965- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
72966+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
72967 op->processor = processor;
72968 op->release = release;
72969 INIT_LIST_HEAD(&op->pend_link);
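
fscache's op debug ID above only has to be roughly unique, so wraparound is harmless; under PAX_REFCOUNT, plain atomic_t increments trap on overflow, hence the switch to atomic_unchecked_t (genhd.h's sync_io below gets the same treatment). In userspace C11 terms, the unchecked variant is simply an atomic whose wrap is defined and expected:

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint debug_id = UINT_MAX - 1;	/* about to wrap: fine for an ID */

int main(void)
{
	printf("%u\n", atomic_fetch_add(&debug_id, 1) + 1);	/* UINT_MAX */
	printf("%u\n", atomic_fetch_add(&debug_id, 1) + 1);	/* 0: wrapped, by design */
	return 0;
}
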
72970diff --git a/include/linux/fscache.h b/include/linux/fscache.h
72971index 7a08623..4c07b0f 100644
72972--- a/include/linux/fscache.h
72973+++ b/include/linux/fscache.h
72974@@ -152,7 +152,7 @@ struct fscache_cookie_def {
72975 * - this is mandatory for any object that may have data
72976 */
72977 void (*now_uncached)(void *cookie_netfs_data);
72978-};
72979+} __do_const;
72980
72981 /*
72982 * fscache cached network filesystem type
72983diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
72984index a78680a..87bd73e 100644
72985--- a/include/linux/fsnotify.h
72986+++ b/include/linux/fsnotify.h
72987@@ -195,6 +195,9 @@ static inline void fsnotify_access(struct file *file)
72988 struct inode *inode = path->dentry->d_inode;
72989 __u32 mask = FS_ACCESS;
72990
72991+ if (is_sidechannel_device(inode))
72992+ return;
72993+
72994 if (S_ISDIR(inode->i_mode))
72995 mask |= FS_ISDIR;
72996
72997@@ -213,6 +216,9 @@ static inline void fsnotify_modify(struct file *file)
72998 struct inode *inode = path->dentry->d_inode;
72999 __u32 mask = FS_MODIFY;
73000
73001+ if (is_sidechannel_device(inode))
73002+ return;
73003+
73004 if (S_ISDIR(inode->i_mode))
73005 mask |= FS_ISDIR;
73006
73007@@ -315,7 +321,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
73008 */
73009 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
73010 {
73011- return kstrdup(name, GFP_KERNEL);
73012+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
73013 }
73014
73015 /*
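
The added cast in fsnotify_oldname_init() bridges the unsigned char * used for dentry names and the plain char * the string API expects. The same friction, reproduced standalone with strdup():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static const unsigned char *oldname_init(const unsigned char *name)
{
	/* strdup() wants char *; convert explicitly in both directions */
	return (const unsigned char *)strdup((const char *)name);
}

int main(void)
{
	const unsigned char name[] = "oldfile";
	const unsigned char *copy = oldname_init(name);

	printf("%s\n", copy);
	free((void *)copy);
	return 0;
}
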
73016diff --git a/include/linux/genhd.h b/include/linux/genhd.h
73017index 9f3c275..911b591 100644
73018--- a/include/linux/genhd.h
73019+++ b/include/linux/genhd.h
73020@@ -194,7 +194,7 @@ struct gendisk {
73021 struct kobject *slave_dir;
73022
73023 struct timer_rand_state *random;
73024- atomic_t sync_io; /* RAID */
73025+ atomic_unchecked_t sync_io; /* RAID */
73026 struct disk_events *ev;
73027 #ifdef CONFIG_BLK_DEV_INTEGRITY
73028 struct blk_integrity *integrity;
73029diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
73030index 023bc34..b02b46a 100644
73031--- a/include/linux/genl_magic_func.h
73032+++ b/include/linux/genl_magic_func.h
73033@@ -246,7 +246,7 @@ const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
73034 },
73035
73036 #define ZZZ_genl_ops CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
73037-static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
73038+static struct genl_ops ZZZ_genl_ops[] = {
73039 #include GENL_MAGIC_INCLUDE_FILE
73040 };
73041
73042diff --git a/include/linux/gfp.h b/include/linux/gfp.h
73043index 0f615eb..5c3832f 100644
73044--- a/include/linux/gfp.h
73045+++ b/include/linux/gfp.h
73046@@ -35,6 +35,13 @@ struct vm_area_struct;
73047 #define ___GFP_NO_KSWAPD 0x400000u
73048 #define ___GFP_OTHER_NODE 0x800000u
73049 #define ___GFP_WRITE 0x1000000u
73050+
73051+#ifdef CONFIG_PAX_USERCOPY_SLABS
73052+#define ___GFP_USERCOPY 0x2000000u
73053+#else
73054+#define ___GFP_USERCOPY 0
73055+#endif
73056+
73057 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
73058
73059 /*
73060@@ -92,6 +99,7 @@ struct vm_area_struct;
73061 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
73062 #define __GFP_KMEMCG ((__force gfp_t)___GFP_KMEMCG) /* Allocation comes from a memcg-accounted resource */
73063 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
73064+#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
73065
73066 /*
73067 * This may seem redundant, but it's a way of annotating false positives vs.
73068@@ -99,7 +107,7 @@ struct vm_area_struct;
73069 */
73070 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
73071
73072-#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
73073+#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
73074 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
73075
73076 /* This equals 0, but use constants in case they ever change */
73077@@ -153,6 +161,8 @@ struct vm_area_struct;
73078 /* 4GB DMA on some platforms */
73079 #define GFP_DMA32 __GFP_DMA32
73080
73081+#define GFP_USERCOPY __GFP_USERCOPY
73082+
73083 /* Convert GFP flags to their corresponding migrate type */
73084 static inline int allocflags_to_migratetype(gfp_t gfp_flags)
73085 {
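
The arithmetic behind the gfp.h hunk: ___GFP_USERCOPY claims bit 25 (0x2000000), so __GFP_BITS_SHIFT must grow from 25 to 26 for __GFP_BITS_MASK to keep covering every flag. Checked standalone:

#include <assert.h>
#include <stdio.h>

#define ___GFP_USERCOPY  0x2000000u
#define __GFP_BITS_SHIFT 26
#define __GFP_BITS_MASK  ((1u << __GFP_BITS_SHIFT) - 1)

int main(void)
{
	assert(___GFP_USERCOPY == 1u << 25);		/* the new flag is bit 25 */
	assert(___GFP_USERCOPY & __GFP_BITS_MASK);	/* covered by the widened mask */
	printf("mask=%#x\n", __GFP_BITS_MASK);		/* 0x3ffffff */
	return 0;
}
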
73086diff --git a/include/linux/gracl.h b/include/linux/gracl.h
73087new file mode 100644
73088index 0000000..ebe6d72
73089--- /dev/null
73090+++ b/include/linux/gracl.h
73091@@ -0,0 +1,319 @@
73092+#ifndef GR_ACL_H
73093+#define GR_ACL_H
73094+
73095+#include <linux/grdefs.h>
73096+#include <linux/resource.h>
73097+#include <linux/capability.h>
73098+#include <linux/dcache.h>
73099+#include <asm/resource.h>
73100+
73101+/* Major status information */
73102+
73103+#define GR_VERSION "grsecurity 2.9.1"
73104+#define GRSECURITY_VERSION 0x2901
73105+
73106+enum {
73107+ GR_SHUTDOWN = 0,
73108+ GR_ENABLE = 1,
73109+ GR_SPROLE = 2,
73110+ GR_RELOAD = 3,
73111+ GR_SEGVMOD = 4,
73112+ GR_STATUS = 5,
73113+ GR_UNSPROLE = 6,
73114+ GR_PASSSET = 7,
73115+ GR_SPROLEPAM = 8,
73116+};
73117+
73118+/* Password setup definitions
73119+ * kernel/grhash.c */
73120+enum {
73121+ GR_PW_LEN = 128,
73122+ GR_SALT_LEN = 16,
73123+ GR_SHA_LEN = 32,
73124+};
73125+
73126+enum {
73127+ GR_SPROLE_LEN = 64,
73128+};
73129+
73130+enum {
73131+ GR_NO_GLOB = 0,
73132+ GR_REG_GLOB,
73133+ GR_CREATE_GLOB
73134+};
73135+
73136+#define GR_NLIMITS 32
73137+
73138+/* Begin Data Structures */
73139+
73140+struct sprole_pw {
73141+ unsigned char *rolename;
73142+ unsigned char salt[GR_SALT_LEN];
73143+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
73144+};
73145+
73146+struct name_entry {
73147+ __u32 key;
73148+ ino_t inode;
73149+ dev_t device;
73150+ char *name;
73151+ __u16 len;
73152+ __u8 deleted;
73153+ struct name_entry *prev;
73154+ struct name_entry *next;
73155+};
73156+
73157+struct inodev_entry {
73158+ struct name_entry *nentry;
73159+ struct inodev_entry *prev;
73160+ struct inodev_entry *next;
73161+};
73162+
73163+struct acl_role_db {
73164+ struct acl_role_label **r_hash;
73165+ __u32 r_size;
73166+};
73167+
73168+struct inodev_db {
73169+ struct inodev_entry **i_hash;
73170+ __u32 i_size;
73171+};
73172+
73173+struct name_db {
73174+ struct name_entry **n_hash;
73175+ __u32 n_size;
73176+};
73177+
73178+struct crash_uid {
73179+ uid_t uid;
73180+ unsigned long expires;
73181+};
73182+
73183+struct gr_hash_struct {
73184+ void **table;
73185+ void **nametable;
73186+ void *first;
73187+ __u32 table_size;
73188+ __u32 used_size;
73189+ int type;
73190+};
73191+
73192+/* Userspace Grsecurity ACL data structures */
73193+
73194+struct acl_subject_label {
73195+ char *filename;
73196+ ino_t inode;
73197+ dev_t device;
73198+ __u32 mode;
73199+ kernel_cap_t cap_mask;
73200+ kernel_cap_t cap_lower;
73201+ kernel_cap_t cap_invert_audit;
73202+
73203+ struct rlimit res[GR_NLIMITS];
73204+ __u32 resmask;
73205+
73206+ __u8 user_trans_type;
73207+ __u8 group_trans_type;
73208+ uid_t *user_transitions;
73209+ gid_t *group_transitions;
73210+ __u16 user_trans_num;
73211+ __u16 group_trans_num;
73212+
73213+ __u32 sock_families[2];
73214+ __u32 ip_proto[8];
73215+ __u32 ip_type;
73216+ struct acl_ip_label **ips;
73217+ __u32 ip_num;
73218+ __u32 inaddr_any_override;
73219+
73220+ __u32 crashes;
73221+ unsigned long expires;
73222+
73223+ struct acl_subject_label *parent_subject;
73224+ struct gr_hash_struct *hash;
73225+ struct acl_subject_label *prev;
73226+ struct acl_subject_label *next;
73227+
73228+ struct acl_object_label **obj_hash;
73229+ __u32 obj_hash_size;
73230+ __u16 pax_flags;
73231+};
73232+
73233+struct role_allowed_ip {
73234+ __u32 addr;
73235+ __u32 netmask;
73236+
73237+ struct role_allowed_ip *prev;
73238+ struct role_allowed_ip *next;
73239+};
73240+
73241+struct role_transition {
73242+ char *rolename;
73243+
73244+ struct role_transition *prev;
73245+ struct role_transition *next;
73246+};
73247+
73248+struct acl_role_label {
73249+ char *rolename;
73250+ uid_t uidgid;
73251+ __u16 roletype;
73252+
73253+ __u16 auth_attempts;
73254+ unsigned long expires;
73255+
73256+ struct acl_subject_label *root_label;
73257+ struct gr_hash_struct *hash;
73258+
73259+ struct acl_role_label *prev;
73260+ struct acl_role_label *next;
73261+
73262+ struct role_transition *transitions;
73263+ struct role_allowed_ip *allowed_ips;
73264+ uid_t *domain_children;
73265+ __u16 domain_child_num;
73266+
73267+ umode_t umask;
73268+
73269+ struct acl_subject_label **subj_hash;
73270+ __u32 subj_hash_size;
73271+};
73272+
73273+struct user_acl_role_db {
73274+ struct acl_role_label **r_table;
73275+ __u32 num_pointers; /* Number of allocations to track */
73276+ __u32 num_roles; /* Number of roles */
73277+ __u32 num_domain_children; /* Number of domain children */
73278+ __u32 num_subjects; /* Number of subjects */
73279+ __u32 num_objects; /* Number of objects */
73280+};
73281+
73282+struct acl_object_label {
73283+ char *filename;
73284+ ino_t inode;
73285+ dev_t device;
73286+ __u32 mode;
73287+
73288+ struct acl_subject_label *nested;
73289+ struct acl_object_label *globbed;
73290+
73291+ /* next two structures not used */
73292+
73293+ struct acl_object_label *prev;
73294+ struct acl_object_label *next;
73295+};
73296+
73297+struct acl_ip_label {
73298+ char *iface;
73299+ __u32 addr;
73300+ __u32 netmask;
73301+ __u16 low, high;
73302+ __u8 mode;
73303+ __u32 type;
73304+ __u32 proto[8];
73305+
73306+ /* next two structures not used */
73307+
73308+ struct acl_ip_label *prev;
73309+ struct acl_ip_label *next;
73310+};
73311+
73312+struct gr_arg {
73313+ struct user_acl_role_db role_db;
73314+ unsigned char pw[GR_PW_LEN];
73315+ unsigned char salt[GR_SALT_LEN];
73316+ unsigned char sum[GR_SHA_LEN];
73317+ unsigned char sp_role[GR_SPROLE_LEN];
73318+ struct sprole_pw *sprole_pws;
73319+ dev_t segv_device;
73320+ ino_t segv_inode;
73321+ uid_t segv_uid;
73322+ __u16 num_sprole_pws;
73323+ __u16 mode;
73324+};
73325+
73326+struct gr_arg_wrapper {
73327+ struct gr_arg *arg;
73328+ __u32 version;
73329+ __u32 size;
73330+};
73331+
73332+struct subject_map {
73333+ struct acl_subject_label *user;
73334+ struct acl_subject_label *kernel;
73335+ struct subject_map *prev;
73336+ struct subject_map *next;
73337+};
73338+
73339+struct acl_subj_map_db {
73340+ struct subject_map **s_hash;
73341+ __u32 s_size;
73342+};
73343+
73344+/* End Data Structures Section */
73345+
73346+/* Hash functions generated by empirical testing by Brad Spengler.
73347+ Makes good use of the low bits of the inode. Generally 0-1 loop
73348+ iterations for a successful match, 0-3 for an unsuccessful match.
73349+ Shift/add algorithm with modulus of table size and an XOR. */
73350+
73351+static __inline__ unsigned int
73352+gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
73353+{
73354+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
73355+}
73356+
73357+static __inline__ unsigned int
73358+gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
73359+{
73360+ return ((const unsigned long)userp % sz);
73361+}
73362+
73363+static __inline__ unsigned int
73364+gr_fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
73365+{
73366+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
73367+}
73368+
73369+static __inline__ unsigned int
73370+gr_nhash(const char *name, const __u16 len, const unsigned int sz)
73371+{
73372+ return full_name_hash((const unsigned char *)name, len) % sz;
73373+}
73374+
73375+#define FOR_EACH_ROLE_START(role) \
73376+ role = role_list; \
73377+ while (role) {
73378+
73379+#define FOR_EACH_ROLE_END(role) \
73380+ role = role->prev; \
73381+ }
73382+
73383+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
73384+ subj = NULL; \
73385+ iter = 0; \
73386+ while (iter < role->subj_hash_size) { \
73387+ if (subj == NULL) \
73388+ subj = role->subj_hash[iter]; \
73389+ if (subj == NULL) { \
73390+ iter++; \
73391+ continue; \
73392+ }
73393+
73394+#define FOR_EACH_SUBJECT_END(subj,iter) \
73395+ subj = subj->next; \
73396+ if (subj == NULL) \
73397+ iter++; \
73398+ }
73399+
73400+
73401+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
73402+ subj = role->hash->first; \
73403+ while (subj != NULL) {
73404+
73405+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
73406+ subj = subj->next; \
73407+ }
73408+
73409+#endif
73410+
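
gr_fhash() above mixes the inode and device numbers with shifts and an XOR before reducing modulo the table size. A userspace exercise of the same function, with the kernel's ino_t/dev_t replaced by local stand-ins and a hypothetical device number:

#include <stdio.h>

typedef unsigned long ino_t_;	/* stand-in for the kernel ino_t */
typedef unsigned int dev_t_;	/* stand-in for the kernel dev_t */

static unsigned int gr_fhash(const ino_t_ ino, const dev_t_ dev, const unsigned int sz)
{
	return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
}

int main(void)
{
	dev_t_ dev = 0x801;	/* hypothetical device number */

	/* consecutive inodes land in well-spread buckets */
	for (ino_t_ ino = 100; ino < 104; ino++)
		printf("inode %lu -> bucket %u\n", ino, gr_fhash(ino, dev, 256));
	return 0;
}
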
73411diff --git a/include/linux/gracl_compat.h b/include/linux/gracl_compat.h
73412new file mode 100644
73413index 0000000..33ebd1f
73414--- /dev/null
73415+++ b/include/linux/gracl_compat.h
73416@@ -0,0 +1,156 @@
73417+#ifndef GR_ACL_COMPAT_H
73418+#define GR_ACL_COMPAT_H
73419+
73420+#include <linux/resource.h>
73421+#include <asm/resource.h>
73422+
73423+struct sprole_pw_compat {
73424+ compat_uptr_t rolename;
73425+ unsigned char salt[GR_SALT_LEN];
73426+ unsigned char sum[GR_SHA_LEN];
73427+};
73428+
73429+struct gr_hash_struct_compat {
73430+ compat_uptr_t table;
73431+ compat_uptr_t nametable;
73432+ compat_uptr_t first;
73433+ __u32 table_size;
73434+ __u32 used_size;
73435+ int type;
73436+};
73437+
73438+struct acl_subject_label_compat {
73439+ compat_uptr_t filename;
73440+ compat_ino_t inode;
73441+ __u32 device;
73442+ __u32 mode;
73443+ kernel_cap_t cap_mask;
73444+ kernel_cap_t cap_lower;
73445+ kernel_cap_t cap_invert_audit;
73446+
73447+ struct compat_rlimit res[GR_NLIMITS];
73448+ __u32 resmask;
73449+
73450+ __u8 user_trans_type;
73451+ __u8 group_trans_type;
73452+ compat_uptr_t user_transitions;
73453+ compat_uptr_t group_transitions;
73454+ __u16 user_trans_num;
73455+ __u16 group_trans_num;
73456+
73457+ __u32 sock_families[2];
73458+ __u32 ip_proto[8];
73459+ __u32 ip_type;
73460+ compat_uptr_t ips;
73461+ __u32 ip_num;
73462+ __u32 inaddr_any_override;
73463+
73464+ __u32 crashes;
73465+ compat_ulong_t expires;
73466+
73467+ compat_uptr_t parent_subject;
73468+ compat_uptr_t hash;
73469+ compat_uptr_t prev;
73470+ compat_uptr_t next;
73471+
73472+ compat_uptr_t obj_hash;
73473+ __u32 obj_hash_size;
73474+ __u16 pax_flags;
73475+};
73476+
73477+struct role_allowed_ip_compat {
73478+ __u32 addr;
73479+ __u32 netmask;
73480+
73481+ compat_uptr_t prev;
73482+ compat_uptr_t next;
73483+};
73484+
73485+struct role_transition_compat {
73486+ compat_uptr_t rolename;
73487+
73488+ compat_uptr_t prev;
73489+ compat_uptr_t next;
73490+};
73491+
73492+struct acl_role_label_compat {
73493+ compat_uptr_t rolename;
73494+ uid_t uidgid;
73495+ __u16 roletype;
73496+
73497+ __u16 auth_attempts;
73498+ compat_ulong_t expires;
73499+
73500+ compat_uptr_t root_label;
73501+ compat_uptr_t hash;
73502+
73503+ compat_uptr_t prev;
73504+ compat_uptr_t next;
73505+
73506+ compat_uptr_t transitions;
73507+ compat_uptr_t allowed_ips;
73508+ compat_uptr_t domain_children;
73509+ __u16 domain_child_num;
73510+
73511+ umode_t umask;
73512+
73513+ compat_uptr_t subj_hash;
73514+ __u32 subj_hash_size;
73515+};
73516+
73517+struct user_acl_role_db_compat {
73518+ compat_uptr_t r_table;
73519+ __u32 num_pointers;
73520+ __u32 num_roles;
73521+ __u32 num_domain_children;
73522+ __u32 num_subjects;
73523+ __u32 num_objects;
73524+};
73525+
73526+struct acl_object_label_compat {
73527+ compat_uptr_t filename;
73528+ compat_ino_t inode;
73529+ __u32 device;
73530+ __u32 mode;
73531+
73532+ compat_uptr_t nested;
73533+ compat_uptr_t globbed;
73534+
73535+ compat_uptr_t prev;
73536+ compat_uptr_t next;
73537+};
73538+
73539+struct acl_ip_label_compat {
73540+ compat_uptr_t iface;
73541+ __u32 addr;
73542+ __u32 netmask;
73543+ __u16 low, high;
73544+ __u8 mode;
73545+ __u32 type;
73546+ __u32 proto[8];
73547+
73548+ compat_uptr_t prev;
73549+ compat_uptr_t next;
73550+};
73551+
73552+struct gr_arg_compat {
73553+ struct user_acl_role_db_compat role_db;
73554+ unsigned char pw[GR_PW_LEN];
73555+ unsigned char salt[GR_SALT_LEN];
73556+ unsigned char sum[GR_SHA_LEN];
73557+ unsigned char sp_role[GR_SPROLE_LEN];
73558+ compat_uptr_t sprole_pws;
73559+ __u32 segv_device;
73560+ compat_ino_t segv_inode;
73561+ uid_t segv_uid;
73562+ __u16 num_sprole_pws;
73563+ __u16 mode;
73564+};
73565+
73566+struct gr_arg_wrapper_compat {
73567+ compat_uptr_t arg;
73568+ __u32 version;
73569+ __u32 size;
73570+};
73571+
73572+#endif
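
Every pointer in these _compat mirrors is a fixed-width compat_uptr_t, so a 64-bit kernel can parse the 32-bit userspace layout byte-for-byte and widen pointers explicitly. A sketch of that idea, with a local stand-in for the kernel's compat_ptr():

#include <stdint.h>
#include <stdio.h>

typedef uint32_t compat_uptr_t;

struct role_transition_compat {
	compat_uptr_t rolename;	/* 32-bit user pointer, not char * */
};

static void *compat_ptr(compat_uptr_t uptr)
{
	return (void *)(uintptr_t)uptr;	/* zero-extend to a native pointer */
}

int main(void)
{
	struct role_transition_compat rt = { .rolename = 0x0804a000u };	/* hypothetical 32-bit address */

	printf("native pointer: %p\n", compat_ptr(rt.rolename));
	printf("struct size: %zu\n", sizeof(rt));	/* fixed, regardless of kernel word size */
	return 0;
}
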
73573diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
73574new file mode 100644
73575index 0000000..323ecf2
73576--- /dev/null
73577+++ b/include/linux/gralloc.h
73578@@ -0,0 +1,9 @@
73579+#ifndef __GRALLOC_H
73580+#define __GRALLOC_H
73581+
73582+void acl_free_all(void);
73583+int acl_alloc_stack_init(unsigned long size);
73584+void *acl_alloc(unsigned long len);
73585+void *acl_alloc_num(unsigned long num, unsigned long len);
73586+
73587+#endif
73588diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
73589new file mode 100644
73590index 0000000..be66033
73591--- /dev/null
73592+++ b/include/linux/grdefs.h
73593@@ -0,0 +1,140 @@
73594+#ifndef GRDEFS_H
73595+#define GRDEFS_H
73596+
73597+/* Begin grsecurity status declarations */
73598+
73599+enum {
73600+ GR_READY = 0x01,
73601+	GR_STATUS_INIT	= 0x00	/* disabled state */
73602+};
73603+
73604+/* Begin ACL declarations */
73605+
73606+/* Role flags */
73607+
73608+enum {
73609+ GR_ROLE_USER = 0x0001,
73610+ GR_ROLE_GROUP = 0x0002,
73611+ GR_ROLE_DEFAULT = 0x0004,
73612+ GR_ROLE_SPECIAL = 0x0008,
73613+ GR_ROLE_AUTH = 0x0010,
73614+ GR_ROLE_NOPW = 0x0020,
73615+ GR_ROLE_GOD = 0x0040,
73616+ GR_ROLE_LEARN = 0x0080,
73617+ GR_ROLE_TPE = 0x0100,
73618+ GR_ROLE_DOMAIN = 0x0200,
73619+ GR_ROLE_PAM = 0x0400,
73620+ GR_ROLE_PERSIST = 0x0800
73621+};
73622+
73623+/* ACL Subject and Object mode flags */
73624+enum {
73625+ GR_DELETED = 0x80000000
73626+};
73627+
73628+/* ACL Object-only mode flags */
73629+enum {
73630+ GR_READ = 0x00000001,
73631+ GR_APPEND = 0x00000002,
73632+ GR_WRITE = 0x00000004,
73633+ GR_EXEC = 0x00000008,
73634+ GR_FIND = 0x00000010,
73635+ GR_INHERIT = 0x00000020,
73636+ GR_SETID = 0x00000040,
73637+ GR_CREATE = 0x00000080,
73638+ GR_DELETE = 0x00000100,
73639+ GR_LINK = 0x00000200,
73640+ GR_AUDIT_READ = 0x00000400,
73641+ GR_AUDIT_APPEND = 0x00000800,
73642+ GR_AUDIT_WRITE = 0x00001000,
73643+ GR_AUDIT_EXEC = 0x00002000,
73644+ GR_AUDIT_FIND = 0x00004000,
73645+ GR_AUDIT_INHERIT= 0x00008000,
73646+ GR_AUDIT_SETID = 0x00010000,
73647+ GR_AUDIT_CREATE = 0x00020000,
73648+ GR_AUDIT_DELETE = 0x00040000,
73649+ GR_AUDIT_LINK = 0x00080000,
73650+ GR_PTRACERD = 0x00100000,
73651+ GR_NOPTRACE = 0x00200000,
73652+ GR_SUPPRESS = 0x00400000,
73653+ GR_NOLEARN = 0x00800000,
73654+ GR_INIT_TRANSFER= 0x01000000
73655+};
73656+
73657+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
73658+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
73659+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
73660+
73661+/* ACL subject-only mode flags */
73662+enum {
73663+ GR_KILL = 0x00000001,
73664+ GR_VIEW = 0x00000002,
73665+ GR_PROTECTED = 0x00000004,
73666+ GR_LEARN = 0x00000008,
73667+ GR_OVERRIDE = 0x00000010,
73668+ /* just a placeholder, this mode is only used in userspace */
73669+ GR_DUMMY = 0x00000020,
73670+ GR_PROTSHM = 0x00000040,
73671+ GR_KILLPROC = 0x00000080,
73672+ GR_KILLIPPROC = 0x00000100,
73673+ /* just a placeholder, this mode is only used in userspace */
73674+ GR_NOTROJAN = 0x00000200,
73675+ GR_PROTPROCFD = 0x00000400,
73676+ GR_PROCACCT = 0x00000800,
73677+ GR_RELAXPTRACE = 0x00001000,
73678+ //GR_NESTED = 0x00002000,
73679+ GR_INHERITLEARN = 0x00004000,
73680+ GR_PROCFIND = 0x00008000,
73681+ GR_POVERRIDE = 0x00010000,
73682+ GR_KERNELAUTH = 0x00020000,
73683+ GR_ATSECURE = 0x00040000,
73684+ GR_SHMEXEC = 0x00080000
73685+};
73686+
73687+enum {
73688+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
73689+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
73690+ GR_PAX_ENABLE_MPROTECT = 0x0004,
73691+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
73692+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
73693+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
73694+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
73695+ GR_PAX_DISABLE_MPROTECT = 0x0400,
73696+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
73697+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
73698+};
73699+
73700+enum {
73701+ GR_ID_USER = 0x01,
73702+ GR_ID_GROUP = 0x02,
73703+};
73704+
73705+enum {
73706+ GR_ID_ALLOW = 0x01,
73707+ GR_ID_DENY = 0x02,
73708+};
73709+
73710+#define GR_CRASH_RES 31
73711+#define GR_UIDTABLE_MAX 500
73712+
73713+/* begin resource learning section */
73714+enum {
73715+ GR_RLIM_CPU_BUMP = 60,
73716+ GR_RLIM_FSIZE_BUMP = 50000,
73717+ GR_RLIM_DATA_BUMP = 10000,
73718+ GR_RLIM_STACK_BUMP = 1000,
73719+ GR_RLIM_CORE_BUMP = 10000,
73720+ GR_RLIM_RSS_BUMP = 500000,
73721+ GR_RLIM_NPROC_BUMP = 1,
73722+ GR_RLIM_NOFILE_BUMP = 5,
73723+ GR_RLIM_MEMLOCK_BUMP = 50000,
73724+ GR_RLIM_AS_BUMP = 500000,
73725+ GR_RLIM_LOCKS_BUMP = 2,
73726+ GR_RLIM_SIGPENDING_BUMP = 5,
73727+ GR_RLIM_MSGQUEUE_BUMP = 10000,
73728+ GR_RLIM_NICE_BUMP = 1,
73729+ GR_RLIM_RTPRIO_BUMP = 1,
73730+ GR_RLIM_RTTIME_BUMP = 1000000
73731+};
73732+
73733+#endif
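
The object-mode flags above are laid out so each GR_AUDIT_* flag sits exactly ten bits left of its base flag (GR_READ 0x1 vs GR_AUDIT_READ 0x400, through GR_LINK 0x200 vs GR_AUDIT_LINK 0x80000), so a request mode converts to its audit counterpart with a single shift. A standalone check of that invariant:

#include <assert.h>
#include <stdio.h>

enum {
	GR_READ       = 0x00000001,
	GR_LINK       = 0x00000200,
	GR_AUDIT_READ = 0x00000400,
	GR_AUDIT_LINK = 0x00080000,
};

int main(void)
{
	assert(GR_AUDIT_READ == GR_READ << 10);	/* audit flag = base flag << 10 */
	assert(GR_AUDIT_LINK == GR_LINK << 10);
	printf("audit(read|link) = %#x\n", (GR_READ | GR_LINK) << 10);
	return 0;
}
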
73734diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
73735new file mode 100644
73736index 0000000..e337683
73737--- /dev/null
73738+++ b/include/linux/grinternal.h
73739@@ -0,0 +1,229 @@
73740+#ifndef __GRINTERNAL_H
73741+#define __GRINTERNAL_H
73742+
73743+#ifdef CONFIG_GRKERNSEC
73744+
73745+#include <linux/fs.h>
73746+#include <linux/mnt_namespace.h>
73747+#include <linux/nsproxy.h>
73748+#include <linux/gracl.h>
73749+#include <linux/grdefs.h>
73750+#include <linux/grmsg.h>
73751+
73752+void gr_add_learn_entry(const char *fmt, ...)
73753+ __attribute__ ((format (printf, 1, 2)));
73754+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
73755+ const struct vfsmount *mnt);
73756+__u32 gr_check_create(const struct dentry *new_dentry,
73757+ const struct dentry *parent,
73758+ const struct vfsmount *mnt, const __u32 mode);
73759+int gr_check_protected_task(const struct task_struct *task);
73760+__u32 to_gr_audit(const __u32 reqmode);
73761+int gr_set_acls(const int type);
73762+int gr_apply_subject_to_task(struct task_struct *task);
73763+int gr_acl_is_enabled(void);
73764+char gr_roletype_to_char(void);
73765+
73766+void gr_handle_alertkill(struct task_struct *task);
73767+char *gr_to_filename(const struct dentry *dentry,
73768+ const struct vfsmount *mnt);
73769+char *gr_to_filename1(const struct dentry *dentry,
73770+ const struct vfsmount *mnt);
73771+char *gr_to_filename2(const struct dentry *dentry,
73772+ const struct vfsmount *mnt);
73773+char *gr_to_filename3(const struct dentry *dentry,
73774+ const struct vfsmount *mnt);
73775+
73776+extern int grsec_enable_ptrace_readexec;
73777+extern int grsec_enable_harden_ptrace;
73778+extern int grsec_enable_link;
73779+extern int grsec_enable_fifo;
73780+extern int grsec_enable_execve;
73781+extern int grsec_enable_shm;
73782+extern int grsec_enable_execlog;
73783+extern int grsec_enable_signal;
73784+extern int grsec_enable_audit_ptrace;
73785+extern int grsec_enable_forkfail;
73786+extern int grsec_enable_time;
73787+extern int grsec_enable_rofs;
73788+extern int grsec_deny_new_usb;
73789+extern int grsec_enable_chroot_shmat;
73790+extern int grsec_enable_chroot_mount;
73791+extern int grsec_enable_chroot_double;
73792+extern int grsec_enable_chroot_pivot;
73793+extern int grsec_enable_chroot_chdir;
73794+extern int grsec_enable_chroot_chmod;
73795+extern int grsec_enable_chroot_mknod;
73796+extern int grsec_enable_chroot_fchdir;
73797+extern int grsec_enable_chroot_nice;
73798+extern int grsec_enable_chroot_execlog;
73799+extern int grsec_enable_chroot_caps;
73800+extern int grsec_enable_chroot_sysctl;
73801+extern int grsec_enable_chroot_unix;
73802+extern int grsec_enable_symlinkown;
73803+extern kgid_t grsec_symlinkown_gid;
73804+extern int grsec_enable_tpe;
73805+extern kgid_t grsec_tpe_gid;
73806+extern int grsec_enable_tpe_all;
73807+extern int grsec_enable_tpe_invert;
73808+extern int grsec_enable_socket_all;
73809+extern kgid_t grsec_socket_all_gid;
73810+extern int grsec_enable_socket_client;
73811+extern kgid_t grsec_socket_client_gid;
73812+extern int grsec_enable_socket_server;
73813+extern kgid_t grsec_socket_server_gid;
73814+extern kgid_t grsec_audit_gid;
73815+extern int grsec_enable_group;
73816+extern int grsec_enable_log_rwxmaps;
73817+extern int grsec_enable_mount;
73818+extern int grsec_enable_chdir;
73819+extern int grsec_resource_logging;
73820+extern int grsec_enable_blackhole;
73821+extern int grsec_lastack_retries;
73822+extern int grsec_enable_brute;
73823+extern int grsec_lock;
73824+
73825+extern spinlock_t grsec_alert_lock;
73826+extern unsigned long grsec_alert_wtime;
73827+extern unsigned long grsec_alert_fyet;
73828+
73829+extern spinlock_t grsec_audit_lock;
73830+
73831+extern rwlock_t grsec_exec_file_lock;
73832+
73833+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
73834+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
73835+ (tsk)->exec_file->f_path.mnt) : "/")
73836+
73837+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
73838+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
73839+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
73840+
73841+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
73842+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
73843+ (tsk)->exec_file->f_path.mnt) : "/")
73844+
73845+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
73846+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
73847+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
73848+
73849+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
73850+
73851+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
73852+
73853+static inline bool gr_is_same_file(const struct file *file1, const struct file *file2)
73854+{
73855+ if (file1 && file2) {
73856+ const struct inode *inode1 = file1->f_path.dentry->d_inode;
73857+ const struct inode *inode2 = file2->f_path.dentry->d_inode;
73858+ if (inode1->i_ino == inode2->i_ino && inode1->i_sb->s_dev == inode2->i_sb->s_dev)
73859+ return true;
73860+ }
73861+
73862+ return false;
73863+}
73864+
73865+#define GR_CHROOT_CAPS {{ \
73866+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
73867+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
73868+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
73869+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
73870+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
73871+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
73872+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
73873+
73874+#define security_learn(normal_msg,args...) \
73875+({ \
73876+ read_lock(&grsec_exec_file_lock); \
73877+ gr_add_learn_entry(normal_msg "\n", ## args); \
73878+ read_unlock(&grsec_exec_file_lock); \
73879+})
73880+
73881+enum {
73882+ GR_DO_AUDIT,
73883+ GR_DONT_AUDIT,
73884+ /* used for non-audit messages that we shouldn't kill the task on */
73885+ GR_DONT_AUDIT_GOOD
73886+};
73887+
73888+enum {
73889+ GR_TTYSNIFF,
73890+ GR_RBAC,
73891+ GR_RBAC_STR,
73892+ GR_STR_RBAC,
73893+ GR_RBAC_MODE2,
73894+ GR_RBAC_MODE3,
73895+ GR_FILENAME,
73896+ GR_SYSCTL_HIDDEN,
73897+ GR_NOARGS,
73898+ GR_ONE_INT,
73899+ GR_ONE_INT_TWO_STR,
73900+ GR_ONE_STR,
73901+ GR_STR_INT,
73902+ GR_TWO_STR_INT,
73903+ GR_TWO_INT,
73904+ GR_TWO_U64,
73905+ GR_THREE_INT,
73906+ GR_FIVE_INT_TWO_STR,
73907+ GR_TWO_STR,
73908+ GR_THREE_STR,
73909+ GR_FOUR_STR,
73910+ GR_STR_FILENAME,
73911+ GR_FILENAME_STR,
73912+ GR_FILENAME_TWO_INT,
73913+ GR_FILENAME_TWO_INT_STR,
73914+ GR_TEXTREL,
73915+ GR_PTRACE,
73916+ GR_RESOURCE,
73917+ GR_CAP,
73918+ GR_SIG,
73919+ GR_SIG2,
73920+ GR_CRASH1,
73921+ GR_CRASH2,
73922+ GR_PSACCT,
73923+ GR_RWXMAP,
73924+ GR_RWXMAPVMA
73925+};
73926+
73927+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
73928+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
73929+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
73930+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
73931+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
73932+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
73933+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
73934+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
73935+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
73936+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
73937+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
73938+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
73939+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
73940+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
73941+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
73942+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
73943+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
73944+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
73945+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
73946+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
73947+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
73948+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
73949+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
73950+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
73951+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
73952+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
73953+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
73954+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
73955+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
73956+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
73957+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
73958+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
73959+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
73960+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
73961+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
73962+#define gr_log_rwxmap_vma(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAPVMA, str)
73963+
73964+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
73965+
73966+#endif
73967+
73968+#endif
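
The gr_log_* wrappers above all funnel into gr_log_varargs(), which switches on an argument-layout enum before consuming its va_list. A minimal sketch of that dispatch pattern (the names here are illustrative, not the kernel's):

#include <stdarg.h>
#include <stdio.h>

enum { LOG_NOARGS, LOG_ONE_INT, LOG_TWO_STR };

static void log_varargs(const char *msg, int argtypes, ...)
{
	va_list ap;

	va_start(ap, argtypes);
	switch (argtypes) {
	case LOG_NOARGS:
		printf("%s\n", msg);
		break;
	case LOG_ONE_INT:
		printf(msg, va_arg(ap, int));	/* layout tag tells us what to pull */
		break;
	case LOG_TWO_STR: {
		const char *a = va_arg(ap, const char *);
		const char *b = va_arg(ap, const char *);
		printf(msg, a, b);
		break;
	}
	}
	va_end(ap);
}

#define log_int(msg, num) log_varargs(msg, LOG_ONE_INT, num)

int main(void)
{
	log_int("invalid mode %d\n", 9);
	return 0;
}
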
73969diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
73970new file mode 100644
73971index 0000000..a4396b5
73972--- /dev/null
73973+++ b/include/linux/grmsg.h
73974@@ -0,0 +1,113 @@
73975+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
73976+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
73977+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
73978+#define GR_STOPMOD_MSG "denied modification of module state by "
73979+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
73980+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
73981+#define GR_IOPERM_MSG "denied use of ioperm() by "
73982+#define GR_IOPL_MSG "denied use of iopl() by "
73983+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
73984+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
73985+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
73986+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
73987+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
73988+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
73989+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
73990+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
73991+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
73992+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
73993+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
73994+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
73995+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
73996+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
73997+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
73998+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
73999+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
74000+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
74001+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
74002+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
74003+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
74004+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
74005+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
74006+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
74007+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
74008+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
74009+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
74010+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
74011+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
74012+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
74013+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
74014+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
74015+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
74016+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
74017+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
74018+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
74019+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
74020+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
74021+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
74022+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
74023+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
74024+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
74025+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
74026+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
74027+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
74028+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
74029+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
74030+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
74031+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
74032+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
74033+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
74034+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
74035+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
74036+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
74037+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
74038+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
74039+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
74040+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
74041+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
74042+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
74043+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
74044+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
74045+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
74046+#define GR_FAILFORK_MSG "failed fork with errno %s by "
74047+#define GR_NICE_CHROOT_MSG "denied priority change by "
74048+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
74049+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
74050+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
74051+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
74052+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
74053+#define GR_TIME_MSG "time set by "
74054+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
74055+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
74056+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
74057+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
74058+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
74059+#define GR_BIND_MSG "denied bind() by "
74060+#define GR_CONNECT_MSG "denied connect() by "
74061+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
74062+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
74063+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
74064+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
74065+#define GR_CAP_ACL_MSG "use of %s denied for "
74066+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
74067+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
74068+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
74069+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
74070+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
74071+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
74072+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
74073+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
74074+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
74075+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
74076+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
74077+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
74078+#define GR_TEXTREL_AUDIT_MSG "denied text relocation in %.950s, VMA:0x%08lx 0x%08lx by "
74079+#define GR_PTGNUSTACK_MSG "denied marking stack executable as requested by PT_GNU_STACK marking in %.950s by "
74080+#define GR_VM86_MSG "denied use of vm86 by "
74081+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
74082+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
74083+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
74084+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
74085+#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
74086+#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for "
74087+#define GR_BRUTE_SUID_MSG "bruteforce prevention initiated due to crash of %.950s against uid %u, banning suid/sgid execs for %u minutes. Please investigate the crash report for "
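
/*
 * Illustrative sketch, not part of the patch: every GR_*_MSG above is a
 * printf-style template, and the strings ending in "by " are completed by
 * the logging layer, which appends the acting task's credentials in the
 * DEFAULTSECMSG layout.  The two-step composition, with hypothetical
 * values, looks roughly like this (userspace C for clarity):
 */
#include <stdio.h>

#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "	/* template from above */

int main(void)
{
	char buf[512];
	int n;

	n = snprintf(buf, sizeof(buf), GR_CHDIR_AUDIT_MSG, "/var/tmp");
	if (n > 0 && (size_t)n < sizeof(buf))
		/* the logger then appends the task block, DEFAULTSECMSG-style */
		snprintf(buf + n, sizeof(buf) - n, "%.256s[%.16s:%d]",
			 "/bin/bash", "bash", 4242);
	puts(buf);
	return 0;
}
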
74088diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
74089new file mode 100644
74090index 0000000..d6f5a21
74091--- /dev/null
74092+++ b/include/linux/grsecurity.h
74093@@ -0,0 +1,244 @@
74094+#ifndef GR_SECURITY_H
74095+#define GR_SECURITY_H
74096+#include <linux/fs.h>
74097+#include <linux/fs_struct.h>
74098+#include <linux/binfmts.h>
74099+#include <linux/gracl.h>
74100+
74101+/* notify of brain-dead configs */
74102+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
74103+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
74104+#endif
74105+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
74106+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
74107+#endif
74108+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
74109+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
74110+#endif
74111+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
74112+#error "CONFIG_PAX enabled, but no PaX options are enabled."
74113+#endif
74114+
74115+int gr_handle_new_usb(void);
74116+
74117+void gr_handle_brute_attach(unsigned long mm_flags);
74118+void gr_handle_brute_check(void);
74119+void gr_handle_kernel_exploit(void);
74120+
74121+char gr_roletype_to_char(void);
74122+
74123+int gr_acl_enable_at_secure(void);
74124+
74125+int gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs);
74126+int gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs);
74127+
74128+void gr_del_task_from_ip_table(struct task_struct *p);
74129+
74130+int gr_pid_is_chrooted(struct task_struct *p);
74131+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
74132+int gr_handle_chroot_nice(void);
74133+int gr_handle_chroot_sysctl(const int op);
74134+int gr_handle_chroot_setpriority(struct task_struct *p,
74135+ const int niceval);
74136+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
74137+int gr_handle_chroot_chroot(const struct dentry *dentry,
74138+ const struct vfsmount *mnt);
74139+void gr_handle_chroot_chdir(const struct path *path);
74140+int gr_handle_chroot_chmod(const struct dentry *dentry,
74141+ const struct vfsmount *mnt, const int mode);
74142+int gr_handle_chroot_mknod(const struct dentry *dentry,
74143+ const struct vfsmount *mnt, const int mode);
74144+int gr_handle_chroot_mount(const struct dentry *dentry,
74145+ const struct vfsmount *mnt,
74146+ const char *dev_name);
74147+int gr_handle_chroot_pivot(void);
74148+int gr_handle_chroot_unix(const pid_t pid);
74149+
74150+int gr_handle_rawio(const struct inode *inode);
74151+
74152+void gr_handle_ioperm(void);
74153+void gr_handle_iopl(void);
74154+
74155+umode_t gr_acl_umask(void);
74156+
74157+int gr_tpe_allow(const struct file *file);
74158+
74159+void gr_set_chroot_entries(struct task_struct *task, const struct path *path);
74160+void gr_clear_chroot_entries(struct task_struct *task);
74161+
74162+void gr_log_forkfail(const int retval);
74163+void gr_log_timechange(void);
74164+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
74165+void gr_log_chdir(const struct dentry *dentry,
74166+ const struct vfsmount *mnt);
74167+void gr_log_chroot_exec(const struct dentry *dentry,
74168+ const struct vfsmount *mnt);
74169+void gr_log_remount(const char *devname, const int retval);
74170+void gr_log_unmount(const char *devname, const int retval);
74171+void gr_log_mount(const char *from, const char *to, const int retval);
74172+void gr_log_textrel(struct vm_area_struct *vma);
74173+void gr_log_ptgnustack(struct file *file);
74174+void gr_log_rwxmmap(struct file *file);
74175+void gr_log_rwxmprotect(struct vm_area_struct *vma);
74176+
74177+int gr_handle_follow_link(const struct inode *parent,
74178+ const struct inode *inode,
74179+ const struct dentry *dentry,
74180+ const struct vfsmount *mnt);
74181+int gr_handle_fifo(const struct dentry *dentry,
74182+ const struct vfsmount *mnt,
74183+ const struct dentry *dir, const int flag,
74184+ const int acc_mode);
74185+int gr_handle_hardlink(const struct dentry *dentry,
74186+ const struct vfsmount *mnt,
74187+ struct inode *inode,
74188+ const int mode, const struct filename *to);
74189+
74190+int gr_is_capable(const int cap);
74191+int gr_is_capable_nolog(const int cap);
74192+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
74193+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
74194+
74195+void gr_copy_label(struct task_struct *tsk);
74196+void gr_handle_crash(struct task_struct *task, const int sig);
74197+int gr_handle_signal(const struct task_struct *p, const int sig);
74198+int gr_check_crash_uid(const kuid_t uid);
74199+int gr_check_protected_task(const struct task_struct *task);
74200+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
74201+int gr_acl_handle_mmap(const struct file *file,
74202+ const unsigned long prot);
74203+int gr_acl_handle_mprotect(const struct file *file,
74204+ const unsigned long prot);
74205+int gr_check_hidden_task(const struct task_struct *tsk);
74206+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
74207+ const struct vfsmount *mnt);
74208+__u32 gr_acl_handle_utime(const struct dentry *dentry,
74209+ const struct vfsmount *mnt);
74210+__u32 gr_acl_handle_access(const struct dentry *dentry,
74211+ const struct vfsmount *mnt, const int fmode);
74212+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
74213+ const struct vfsmount *mnt, umode_t *mode);
74214+__u32 gr_acl_handle_chown(const struct dentry *dentry,
74215+ const struct vfsmount *mnt);
74216+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
74217+ const struct vfsmount *mnt);
74218+int gr_handle_ptrace(struct task_struct *task, const long request);
74219+int gr_handle_proc_ptrace(struct task_struct *task);
74220+__u32 gr_acl_handle_execve(const struct dentry *dentry,
74221+ const struct vfsmount *mnt);
74222+int gr_check_crash_exec(const struct file *filp);
74223+int gr_acl_is_enabled(void);
74224+void gr_set_kernel_label(struct task_struct *task);
74225+void gr_set_role_label(struct task_struct *task, const kuid_t uid,
74226+ const kgid_t gid);
74227+int gr_set_proc_label(const struct dentry *dentry,
74228+ const struct vfsmount *mnt,
74229+ const int unsafe_flags);
74230+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
74231+ const struct vfsmount *mnt);
74232+__u32 gr_acl_handle_open(const struct dentry *dentry,
74233+ const struct vfsmount *mnt, int acc_mode);
74234+__u32 gr_acl_handle_creat(const struct dentry *dentry,
74235+ const struct dentry *p_dentry,
74236+ const struct vfsmount *p_mnt,
74237+ int open_flags, int acc_mode, const int imode);
74238+void gr_handle_create(const struct dentry *dentry,
74239+ const struct vfsmount *mnt);
74240+void gr_handle_proc_create(const struct dentry *dentry,
74241+ const struct inode *inode);
74242+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
74243+ const struct dentry *parent_dentry,
74244+ const struct vfsmount *parent_mnt,
74245+ const int mode);
74246+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
74247+ const struct dentry *parent_dentry,
74248+ const struct vfsmount *parent_mnt);
74249+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
74250+ const struct vfsmount *mnt);
74251+void gr_handle_delete(const ino_t ino, const dev_t dev);
74252+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
74253+ const struct vfsmount *mnt);
74254+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
74255+ const struct dentry *parent_dentry,
74256+ const struct vfsmount *parent_mnt,
74257+ const struct filename *from);
74258+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
74259+ const struct dentry *parent_dentry,
74260+ const struct vfsmount *parent_mnt,
74261+ const struct dentry *old_dentry,
74262+ const struct vfsmount *old_mnt, const struct filename *to);
74263+int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
74264+int gr_acl_handle_rename(struct dentry *new_dentry,
74265+ struct dentry *parent_dentry,
74266+ const struct vfsmount *parent_mnt,
74267+ struct dentry *old_dentry,
74268+ struct inode *old_parent_inode,
74269+ struct vfsmount *old_mnt, const struct filename *newname);
74270+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
74271+ struct dentry *old_dentry,
74272+ struct dentry *new_dentry,
74273+ struct vfsmount *mnt, const __u8 replace);
74274+__u32 gr_check_link(const struct dentry *new_dentry,
74275+ const struct dentry *parent_dentry,
74276+ const struct vfsmount *parent_mnt,
74277+ const struct dentry *old_dentry,
74278+ const struct vfsmount *old_mnt);
74279+int gr_acl_handle_filldir(const struct file *file, const char *name,
74280+ const unsigned int namelen, const ino_t ino);
74281+
74282+__u32 gr_acl_handle_unix(const struct dentry *dentry,
74283+ const struct vfsmount *mnt);
74284+void gr_acl_handle_exit(void);
74285+void gr_acl_handle_psacct(struct task_struct *task, const long code);
74286+int gr_acl_handle_procpidmem(const struct task_struct *task);
74287+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
74288+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
74289+void gr_audit_ptrace(struct task_struct *task);
74290+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
74291+void gr_put_exec_file(struct task_struct *task);
74292+
74293+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
74294+
74295+#if defined(CONFIG_GRKERNSEC) && (defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC))
74296+extern void gr_learn_resource(const struct task_struct *task, const int res,
74297+ const unsigned long wanted, const int gt);
74298+#else
74299+static inline void gr_learn_resource(const struct task_struct *task, const int res,
74300+ const unsigned long wanted, const int gt)
74301+{
74302+}
74303+#endif
74304+
74305+#ifdef CONFIG_GRKERNSEC_RESLOG
74306+extern void gr_log_resource(const struct task_struct *task, const int res,
74307+ const unsigned long wanted, const int gt);
74308+#else
74309+static inline void gr_log_resource(const struct task_struct *task, const int res,
74310+ const unsigned long wanted, const int gt)
74311+{
74312+}
74313+#endif
74314+
74315+#ifdef CONFIG_GRKERNSEC
74316+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
74317+void gr_handle_vm86(void);
74318+void gr_handle_mem_readwrite(u64 from, u64 to);
74319+
74320+void gr_log_badprocpid(const char *entry);
74321+
74322+extern int grsec_enable_dmesg;
74323+extern int grsec_disable_privio;
74324+
74325+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
74326+extern kgid_t grsec_proc_gid;
74327+#endif
74328+
74329+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
74330+extern int grsec_enable_chroot_findtask;
74331+#endif
74332+#ifdef CONFIG_GRKERNSEC_SETXID
74333+extern int grsec_enable_setxid;
74334+#endif
74335+#endif
74336+
74337+#endif
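
/*
 * Hedged sketch, not from the patch: the declarations above are consumed
 * by hook sites that other hunks wire into core kernel paths.  The common
 * shape is "call gr_handle_*(), deny with an error on nonzero".  A
 * hypothetical chroot gate, for illustration only (the real call site
 * lives in fs/open.c, not here):
 */
#include <linux/path.h>
#include <linux/errno.h>
#include <linux/grsecurity.h>

static long hypothetical_chroot_check(const struct path *path)
{
	/* refuse a chroot() issued from inside an existing chroot */
	if (gr_handle_chroot_chroot(path->dentry, path->mnt))
		return -EPERM;

	/* ...a real call site would continue with set_fs_root()... */
	return 0;
}
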
74338diff --git a/include/linux/grsock.h b/include/linux/grsock.h
74339new file mode 100644
74340index 0000000..e7ffaaf
74341--- /dev/null
74342+++ b/include/linux/grsock.h
74343@@ -0,0 +1,19 @@
74344+#ifndef __GRSOCK_H
74345+#define __GRSOCK_H
74346+
74347+extern void gr_attach_curr_ip(const struct sock *sk);
74348+extern int gr_handle_sock_all(const int family, const int type,
74349+ const int protocol);
74350+extern int gr_handle_sock_server(const struct sockaddr *sck);
74351+extern int gr_handle_sock_server_other(const struct sock *sck);
74352+extern int gr_handle_sock_client(const struct sockaddr *sck);
74353+extern int gr_search_connect(struct socket * sock,
74354+ struct sockaddr_in * addr);
74355+extern int gr_search_bind(struct socket * sock,
74356+ struct sockaddr_in * addr);
74357+extern int gr_search_listen(struct socket * sock);
74358+extern int gr_search_accept(struct socket * sock);
74359+extern int gr_search_socket(const int domain, const int type,
74360+ const int protocol);
74361+
74362+#endif
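
/*
 * Sketch only; the return conventions are stated here as an assumption:
 * gr_search_*() returns nonzero to allow, while gr_handle_sock_*()
 * returns nonzero to deny.  A hypothetical socket(2) gate combining both:
 */
#include <linux/errno.h>
#include <linux/grsock.h>

static int hypothetical_socket_gate(int family, int type, int protocol)
{
	if (!gr_search_socket(family, type, protocol))
		return -EACCES;		/* RBAC policy said no */
	if (gr_handle_sock_all(family, type, protocol))
		return -EACCES;		/* kernel-wide socket restriction */
	return 0;
}
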
74363diff --git a/include/linux/hid.h b/include/linux/hid.h
74364index 0c48991..76e41d8 100644
74365--- a/include/linux/hid.h
74366+++ b/include/linux/hid.h
74367@@ -393,10 +393,12 @@ struct hid_report {
74368 struct hid_device *device; /* associated device */
74369 };
74370
74371+#define HID_MAX_IDS 256
74372+
74373 struct hid_report_enum {
74374 unsigned numbered;
74375 struct list_head report_list;
74376- struct hid_report *report_id_hash[256];
74377+ struct hid_report *report_id_hash[HID_MAX_IDS];
74378 };
74379
74380 #define HID_REPORT_TYPES 3
74381@@ -747,6 +749,10 @@ void hid_output_report(struct hid_report *report, __u8 *data);
74382 struct hid_device *hid_allocate_device(void);
74383 struct hid_report *hid_register_report(struct hid_device *device, unsigned type, unsigned id);
74384 int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size);
74385+struct hid_report *hid_validate_report(struct hid_device *hid,
74386+ unsigned int type, unsigned int id,
74387+ unsigned int fields,
74388+ unsigned int report_counts);
74389 int hid_open_report(struct hid_device *device);
74390 int hid_check_keys_pressed(struct hid_device *hid);
74391 int hid_connect(struct hid_device *hid, unsigned int connect_mask);
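
/*
 * Sketch of how a driver would use the hid_validate_report() helper added
 * above, assuming it returns the report only when it exists with at least
 * 'fields' fields of at least 'report_counts' values each, and NULL
 * otherwise.  Hypothetical probe fragment:
 */
#include <linux/hid.h>
#include <linux/errno.h>

static int hypothetical_probe(struct hid_device *hid)
{
	struct hid_report *report;

	/* demand one output report (id 0) with >= 1 field of >= 4 values
	 * before trusting anything derived from the device's descriptor */
	report = hid_validate_report(hid, HID_OUTPUT_REPORT, 0, 1, 4);
	if (!report)
		return -ENODEV;

	report->field[0]->value[0] = 1;	/* indexing now known to be safe */
	return 0;
}
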
74392diff --git a/include/linux/highmem.h b/include/linux/highmem.h
74393index 7fb31da..08b5114 100644
74394--- a/include/linux/highmem.h
74395+++ b/include/linux/highmem.h
74396@@ -189,6 +189,18 @@ static inline void clear_highpage(struct page *page)
74397 kunmap_atomic(kaddr);
74398 }
74399
74400+static inline void sanitize_highpage(struct page *page)
74401+{
74402+ void *kaddr;
74403+ unsigned long flags;
74404+
74405+ local_irq_save(flags);
74406+ kaddr = kmap_atomic(page);
74407+ clear_page(kaddr);
74408+ kunmap_atomic(kaddr);
74409+ local_irq_restore(flags);
74410+}
74411+
74412 static inline void zero_user_segments(struct page *page,
74413 unsigned start1, unsigned end1,
74414 unsigned start2, unsigned end2)
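
/*
 * Sketch: sanitize_highpage() above exists so freed pages can be scrubbed
 * even when they live in highmem; the IRQ-save around kmap_atomic() keeps
 * the temporary mapping safe from interrupt context.  A hypothetical
 * free-path scrubber (the real wiring would sit in mm/page_alloc.c):
 */
#include <linux/highmem.h>

static void hypothetical_scrub_on_free(struct page *page, unsigned int order)
{
	unsigned int i;

	for (i = 0; i < (1U << order); i++)
		sanitize_highpage(page + i);	/* zero via temporary mapping */
}
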
74415diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h
74416index 1c7b89a..7f52502 100644
74417--- a/include/linux/hwmon-sysfs.h
74418+++ b/include/linux/hwmon-sysfs.h
74419@@ -25,7 +25,8 @@
74420 struct sensor_device_attribute{
74421 struct device_attribute dev_attr;
74422 int index;
74423-};
74424+} __do_const;
74425+typedef struct sensor_device_attribute __no_const sensor_device_attribute_no_const;
74426 #define to_sensor_dev_attr(_dev_attr) \
74427 container_of(_dev_attr, struct sensor_device_attribute, dev_attr)
74428
74429@@ -41,7 +42,7 @@ struct sensor_device_attribute_2 {
74430 struct device_attribute dev_attr;
74431 u8 index;
74432 u8 nr;
74433-};
74434+} __do_const;
74435 #define to_sensor_dev_attr_2(_dev_attr) \
74436 container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr)
74437
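
/*
 * Sketch of the constify pattern that recurs throughout these header
 * hunks, assuming the PaX constify gcc plugin: __do_const makes every
 * instance of the tagged struct implicitly const, so whole objects land
 * in read-only memory, and the *_no_const typedef is the explicit opt-out
 * for the rare instance that must be filled in at runtime:
 */
#include <linux/hwmon-sysfs.h>

static struct sensor_device_attribute fixed_attr = {
	.index = 1,			/* whole object placed read-only */
};

static sensor_device_attribute_no_const runtime_attr;	/* stays writable */

static void hypothetical_late_setup(int idx)
{
	runtime_attr.index = idx;	/* legal only on the no_const type */
}
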
74438diff --git a/include/linux/i2c.h b/include/linux/i2c.h
74439index e988fa9..ff9f17e 100644
74440--- a/include/linux/i2c.h
74441+++ b/include/linux/i2c.h
74442@@ -366,6 +366,7 @@ struct i2c_algorithm {
74443 /* To determine what the adapter supports */
74444 u32 (*functionality) (struct i2c_adapter *);
74445 };
74446+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
74447
74448 /**
74449 * struct i2c_bus_recovery_info - I2C bus recovery information
74450diff --git a/include/linux/i2o.h b/include/linux/i2o.h
74451index d23c3c2..eb63c81 100644
74452--- a/include/linux/i2o.h
74453+++ b/include/linux/i2o.h
74454@@ -565,7 +565,7 @@ struct i2o_controller {
74455 struct i2o_device *exec; /* Executive */
74456 #if BITS_PER_LONG == 64
74457 spinlock_t context_list_lock; /* lock for context_list */
74458- atomic_t context_list_counter; /* needed for unique contexts */
74459+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
74460 struct list_head context_list; /* list of context id's
74461 and pointers */
74462 #endif
74463diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
74464index aff7ad8..3942bbd 100644
74465--- a/include/linux/if_pppox.h
74466+++ b/include/linux/if_pppox.h
74467@@ -76,7 +76,7 @@ struct pppox_proto {
74468 int (*ioctl)(struct socket *sock, unsigned int cmd,
74469 unsigned long arg);
74470 struct module *owner;
74471-};
74472+} __do_const;
74473
74474 extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
74475 extern void unregister_pppox_proto(int proto_num);
74476diff --git a/include/linux/init.h b/include/linux/init.h
74477index 8618147..0821126 100644
74478--- a/include/linux/init.h
74479+++ b/include/linux/init.h
74480@@ -39,9 +39,36 @@
74481 * Also note, that this data cannot be "const".
74482 */
74483
74484+#ifdef MODULE
74485+#define add_init_latent_entropy
74486+#define add_devinit_latent_entropy
74487+#define add_cpuinit_latent_entropy
74488+#define add_meminit_latent_entropy
74489+#else
74490+#define add_init_latent_entropy __latent_entropy
74491+
74492+#ifdef CONFIG_HOTPLUG
74493+#define add_devinit_latent_entropy
74494+#else
74495+#define add_devinit_latent_entropy __latent_entropy
74496+#endif
74497+
74498+#ifdef CONFIG_HOTPLUG_CPU
74499+#define add_cpuinit_latent_entropy
74500+#else
74501+#define add_cpuinit_latent_entropy __latent_entropy
74502+#endif
74503+
74504+#ifdef CONFIG_MEMORY_HOTPLUG
74505+#define add_meminit_latent_entropy
74506+#else
74507+#define add_meminit_latent_entropy __latent_entropy
74508+#endif
74509+#endif
74510+
74511 /* These are for everybody (although not all archs will actually
74512 discard it in modules) */
74513-#define __init __section(.init.text) __cold notrace
74514+#define __init __section(.init.text) __cold notrace add_init_latent_entropy
74515 #define __initdata __section(.init.data)
74516 #define __initconst __constsection(.init.rodata)
74517 #define __exitdata __section(.exit.data)
74518@@ -94,7 +121,7 @@
74519 #define __exit __section(.exit.text) __exitused __cold notrace
74520
74521 /* Used for HOTPLUG_CPU */
74522-#define __cpuinit __section(.cpuinit.text) __cold notrace
74523+#define __cpuinit __section(.cpuinit.text) __cold notrace add_cpuinit_latent_entropy
74524 #define __cpuinitdata __section(.cpuinit.data)
74525 #define __cpuinitconst __constsection(.cpuinit.rodata)
74526 #define __cpuexit __section(.cpuexit.text) __exitused __cold notrace
74527@@ -102,7 +129,7 @@
74528 #define __cpuexitconst __constsection(.cpuexit.rodata)
74529
74530 /* Used for MEMORY_HOTPLUG */
74531-#define __meminit __section(.meminit.text) __cold notrace
74532+#define __meminit __section(.meminit.text) __cold notrace add_meminit_latent_entropy
74533 #define __meminitdata __section(.meminit.data)
74534 #define __meminitconst __constsection(.meminit.rodata)
74535 #define __memexit __section(.memexit.text) __exitused __cold notrace
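
/*
 * Sketch, assuming the PaX latent_entropy gcc plugin: __latent_entropy on
 * a function makes the plugin mix compile-time-chosen constants into a
 * global entropy pool when the function runs.  Built-in init code is
 * instrumented via the markers above; in modules (MODULE defined) the
 * markers expand to nothing, since module init can run repeatedly:
 */
#include <linux/init.h>

static int __init hypothetical_subsys_init(void)
{
	/* built-in build: .init.text, cold, notrace, plus latent entropy */
	return 0;
}
core_initcall(hypothetical_subsys_init);
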
74536diff --git a/include/linux/init_task.h b/include/linux/init_task.h
74537index 5cd0f09..c9f67cc 100644
74538--- a/include/linux/init_task.h
74539+++ b/include/linux/init_task.h
74540@@ -154,6 +154,12 @@ extern struct task_group root_task_group;
74541
74542 #define INIT_TASK_COMM "swapper"
74543
74544+#ifdef CONFIG_X86
74545+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
74546+#else
74547+#define INIT_TASK_THREAD_INFO
74548+#endif
74549+
74550 /*
74551 * INIT_TASK is used to set up the first task table, touch at
74552 * your own risk!. Base=0, limit=0x1fffff (=2MB)
74553@@ -193,6 +199,7 @@ extern struct task_group root_task_group;
74554 RCU_POINTER_INITIALIZER(cred, &init_cred), \
74555 .comm = INIT_TASK_COMM, \
74556 .thread = INIT_THREAD, \
74557+ INIT_TASK_THREAD_INFO \
74558 .fs = &init_fs, \
74559 .files = &init_files, \
74560 .signal = &init_signals, \
74561diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
74562index 5fa5afe..ac55b25 100644
74563--- a/include/linux/interrupt.h
74564+++ b/include/linux/interrupt.h
74565@@ -430,7 +430,7 @@ enum
74566 /* map softirq index to softirq name. update 'softirq_to_name' in
74567 * kernel/softirq.c when adding a new softirq.
74568 */
74569-extern char *softirq_to_name[NR_SOFTIRQS];
74570+extern const char * const softirq_to_name[NR_SOFTIRQS];
74571
74572 /* softirq mask and active fields moved to irq_cpustat_t in
74573 * asm/hardirq.h to get better cache usage. KAO
74574@@ -438,12 +438,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
74575
74576 struct softirq_action
74577 {
74578- void (*action)(struct softirq_action *);
74579-};
74580+ void (*action)(void);
74581+} __no_const;
74582
74583 asmlinkage void do_softirq(void);
74584 asmlinkage void __do_softirq(void);
74585-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
74586+extern void open_softirq(int nr, void (*action)(void));
74587 extern void softirq_init(void);
74588 extern void __raise_softirq_irqoff(unsigned int nr);
74589
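
/*
 * Sketch: after the hunk above, softirq handlers drop their never-used
 * struct softirq_action * argument, which lets the action table live in
 * protected memory.  Registration under the new signature, with a
 * hypothetical handler:
 */
#include <linux/interrupt.h>

static void hypothetical_softirq_action(void)	/* was (struct softirq_action *) */
{
	/* handler body; no in-tree user ever consumed the old argument */
}

static int __init hypothetical_softirq_init(void)
{
	open_softirq(TASKLET_SOFTIRQ, hypothetical_softirq_action);
	return 0;
}
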
74590diff --git a/include/linux/iommu.h b/include/linux/iommu.h
74591index 3aeb730..2177f39 100644
74592--- a/include/linux/iommu.h
74593+++ b/include/linux/iommu.h
74594@@ -113,7 +113,7 @@ struct iommu_ops {
74595 u32 (*domain_get_windows)(struct iommu_domain *domain);
74596
74597 unsigned long pgsize_bitmap;
74598-};
74599+} __do_const;
74600
74601 #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
74602 #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
74603diff --git a/include/linux/ioport.h b/include/linux/ioport.h
74604index 89b7c24..382af74 100644
74605--- a/include/linux/ioport.h
74606+++ b/include/linux/ioport.h
74607@@ -161,7 +161,7 @@ struct resource *lookup_resource(struct resource *root, resource_size_t start);
74608 int adjust_resource(struct resource *res, resource_size_t start,
74609 resource_size_t size);
74610 resource_size_t resource_alignment(struct resource *res);
74611-static inline resource_size_t resource_size(const struct resource *res)
74612+static inline resource_size_t __intentional_overflow(-1) resource_size(const struct resource *res)
74613 {
74614 return res->end - res->start + 1;
74615 }
74616diff --git a/include/linux/irq.h b/include/linux/irq.h
74617index bc4e066..50468a9 100644
74618--- a/include/linux/irq.h
74619+++ b/include/linux/irq.h
74620@@ -328,7 +328,8 @@ struct irq_chip {
74621 void (*irq_print_chip)(struct irq_data *data, struct seq_file *p);
74622
74623 unsigned long flags;
74624-};
74625+} __do_const;
74626+typedef struct irq_chip __no_const irq_chip_no_const;
74627
74628 /*
74629 * irq_chip specific flags
74630diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
74631index 3e203eb..3fe68d0 100644
74632--- a/include/linux/irqchip/arm-gic.h
74633+++ b/include/linux/irqchip/arm-gic.h
74634@@ -59,9 +59,11 @@
74635
74636 #ifndef __ASSEMBLY__
74637
74638+#include <linux/irq.h>
74639+
74640 struct device_node;
74641
74642-extern struct irq_chip gic_arch_extn;
74643+extern irq_chip_no_const gic_arch_extn;
74644
74645 void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
74646 u32 offset, struct device_node *);
74647diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
74648index 6883e19..e854fcb 100644
74649--- a/include/linux/kallsyms.h
74650+++ b/include/linux/kallsyms.h
74651@@ -15,7 +15,8 @@
74652
74653 struct module;
74654
74655-#ifdef CONFIG_KALLSYMS
74656+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
74657+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
74658 /* Lookup the address for a symbol. Returns 0 if not found. */
74659 unsigned long kallsyms_lookup_name(const char *name);
74660
74661@@ -106,6 +107,21 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
74662 /* Stupid that this does nothing, but I didn't create this mess. */
74663 #define __print_symbol(fmt, addr)
74664 #endif /*CONFIG_KALLSYMS*/
74665+#else /* when included by kallsyms.c, vsnprintf.c, kprobes.c, or
74666+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
74667+extern unsigned long kallsyms_lookup_name(const char *name);
74668+extern void __print_symbol(const char *fmt, unsigned long address);
74669+extern int sprint_backtrace(char *buffer, unsigned long address);
74670+extern int sprint_symbol(char *buffer, unsigned long address);
74671+extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
74672+const char *kallsyms_lookup(unsigned long addr,
74673+ unsigned long *symbolsize,
74674+ unsigned long *offset,
74675+ char **modname, char *namebuf);
74676+extern int kallsyms_lookup_size_offset(unsigned long addr,
74677+ unsigned long *symbolsize,
74678+ unsigned long *offset);
74679+#endif
74680
74681 /* This macro allows us to keep printk typechecking */
74682 static __printf(1, 2)
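
/*
 * Sketch of the opt-in dance above, under GRKERNSEC_HIDESYM: ordinary
 * includers get only the do-nothing stubs, so symbol names cannot leak
 * into arbitrary printk output; the few trusted translation units named
 * in the comment above re-enable the real lookups by defining
 * __INCLUDED_BY_HIDESYM before the include:
 */
#define __INCLUDED_BY_HIDESYM 1
#include <linux/kallsyms.h>

static void hypothetical_dump_address(unsigned long addr)
{
	__print_symbol("%s\n", addr);	/* resolves even with HIDESYM on */
}
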
74683diff --git a/include/linux/key-type.h b/include/linux/key-type.h
74684index 518a53a..5e28358 100644
74685--- a/include/linux/key-type.h
74686+++ b/include/linux/key-type.h
74687@@ -125,7 +125,7 @@ struct key_type {
74688 /* internal fields */
74689 struct list_head link; /* link in types list */
74690 struct lock_class_key lock_class; /* key->sem lock class */
74691-};
74692+} __do_const;
74693
74694 extern struct key_type key_type_keyring;
74695
74696diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
74697index c6e091b..a940adf 100644
74698--- a/include/linux/kgdb.h
74699+++ b/include/linux/kgdb.h
74700@@ -52,7 +52,7 @@ extern int kgdb_connected;
74701 extern int kgdb_io_module_registered;
74702
74703 extern atomic_t kgdb_setting_breakpoint;
74704-extern atomic_t kgdb_cpu_doing_single_step;
74705+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
74706
74707 extern struct task_struct *kgdb_usethread;
74708 extern struct task_struct *kgdb_contthread;
74709@@ -254,7 +254,7 @@ struct kgdb_arch {
74710 void (*correct_hw_break)(void);
74711
74712 void (*enable_nmi)(bool on);
74713-};
74714+} __do_const;
74715
74716 /**
74717 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
74718@@ -279,7 +279,7 @@ struct kgdb_io {
74719 void (*pre_exception) (void);
74720 void (*post_exception) (void);
74721 int is_console;
74722-};
74723+} __do_const;
74724
74725 extern struct kgdb_arch arch_kgdb_ops;
74726
74727diff --git a/include/linux/kmod.h b/include/linux/kmod.h
74728index 0555cc6..b16a7a4 100644
74729--- a/include/linux/kmod.h
74730+++ b/include/linux/kmod.h
74731@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
74732 * usually useless though. */
74733 extern __printf(2, 3)
74734 int __request_module(bool wait, const char *name, ...);
74735+extern __printf(3, 4)
74736+int ___request_module(bool wait, char *param_name, const char *name, ...);
74737 #define request_module(mod...) __request_module(true, mod)
74738 #define request_module_nowait(mod...) __request_module(false, mod)
74739 #define try_then_request_module(x, mod...) \
74740diff --git a/include/linux/kobject.h b/include/linux/kobject.h
74741index 939b112..ed6ed51 100644
74742--- a/include/linux/kobject.h
74743+++ b/include/linux/kobject.h
74744@@ -111,7 +111,7 @@ struct kobj_type {
74745 struct attribute **default_attrs;
74746 const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
74747 const void *(*namespace)(struct kobject *kobj);
74748-};
74749+} __do_const;
74750
74751 struct kobj_uevent_env {
74752 char *envp[UEVENT_NUM_ENVP];
74753@@ -134,6 +134,7 @@ struct kobj_attribute {
74754 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
74755 const char *buf, size_t count);
74756 };
74757+typedef struct kobj_attribute __no_const kobj_attribute_no_const;
74758
74759 extern const struct sysfs_ops kobj_sysfs_ops;
74760
74761diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
74762index f66b065..c2c29b4 100644
74763--- a/include/linux/kobject_ns.h
74764+++ b/include/linux/kobject_ns.h
74765@@ -43,7 +43,7 @@ struct kobj_ns_type_operations {
74766 const void *(*netlink_ns)(struct sock *sk);
74767 const void *(*initial_ns)(void);
74768 void (*drop_ns)(void *);
74769-};
74770+} __do_const;
74771
74772 int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
74773 int kobj_ns_type_registered(enum kobj_ns_type type);
74774diff --git a/include/linux/kref.h b/include/linux/kref.h
74775index 484604d..0f6c5b6 100644
74776--- a/include/linux/kref.h
74777+++ b/include/linux/kref.h
74778@@ -68,7 +68,7 @@ static inline void kref_get(struct kref *kref)
74779 static inline int kref_sub(struct kref *kref, unsigned int count,
74780 void (*release)(struct kref *kref))
74781 {
74782- WARN_ON(release == NULL);
74783+ BUG_ON(release == NULL);
74784
74785 if (atomic_sub_and_test((int) count, &kref->refcount)) {
74786 release(kref);
74787diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
74788index 8db53cf..c21121d 100644
74789--- a/include/linux/kvm_host.h
74790+++ b/include/linux/kvm_host.h
74791@@ -444,7 +444,7 @@ static inline void kvm_irqfd_exit(void)
74792 {
74793 }
74794 #endif
74795-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
74796+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
74797 struct module *module);
74798 void kvm_exit(void);
74799
74800@@ -616,7 +616,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
74801 struct kvm_guest_debug *dbg);
74802 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
74803
74804-int kvm_arch_init(void *opaque);
74805+int kvm_arch_init(const void *opaque);
74806 void kvm_arch_exit(void);
74807
74808 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
74809diff --git a/include/linux/libata.h b/include/linux/libata.h
74810index eae7a05..2cdd875 100644
74811--- a/include/linux/libata.h
74812+++ b/include/linux/libata.h
74813@@ -919,7 +919,7 @@ struct ata_port_operations {
74814 * fields must be pointers.
74815 */
74816 const struct ata_port_operations *inherits;
74817-};
74818+} __do_const;
74819
74820 struct ata_port_info {
74821 unsigned long flags;
74822diff --git a/include/linux/list.h b/include/linux/list.h
74823index b83e565..baa6c1d 100644
74824--- a/include/linux/list.h
74825+++ b/include/linux/list.h
74826@@ -112,6 +112,19 @@ extern void __list_del_entry(struct list_head *entry);
74827 extern void list_del(struct list_head *entry);
74828 #endif
74829
74830+extern void __pax_list_add(struct list_head *new,
74831+ struct list_head *prev,
74832+ struct list_head *next);
74833+static inline void pax_list_add(struct list_head *new, struct list_head *head)
74834+{
74835+ __pax_list_add(new, head, head->next);
74836+}
74837+static inline void pax_list_add_tail(struct list_head *new, struct list_head *head)
74838+{
74839+ __pax_list_add(new, head->prev, head);
74840+}
74841+extern void pax_list_del(struct list_head *entry);
74842+
74843 /**
74844 * list_replace - replace old entry by new one
74845 * @old : the element to be replaced
74846@@ -145,6 +158,8 @@ static inline void list_del_init(struct list_head *entry)
74847 INIT_LIST_HEAD(entry);
74848 }
74849
74850+extern void pax_list_del_init(struct list_head *entry);
74851+
74852 /**
74853 * list_move - delete from one list and add as another's head
74854 * @list: the entry to move
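
/*
 * Sketch: the pax_list_* variants above mirror list_add()/list_del() for
 * lists whose nodes live in KERNEXEC-protected, read-only memory; the
 * out-of-line implementations open a temporary write window around the
 * pointer updates.  Usage is a drop-in substitution:
 */
#include <linux/list.h>

static LIST_HEAD(hypothetical_ro_list);

static void hypothetical_register(struct list_head *node)
{
	pax_list_add_tail(node, &hypothetical_ro_list);	/* not list_add_tail */
}

static void hypothetical_unregister(struct list_head *node)
{
	pax_list_del(node);				/* not list_del */
}
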
74855diff --git a/include/linux/math64.h b/include/linux/math64.h
74856index 2913b86..8dcbb1e 100644
74857--- a/include/linux/math64.h
74858+++ b/include/linux/math64.h
74859@@ -15,7 +15,7 @@
74860 * This is commonly provided by 32bit archs to provide an optimized 64bit
74861 * divide.
74862 */
74863-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
74864+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
74865 {
74866 *remainder = dividend % divisor;
74867 return dividend / divisor;
74868@@ -33,7 +33,7 @@ static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
74869 /**
74870 * div64_u64 - unsigned 64bit divide with 64bit divisor
74871 */
74872-static inline u64 div64_u64(u64 dividend, u64 divisor)
74873+static inline u64 __intentional_overflow(0) div64_u64(u64 dividend, u64 divisor)
74874 {
74875 return dividend / divisor;
74876 }
74877@@ -52,7 +52,7 @@ static inline s64 div64_s64(s64 dividend, s64 divisor)
74878 #define div64_ul(x, y) div_u64((x), (y))
74879
74880 #ifndef div_u64_rem
74881-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
74882+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
74883 {
74884 *remainder = do_div(dividend, divisor);
74885 return dividend;
74886@@ -81,7 +81,7 @@ extern s64 div64_s64(s64 dividend, s64 divisor);
74887 * divide.
74888 */
74889 #ifndef div_u64
74890-static inline u64 div_u64(u64 dividend, u32 divisor)
74891+static inline u64 __intentional_overflow(-1) div_u64(u64 dividend, u32 divisor)
74892 {
74893 u32 remainder;
74894 return div_u64_rem(dividend, divisor, &remainder);
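
/*
 * Sketch, stated as an assumption about the PaX size_overflow plugin:
 * __intentional_overflow(...) marks computations the plugin must not
 * instrument; plain builds define it away, so the annotated helpers keep
 * their usual semantics and callers are unchanged:
 */
#include <linux/math64.h>

static inline u32 hypothetical_scale(u32 x)
{
	/* div_u64() is annotated above, so instrumented builds will not
	 * flag the 64-bit widening arithmetic feeding it */
	return (u32)div_u64((u64)x * 1000, 1024);
}
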
74895diff --git a/include/linux/mm.h b/include/linux/mm.h
74896index e0c8528..bcf0c29 100644
74897--- a/include/linux/mm.h
74898+++ b/include/linux/mm.h
74899@@ -104,6 +104,11 @@ extern unsigned int kobjsize(const void *objp);
74900 #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
74901 #define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
74902 #define VM_ARCH_1 0x01000000 /* Architecture-specific flag */
74903+
74904+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
74905+#define VM_PAGEEXEC 0x02000000 /* vma->vm_page_prot needs special handling */
74906+#endif
74907+
74908 #define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */
74909
74910 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
74911@@ -205,8 +210,8 @@ struct vm_operations_struct {
74912 /* called by access_process_vm when get_user_pages() fails, typically
74913 * for use by special VMAs that can switch between memory and hardware
74914 */
74915- int (*access)(struct vm_area_struct *vma, unsigned long addr,
74916- void *buf, int len, int write);
74917+ ssize_t (*access)(struct vm_area_struct *vma, unsigned long addr,
74918+ void *buf, size_t len, int write);
74919 #ifdef CONFIG_NUMA
74920 /*
74921 * set_policy() op must add a reference to any non-NULL @new mempolicy
74922@@ -236,6 +241,7 @@ struct vm_operations_struct {
74923 int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
74924 unsigned long size, pgoff_t pgoff);
74925 };
74926+typedef struct vm_operations_struct __no_const vm_operations_struct_no_const;
74927
74928 struct mmu_gather;
74929 struct inode;
74930@@ -980,8 +986,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
74931 unsigned long *pfn);
74932 int follow_phys(struct vm_area_struct *vma, unsigned long address,
74933 unsigned int flags, unsigned long *prot, resource_size_t *phys);
74934-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
74935- void *buf, int len, int write);
74936+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
74937+ void *buf, size_t len, int write);
74938
74939 static inline void unmap_shared_mapping_range(struct address_space *mapping,
74940 loff_t const holebegin, loff_t const holelen)
74941@@ -1020,9 +1026,9 @@ static inline int fixup_user_fault(struct task_struct *tsk,
74942 }
74943 #endif
74944
74945-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
74946-extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
74947- void *buf, int len, int write);
74948+extern ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write);
74949+extern ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
74950+ void *buf, size_t len, int write);
74951
74952 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
74953 unsigned long start, unsigned long nr_pages,
74954@@ -1053,34 +1059,6 @@ int set_page_dirty(struct page *page);
74955 int set_page_dirty_lock(struct page *page);
74956 int clear_page_dirty_for_io(struct page *page);
74957
74958-/* Is the vma a continuation of the stack vma above it? */
74959-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
74960-{
74961- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
74962-}
74963-
74964-static inline int stack_guard_page_start(struct vm_area_struct *vma,
74965- unsigned long addr)
74966-{
74967- return (vma->vm_flags & VM_GROWSDOWN) &&
74968- (vma->vm_start == addr) &&
74969- !vma_growsdown(vma->vm_prev, addr);
74970-}
74971-
74972-/* Is the vma a continuation of the stack vma below it? */
74973-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
74974-{
74975- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
74976-}
74977-
74978-static inline int stack_guard_page_end(struct vm_area_struct *vma,
74979- unsigned long addr)
74980-{
74981- return (vma->vm_flags & VM_GROWSUP) &&
74982- (vma->vm_end == addr) &&
74983- !vma_growsup(vma->vm_next, addr);
74984-}
74985-
74986 extern pid_t
74987 vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
74988
74989@@ -1180,6 +1158,15 @@ static inline void sync_mm_rss(struct mm_struct *mm)
74990 }
74991 #endif
74992
74993+#ifdef CONFIG_MMU
74994+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
74995+#else
74996+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
74997+{
74998+ return __pgprot(0);
74999+}
75000+#endif
75001+
75002 int vma_wants_writenotify(struct vm_area_struct *vma);
75003
75004 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
75005@@ -1198,8 +1185,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
75006 {
75007 return 0;
75008 }
75009+
75010+static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
75011+ unsigned long address)
75012+{
75013+ return 0;
75014+}
75015 #else
75016 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
75017+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
75018 #endif
75019
75020 #ifdef __PAGETABLE_PMD_FOLDED
75021@@ -1208,8 +1202,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
75022 {
75023 return 0;
75024 }
75025+
75026+static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
75027+ unsigned long address)
75028+{
75029+ return 0;
75030+}
75031 #else
75032 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
75033+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
75034 #endif
75035
75036 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
75037@@ -1227,11 +1228,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
75038 NULL: pud_offset(pgd, address);
75039 }
75040
75041+static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
75042+{
75043+ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
75044+ NULL: pud_offset(pgd, address);
75045+}
75046+
75047 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
75048 {
75049 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
75050 NULL: pmd_offset(pud, address);
75051 }
75052+
75053+static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
75054+{
75055+ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
75056+ NULL: pmd_offset(pud, address);
75057+}
75058 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
75059
75060 #if USE_SPLIT_PTLOCKS
75061@@ -1517,6 +1530,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
75062 unsigned long len, unsigned long prot, unsigned long flags,
75063 unsigned long pgoff, unsigned long *populate);
75064 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
75065+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
75066
75067 #ifdef CONFIG_MMU
75068 extern int __mm_populate(unsigned long addr, unsigned long len,
75069@@ -1545,10 +1559,11 @@ struct vm_unmapped_area_info {
75070 unsigned long high_limit;
75071 unsigned long align_mask;
75072 unsigned long align_offset;
75073+ unsigned long threadstack_offset;
75074 };
75075
75076-extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
75077-extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
75078+extern unsigned long unmapped_area(const struct vm_unmapped_area_info *info);
75079+extern unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info);
75080
75081 /*
75082 * Search for an unmapped address range.
75083@@ -1560,7 +1575,7 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
75084 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
75085 */
75086 static inline unsigned long
75087-vm_unmapped_area(struct vm_unmapped_area_info *info)
75088+vm_unmapped_area(const struct vm_unmapped_area_info *info)
75089 {
75090 if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
75091 return unmapped_area(info);
75092@@ -1623,6 +1638,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
75093 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
75094 struct vm_area_struct **pprev);
75095
75096+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
75097+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
75098+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
75099+
75100 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
75101 NULL if none. Assume start_addr < end_addr. */
75102 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
75103@@ -1651,15 +1670,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
75104 return vma;
75105 }
75106
75107-#ifdef CONFIG_MMU
75108-pgprot_t vm_get_page_prot(unsigned long vm_flags);
75109-#else
75110-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
75111-{
75112- return __pgprot(0);
75113-}
75114-#endif
75115-
75116 #ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
75117 unsigned long change_prot_numa(struct vm_area_struct *vma,
75118 unsigned long start, unsigned long end);
75119@@ -1711,6 +1721,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
75120 static inline void vm_stat_account(struct mm_struct *mm,
75121 unsigned long flags, struct file *file, long pages)
75122 {
75123+
75124+#ifdef CONFIG_PAX_RANDMMAP
75125+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
75126+#endif
75127+
75128 mm->total_vm += pages;
75129 }
75130 #endif /* CONFIG_PROC_FS */
75131@@ -1791,7 +1806,7 @@ extern int unpoison_memory(unsigned long pfn);
75132 extern int sysctl_memory_failure_early_kill;
75133 extern int sysctl_memory_failure_recovery;
75134 extern void shake_page(struct page *p, int access);
75135-extern atomic_long_t num_poisoned_pages;
75136+extern atomic_long_unchecked_t num_poisoned_pages;
75137 extern int soft_offline_page(struct page *page, int flags);
75138
75139 extern void dump_page(struct page *page);
75140@@ -1828,5 +1843,11 @@ void __init setup_nr_node_ids(void);
75141 static inline void setup_nr_node_ids(void) {}
75142 #endif
75143
75144+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
75145+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
75146+#else
75147+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
75148+#endif
75149+
75150 #endif /* __KERNEL__ */
75151 #endif /* _LINUX_MM_H */
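
/*
 * Sketch: the hunk above adds threadstack_offset to the unmapped-area
 * descriptor and const-qualifies the lookup helpers, so a caller hands
 * over a fully-initialized descriptor that the allocator may not mutate.
 * A hypothetical bottom-up search:
 */
#include <linux/mm.h>

static unsigned long hypothetical_pick_range(unsigned long len)
{
	struct vm_unmapped_area_info info = {
		.flags			= 0,		/* bottom-up */
		.length			= len,
		.low_limit		= PAGE_SIZE,
		.high_limit		= TASK_SIZE,
		.align_mask		= 0,
		.align_offset		= 0,
		.threadstack_offset	= 0,		/* new field */
	};

	return vm_unmapped_area(&info);	/* takes const * after this patch */
}
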
75152diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
75153index ace9a5f..81bdb59 100644
75154--- a/include/linux/mm_types.h
75155+++ b/include/linux/mm_types.h
75156@@ -289,6 +289,8 @@ struct vm_area_struct {
75157 #ifdef CONFIG_NUMA
75158 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
75159 #endif
75160+
75161+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
75162 };
75163
75164 struct core_thread {
75165@@ -437,6 +439,24 @@ struct mm_struct {
75166 int first_nid;
75167 #endif
75168 struct uprobes_state uprobes_state;
75169+
75170+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
75171+ unsigned long pax_flags;
75172+#endif
75173+
75174+#ifdef CONFIG_PAX_DLRESOLVE
75175+ unsigned long call_dl_resolve;
75176+#endif
75177+
75178+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
75179+ unsigned long call_syscall;
75180+#endif
75181+
75182+#ifdef CONFIG_PAX_ASLR
75183+ unsigned long delta_mmap; /* randomized offset */
75184+ unsigned long delta_stack; /* randomized offset */
75185+#endif
75186+
75187 };
75188
75189 /* first nid will either be a valid NID or one of these values */
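
/*
 * Sketch, with the sign and granularity stated as assumptions: under
 * PAX_RANDMMAP the per-mm deltas added above hold page-aligned random
 * offsets chosen at exec time, and arch mmap code applies them when
 * laying out the address space, roughly:
 */
#include <linux/mm_types.h>

static unsigned long hypothetical_mmap_base(struct mm_struct *mm,
					    unsigned long base)
{
#ifdef CONFIG_PAX_ASLR
	if (mm->pax_flags & MF_PAX_RANDMMAP)
		base += mm->delta_mmap;	/* direction varies per layout */
#endif
	return base;
}
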
75190diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
75191index c5d5278..f0b68c8 100644
75192--- a/include/linux/mmiotrace.h
75193+++ b/include/linux/mmiotrace.h
75194@@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
75195 /* Called from ioremap.c */
75196 extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
75197 void __iomem *addr);
75198-extern void mmiotrace_iounmap(volatile void __iomem *addr);
75199+extern void mmiotrace_iounmap(const volatile void __iomem *addr);
75200
75201 /* For anyone to insert markers. Remember trailing newline. */
75202 extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...);
75203@@ -66,7 +66,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
75204 {
75205 }
75206
75207-static inline void mmiotrace_iounmap(volatile void __iomem *addr)
75208+static inline void mmiotrace_iounmap(const volatile void __iomem *addr)
75209 {
75210 }
75211
75212diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
75213index 5c76737..61f518e 100644
75214--- a/include/linux/mmzone.h
75215+++ b/include/linux/mmzone.h
75216@@ -396,7 +396,7 @@ struct zone {
75217 unsigned long flags; /* zone flags, see below */
75218
75219 /* Zone statistics */
75220- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
75221+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
75222
75223 /*
75224 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
75225diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
75226index b508016..237cfe5 100644
75227--- a/include/linux/mod_devicetable.h
75228+++ b/include/linux/mod_devicetable.h
75229@@ -13,7 +13,7 @@
75230 typedef unsigned long kernel_ulong_t;
75231 #endif
75232
75233-#define PCI_ANY_ID (~0)
75234+#define PCI_ANY_ID ((__u16)~0)
75235
75236 struct pci_device_id {
75237 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
75238@@ -139,7 +139,7 @@ struct usb_device_id {
75239 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
75240 #define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400
75241
75242-#define HID_ANY_ID (~0)
75243+#define HID_ANY_ID (~0U)
75244 #define HID_BUS_ANY 0xffff
75245 #define HID_GROUP_ANY 0x0000
75246
75247@@ -465,7 +465,7 @@ struct dmi_system_id {
75248 const char *ident;
75249 struct dmi_strmatch matches[4];
75250 void *driver_data;
75251-};
75252+} __do_const;
75253 /*
75254 * struct dmi_device_id appears during expansion of
75255 * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it
75256diff --git a/include/linux/module.h b/include/linux/module.h
75257index 46f1ea0..a34ca37 100644
75258--- a/include/linux/module.h
75259+++ b/include/linux/module.h
75260@@ -17,9 +17,11 @@
75261 #include <linux/moduleparam.h>
75262 #include <linux/tracepoint.h>
75263 #include <linux/export.h>
75264+#include <linux/fs.h>
75265
75266 #include <linux/percpu.h>
75267 #include <asm/module.h>
75268+#include <asm/pgtable.h>
75269
75270 /* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
75271 #define MODULE_SIG_STRING "~Module signature appended~\n"
75272@@ -54,12 +56,13 @@ struct module_attribute {
75273 int (*test)(struct module *);
75274 void (*free)(struct module *);
75275 };
75276+typedef struct module_attribute __no_const module_attribute_no_const;
75277
75278 struct module_version_attribute {
75279 struct module_attribute mattr;
75280 const char *module_name;
75281 const char *version;
75282-} __attribute__ ((__aligned__(sizeof(void *))));
75283+} __do_const __attribute__ ((__aligned__(sizeof(void *))));
75284
75285 extern ssize_t __modver_version_show(struct module_attribute *,
75286 struct module_kobject *, char *);
75287@@ -232,7 +235,7 @@ struct module
75288
75289 /* Sysfs stuff. */
75290 struct module_kobject mkobj;
75291- struct module_attribute *modinfo_attrs;
75292+ module_attribute_no_const *modinfo_attrs;
75293 const char *version;
75294 const char *srcversion;
75295 struct kobject *holders_dir;
75296@@ -281,19 +284,16 @@ struct module
75297 int (*init)(void);
75298
75299 /* If this is non-NULL, vfree after init() returns */
75300- void *module_init;
75301+ void *module_init_rx, *module_init_rw;
75302
75303 /* Here is the actual code + data, vfree'd on unload. */
75304- void *module_core;
75305+ void *module_core_rx, *module_core_rw;
75306
75307 /* Here are the sizes of the init and core sections */
75308- unsigned int init_size, core_size;
75309+ unsigned int init_size_rw, core_size_rw;
75310
75311 /* The size of the executable code in each section. */
75312- unsigned int init_text_size, core_text_size;
75313-
75314- /* Size of RO sections of the module (text+rodata) */
75315- unsigned int init_ro_size, core_ro_size;
75316+ unsigned int init_size_rx, core_size_rx;
75317
75318 /* Arch-specific module values */
75319 struct mod_arch_specific arch;
75320@@ -349,6 +349,10 @@ struct module
75321 #ifdef CONFIG_EVENT_TRACING
75322 struct ftrace_event_call **trace_events;
75323 unsigned int num_trace_events;
75324+ struct file_operations trace_id;
75325+ struct file_operations trace_enable;
75326+ struct file_operations trace_format;
75327+ struct file_operations trace_filter;
75328 #endif
75329 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
75330 unsigned int num_ftrace_callsites;
75331@@ -396,16 +400,46 @@ bool is_module_address(unsigned long addr);
75332 bool is_module_percpu_address(unsigned long addr);
75333 bool is_module_text_address(unsigned long addr);
75334
75335+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
75336+{
75337+
75338+#ifdef CONFIG_PAX_KERNEXEC
75339+ if (ktla_ktva(addr) >= (unsigned long)start &&
75340+ ktla_ktva(addr) < (unsigned long)start + size)
75341+ return 1;
75342+#endif
75343+
75344+ return ((void *)addr >= start && (void *)addr < start + size);
75345+}
75346+
75347+static inline int within_module_core_rx(unsigned long addr, const struct module *mod)
75348+{
75349+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
75350+}
75351+
75352+static inline int within_module_core_rw(unsigned long addr, const struct module *mod)
75353+{
75354+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
75355+}
75356+
75357+static inline int within_module_init_rx(unsigned long addr, const struct module *mod)
75358+{
75359+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
75360+}
75361+
75362+static inline int within_module_init_rw(unsigned long addr, const struct module *mod)
75363+{
75364+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
75365+}
75366+
75367 static inline int within_module_core(unsigned long addr, const struct module *mod)
75368 {
75369- return (unsigned long)mod->module_core <= addr &&
75370- addr < (unsigned long)mod->module_core + mod->core_size;
75371+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
75372 }
75373
75374 static inline int within_module_init(unsigned long addr, const struct module *mod)
75375 {
75376- return (unsigned long)mod->module_init <= addr &&
75377- addr < (unsigned long)mod->module_init + mod->init_size;
75378+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
75379 }
75380
75381 /* Search for module by name: must hold module_mutex. */
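
[Annotation] Under KERNEXEC the patch splits each module into separate read-execute and read-write regions, so the address checks above become the OR of two range tests. A userspace mock of the same logic (mock_module and the buffers are hypothetical; the ktla_ktva relocation handling is omitted):

    #include <stdio.h>

    struct mock_module {
        void *core_rx, *core_rw;         /* split text and data regions */
        unsigned long size_rx, size_rw;
    };

    static int within(unsigned long addr, void *start, unsigned long size)
    {
        /* one unsigned compare covers both bounds */
        return addr - (unsigned long)start < size;
    }

    static int within_core(unsigned long addr, const struct mock_module *m)
    {
        /* a hit in either region counts, as in the patched helper */
        return within(addr, m->core_rx, m->size_rx) ||
               within(addr, m->core_rw, m->size_rw);
    }

    int main(void)
    {
        static char rx[64], rw[64];
        struct mock_module m = { rx, rw, sizeof rx, sizeof rw };

        printf("%d %d %d\n",
               within_core((unsigned long)(rx + 8), &m),  /* 1 */
               within_core((unsigned long)(rw + 8), &m),  /* 1 */
               within_core((unsigned long)&m, &m));       /* 0 */
        return 0;
    }
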
75382diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
75383index 560ca53..ef621ef 100644
75384--- a/include/linux/moduleloader.h
75385+++ b/include/linux/moduleloader.h
75386@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
75387 sections. Returns NULL on failure. */
75388 void *module_alloc(unsigned long size);
75389
75390+#ifdef CONFIG_PAX_KERNEXEC
75391+void *module_alloc_exec(unsigned long size);
75392+#else
75393+#define module_alloc_exec(x) module_alloc(x)
75394+#endif
75395+
75396 /* Free memory returned from module_alloc. */
75397 void module_free(struct module *mod, void *module_region);
75398
75399+#ifdef CONFIG_PAX_KERNEXEC
75400+void module_free_exec(struct module *mod, void *module_region);
75401+#else
75402+#define module_free_exec(x, y) module_free((x), (y))
75403+#endif
75404+
75405 /*
75406 * Apply the given relocation to the (simplified) ELF. Return -error
75407 * or 0.
75408@@ -45,7 +57,9 @@ static inline int apply_relocate(Elf_Shdr *sechdrs,
75409 unsigned int relsec,
75410 struct module *me)
75411 {
75412+#ifdef CONFIG_MODULES
75413 printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
75414+#endif
75415 return -ENOEXEC;
75416 }
75417 #endif
75418@@ -67,7 +81,9 @@ static inline int apply_relocate_add(Elf_Shdr *sechdrs,
75419 unsigned int relsec,
75420 struct module *me)
75421 {
75422+#ifdef CONFIG_MODULES
75423 printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
75424+#endif
75425 return -ENOEXEC;
75426 }
75427 #endif
75428diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
75429index 137b419..fe663ec 100644
75430--- a/include/linux/moduleparam.h
75431+++ b/include/linux/moduleparam.h
75432@@ -284,7 +284,7 @@ static inline void __kernel_param_unlock(void)
75433 * @len is usually just sizeof(string).
75434 */
75435 #define module_param_string(name, string, len, perm) \
75436- static const struct kparam_string __param_string_##name \
75437+ static const struct kparam_string __param_string_##name __used \
75438 = { len, string }; \
75439 __module_param_call(MODULE_PARAM_PREFIX, name, \
75440 &param_ops_string, \
75441@@ -423,7 +423,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
75442 */
75443 #define module_param_array_named(name, array, type, nump, perm) \
75444 param_check_##type(name, &(array)[0]); \
75445- static const struct kparam_array __param_arr_##name \
75446+ static const struct kparam_array __param_arr_##name __used \
75447 = { .max = ARRAY_SIZE(array), .num = nump, \
75448 .ops = &param_ops_##type, \
75449 .elemsize = sizeof(array[0]), .elem = array }; \
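
[Annotation] The __used annotations keep these parameter tables, which nothing in the translation unit references directly, from being discarded as dead by the compiler; presumably the patch's plugin passes make that discarding more likely. A compilable sketch of the attribute's effect (the table name is invented):

    #include <stdio.h>

    #define __used __attribute__((__used__))

    /* in the kernel such a table is reached only via a linker section,
     * so the compiler sees no direct reference to it */
    static const int param_table[] __used = { 1, 2, 3 };

    int main(void)
    {
        puts("param_table is emitted even with no direct reference");
        return 0;
    }
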
75450diff --git a/include/linux/namei.h b/include/linux/namei.h
75451index 5a5ff57..5ae5070 100644
75452--- a/include/linux/namei.h
75453+++ b/include/linux/namei.h
75454@@ -19,7 +19,7 @@ struct nameidata {
75455 unsigned seq;
75456 int last_type;
75457 unsigned depth;
75458- char *saved_names[MAX_NESTED_LINKS + 1];
75459+ const char *saved_names[MAX_NESTED_LINKS + 1];
75460 };
75461
75462 /*
75463@@ -84,12 +84,12 @@ extern void unlock_rename(struct dentry *, struct dentry *);
75464
75465 extern void nd_jump_link(struct nameidata *nd, struct path *path);
75466
75467-static inline void nd_set_link(struct nameidata *nd, char *path)
75468+static inline void nd_set_link(struct nameidata *nd, const char *path)
75469 {
75470 nd->saved_names[nd->depth] = path;
75471 }
75472
75473-static inline char *nd_get_link(struct nameidata *nd)
75474+static inline const char *nd_get_link(const struct nameidata *nd)
75475 {
75476 return nd->saved_names[nd->depth];
75477 }
75478diff --git a/include/linux/net.h b/include/linux/net.h
75479index 99c9f0c..e1cf296 100644
75480--- a/include/linux/net.h
75481+++ b/include/linux/net.h
75482@@ -183,7 +183,7 @@ struct net_proto_family {
75483 int (*create)(struct net *net, struct socket *sock,
75484 int protocol, int kern);
75485 struct module *owner;
75486-};
75487+} __do_const;
75488
75489 struct iovec;
75490 struct kvec;
75491diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
75492index 96e4c21..9cc8278 100644
75493--- a/include/linux/netdevice.h
75494+++ b/include/linux/netdevice.h
75495@@ -1026,6 +1026,7 @@ struct net_device_ops {
75496 int (*ndo_change_carrier)(struct net_device *dev,
75497 bool new_carrier);
75498 };
75499+typedef struct net_device_ops __no_const net_device_ops_no_const;
75500
75501 /*
75502 * The DEVICE structure.
75503@@ -1094,7 +1095,7 @@ struct net_device {
75504 int iflink;
75505
75506 struct net_device_stats stats;
75507- atomic_long_t rx_dropped; /* dropped packets by core network
75508+ atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
75509 * Do not use this in drivers.
75510 */
75511
75512diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
75513index 0060fde..481c6ae 100644
75514--- a/include/linux/netfilter.h
75515+++ b/include/linux/netfilter.h
75516@@ -82,7 +82,7 @@ struct nf_sockopt_ops {
75517 #endif
75518 /* Use the module struct to lock set/get code in place */
75519 struct module *owner;
75520-};
75521+} __do_const;
75522
75523 /* Function to register/unregister hook points. */
75524 int nf_register_hook(struct nf_hook_ops *reg);
75525diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
75526index d80e275..c3510b8 100644
75527--- a/include/linux/netfilter/ipset/ip_set.h
75528+++ b/include/linux/netfilter/ipset/ip_set.h
75529@@ -124,7 +124,7 @@ struct ip_set_type_variant {
75530 /* Return true if "b" set is the same as "a"
75531 * according to the create set parameters */
75532 bool (*same_set)(const struct ip_set *a, const struct ip_set *b);
75533-};
75534+} __do_const;
75535
75536 /* The core set type structure */
75537 struct ip_set_type {
75538diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
75539index cadb740..d7c37c0 100644
75540--- a/include/linux/netfilter/nfnetlink.h
75541+++ b/include/linux/netfilter/nfnetlink.h
75542@@ -16,7 +16,7 @@ struct nfnl_callback {
75543 const struct nlattr * const cda[]);
75544 const struct nla_policy *policy; /* netlink attribute policy */
75545 const u_int16_t attr_count; /* number of nlattr's */
75546-};
75547+} __do_const;
75548
75549 struct nfnetlink_subsystem {
75550 const char *name;
75551diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
75552new file mode 100644
75553index 0000000..33f4af8
75554--- /dev/null
75555+++ b/include/linux/netfilter/xt_gradm.h
75556@@ -0,0 +1,9 @@
75557+#ifndef _LINUX_NETFILTER_XT_GRADM_H
75558+#define _LINUX_NETFILTER_XT_GRADM_H 1
75559+
75560+struct xt_gradm_mtinfo {
75561+ __u16 flags;
75562+ __u16 invflags;
75563+};
75564+
75565+#endif
75566diff --git a/include/linux/nls.h b/include/linux/nls.h
75567index 5dc635f..35f5e11 100644
75568--- a/include/linux/nls.h
75569+++ b/include/linux/nls.h
75570@@ -31,7 +31,7 @@ struct nls_table {
75571 const unsigned char *charset2upper;
75572 struct module *owner;
75573 struct nls_table *next;
75574-};
75575+} __do_const;
75576
75577 /* this value hold the maximum octet of charset */
75578 #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
75579diff --git a/include/linux/notifier.h b/include/linux/notifier.h
75580index d14a4c3..a078786 100644
75581--- a/include/linux/notifier.h
75582+++ b/include/linux/notifier.h
75583@@ -54,7 +54,8 @@ struct notifier_block {
75584 notifier_fn_t notifier_call;
75585 struct notifier_block __rcu *next;
75586 int priority;
75587-};
75588+} __do_const;
75589+typedef struct notifier_block __no_const notifier_block_no_const;
75590
75591 struct atomic_notifier_head {
75592 spinlock_t lock;
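
[Annotation] __do_const (a PaX constify-plugin attribute) moves instances of the annotated structure into read-only memory, and the __no_const typedef gives code that must build such an object at runtime a writable alias. A plain-C userspace analogue of the pairing (ops, twice, and fixed are invented names):

    #include <stdio.h>

    struct ops { int (*call)(int); };
    typedef struct ops ops_no_const;  /* writable alias, as the typedef provides */

    static int twice(int x) { return 2 * x; }

    /* with the constify plugin, __do_const instances land in .rodata,
     * much like this explicitly const table */
    static const struct ops fixed = { twice };

    int main(void)
    {
        ops_no_const dyn;   /* built at runtime, must stay writable */
        dyn.call = twice;
        printf("%d %d\n", fixed.call(3), dyn.call(4));
        return 0;
    }
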
75593diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
75594index a4c5624..79d6d88 100644
75595--- a/include/linux/oprofile.h
75596+++ b/include/linux/oprofile.h
75597@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
75598 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
75599 char const * name, ulong * val);
75600
75601-/** Create a file for read-only access to an atomic_t. */
75602+/** Create a file for read-only access to an atomic_unchecked_t. */
75603 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
75604- char const * name, atomic_t * val);
75605+ char const * name, atomic_unchecked_t * val);
75606
75607 /** create a directory */
75608 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
75609diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
75610index 8db71dc..a76bf2c 100644
75611--- a/include/linux/pci_hotplug.h
75612+++ b/include/linux/pci_hotplug.h
75613@@ -80,7 +80,8 @@ struct hotplug_slot_ops {
75614 int (*get_attention_status) (struct hotplug_slot *slot, u8 *value);
75615 int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
75616 int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
75617-};
75618+} __do_const;
75619+typedef struct hotplug_slot_ops __no_const hotplug_slot_ops_no_const;
75620
75621 /**
75622 * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
75623diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
75624index c5b6dbf..b124155 100644
75625--- a/include/linux/perf_event.h
75626+++ b/include/linux/perf_event.h
75627@@ -318,8 +318,8 @@ struct perf_event {
75628
75629 enum perf_event_active_state state;
75630 unsigned int attach_state;
75631- local64_t count;
75632- atomic64_t child_count;
75633+ local64_t count; /* PaX: fix it one day */
75634+ atomic64_unchecked_t child_count;
75635
75636 /*
75637 * These are the total time in nanoseconds that the event
75638@@ -370,8 +370,8 @@ struct perf_event {
75639 * These accumulate total time (in nanoseconds) that children
75640 * events have been enabled and running, respectively.
75641 */
75642- atomic64_t child_total_time_enabled;
75643- atomic64_t child_total_time_running;
75644+ atomic64_unchecked_t child_total_time_enabled;
75645+ atomic64_unchecked_t child_total_time_running;
75646
75647 /*
75648 * Protect attach/detach and child_list:
75649@@ -692,7 +692,7 @@ static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64
75650 entry->ip[entry->nr++] = ip;
75651 }
75652
75653-extern int sysctl_perf_event_paranoid;
75654+extern int sysctl_perf_event_legitimately_concerned;
75655 extern int sysctl_perf_event_mlock;
75656 extern int sysctl_perf_event_sample_rate;
75657
75658@@ -700,19 +700,24 @@ extern int perf_proc_update_handler(struct ctl_table *table, int write,
75659 void __user *buffer, size_t *lenp,
75660 loff_t *ppos);
75661
75662+static inline bool perf_paranoid_any(void)
75663+{
75664+ return sysctl_perf_event_legitimately_concerned > 2;
75665+}
75666+
75667 static inline bool perf_paranoid_tracepoint_raw(void)
75668 {
75669- return sysctl_perf_event_paranoid > -1;
75670+ return sysctl_perf_event_legitimately_concerned > -1;
75671 }
75672
75673 static inline bool perf_paranoid_cpu(void)
75674 {
75675- return sysctl_perf_event_paranoid > 0;
75676+ return sysctl_perf_event_legitimately_concerned > 0;
75677 }
75678
75679 static inline bool perf_paranoid_kernel(void)
75680 {
75681- return sysctl_perf_event_paranoid > 1;
75682+ return sysctl_perf_event_legitimately_concerned > 1;
75683 }
75684
75685 extern void perf_event_init(void);
75686@@ -806,7 +811,7 @@ static inline void perf_restore_debug_store(void) { }
75687 */
75688 #define perf_cpu_notifier(fn) \
75689 do { \
75690- static struct notifier_block fn##_nb __cpuinitdata = \
75691+ static struct notifier_block fn##_nb = \
75692 { .notifier_call = fn, .priority = CPU_PRI_PERF }; \
75693 unsigned long cpu = smp_processor_id(); \
75694 unsigned long flags; \
75695@@ -826,7 +831,7 @@ struct perf_pmu_events_attr {
75696 struct device_attribute attr;
75697 u64 id;
75698 const char *event_str;
75699-};
75700+} __do_const;
75701
75702 #define PMU_EVENT_ATTR(_name, _var, _id, _show) \
75703 static struct perf_pmu_events_attr _var = { \
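
[Annotation] Rename aside, the hunk above adds a fourth paranoia tier (level > 2) on top of the existing three. A table of what each helper returns per level (sketch; `level` stands in for the renamed sysctl):

    #include <stdio.h>

    static int level;  /* stand-in for the renamed sysctl */

    static int paranoid_tracepoint_raw(void) { return level > -1; }
    static int paranoid_cpu(void)            { return level >  0; }
    static int paranoid_kernel(void)         { return level >  1; }
    static int paranoid_any(void)            { return level >  2; }  /* new tier */

    int main(void)
    {
        for (level = -1; level <= 3; level++)
            printf("level %2d: raw=%d cpu=%d kernel=%d any=%d\n", level,
                   paranoid_tracepoint_raw(), paranoid_cpu(),
                   paranoid_kernel(), paranoid_any());
        return 0;
    }
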
75704diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
75705index b8809fe..ae4ccd0 100644
75706--- a/include/linux/pipe_fs_i.h
75707+++ b/include/linux/pipe_fs_i.h
75708@@ -47,10 +47,10 @@ struct pipe_inode_info {
75709 struct mutex mutex;
75710 wait_queue_head_t wait;
75711 unsigned int nrbufs, curbuf, buffers;
75712- unsigned int readers;
75713- unsigned int writers;
75714- unsigned int files;
75715- unsigned int waiting_writers;
75716+ atomic_t readers;
75717+ atomic_t writers;
75718+ atomic_t files;
75719+ atomic_t waiting_writers;
75720 unsigned int r_counter;
75721 unsigned int w_counter;
75722 struct page *tmp_page;
75723diff --git a/include/linux/platform_data/usb-ehci-s5p.h b/include/linux/platform_data/usb-ehci-s5p.h
75724index 5f28cae..3d23723 100644
75725--- a/include/linux/platform_data/usb-ehci-s5p.h
75726+++ b/include/linux/platform_data/usb-ehci-s5p.h
75727@@ -14,7 +14,7 @@
75728 struct s5p_ehci_platdata {
75729 int (*phy_init)(struct platform_device *pdev, int type);
75730 int (*phy_exit)(struct platform_device *pdev, int type);
75731-};
75732+} __no_const;
75733
75734 extern void s5p_ehci_set_platdata(struct s5p_ehci_platdata *pd);
75735
75736diff --git a/include/linux/platform_data/usb-ohci-exynos.h b/include/linux/platform_data/usb-ohci-exynos.h
75737index c256c59..8ea94c7 100644
75738--- a/include/linux/platform_data/usb-ohci-exynos.h
75739+++ b/include/linux/platform_data/usb-ohci-exynos.h
75740@@ -14,7 +14,7 @@
75741 struct exynos4_ohci_platdata {
75742 int (*phy_init)(struct platform_device *pdev, int type);
75743 int (*phy_exit)(struct platform_device *pdev, int type);
75744-};
75745+} __no_const;
75746
75747 extern void exynos4_ohci_set_platdata(struct exynos4_ohci_platdata *pd);
75748
75749diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
75750index 7c1d252..c5c773e 100644
75751--- a/include/linux/pm_domain.h
75752+++ b/include/linux/pm_domain.h
75753@@ -48,7 +48,7 @@ struct gpd_dev_ops {
75754
75755 struct gpd_cpu_data {
75756 unsigned int saved_exit_latency;
75757- struct cpuidle_state *idle_state;
75758+ cpuidle_state_no_const *idle_state;
75759 };
75760
75761 struct generic_pm_domain {
75762diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
75763index 7d7e09e..8671ef8 100644
75764--- a/include/linux/pm_runtime.h
75765+++ b/include/linux/pm_runtime.h
75766@@ -104,7 +104,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
75767
75768 static inline void pm_runtime_mark_last_busy(struct device *dev)
75769 {
75770- ACCESS_ONCE(dev->power.last_busy) = jiffies;
75771+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
75772 }
75773
75774 #else /* !CONFIG_PM_RUNTIME */
75775diff --git a/include/linux/pnp.h b/include/linux/pnp.h
75776index 195aafc..49a7bc2 100644
75777--- a/include/linux/pnp.h
75778+++ b/include/linux/pnp.h
75779@@ -297,7 +297,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
75780 struct pnp_fixup {
75781 char id[7];
75782 void (*quirk_function) (struct pnp_dev * dev); /* fixup function */
75783-};
75784+} __do_const;
75785
75786 /* config parameters */
75787 #define PNP_CONFIG_NORMAL 0x0001
75788diff --git a/include/linux/poison.h b/include/linux/poison.h
75789index 2110a81..13a11bb 100644
75790--- a/include/linux/poison.h
75791+++ b/include/linux/poison.h
75792@@ -19,8 +19,8 @@
75793 * under normal circumstances, used to verify that nobody uses
75794 * non-initialized list entries.
75795 */
75796-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
75797-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
75798+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
75799+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
75800
75801 /********** include/linux/timer.h **********/
75802 /*
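
[Annotation] The replacement poison values sit at the top of the address space, which userland can never map, so dereferencing a poisoned list pointer always faults instead of landing in memory an attacker could have placed at the old low addresses. A trivial printout of the two choices:

    #include <stdio.h>

    int main(void)
    {
        /* old: low address, potentially user-mappable via mmap */
        printf("old LIST_POISON1: %p\n", (void *)0x00100100);
        /* new: top of the address space, never mappable from userland */
        printf("new LIST_POISON1: %p\n", (void *)(long)0xFFFFFF01);
        return 0;
    }
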
75803diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
75804index c0f44c2..1572583 100644
75805--- a/include/linux/power/smartreflex.h
75806+++ b/include/linux/power/smartreflex.h
75807@@ -238,7 +238,7 @@ struct omap_sr_class_data {
75808 int (*notify)(struct omap_sr *sr, u32 status);
75809 u8 notify_flags;
75810 u8 class_type;
75811-};
75812+} __do_const;
75813
75814 /**
75815 * struct omap_sr_nvalue_table - Smartreflex n-target value info
75816diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h
75817index 4ea1d37..80f4b33 100644
75818--- a/include/linux/ppp-comp.h
75819+++ b/include/linux/ppp-comp.h
75820@@ -84,7 +84,7 @@ struct compressor {
75821 struct module *owner;
75822 /* Extra skb space needed by the compressor algorithm */
75823 unsigned int comp_extra;
75824-};
75825+} __do_const;
75826
75827 /*
75828 * The return value from decompress routine is the length of the
75829diff --git a/include/linux/preempt.h b/include/linux/preempt.h
75830index f5d4723..a6ea2fa 100644
75831--- a/include/linux/preempt.h
75832+++ b/include/linux/preempt.h
75833@@ -18,8 +18,13 @@
75834 # define sub_preempt_count(val) do { preempt_count() -= (val); } while (0)
75835 #endif
75836
75837+#define raw_add_preempt_count(val) do { preempt_count() += (val); } while (0)
75838+#define raw_sub_preempt_count(val) do { preempt_count() -= (val); } while (0)
75839+
75840 #define inc_preempt_count() add_preempt_count(1)
75841+#define raw_inc_preempt_count() raw_add_preempt_count(1)
75842 #define dec_preempt_count() sub_preempt_count(1)
75843+#define raw_dec_preempt_count() raw_sub_preempt_count(1)
75844
75845 #define preempt_count() (current_thread_info()->preempt_count)
75846
75847@@ -64,6 +69,12 @@ do { \
75848 barrier(); \
75849 } while (0)
75850
75851+#define raw_preempt_disable() \
75852+do { \
75853+ raw_inc_preempt_count(); \
75854+ barrier(); \
75855+} while (0)
75856+
75857 #define sched_preempt_enable_no_resched() \
75858 do { \
75859 barrier(); \
75860@@ -72,6 +83,12 @@ do { \
75861
75862 #define preempt_enable_no_resched() sched_preempt_enable_no_resched()
75863
75864+#define raw_preempt_enable_no_resched() \
75865+do { \
75866+ barrier(); \
75867+ raw_dec_preempt_count(); \
75868+} while (0)
75869+
75870 #define preempt_enable() \
75871 do { \
75872 preempt_enable_no_resched(); \
75873@@ -116,8 +133,10 @@ do { \
75874 * region.
75875 */
75876 #define preempt_disable() barrier()
75877+#define raw_preempt_disable() barrier()
75878 #define sched_preempt_enable_no_resched() barrier()
75879 #define preempt_enable_no_resched() barrier()
75880+#define raw_preempt_enable_no_resched() barrier()
75881 #define preempt_enable() barrier()
75882
75883 #define preempt_disable_notrace() barrier()
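
[Annotation] The raw_ variants bypass add_preempt_count()/sub_preempt_count(), which can be instrumented functions under preempt debugging, so they are usable in paths that must not recurse into that instrumentation. A userspace mock of the distinction (the printf stands in for the debug hooks):

    #include <stdio.h>

    static int preempt_count;

    static void add_preempt_count(int val)
    {
        printf("debug hook ran\n");  /* may itself take traced paths */
        preempt_count += val;
    }

    #define raw_add_preempt_count(val) do { preempt_count += (val); } while (0)

    int main(void)
    {
        add_preempt_count(1);      /* instrumented path */
        raw_add_preempt_count(1);  /* plain increment, no hooks */
        printf("count=%d\n", preempt_count);
        return 0;
    }
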
75884diff --git a/include/linux/printk.h b/include/linux/printk.h
75885index 22c7052..ad3fa0a 100644
75886--- a/include/linux/printk.h
75887+++ b/include/linux/printk.h
75888@@ -106,6 +106,8 @@ static inline __printf(1, 2) __cold
75889 void early_printk(const char *s, ...) { }
75890 #endif
75891
75892+extern int kptr_restrict;
75893+
75894 #ifdef CONFIG_PRINTK
75895 asmlinkage __printf(5, 0)
75896 int vprintk_emit(int facility, int level,
75897@@ -140,7 +142,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
75898
75899 extern int printk_delay_msec;
75900 extern int dmesg_restrict;
75901-extern int kptr_restrict;
75902
75903 extern void wake_up_klogd(void);
75904
75905diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
75906index 608e60a..c26f864 100644
75907--- a/include/linux/proc_fs.h
75908+++ b/include/linux/proc_fs.h
75909@@ -34,6 +34,19 @@ static inline struct proc_dir_entry *proc_create(
75910 return proc_create_data(name, mode, parent, proc_fops, NULL);
75911 }
75912
75913+static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
75914+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
75915+{
75916+#ifdef CONFIG_GRKERNSEC_PROC_USER
75917+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
75918+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
75919+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
75920+#else
75921+ return proc_create_data(name, mode, parent, proc_fops, NULL);
75922+#endif
75923+}
75924+
75925+
75926 extern void proc_set_size(struct proc_dir_entry *, loff_t);
75927 extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t);
75928 extern void *PDE_DATA(const struct inode *);
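
[Annotation] proc_create_grsec() narrows the requested mode according to the configured /proc restriction policy. A sketch of the resulting mode selection (grsec_mode and the two flag parameters stand in for the CONFIG_GRKERNSEC_PROC_USER / _USERGROUP conditionals):

    #include <stdio.h>
    #include <sys/stat.h>

    static mode_t grsec_mode(mode_t requested, int user_only, int usergroup)
    {
        if (user_only)
            return S_IRUSR;            /* 0400 */
        if (usergroup)
            return S_IRUSR | S_IRGRP;  /* 0440 */
        return requested;
    }

    int main(void)
    {
        printf("%04o %04o %04o\n",
               (unsigned)grsec_mode(0644, 1, 0),   /* 0400 */
               (unsigned)grsec_mode(0644, 0, 1),   /* 0440 */
               (unsigned)grsec_mode(0644, 0, 0));  /* 0644 */
        return 0;
    }
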
75929diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
75930index 34a1e10..03a6d03 100644
75931--- a/include/linux/proc_ns.h
75932+++ b/include/linux/proc_ns.h
75933@@ -14,7 +14,7 @@ struct proc_ns_operations {
75934 void (*put)(void *ns);
75935 int (*install)(struct nsproxy *nsproxy, void *ns);
75936 unsigned int (*inum)(void *ns);
75937-};
75938+} __do_const;
75939
75940 struct proc_ns {
75941 void *ns;
75942diff --git a/include/linux/random.h b/include/linux/random.h
75943index 3b9377d..61b506a 100644
75944--- a/include/linux/random.h
75945+++ b/include/linux/random.h
75946@@ -32,6 +32,11 @@ void prandom_seed(u32 seed);
75947 u32 prandom_u32_state(struct rnd_state *);
75948 void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
75949
75950+static inline unsigned long pax_get_random_long(void)
75951+{
75952+ return prandom_u32() + (sizeof(long) > 4 ? (unsigned long)prandom_u32() << 32 : 0);
75953+}
75954+
75955 /*
75956 * Handle minimum values for seeds
75957 */
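
[Annotation] pax_get_random_long() composes two 32-bit draws into a full random long on 64-bit and degrades to a single draw on 32-bit. A userspace analogue (rand() is a weak stand-in for prandom_u32(); the double 16-bit shift keeps the expression well-defined even when long is 32 bits wide):

    #include <stdio.h>
    #include <stdlib.h>

    static unsigned long get_random_long(void)
    {
        unsigned long r = (unsigned int)rand();

        if (sizeof(long) > 4)
            r |= (unsigned long)(unsigned int)rand() << 16 << 16;
        return r;
    }

    int main(void)
    {
        srand(1);
        printf("%#lx\n", get_random_long());
        return 0;
    }
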
75958diff --git a/include/linux/rculist.h b/include/linux/rculist.h
75959index f4b1001..8ddb2b6 100644
75960--- a/include/linux/rculist.h
75961+++ b/include/linux/rculist.h
75962@@ -44,6 +44,9 @@ extern void __list_add_rcu(struct list_head *new,
75963 struct list_head *prev, struct list_head *next);
75964 #endif
75965
75966+extern void __pax_list_add_rcu(struct list_head *new,
75967+ struct list_head *prev, struct list_head *next);
75968+
75969 /**
75970 * list_add_rcu - add a new entry to rcu-protected list
75971 * @new: new entry to be added
75972@@ -65,6 +68,11 @@ static inline void list_add_rcu(struct list_head *new, struct list_head *head)
75973 __list_add_rcu(new, head, head->next);
75974 }
75975
75976+static inline void pax_list_add_rcu(struct list_head *new, struct list_head *head)
75977+{
75978+ __pax_list_add_rcu(new, head, head->next);
75979+}
75980+
75981 /**
75982 * list_add_tail_rcu - add a new entry to rcu-protected list
75983 * @new: new entry to be added
75984@@ -87,6 +95,12 @@ static inline void list_add_tail_rcu(struct list_head *new,
75985 __list_add_rcu(new, head->prev, head);
75986 }
75987
75988+static inline void pax_list_add_tail_rcu(struct list_head *new,
75989+ struct list_head *head)
75990+{
75991+ __pax_list_add_rcu(new, head->prev, head);
75992+}
75993+
75994 /**
75995 * list_del_rcu - deletes entry from list without re-initialization
75996 * @entry: the element to delete from the list.
75997@@ -117,6 +131,8 @@ static inline void list_del_rcu(struct list_head *entry)
75998 entry->prev = LIST_POISON2;
75999 }
76000
76001+extern void pax_list_del_rcu(struct list_head *entry);
76002+
76003 /**
76004 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
76005 * @n: the element to delete from the hash list.
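
[Annotation] The pax_list_* wrappers exist because constified objects (notifier blocks and the like) live in read-only memory, so the ordinary list helpers cannot write their link pointers; the wrappers lift write protection around the update. A rough userspace analogue using mprotect() (the real primitives are pax_open_kernel()/pax_close_kernel(); everything here is a stand-in):

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long pagesz = sysconf(_SC_PAGESIZE);
        char *obj = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (obj == MAP_FAILED)
            return 1;
        strcpy(obj, "old link");
        mprotect(obj, pagesz, PROT_READ);               /* "constified" */

        mprotect(obj, pagesz, PROT_READ | PROT_WRITE);  /* pax_open_kernel() */
        strcpy(obj, "new link");                        /* the pointer update */
        mprotect(obj, pagesz, PROT_READ);               /* pax_close_kernel() */

        puts(obj);
        munmap(obj, pagesz);
        return 0;
    }
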
76006diff --git a/include/linux/reboot.h b/include/linux/reboot.h
76007index 23b3630..e1bc12b 100644
76008--- a/include/linux/reboot.h
76009+++ b/include/linux/reboot.h
76010@@ -18,9 +18,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
76011 * Architecture-specific implementations of sys_reboot commands.
76012 */
76013
76014-extern void machine_restart(char *cmd);
76015-extern void machine_halt(void);
76016-extern void machine_power_off(void);
76017+extern void machine_restart(char *cmd) __noreturn;
76018+extern void machine_halt(void) __noreturn;
76019+extern void machine_power_off(void) __noreturn;
76020
76021 extern void machine_shutdown(void);
76022 struct pt_regs;
76023@@ -31,9 +31,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
76024 */
76025
76026 extern void kernel_restart_prepare(char *cmd);
76027-extern void kernel_restart(char *cmd);
76028-extern void kernel_halt(void);
76029-extern void kernel_power_off(void);
76030+extern void kernel_restart(char *cmd) __noreturn;
76031+extern void kernel_halt(void) __noreturn;
76032+extern void kernel_power_off(void) __noreturn;
76033
76034 extern int C_A_D; /* for sysctl */
76035 void ctrl_alt_del(void);
76036@@ -47,7 +47,7 @@ extern int orderly_poweroff(bool force);
76037 * Emergency restart, callable from an interrupt handler.
76038 */
76039
76040-extern void emergency_restart(void);
76041+extern void emergency_restart(void) __noreturn;
76042 #include <asm/emergency-restart.h>
76043
76044 #endif /* _LINUX_REBOOT_H */
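
[Annotation] Marking the halt/restart entry points __noreturn lets the compiler drop unreachable code behind the call sites and diagnose implementations that could fall through. A compilable illustration of the attribute (die() is an invented example):

    #include <stdio.h>
    #include <stdlib.h>

    __attribute__((noreturn)) static void die(const char *msg)
    {
        fprintf(stderr, "%s\n", msg);
        exit(1);
    }

    int main(int argc, char **argv)
    {
        if (argc < 2)
            die("usage: prog <arg>");
        /* the compiler knows execution reaches here only when argc >= 2 */
        printf("got %s\n", argv[1]);
        return 0;
    }
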
76045diff --git a/include/linux/regset.h b/include/linux/regset.h
76046index 8e0c9fe..ac4d221 100644
76047--- a/include/linux/regset.h
76048+++ b/include/linux/regset.h
76049@@ -161,7 +161,8 @@ struct user_regset {
76050 unsigned int align;
76051 unsigned int bias;
76052 unsigned int core_note_type;
76053-};
76054+} __do_const;
76055+typedef struct user_regset __no_const user_regset_no_const;
76056
76057 /**
76058 * struct user_regset_view - available regsets
76059diff --git a/include/linux/relay.h b/include/linux/relay.h
76060index d7c8359..818daf5 100644
76061--- a/include/linux/relay.h
76062+++ b/include/linux/relay.h
76063@@ -157,7 +157,7 @@ struct rchan_callbacks
76064 * The callback should return 0 if successful, negative if not.
76065 */
76066 int (*remove_buf_file)(struct dentry *dentry);
76067-};
76068+} __no_const;
76069
76070 /*
76071 * CONFIG_RELAY kernel API, kernel/relay.c
76072diff --git a/include/linux/rio.h b/include/linux/rio.h
76073index 18e0993..8ab5b21 100644
76074--- a/include/linux/rio.h
76075+++ b/include/linux/rio.h
76076@@ -345,7 +345,7 @@ struct rio_ops {
76077 int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
76078 u64 rstart, u32 size, u32 flags);
76079 void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
76080-};
76081+} __no_const;
76082
76083 #define RIO_RESOURCE_MEM 0x00000100
76084 #define RIO_RESOURCE_DOORBELL 0x00000200
76085diff --git a/include/linux/rmap.h b/include/linux/rmap.h
76086index 6dacb93..6174423 100644
76087--- a/include/linux/rmap.h
76088+++ b/include/linux/rmap.h
76089@@ -145,8 +145,8 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
76090 void anon_vma_init(void); /* create anon_vma_cachep */
76091 int anon_vma_prepare(struct vm_area_struct *);
76092 void unlink_anon_vmas(struct vm_area_struct *);
76093-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
76094-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
76095+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
76096+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
76097
76098 static inline void anon_vma_merge(struct vm_area_struct *vma,
76099 struct vm_area_struct *next)
76100diff --git a/include/linux/sched.h b/include/linux/sched.h
76101index 3aeb14b..73816a6 100644
76102--- a/include/linux/sched.h
76103+++ b/include/linux/sched.h
76104@@ -62,6 +62,7 @@ struct bio_list;
76105 struct fs_struct;
76106 struct perf_event_context;
76107 struct blk_plug;
76108+struct linux_binprm;
76109
76110 /*
76111 * List of flags we want to share for kernel threads,
76112@@ -303,7 +304,7 @@ extern char __sched_text_start[], __sched_text_end[];
76113 extern int in_sched_functions(unsigned long addr);
76114
76115 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
76116-extern signed long schedule_timeout(signed long timeout);
76117+extern signed long schedule_timeout(signed long timeout) __intentional_overflow(-1);
76118 extern signed long schedule_timeout_interruptible(signed long timeout);
76119 extern signed long schedule_timeout_killable(signed long timeout);
76120 extern signed long schedule_timeout_uninterruptible(signed long timeout);
76121@@ -314,7 +315,19 @@ struct nsproxy;
76122 struct user_namespace;
76123
76124 #ifdef CONFIG_MMU
76125-extern unsigned long mmap_legacy_base(void);
76126+
76127+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
76128+extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
76129+#else
76130+static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
76131+{
76132+ return 0;
76133+}
76134+#endif
76135+
76136+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
76137+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
76138+extern unsigned long mmap_legacy_base(struct mm_struct *mm);
76139 extern void arch_pick_mmap_layout(struct mm_struct *mm);
76140 extern unsigned long
76141 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
76142@@ -592,6 +605,17 @@ struct signal_struct {
76143 #ifdef CONFIG_TASKSTATS
76144 struct taskstats *stats;
76145 #endif
76146+
76147+#ifdef CONFIG_GRKERNSEC
76148+ u32 curr_ip;
76149+ u32 saved_ip;
76150+ u32 gr_saddr;
76151+ u32 gr_daddr;
76152+ u16 gr_sport;
76153+ u16 gr_dport;
76154+ u8 used_accept:1;
76155+#endif
76156+
76157 #ifdef CONFIG_AUDIT
76158 unsigned audit_tty;
76159 unsigned audit_tty_log_passwd;
76160@@ -672,6 +696,14 @@ struct user_struct {
76161 struct key *session_keyring; /* UID's default session keyring */
76162 #endif
76163
76164+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
76165+ unsigned char kernel_banned;
76166+#endif
76167+#ifdef CONFIG_GRKERNSEC_BRUTE
76168+ unsigned char suid_banned;
76169+ unsigned long suid_ban_expires;
76170+#endif
76171+
76172 /* Hash table maintenance information */
76173 struct hlist_node uidhash_node;
76174 kuid_t uid;
76175@@ -1159,8 +1191,8 @@ struct task_struct {
76176 struct list_head thread_group;
76177
76178 struct completion *vfork_done; /* for vfork() */
76179- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
76180- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
76181+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
76182+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
76183
76184 cputime_t utime, stime, utimescaled, stimescaled;
76185 cputime_t gtime;
76186@@ -1185,11 +1217,6 @@ struct task_struct {
76187 struct task_cputime cputime_expires;
76188 struct list_head cpu_timers[3];
76189
76190-/* process credentials */
76191- const struct cred __rcu *real_cred; /* objective and real subjective task
76192- * credentials (COW) */
76193- const struct cred __rcu *cred; /* effective (overridable) subjective task
76194- * credentials (COW) */
76195 char comm[TASK_COMM_LEN]; /* executable name excluding path
76196 - access with [gs]et_task_comm (which lock
76197 it with task_lock())
76198@@ -1206,6 +1233,10 @@ struct task_struct {
76199 #endif
76200 /* CPU-specific state of this task */
76201 struct thread_struct thread;
76202+/* thread_info moved to task_struct */
76203+#ifdef CONFIG_X86
76204+ struct thread_info tinfo;
76205+#endif
76206 /* filesystem information */
76207 struct fs_struct *fs;
76208 /* open file information */
76209@@ -1279,6 +1310,10 @@ struct task_struct {
76210 gfp_t lockdep_reclaim_gfp;
76211 #endif
76212
76213+/* process credentials */
76214+ const struct cred __rcu *real_cred; /* objective and real subjective task
76215+ * credentials (COW) */
76216+
76217 /* journalling filesystem info */
76218 void *journal_info;
76219
76220@@ -1317,6 +1352,10 @@ struct task_struct {
76221 /* cg_list protected by css_set_lock and tsk->alloc_lock */
76222 struct list_head cg_list;
76223 #endif
76224+
76225+ const struct cred __rcu *cred; /* effective (overridable) subjective task
76226+ * credentials (COW) */
76227+
76228 #ifdef CONFIG_FUTEX
76229 struct robust_list_head __user *robust_list;
76230 #ifdef CONFIG_COMPAT
76231@@ -1417,8 +1456,76 @@ struct task_struct {
76232 unsigned int sequential_io;
76233 unsigned int sequential_io_avg;
76234 #endif
76235+
76236+#ifdef CONFIG_GRKERNSEC
76237+ /* grsecurity */
76238+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
76239+ u64 exec_id;
76240+#endif
76241+#ifdef CONFIG_GRKERNSEC_SETXID
76242+ const struct cred *delayed_cred;
76243+#endif
76244+ struct dentry *gr_chroot_dentry;
76245+ struct acl_subject_label *acl;
76246+ struct acl_role_label *role;
76247+ struct file *exec_file;
76248+ unsigned long brute_expires;
76249+ u16 acl_role_id;
76250+ /* is this the task that authenticated to the special role */
76251+ u8 acl_sp_role;
76252+ u8 is_writable;
76253+ u8 brute;
76254+ u8 gr_is_chrooted;
76255+#endif
76256+
76257 };
76258
76259+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
76260+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
76261+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
76262+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
76263+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
76264+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
76265+
76266+#ifdef CONFIG_PAX_SOFTMODE
76267+extern int pax_softmode;
76268+#endif
76269+
76270+extern int pax_check_flags(unsigned long *);
76271+
76272+/* if tsk != current then task_lock must be held on it */
76273+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
76274+static inline unsigned long pax_get_flags(struct task_struct *tsk)
76275+{
76276+ if (likely(tsk->mm))
76277+ return tsk->mm->pax_flags;
76278+ else
76279+ return 0UL;
76280+}
76281+
76282+/* if tsk != current then task_lock must be held on it */
76283+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
76284+{
76285+ if (likely(tsk->mm)) {
76286+ tsk->mm->pax_flags = flags;
76287+ return 0;
76288+ }
76289+ return -EINVAL;
76290+}
76291+#endif
76292+
76293+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
76294+extern void pax_set_initial_flags(struct linux_binprm *bprm);
76295+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
76296+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
76297+#endif
76298+
76299+struct path;
76300+extern char *pax_get_path(const struct path *path, char *buf, int buflen);
76301+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
76302+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
76303+extern void pax_report_refcount_overflow(struct pt_regs *regs);
76304+
76305 /* Future-safe accessor for struct task_struct's cpus_allowed. */
76306 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
76307
76308@@ -1477,7 +1584,7 @@ struct pid_namespace;
76309 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
76310 struct pid_namespace *ns);
76311
76312-static inline pid_t task_pid_nr(struct task_struct *tsk)
76313+static inline pid_t task_pid_nr(const struct task_struct *tsk)
76314 {
76315 return tsk->pid;
76316 }
76317@@ -1920,7 +2027,9 @@ void yield(void);
76318 extern struct exec_domain default_exec_domain;
76319
76320 union thread_union {
76321+#ifndef CONFIG_X86
76322 struct thread_info thread_info;
76323+#endif
76324 unsigned long stack[THREAD_SIZE/sizeof(long)];
76325 };
76326
76327@@ -1953,6 +2062,7 @@ extern struct pid_namespace init_pid_ns;
76328 */
76329
76330 extern struct task_struct *find_task_by_vpid(pid_t nr);
76331+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
76332 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
76333 struct pid_namespace *ns);
76334
76335@@ -2119,7 +2229,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
76336 extern void exit_itimers(struct signal_struct *);
76337 extern void flush_itimer_signals(void);
76338
76339-extern void do_group_exit(int);
76340+extern __noreturn void do_group_exit(int);
76341
76342 extern int allow_signal(int);
76343 extern int disallow_signal(int);
76344@@ -2310,9 +2420,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
76345
76346 #endif
76347
76348-static inline int object_is_on_stack(void *obj)
76349+static inline int object_starts_on_stack(void *obj)
76350 {
76351- void *stack = task_stack_page(current);
76352+ const void *stack = task_stack_page(current);
76353
76354 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
76355 }
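
[Annotation] The MF_PAX_* bits above record the per-mm hardening state that pax_get_flags()/pax_set_flags() read and write. A small decoder using the same bit values:

    #include <stdio.h>

    #define MF_PAX_PAGEEXEC 0x01000000UL
    #define MF_PAX_EMUTRAMP 0x02000000UL
    #define MF_PAX_MPROTECT 0x04000000UL
    #define MF_PAX_RANDMMAP 0x08000000UL
    #define MF_PAX_SEGMEXEC 0x20000000UL

    static void decode(unsigned long flags)
    {
        printf("PAGEEXEC=%d EMUTRAMP=%d MPROTECT=%d RANDMMAP=%d SEGMEXEC=%d\n",
               !!(flags & MF_PAX_PAGEEXEC), !!(flags & MF_PAX_EMUTRAMP),
               !!(flags & MF_PAX_MPROTECT), !!(flags & MF_PAX_RANDMMAP),
               !!(flags & MF_PAX_SEGMEXEC));
    }

    int main(void)
    {
        decode(MF_PAX_PAGEEXEC | MF_PAX_MPROTECT | MF_PAX_RANDMMAP);
        return 0;
    }
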
76356diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
76357index bf8086b..962b035 100644
76358--- a/include/linux/sched/sysctl.h
76359+++ b/include/linux/sched/sysctl.h
76360@@ -30,6 +30,7 @@ enum { sysctl_hung_task_timeout_secs = 0 };
76361 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
76362
76363 extern int sysctl_max_map_count;
76364+extern unsigned long sysctl_heap_stack_gap;
76365
76366 extern unsigned int sysctl_sched_latency;
76367 extern unsigned int sysctl_sched_min_granularity;
76368diff --git a/include/linux/security.h b/include/linux/security.h
76369index 4686491..2bd210e 100644
76370--- a/include/linux/security.h
76371+++ b/include/linux/security.h
76372@@ -26,6 +26,7 @@
76373 #include <linux/capability.h>
76374 #include <linux/slab.h>
76375 #include <linux/err.h>
76376+#include <linux/grsecurity.h>
76377
76378 struct linux_binprm;
76379 struct cred;
76380diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
76381index 2da29ac..aac448ec 100644
76382--- a/include/linux/seq_file.h
76383+++ b/include/linux/seq_file.h
76384@@ -26,6 +26,9 @@ struct seq_file {
76385 struct mutex lock;
76386 const struct seq_operations *op;
76387 int poll_event;
76388+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
76389+ u64 exec_id;
76390+#endif
76391 #ifdef CONFIG_USER_NS
76392 struct user_namespace *user_ns;
76393 #endif
76394@@ -38,6 +41,7 @@ struct seq_operations {
76395 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
76396 int (*show) (struct seq_file *m, void *v);
76397 };
76398+typedef struct seq_operations __no_const seq_operations_no_const;
76399
76400 #define SEQ_SKIP 1
76401
76402diff --git a/include/linux/shm.h b/include/linux/shm.h
76403index 429c199..4d42e38 100644
76404--- a/include/linux/shm.h
76405+++ b/include/linux/shm.h
76406@@ -21,6 +21,10 @@ struct shmid_kernel /* private to the kernel */
76407
76408 /* The task created the shm object. NULL if the task is dead. */
76409 struct task_struct *shm_creator;
76410+#ifdef CONFIG_GRKERNSEC
76411+ time_t shm_createtime;
76412+ pid_t shm_lapid;
76413+#endif
76414 };
76415
76416 /* shm_mode upper byte flags */
76417diff --git a/include/linux/signal.h b/include/linux/signal.h
76418index d897484..323ba98 100644
76419--- a/include/linux/signal.h
76420+++ b/include/linux/signal.h
76421@@ -433,6 +433,7 @@ void signals_init(void);
76422
76423 int restore_altstack(const stack_t __user *);
76424 int __save_altstack(stack_t __user *, unsigned long);
76425+void __save_altstack_ex(stack_t __user *, unsigned long);
76426
76427 #ifdef CONFIG_PROC_FS
76428 struct seq_file;
76429diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
76430index dec1748..112c1f9 100644
76431--- a/include/linux/skbuff.h
76432+++ b/include/linux/skbuff.h
76433@@ -640,7 +640,7 @@ extern bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
76434 extern struct sk_buff *__alloc_skb(unsigned int size,
76435 gfp_t priority, int flags, int node);
76436 extern struct sk_buff *build_skb(void *data, unsigned int frag_size);
76437-static inline struct sk_buff *alloc_skb(unsigned int size,
76438+static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
76439 gfp_t priority)
76440 {
76441 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
76442@@ -756,7 +756,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
76443 */
76444 static inline int skb_queue_empty(const struct sk_buff_head *list)
76445 {
76446- return list->next == (struct sk_buff *)list;
76447+ return list->next == (const struct sk_buff *)list;
76448 }
76449
76450 /**
76451@@ -769,7 +769,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
76452 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
76453 const struct sk_buff *skb)
76454 {
76455- return skb->next == (struct sk_buff *)list;
76456+ return skb->next == (const struct sk_buff *)list;
76457 }
76458
76459 /**
76460@@ -782,7 +782,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
76461 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
76462 const struct sk_buff *skb)
76463 {
76464- return skb->prev == (struct sk_buff *)list;
76465+ return skb->prev == (const struct sk_buff *)list;
76466 }
76467
76468 /**
76469@@ -1848,7 +1848,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
76470 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
76471 */
76472 #ifndef NET_SKB_PAD
76473-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
76474+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
76475 #endif
76476
76477 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
76478@@ -2443,7 +2443,7 @@ extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
76479 int noblock, int *err);
76480 extern unsigned int datagram_poll(struct file *file, struct socket *sock,
76481 struct poll_table_struct *wait);
76482-extern int skb_copy_datagram_iovec(const struct sk_buff *from,
76483+extern int __intentional_overflow(0) skb_copy_datagram_iovec(const struct sk_buff *from,
76484 int offset, struct iovec *to,
76485 int size);
76486 extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
76487@@ -2733,6 +2733,9 @@ static inline void nf_reset(struct sk_buff *skb)
76488 nf_bridge_put(skb->nf_bridge);
76489 skb->nf_bridge = NULL;
76490 #endif
76491+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
76492+ skb->nf_trace = 0;
76493+#endif
76494 }
76495
76496 static inline void nf_reset_trace(struct sk_buff *skb)
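
[Annotation] On the NET_SKB_PAD change: the kernel's max() rejects operands of different types, so once L1_CACHE_BYTES is an unsigned long the 32 must be spelled 32UL, which is what _AC(32,UL) expands to; that is a plausible reading of the hunk. A sketch of the type-check mechanism (mirrors the kernel macro; the printf is just a usage example):

    #include <stdio.h>

    #define max(x, y) ({                \
        typeof(x) _max1 = (x);          \
        typeof(y) _max2 = (y);          \
        (void) (&_max1 == &_max2);  /* warns when x and y differ in type */ \
        _max1 > _max2 ? _max1 : _max2; })

    int main(void)
    {
        /* max(32, sizeof(long)) would warn (int vs unsigned long);
         * 32UL compares cleanly */
        printf("%lu\n", max(32UL, sizeof(long)));
        return 0;
    }
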
76497diff --git a/include/linux/slab.h b/include/linux/slab.h
76498index 0c62175..f016ac1 100644
76499--- a/include/linux/slab.h
76500+++ b/include/linux/slab.h
76501@@ -12,15 +12,29 @@
76502 #include <linux/gfp.h>
76503 #include <linux/types.h>
76504 #include <linux/workqueue.h>
76505-
76506+#include <linux/err.h>
76507
76508 /*
76509 * Flags to pass to kmem_cache_create().
76510 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
76511 */
76512 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
76513+
76514+#ifdef CONFIG_PAX_USERCOPY_SLABS
76515+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
76516+#else
76517+#define SLAB_USERCOPY 0x00000000UL
76518+#endif
76519+
76520 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
76521 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
76522+
76523+#ifdef CONFIG_PAX_MEMORY_SANITIZE
76524+#define SLAB_NO_SANITIZE 0x00001000UL /* PaX: Do not sanitize objs on free */
76525+#else
76526+#define SLAB_NO_SANITIZE 0x00000000UL
76527+#endif
76528+
76529 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
76530 #define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
76531 #define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
76532@@ -89,10 +103,13 @@
76533 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
76534 * Both make kfree a no-op.
76535 */
76536-#define ZERO_SIZE_PTR ((void *)16)
76537+#define ZERO_SIZE_PTR \
76538+({ \
76539+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
76540+ (void *)(-MAX_ERRNO-1L); \
76541+})
76542
76543-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
76544- (unsigned long)ZERO_SIZE_PTR)
76545+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
76546
76547
76548 struct mem_cgroup;
76549@@ -132,6 +149,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
76550 void kfree(const void *);
76551 void kzfree(const void *);
76552 size_t ksize(const void *);
76553+const char *check_heap_object(const void *ptr, unsigned long n);
76554+bool is_usercopy_object(const void *ptr);
76555
76556 /*
76557 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
76558@@ -164,7 +183,7 @@ struct kmem_cache {
76559 unsigned int align; /* Alignment as calculated */
76560 unsigned long flags; /* Active flags on the slab */
76561 const char *name; /* Slab name for sysfs */
76562- int refcount; /* Use counter */
76563+ atomic_t refcount; /* Use counter */
76564 void (*ctor)(void *); /* Called on object slot creation */
76565 struct list_head list; /* List of all slab caches on the system */
76566 };
76567@@ -226,6 +245,10 @@ extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
76568 extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
76569 #endif
76570
76571+#ifdef CONFIG_PAX_USERCOPY_SLABS
76572+extern struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
76573+#endif
76574+
76575 /*
76576 * Figure out which kmalloc slab an allocation of a certain size
76577 * belongs to.
76578@@ -234,7 +257,7 @@ extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
76579 * 2 = 120 .. 192 bytes
76580 * n = 2^(n-1) .. 2^n -1
76581 */
76582-static __always_inline int kmalloc_index(size_t size)
76583+static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
76584 {
76585 if (!size)
76586 return 0;
76587@@ -406,6 +429,7 @@ void print_slabinfo_header(struct seq_file *m);
76588 * for general use, and so are not documented here. For a full list of
76589 * potential flags, always refer to linux/gfp.h.
76590 */
76591+
76592 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
76593 {
76594 if (size != 0 && n > SIZE_MAX / size)
76595@@ -465,7 +489,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
76596 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
76597 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
76598 (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
76599-extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
76600+extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long) __size_overflow(1);
76601 #define kmalloc_track_caller(size, flags) \
76602 __kmalloc_track_caller(size, flags, _RET_IP_)
76603 #else
76604@@ -485,7 +509,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
76605 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
76606 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
76607 (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
76608-extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
76609+extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long) __size_overflow(1);
76610 #define kmalloc_node_track_caller(size, flags, node) \
76611 __kmalloc_node_track_caller(size, flags, node, \
76612 _RET_IP_)
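
[Annotation] The reworked ZERO_SIZE_PTR sits just below the errno range at the top of the address space, so the new ZERO_OR_NULL_PTR() folds NULL, the zero-size cookie, and every ERR_PTR value into a single unsigned comparison. A userspace check of the arithmetic (MAX_ERRNO copied from the kernel headers; the BUILD_BUG_ON is omitted):

    #include <stdio.h>

    #define MAX_ERRNO 4095
    #define ZERO_SIZE_PTR ((void *)(-MAX_ERRNO - 1L))
    #define ZERO_OR_NULL_PTR(x) \
        ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)

    int main(void)
    {
        int real_obj;
        printf("NULL          -> %d\n", ZERO_OR_NULL_PTR((void *)0));      /* 1 */
        printf("ZERO_SIZE_PTR -> %d\n", ZERO_OR_NULL_PTR(ZERO_SIZE_PTR));  /* 1 */
        printf("ERR_PTR(-12)  -> %d\n", ZERO_OR_NULL_PTR((void *)-12L));   /* 1 */
        printf("real pointer  -> %d\n", ZERO_OR_NULL_PTR(&real_obj));      /* 0 */
        return 0;
    }
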
76613diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
76614index cd40158..4e2f7af 100644
76615--- a/include/linux/slab_def.h
76616+++ b/include/linux/slab_def.h
76617@@ -50,7 +50,7 @@ struct kmem_cache {
76618 /* 4) cache creation/removal */
76619 const char *name;
76620 struct list_head list;
76621- int refcount;
76622+ atomic_t refcount;
76623 int object_size;
76624 int align;
76625
76626@@ -66,10 +66,14 @@ struct kmem_cache {
76627 unsigned long node_allocs;
76628 unsigned long node_frees;
76629 unsigned long node_overflow;
76630- atomic_t allochit;
76631- atomic_t allocmiss;
76632- atomic_t freehit;
76633- atomic_t freemiss;
76634+ atomic_unchecked_t allochit;
76635+ atomic_unchecked_t allocmiss;
76636+ atomic_unchecked_t freehit;
76637+ atomic_unchecked_t freemiss;
76638+#ifdef CONFIG_PAX_MEMORY_SANITIZE
76639+ atomic_unchecked_t sanitized;
76640+ atomic_unchecked_t not_sanitized;
76641+#endif
76642
76643 /*
76644 * If debugging is enabled, then the allocator can add additional
76645@@ -103,7 +107,7 @@ struct kmem_cache {
76646 };
76647
76648 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
76649-void *__kmalloc(size_t size, gfp_t flags);
76650+void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
76651
76652 #ifdef CONFIG_TRACING
76653 extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
76654@@ -136,6 +140,13 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
76655 cachep = kmalloc_dma_caches[i];
76656 else
76657 #endif
76658+
76659+#ifdef CONFIG_PAX_USERCOPY_SLABS
76660+ if (flags & GFP_USERCOPY)
76661+ cachep = kmalloc_usercopy_caches[i];
76662+ else
76663+#endif
76664+
76665 cachep = kmalloc_caches[i];
76666
76667 ret = kmem_cache_alloc_trace(cachep, flags, size);
76668@@ -146,7 +157,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
76669 }
76670
76671 #ifdef CONFIG_NUMA
76672-extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
76673+extern void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
76674 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
76675
76676 #ifdef CONFIG_TRACING
76677@@ -185,6 +196,13 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
76678 cachep = kmalloc_dma_caches[i];
76679 else
76680 #endif
76681+
76682+#ifdef CONFIG_PAX_USERCOPY_SLABS
76683+ if (flags & GFP_USERCOPY)
76684+ cachep = kmalloc_usercopy_caches[i];
76685+ else
76686+#endif
76687+
76688 cachep = kmalloc_caches[i];
76689
76690 return kmem_cache_alloc_node_trace(cachep, flags, node, size);
76691diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
76692index f28e14a..7831211 100644
76693--- a/include/linux/slob_def.h
76694+++ b/include/linux/slob_def.h
76695@@ -11,7 +11,7 @@ static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
76696 return kmem_cache_alloc_node(cachep, flags, NUMA_NO_NODE);
76697 }
76698
76699-void *__kmalloc_node(size_t size, gfp_t flags, int node);
76700+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
76701
76702 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
76703 {
76704@@ -31,7 +31,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
76705 return __kmalloc_node(size, flags, NUMA_NO_NODE);
76706 }
76707
76708-static __always_inline void *__kmalloc(size_t size, gfp_t flags)
76709+static __always_inline __size_overflow(1) void *__kmalloc(size_t size, gfp_t flags)
76710 {
76711 return kmalloc(size, flags);
76712 }
76713diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
76714index 027276f..092bfe8 100644
76715--- a/include/linux/slub_def.h
76716+++ b/include/linux/slub_def.h
76717@@ -80,7 +80,7 @@ struct kmem_cache {
76718 struct kmem_cache_order_objects max;
76719 struct kmem_cache_order_objects min;
76720 gfp_t allocflags; /* gfp flags to use on each alloc */
76721- int refcount; /* Refcount for slab cache destroy */
76722+ atomic_t refcount; /* Refcount for slab cache destroy */
76723 void (*ctor)(void *);
76724 int inuse; /* Offset to metadata */
76725 int align; /* Alignment */
76726@@ -105,7 +105,7 @@ struct kmem_cache {
76727 };
76728
76729 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
76730-void *__kmalloc(size_t size, gfp_t flags);
76731+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
76732
76733 static __always_inline void *
76734 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
76735@@ -149,7 +149,7 @@ kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
76736 }
76737 #endif
76738
76739-static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
76740+static __always_inline __size_overflow(1) void *kmalloc_large(size_t size, gfp_t flags)
76741 {
76742 unsigned int order = get_order(size);
76743 return kmalloc_order_trace(size, flags, order);
76744@@ -175,7 +175,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
76745 }
76746
76747 #ifdef CONFIG_NUMA
76748-void *__kmalloc_node(size_t size, gfp_t flags, int node);
76749+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
76750 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
76751
76752 #ifdef CONFIG_TRACING
76753diff --git a/include/linux/smp.h b/include/linux/smp.h
76754index c848876..11e8a84 100644
76755--- a/include/linux/smp.h
76756+++ b/include/linux/smp.h
76757@@ -221,7 +221,9 @@ static inline void kick_all_cpus_sync(void) { }
76758 #endif
76759
76760 #define get_cpu() ({ preempt_disable(); smp_processor_id(); })
76761+#define raw_get_cpu() ({ raw_preempt_disable(); raw_smp_processor_id(); })
76762 #define put_cpu() preempt_enable()
76763+#define raw_put_cpu_no_resched() raw_preempt_enable_no_resched()
76764
76765 /*
76766 * Callback to arch code if there's nosmp or maxcpus=0 on the
76767diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
76768index 54f91d3..be2c379 100644
76769--- a/include/linux/sock_diag.h
76770+++ b/include/linux/sock_diag.h
76771@@ -11,7 +11,7 @@ struct sock;
76772 struct sock_diag_handler {
76773 __u8 family;
76774 int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
76775-};
76776+} __do_const;
76777
76778 int sock_diag_register(const struct sock_diag_handler *h);
76779 void sock_diag_unregister(const struct sock_diag_handler *h);
76780diff --git a/include/linux/sonet.h b/include/linux/sonet.h
76781index 680f9a3..f13aeb0 100644
76782--- a/include/linux/sonet.h
76783+++ b/include/linux/sonet.h
76784@@ -7,7 +7,7 @@
76785 #include <uapi/linux/sonet.h>
76786
76787 struct k_sonet_stats {
76788-#define __HANDLE_ITEM(i) atomic_t i
76789+#define __HANDLE_ITEM(i) atomic_unchecked_t i
76790 __SONET_ITEMS
76791 #undef __HANDLE_ITEM
76792 };
76793diff --git a/include/linux/sunrpc/addr.h b/include/linux/sunrpc/addr.h
76794index 07d8e53..dc934c9 100644
76795--- a/include/linux/sunrpc/addr.h
76796+++ b/include/linux/sunrpc/addr.h
76797@@ -23,9 +23,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
76798 {
76799 switch (sap->sa_family) {
76800 case AF_INET:
76801- return ntohs(((struct sockaddr_in *)sap)->sin_port);
76802+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
76803 case AF_INET6:
76804- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
76805+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
76806 }
76807 return 0;
76808 }
76809@@ -58,7 +58,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
76810 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
76811 const struct sockaddr *src)
76812 {
76813- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
76814+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
76815 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
76816
76817 dsin->sin_family = ssin->sin_family;
76818@@ -164,7 +164,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
76819 if (sa->sa_family != AF_INET6)
76820 return 0;
76821
76822- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
76823+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
76824 }
76825
76826 #endif /* _LINUX_SUNRPC_ADDR_H */
76827diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
76828index bfe11be..12bc8c4 100644
76829--- a/include/linux/sunrpc/clnt.h
76830+++ b/include/linux/sunrpc/clnt.h
76831@@ -96,7 +96,7 @@ struct rpc_procinfo {
76832 unsigned int p_timer; /* Which RTT timer to use */
76833 u32 p_statidx; /* Which procedure to account */
76834 const char * p_name; /* name of procedure */
76835-};
76836+} __do_const;
76837
76838 #ifdef __KERNEL__
76839
76840diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
76841index 1f0216b..6a4fa50 100644
76842--- a/include/linux/sunrpc/svc.h
76843+++ b/include/linux/sunrpc/svc.h
76844@@ -411,7 +411,7 @@ struct svc_procedure {
76845 unsigned int pc_count; /* call count */
76846 unsigned int pc_cachetype; /* cache info (NFS) */
76847 unsigned int pc_xdrressize; /* maximum size of XDR reply */
76848-};
76849+} __do_const;
76850
76851 /*
76852 * Function prototypes.
76853diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
76854index 0b8e3e6..33e0a01 100644
76855--- a/include/linux/sunrpc/svc_rdma.h
76856+++ b/include/linux/sunrpc/svc_rdma.h
76857@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
76858 extern unsigned int svcrdma_max_requests;
76859 extern unsigned int svcrdma_max_req_size;
76860
76861-extern atomic_t rdma_stat_recv;
76862-extern atomic_t rdma_stat_read;
76863-extern atomic_t rdma_stat_write;
76864-extern atomic_t rdma_stat_sq_starve;
76865-extern atomic_t rdma_stat_rq_starve;
76866-extern atomic_t rdma_stat_rq_poll;
76867-extern atomic_t rdma_stat_rq_prod;
76868-extern atomic_t rdma_stat_sq_poll;
76869-extern atomic_t rdma_stat_sq_prod;
76870+extern atomic_unchecked_t rdma_stat_recv;
76871+extern atomic_unchecked_t rdma_stat_read;
76872+extern atomic_unchecked_t rdma_stat_write;
76873+extern atomic_unchecked_t rdma_stat_sq_starve;
76874+extern atomic_unchecked_t rdma_stat_rq_starve;
76875+extern atomic_unchecked_t rdma_stat_rq_poll;
76876+extern atomic_unchecked_t rdma_stat_rq_prod;
76877+extern atomic_unchecked_t rdma_stat_sq_poll;
76878+extern atomic_unchecked_t rdma_stat_sq_prod;
76879
76880 #define RPCRDMA_VERSION 1
76881
76882diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
76883index ff374ab..7fd2ecb 100644
76884--- a/include/linux/sunrpc/svcauth.h
76885+++ b/include/linux/sunrpc/svcauth.h
76886@@ -109,7 +109,7 @@ struct auth_ops {
76887 int (*release)(struct svc_rqst *rq);
76888 void (*domain_release)(struct auth_domain *);
76889 int (*set_client)(struct svc_rqst *rq);
76890-};
76891+} __do_const;
76892
76893 #define SVC_GARBAGE 1
76894 #define SVC_SYSERR 2
76895diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
76896index a5ffd32..0935dea 100644
76897--- a/include/linux/swiotlb.h
76898+++ b/include/linux/swiotlb.h
76899@@ -60,7 +60,8 @@ extern void
76900
76901 extern void
76902 swiotlb_free_coherent(struct device *hwdev, size_t size,
76903- void *vaddr, dma_addr_t dma_handle);
76904+ void *vaddr, dma_addr_t dma_handle,
76905+ struct dma_attrs *attrs);
76906
76907 extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
76908 unsigned long offset, size_t size,
76909diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
76910index 84662ec..d8f8adb 100644
76911--- a/include/linux/syscalls.h
76912+++ b/include/linux/syscalls.h
76913@@ -97,8 +97,12 @@ struct sigaltstack;
76914 #define __MAP(n,...) __MAP##n(__VA_ARGS__)
76915
76916 #define __SC_DECL(t, a) t a
76917-#define __TYPE_IS_LL(t) (__same_type((t)0, 0LL) || __same_type((t)0, 0ULL))
76918-#define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a
76919+#define __TYPE_IS_SL(t) (__same_type((t)0, 0L))
76920+#define __TYPE_IS_UL(t) (__same_type((t)0, 0UL))
76921+#define __TYPE_IS_SLL(t) (__same_type((t)0, 0LL))
76922+#define __TYPE_IS_ULL(t) (__same_type((t)0, 0ULL))
76923+#define __TYPE_IS_LL(t) (__TYPE_IS_SLL(t) || __TYPE_IS_ULL(t))
76924+#define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), __builtin_choose_expr(__TYPE_IS_ULL(t), 0ULL, 0LL), __builtin_choose_expr(__TYPE_IS_UL(t), 0UL, 0L))) a
76925 #define __SC_CAST(t, a) (t) a
76926 #define __SC_ARGS(t, a) a
76927 #define __SC_TEST(t, a) (void)BUILD_BUG_ON_ZERO(!__TYPE_IS_LL(t) && sizeof(t) > sizeof(long))
76928@@ -362,11 +366,11 @@ asmlinkage long sys_sync(void);
76929 asmlinkage long sys_fsync(unsigned int fd);
76930 asmlinkage long sys_fdatasync(unsigned int fd);
76931 asmlinkage long sys_bdflush(int func, long data);
76932-asmlinkage long sys_mount(char __user *dev_name, char __user *dir_name,
76933- char __user *type, unsigned long flags,
76934+asmlinkage long sys_mount(const char __user *dev_name, const char __user *dir_name,
76935+ const char __user *type, unsigned long flags,
76936 void __user *data);
76937-asmlinkage long sys_umount(char __user *name, int flags);
76938-asmlinkage long sys_oldumount(char __user *name);
76939+asmlinkage long sys_umount(const char __user *name, int flags);
76940+asmlinkage long sys_oldumount(const char __user *name);
76941 asmlinkage long sys_truncate(const char __user *path, long length);
76942 asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
76943 asmlinkage long sys_stat(const char __user *filename,
76944@@ -578,7 +582,7 @@ asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *);
76945 asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
76946 asmlinkage long sys_send(int, void __user *, size_t, unsigned);
76947 asmlinkage long sys_sendto(int, void __user *, size_t, unsigned,
76948- struct sockaddr __user *, int);
76949+ struct sockaddr __user *, int) __intentional_overflow(0);
76950 asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags);
76951 asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
76952 unsigned int vlen, unsigned flags);
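[editor's note: the rewritten `__SC_LONG` preserves not just the width but also the signedness of a syscall argument when it is widened into a register-sized slot, so unsigned arguments are no longer funneled through a signed long. A userspace sketch of the same macro logic, assuming a GCC-compatible compiler (the builtins are GCC extensions):

#include <stdio.h>

#define __same_type(a, b) __builtin_types_compatible_p(__typeof__(a), __typeof__(b))
#define __TYPE_IS_ULL(t) (__same_type((t)0, 0ULL))
#define __TYPE_IS_LL(t)  (__same_type((t)0, 0LL) || __TYPE_IS_ULL(t))
#define __TYPE_IS_UL(t)  (__same_type((t)0, 0UL))
#define __SC_LONG(t)                                                    \
        __typeof__(__builtin_choose_expr(__TYPE_IS_LL(t),               \
                __builtin_choose_expr(__TYPE_IS_ULL(t), 0ULL, 0LL),     \
                __builtin_choose_expr(__TYPE_IS_UL(t), 0UL, 0L)))

int main(void)
{
        /* unsigned argument types now get an unsigned long-sized slot */
        printf("%d\n", (__SC_LONG(unsigned long))-1 > 0);  /* 1: unsigned */
        /* signed ones keep the old sign-extending behaviour */
        printf("%d\n", (__SC_LONG(int))-1 > 0);            /* 0: signed   */
        return 0;
}
]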
76953diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h
76954index 27b3b0b..e093dd9 100644
76955--- a/include/linux/syscore_ops.h
76956+++ b/include/linux/syscore_ops.h
76957@@ -16,7 +16,7 @@ struct syscore_ops {
76958 int (*suspend)(void);
76959 void (*resume)(void);
76960 void (*shutdown)(void);
76961-};
76962+} __do_const;
76963
76964 extern void register_syscore_ops(struct syscore_ops *ops);
76965 extern void unregister_syscore_ops(struct syscore_ops *ops);
76966diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
76967index 14a8ff2..af52bad 100644
76968--- a/include/linux/sysctl.h
76969+++ b/include/linux/sysctl.h
76970@@ -34,13 +34,13 @@ struct ctl_table_root;
76971 struct ctl_table_header;
76972 struct ctl_dir;
76973
76974-typedef struct ctl_table ctl_table;
76975-
76976 typedef int proc_handler (struct ctl_table *ctl, int write,
76977 void __user *buffer, size_t *lenp, loff_t *ppos);
76978
76979 extern int proc_dostring(struct ctl_table *, int,
76980 void __user *, size_t *, loff_t *);
76981+extern int proc_dostring_modpriv(struct ctl_table *, int,
76982+ void __user *, size_t *, loff_t *);
76983 extern int proc_dointvec(struct ctl_table *, int,
76984 void __user *, size_t *, loff_t *);
76985 extern int proc_dointvec_minmax(struct ctl_table *, int,
76986@@ -115,7 +115,9 @@ struct ctl_table
76987 struct ctl_table_poll *poll;
76988 void *extra1;
76989 void *extra2;
76990-};
76991+} __do_const;
76992+typedef struct ctl_table __no_const ctl_table_no_const;
76993+typedef struct ctl_table ctl_table;
76994
76995 struct ctl_node {
76996 struct rb_node node;
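[editor's note: this is the standard constify pairing used throughout this patch: the struct gains `__do_const`, and a `*_no_const` typedef is introduced as the opt-out for instances that genuinely must be written at run time (per-netns sysctl tables built on the fly, as in the ip_vs and conntrack hunks further down). A plain-C analogue with illustrative names:

#include <stdio.h>

struct table_entry {
        const char *procname;
        int *data;
};

/* with the constify plugin, plain instances all become read-only; the
 * __no_const typedef is the escape hatch for run-time-built tables */
typedef struct table_entry table_entry_no_const;

static int value = 7;
static const struct table_entry fixed = { "static_entry", &value };

static void bind_data(table_entry_no_const *t, int *data)
{
        t->data = data;         /* only legal on the mutable variant */
}

int main(void)
{
        table_entry_no_const dynamic = { "dynamic_entry", 0 };
        bind_data(&dynamic, &value);
        printf("%s=%d %s=%d\n", fixed.procname, *fixed.data,
               dynamic.procname, *dynamic.data);
        return 0;
}
]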
76997diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
76998index e2cee22..3ddb921 100644
76999--- a/include/linux/sysfs.h
77000+++ b/include/linux/sysfs.h
77001@@ -31,7 +31,8 @@ struct attribute {
77002 struct lock_class_key *key;
77003 struct lock_class_key skey;
77004 #endif
77005-};
77006+} __do_const;
77007+typedef struct attribute __no_const attribute_no_const;
77008
77009 /**
77010 * sysfs_attr_init - initialize a dynamically allocated sysfs attribute
77011@@ -59,8 +60,8 @@ struct attribute_group {
77012 umode_t (*is_visible)(struct kobject *,
77013 struct attribute *, int);
77014 struct attribute **attrs;
77015-};
77016-
77017+} __do_const;
77018+typedef struct attribute_group __no_const attribute_group_no_const;
77019
77020
77021 /**
77022@@ -107,7 +108,8 @@ struct bin_attribute {
77023 char *, loff_t, size_t);
77024 int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
77025 struct vm_area_struct *vma);
77026-};
77027+} __do_const;
77028+typedef struct bin_attribute __no_const bin_attribute_no_const;
77029
77030 /**
77031 * sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute
77032diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
77033index 7faf933..9b85a0c 100644
77034--- a/include/linux/sysrq.h
77035+++ b/include/linux/sysrq.h
77036@@ -16,6 +16,7 @@
77037
77038 #include <linux/errno.h>
77039 #include <linux/types.h>
77040+#include <linux/compiler.h>
77041
77042 /* Enable/disable SYSRQ support by default (0==no, 1==yes). */
77043 #define SYSRQ_DEFAULT_ENABLE 1
77044@@ -36,7 +37,7 @@ struct sysrq_key_op {
77045 char *help_msg;
77046 char *action_msg;
77047 int enable_mask;
77048-};
77049+} __do_const;
77050
77051 #ifdef CONFIG_MAGIC_SYSRQ
77052
77053diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
77054index e7e0473..7989295 100644
77055--- a/include/linux/thread_info.h
77056+++ b/include/linux/thread_info.h
77057@@ -148,6 +148,15 @@ static inline bool test_and_clear_restore_sigmask(void)
77058 #error "no set_restore_sigmask() provided and default one won't work"
77059 #endif
77060
77061+extern void __check_object_size(const void *ptr, unsigned long n, bool to_user);
77062+static inline void check_object_size(const void *ptr, unsigned long n, bool to_user)
77063+{
77064+#ifndef CONFIG_PAX_USERCOPY_DEBUG
77065+ if (!__builtin_constant_p(n))
77066+#endif
77067+ __check_object_size(ptr, n, to_user);
77068+}
77069+
77070 #endif /* __KERNEL__ */
77071
77072 #endif /* _LINUX_THREAD_INFO_H */
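[editor's note: `check_object_size()` is the PAX_USERCOPY entry point: for run-time-sized copies to or from userland it verifies that the kernel pointer stays within a single heap or stack object, while compile-time-constant sizes skip the call because the compiler can already bound those. A self-contained userspace sketch of the check, with `object_span()` standing in for the kernel's slab/stack lookup:

#include <stdio.h>

struct obj { char buf[32]; };

/* stands in for the kernel's heap/stack object lookup */
static unsigned long object_span(const struct obj *o)
{
        return sizeof(o->buf);
}

static int check_copy(const struct obj *o, unsigned long n)
{
        if (n > object_span(o)) {
                fprintf(stderr, "usercopy: copy of %lu exceeds object of %lu\n",
                        n, object_span(o));
                return -1;      /* the kernel reports and kills instead */
        }
        return 0;               /* copy may proceed */
}

int main(void)
{
        struct obj o;
        printf("%d\n", check_copy(&o, 16));     /* 0: within the object  */
        printf("%d\n", check_copy(&o, 64));     /* -1: would overflow it */
        return 0;
}
]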
77073diff --git a/include/linux/tty.h b/include/linux/tty.h
77074index 8780bd2..d1ae08b 100644
77075--- a/include/linux/tty.h
77076+++ b/include/linux/tty.h
77077@@ -194,7 +194,7 @@ struct tty_port {
77078 const struct tty_port_operations *ops; /* Port operations */
77079 spinlock_t lock; /* Lock protecting tty field */
77080 int blocked_open; /* Waiting to open */
77081- int count; /* Usage count */
77082+ atomic_t count; /* Usage count */
77083 wait_queue_head_t open_wait; /* Open waiters */
77084 wait_queue_head_t close_wait; /* Close waiters */
77085 wait_queue_head_t delta_msr_wait; /* Modem status change */
77086@@ -550,7 +550,7 @@ extern int tty_port_open(struct tty_port *port,
77087 struct tty_struct *tty, struct file *filp);
77088 static inline int tty_port_users(struct tty_port *port)
77089 {
77090- return port->count + port->blocked_open;
77091+ return atomic_read(&port->count) + port->blocked_open;
77092 }
77093
77094 extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
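[editor's note: converting `tty_port->count` from a plain int to `atomic_t` closes a lost-update race when two CPUs open or close the same port concurrently. A C11 sketch of why the plain counter is unreliable (compile with -std=c11 -pthread; the loop counts are illustrative):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static _Atomic int acount;      /* the atomic_t version */
static int pcount;              /* the old 'int count'  */

static void *opener(void *arg)
{
        (void)arg;
        for (int i = 0; i < 100000; i++) {
                atomic_fetch_add(&acount, 1);   /* never loses an increment */
                pcount++;                       /* racy: increments get lost */
        }
        return NULL;
}

int main(void)
{
        pthread_t t1, t2;
        pthread_create(&t1, NULL, opener, NULL);
        pthread_create(&t2, NULL, opener, NULL);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        /* atomic is exactly 200000; plain is usually less */
        printf("atomic=%d plain=%d\n", atomic_load(&acount), pcount);
        return 0;
}
]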
77095diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
77096index 756a609..b302dd6 100644
77097--- a/include/linux/tty_driver.h
77098+++ b/include/linux/tty_driver.h
77099@@ -285,7 +285,7 @@ struct tty_operations {
77100 void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
77101 #endif
77102 const struct file_operations *proc_fops;
77103-};
77104+} __do_const;
77105
77106 struct tty_driver {
77107 int magic; /* magic number for this structure */
77108diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
77109index 58390c7..95e214c 100644
77110--- a/include/linux/tty_ldisc.h
77111+++ b/include/linux/tty_ldisc.h
77112@@ -146,7 +146,7 @@ struct tty_ldisc_ops {
77113
77114 struct module *owner;
77115
77116- int refcount;
77117+ atomic_t refcount;
77118 };
77119
77120 struct tty_ldisc {
77121diff --git a/include/linux/types.h b/include/linux/types.h
77122index 4d118ba..c3ee9bf 100644
77123--- a/include/linux/types.h
77124+++ b/include/linux/types.h
77125@@ -176,10 +176,26 @@ typedef struct {
77126 int counter;
77127 } atomic_t;
77128
77129+#ifdef CONFIG_PAX_REFCOUNT
77130+typedef struct {
77131+ int counter;
77132+} atomic_unchecked_t;
77133+#else
77134+typedef atomic_t atomic_unchecked_t;
77135+#endif
77136+
77137 #ifdef CONFIG_64BIT
77138 typedef struct {
77139 long counter;
77140 } atomic64_t;
77141+
77142+#ifdef CONFIG_PAX_REFCOUNT
77143+typedef struct {
77144+ long counter;
77145+} atomic64_unchecked_t;
77146+#else
77147+typedef atomic64_t atomic64_unchecked_t;
77148+#endif
77149 #endif
77150
77151 struct list_head {
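[editor's note: the types.h hunk above is the core of the REFCOUNT feature: under CONFIG_PAX_REFCOUNT the ordinary atomic_t operations trap on signed overflow to stop reference-count-overflow exploits, and `atomic_unchecked_t` is the explicit opt-out for counters where wrap-around is harmless (statistics, IDs). A userspace sketch of the two behaviours, using the GCC 5+/clang overflow builtin as a portable stand-in for PaX's trapping instruction sequence on x86:

#include <stdio.h>
#include <stdlib.h>

typedef struct { int counter; } atomic_sketch_t;

/* checked flavour: overflow is detected and the operation trapped */
static void inc_checked(atomic_sketch_t *v)
{
        if (__builtin_add_overflow(v->counter, 1, &v->counter)) {
                fprintf(stderr, "refcount overflow caught\n");
                abort();        /* the kernel traps and kills the task */
        }
}

/* unchecked flavour: for statistics/IDs where wrap-around is harmless
 * (the kernel builds with -fno-strict-overflow, so wrap is defined there) */
static void inc_unchecked(atomic_sketch_t *v)
{
        v->counter++;
}

int main(void)
{
        atomic_sketch_t stat = { 0 };
        inc_unchecked(&stat);           /* no check, no trap */

        atomic_sketch_t ref = { 0x7ffffffe };
        inc_checked(&ref);              /* 0x7fffffff: still fine */
        printf("stat=%d ref=%d\n", stat.counter, ref.counter);
        inc_checked(&ref);              /* would overflow: aborts */
        return 0;
}
]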
77152diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
77153index 5ca0951..ab496a5 100644
77154--- a/include/linux/uaccess.h
77155+++ b/include/linux/uaccess.h
77156@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
77157 long ret; \
77158 mm_segment_t old_fs = get_fs(); \
77159 \
77160- set_fs(KERNEL_DS); \
77161 pagefault_disable(); \
77162- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
77163- pagefault_enable(); \
77164+ set_fs(KERNEL_DS); \
77165+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
77166 set_fs(old_fs); \
77167+ pagefault_enable(); \
77168 ret; \
77169 })
77170
77171diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
77172index 8e522cbc..aa8572d 100644
77173--- a/include/linux/uidgid.h
77174+++ b/include/linux/uidgid.h
77175@@ -197,4 +197,9 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
77176
77177 #endif /* CONFIG_USER_NS */
77178
77179+#define GR_GLOBAL_UID(x) from_kuid_munged(&init_user_ns, (x))
77180+#define GR_GLOBAL_GID(x) from_kgid_munged(&init_user_ns, (x))
77181+#define gr_is_global_root(x) uid_eq((x), GLOBAL_ROOT_UID)
77182+#define gr_is_global_nonroot(x) (!uid_eq((x), GLOBAL_ROOT_UID))
77183+
77184 #endif /* _LINUX_UIDGID_H */
77185diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
77186index 99c1b4d..562e6f3 100644
77187--- a/include/linux/unaligned/access_ok.h
77188+++ b/include/linux/unaligned/access_ok.h
77189@@ -4,34 +4,34 @@
77190 #include <linux/kernel.h>
77191 #include <asm/byteorder.h>
77192
77193-static inline u16 get_unaligned_le16(const void *p)
77194+static inline u16 __intentional_overflow(-1) get_unaligned_le16(const void *p)
77195 {
77196- return le16_to_cpup((__le16 *)p);
77197+ return le16_to_cpup((const __le16 *)p);
77198 }
77199
77200-static inline u32 get_unaligned_le32(const void *p)
77201+static inline u32 __intentional_overflow(-1) get_unaligned_le32(const void *p)
77202 {
77203- return le32_to_cpup((__le32 *)p);
77204+ return le32_to_cpup((const __le32 *)p);
77205 }
77206
77207-static inline u64 get_unaligned_le64(const void *p)
77208+static inline u64 __intentional_overflow(-1) get_unaligned_le64(const void *p)
77209 {
77210- return le64_to_cpup((__le64 *)p);
77211+ return le64_to_cpup((const __le64 *)p);
77212 }
77213
77214-static inline u16 get_unaligned_be16(const void *p)
77215+static inline u16 __intentional_overflow(-1) get_unaligned_be16(const void *p)
77216 {
77217- return be16_to_cpup((__be16 *)p);
77218+ return be16_to_cpup((const __be16 *)p);
77219 }
77220
77221-static inline u32 get_unaligned_be32(const void *p)
77222+static inline u32 __intentional_overflow(-1) get_unaligned_be32(const void *p)
77223 {
77224- return be32_to_cpup((__be32 *)p);
77225+ return be32_to_cpup((const __be32 *)p);
77226 }
77227
77228-static inline u64 get_unaligned_be64(const void *p)
77229+static inline u64 __intentional_overflow(-1) get_unaligned_be64(const void *p)
77230 {
77231- return be64_to_cpup((__be64 *)p);
77232+ return be64_to_cpup((const __be64 *)p);
77233 }
77234
77235 static inline void put_unaligned_le16(u16 val, void *p)
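[editor's note: the access_ok.h hunk only adds const-correctness and `__intentional_overflow(-1)`, which exempts these byteswap-style helpers from the size_overflow plugin's instrumentation; behaviour is unchanged. For reference, a portable byte-wise sketch of what a little-endian read does (the access_ok.h variants instead dereference directly, which is why they live behind the "unaligned access is OK" arch opt-in):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* byte-wise read: correct on any host endianness or alignment */
static uint32_t get_le32_sketch(const void *p)
{
        const uint8_t *b = p;
        return (uint32_t)b[0] | (uint32_t)b[1] << 8 |
               (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
}

int main(void)
{
        const uint8_t wire[4] = { 0x12, 0x34, 0x56, 0x78 };
        printf("0x%08" PRIx32 "\n", get_le32_sketch(wire)); /* 0x78563412 */
        return 0;
}
]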
77236diff --git a/include/linux/usb.h b/include/linux/usb.h
77237index a0bee5a..5533a52 100644
77238--- a/include/linux/usb.h
77239+++ b/include/linux/usb.h
77240@@ -552,7 +552,7 @@ struct usb_device {
77241 int maxchild;
77242
77243 u32 quirks;
77244- atomic_t urbnum;
77245+ atomic_unchecked_t urbnum;
77246
77247 unsigned long active_duration;
77248
77249@@ -1607,7 +1607,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
77250
77251 extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
77252 __u8 request, __u8 requesttype, __u16 value, __u16 index,
77253- void *data, __u16 size, int timeout);
77254+ void *data, __u16 size, int timeout) __intentional_overflow(-1);
77255 extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
77256 void *data, int len, int *actual_length, int timeout);
77257 extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
77258diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
77259index e452ba6..78f8e80 100644
77260--- a/include/linux/usb/renesas_usbhs.h
77261+++ b/include/linux/usb/renesas_usbhs.h
77262@@ -39,7 +39,7 @@ enum {
77263 */
77264 struct renesas_usbhs_driver_callback {
77265 int (*notify_hotplug)(struct platform_device *pdev);
77266-};
77267+} __no_const;
77268
77269 /*
77270 * callback functions for platform
77271diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
77272index 6f8fbcf..8259001 100644
77273--- a/include/linux/vermagic.h
77274+++ b/include/linux/vermagic.h
77275@@ -25,9 +25,35 @@
77276 #define MODULE_ARCH_VERMAGIC ""
77277 #endif
77278
77279+#ifdef CONFIG_PAX_REFCOUNT
77280+#define MODULE_PAX_REFCOUNT "REFCOUNT "
77281+#else
77282+#define MODULE_PAX_REFCOUNT ""
77283+#endif
77284+
77285+#ifdef CONSTIFY_PLUGIN
77286+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
77287+#else
77288+#define MODULE_CONSTIFY_PLUGIN ""
77289+#endif
77290+
77291+#ifdef STACKLEAK_PLUGIN
77292+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
77293+#else
77294+#define MODULE_STACKLEAK_PLUGIN ""
77295+#endif
77296+
77297+#ifdef CONFIG_GRKERNSEC
77298+#define MODULE_GRSEC "GRSEC "
77299+#else
77300+#define MODULE_GRSEC ""
77301+#endif
77302+
77303 #define VERMAGIC_STRING \
77304 UTS_RELEASE " " \
77305 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
77306 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
77307- MODULE_ARCH_VERMAGIC
77308+ MODULE_ARCH_VERMAGIC \
77309+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
77310+ MODULE_GRSEC
77311
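[editor's note: vermagic is compile-time string pasting, and the module loader compares it byte-for-byte between kernel and module, so appending feature tags makes modules built without the same hardening options refuse to load. A sketch of the resulting concatenation, with made-up values:

#include <stdio.h>

/* illustrative values only */
#define UTS_RELEASE "3.10.9-grsec"
#define MODULE_VERMAGIC_SMP "SMP "
#define MODULE_PAX_REFCOUNT "REFCOUNT "
#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
#define MODULE_GRSEC "GRSEC "

#define VERMAGIC_STRING UTS_RELEASE " " MODULE_VERMAGIC_SMP \
        MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_GRSEC

int main(void)
{
        /* insmod rejects the module unless both sides carry the same tags */
        puts(VERMAGIC_STRING);
        return 0;
}
]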
77312diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
77313index 7d5773a..541c01c 100644
77314--- a/include/linux/vmalloc.h
77315+++ b/include/linux/vmalloc.h
77316@@ -16,6 +16,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
77317 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
77318 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
77319 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
77320+
77321+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
77322+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
77323+#endif
77324+
77325 /* bits [20..32] reserved for arch specific ioremap internals */
77326
77327 /*
77328@@ -75,7 +80,7 @@ extern void *vmalloc_32_user(unsigned long size);
77329 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
77330 extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
77331 unsigned long start, unsigned long end, gfp_t gfp_mask,
77332- pgprot_t prot, int node, const void *caller);
77333+ pgprot_t prot, int node, const void *caller) __size_overflow(1);
77334 extern void vfree(const void *addr);
77335
77336 extern void *vmap(struct page **pages, unsigned int count,
77337@@ -137,8 +142,8 @@ extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
77338 extern void free_vm_area(struct vm_struct *area);
77339
77340 /* for /dev/kmem */
77341-extern long vread(char *buf, char *addr, unsigned long count);
77342-extern long vwrite(char *buf, char *addr, unsigned long count);
77343+extern long vread(char *buf, char *addr, unsigned long count) __size_overflow(3);
77344+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
77345
77346 /*
77347 * Internals. Dont't use..
77348diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
77349index c586679..f06b389 100644
77350--- a/include/linux/vmstat.h
77351+++ b/include/linux/vmstat.h
77352@@ -90,18 +90,18 @@ static inline void vm_events_fold_cpu(int cpu)
77353 /*
77354 * Zone based page accounting with per cpu differentials.
77355 */
77356-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
77357+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
77358
77359 static inline void zone_page_state_add(long x, struct zone *zone,
77360 enum zone_stat_item item)
77361 {
77362- atomic_long_add(x, &zone->vm_stat[item]);
77363- atomic_long_add(x, &vm_stat[item]);
77364+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
77365+ atomic_long_add_unchecked(x, &vm_stat[item]);
77366 }
77367
77368 static inline unsigned long global_page_state(enum zone_stat_item item)
77369 {
77370- long x = atomic_long_read(&vm_stat[item]);
77371+ long x = atomic_long_read_unchecked(&vm_stat[item]);
77372 #ifdef CONFIG_SMP
77373 if (x < 0)
77374 x = 0;
77375@@ -112,7 +112,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
77376 static inline unsigned long zone_page_state(struct zone *zone,
77377 enum zone_stat_item item)
77378 {
77379- long x = atomic_long_read(&zone->vm_stat[item]);
77380+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
77381 #ifdef CONFIG_SMP
77382 if (x < 0)
77383 x = 0;
77384@@ -129,7 +129,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
77385 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
77386 enum zone_stat_item item)
77387 {
77388- long x = atomic_long_read(&zone->vm_stat[item]);
77389+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
77390
77391 #ifdef CONFIG_SMP
77392 int cpu;
77393@@ -221,8 +221,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
77394
77395 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
77396 {
77397- atomic_long_inc(&zone->vm_stat[item]);
77398- atomic_long_inc(&vm_stat[item]);
77399+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
77400+ atomic_long_inc_unchecked(&vm_stat[item]);
77401 }
77402
77403 static inline void __inc_zone_page_state(struct page *page,
77404@@ -233,8 +233,8 @@ static inline void __inc_zone_page_state(struct page *page,
77405
77406 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
77407 {
77408- atomic_long_dec(&zone->vm_stat[item]);
77409- atomic_long_dec(&vm_stat[item]);
77410+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
77411+ atomic_long_dec_unchecked(&vm_stat[item]);
77412 }
77413
77414 static inline void __dec_zone_page_state(struct page *page,
77415diff --git a/include/linux/xattr.h b/include/linux/xattr.h
77416index fdbafc6..49dfe4f 100644
77417--- a/include/linux/xattr.h
77418+++ b/include/linux/xattr.h
77419@@ -28,7 +28,7 @@ struct xattr_handler {
77420 size_t size, int handler_flags);
77421 int (*set)(struct dentry *dentry, const char *name, const void *buffer,
77422 size_t size, int flags, int handler_flags);
77423-};
77424+} __do_const;
77425
77426 struct xattr {
77427 char *name;
77428@@ -37,6 +37,9 @@ struct xattr {
77429 };
77430
77431 ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
77432+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
77433+ssize_t pax_getxattr(struct dentry *, void *, size_t);
77434+#endif
77435 ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
77436 ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
77437 int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
77438diff --git a/include/linux/zlib.h b/include/linux/zlib.h
77439index 9c5a6b4..09c9438 100644
77440--- a/include/linux/zlib.h
77441+++ b/include/linux/zlib.h
77442@@ -31,6 +31,7 @@
77443 #define _ZLIB_H
77444
77445 #include <linux/zconf.h>
77446+#include <linux/compiler.h>
77447
77448 /* zlib deflate based on ZLIB_VERSION "1.1.3" */
77449 /* zlib inflate based on ZLIB_VERSION "1.2.3" */
77450@@ -179,7 +180,7 @@ typedef z_stream *z_streamp;
77451
77452 /* basic functions */
77453
77454-extern int zlib_deflate_workspacesize (int windowBits, int memLevel);
77455+extern int zlib_deflate_workspacesize (int windowBits, int memLevel) __intentional_overflow(0);
77456 /*
77457 Returns the number of bytes that needs to be allocated for a per-
77458 stream workspace with the specified parameters. A pointer to this
77459diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
77460index 95d1c91..6798cca 100644
77461--- a/include/media/v4l2-dev.h
77462+++ b/include/media/v4l2-dev.h
77463@@ -76,7 +76,7 @@ struct v4l2_file_operations {
77464 int (*mmap) (struct file *, struct vm_area_struct *);
77465 int (*open) (struct file *);
77466 int (*release) (struct file *);
77467-};
77468+} __do_const;
77469
77470 /*
77471 * Newer version of video_device, handled by videodev2.c
77472diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
77473index adcbb20..62c2559 100644
77474--- a/include/net/9p/transport.h
77475+++ b/include/net/9p/transport.h
77476@@ -57,7 +57,7 @@ struct p9_trans_module {
77477 int (*cancel) (struct p9_client *, struct p9_req_t *req);
77478 int (*zc_request)(struct p9_client *, struct p9_req_t *,
77479 char *, char *, int , int, int, int);
77480-};
77481+} __do_const;
77482
77483 void v9fs_register_trans(struct p9_trans_module *m);
77484 void v9fs_unregister_trans(struct p9_trans_module *m);
77485diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
77486index fb94cf1..7c0c987 100644
77487--- a/include/net/bluetooth/l2cap.h
77488+++ b/include/net/bluetooth/l2cap.h
77489@@ -551,7 +551,7 @@ struct l2cap_ops {
77490 void (*defer) (struct l2cap_chan *chan);
77491 struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
77492 unsigned long len, int nb);
77493-};
77494+} __do_const;
77495
77496 struct l2cap_conn {
77497 struct hci_conn *hcon;
77498diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
77499index f2ae33d..c457cf0 100644
77500--- a/include/net/caif/cfctrl.h
77501+++ b/include/net/caif/cfctrl.h
77502@@ -52,7 +52,7 @@ struct cfctrl_rsp {
77503 void (*radioset_rsp)(void);
77504 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
77505 struct cflayer *client_layer);
77506-};
77507+} __no_const;
77508
77509 /* Link Setup Parameters for CAIF-Links. */
77510 struct cfctrl_link_param {
77511@@ -101,8 +101,8 @@ struct cfctrl_request_info {
77512 struct cfctrl {
77513 struct cfsrvl serv;
77514 struct cfctrl_rsp res;
77515- atomic_t req_seq_no;
77516- atomic_t rsp_seq_no;
77517+ atomic_unchecked_t req_seq_no;
77518+ atomic_unchecked_t rsp_seq_no;
77519 struct list_head list;
77520 /* Protects from simultaneous access to first_req list */
77521 spinlock_t info_list_lock;
77522diff --git a/include/net/flow.h b/include/net/flow.h
77523index 628e11b..4c475df 100644
77524--- a/include/net/flow.h
77525+++ b/include/net/flow.h
77526@@ -221,6 +221,6 @@ extern struct flow_cache_object *flow_cache_lookup(
77527
77528 extern void flow_cache_flush(void);
77529 extern void flow_cache_flush_deferred(void);
77530-extern atomic_t flow_cache_genid;
77531+extern atomic_unchecked_t flow_cache_genid;
77532
77533 #endif
77534diff --git a/include/net/genetlink.h b/include/net/genetlink.h
77535index 93024a4..eeb6b6e 100644
77536--- a/include/net/genetlink.h
77537+++ b/include/net/genetlink.h
77538@@ -119,7 +119,7 @@ struct genl_ops {
77539 struct netlink_callback *cb);
77540 int (*done)(struct netlink_callback *cb);
77541 struct list_head ops_list;
77542-};
77543+} __do_const;
77544
77545 extern int genl_register_family(struct genl_family *family);
77546 extern int genl_register_family_with_ops(struct genl_family *family,
77547diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
77548index 734d9b5..48a9a4b 100644
77549--- a/include/net/gro_cells.h
77550+++ b/include/net/gro_cells.h
77551@@ -29,7 +29,7 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
77552 cell += skb_get_rx_queue(skb) & gcells->gro_cells_mask;
77553
77554 if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
77555- atomic_long_inc(&dev->rx_dropped);
77556+ atomic_long_inc_unchecked(&dev->rx_dropped);
77557 kfree_skb(skb);
77558 return;
77559 }
77560diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
77561index de2c785..0588a6b 100644
77562--- a/include/net/inet_connection_sock.h
77563+++ b/include/net/inet_connection_sock.h
77564@@ -62,7 +62,7 @@ struct inet_connection_sock_af_ops {
77565 void (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
77566 int (*bind_conflict)(const struct sock *sk,
77567 const struct inet_bind_bucket *tb, bool relax);
77568-};
77569+} __do_const;
77570
77571 /** inet_connection_sock - INET connection oriented sock
77572 *
77573diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
77574index 53f464d..ba76aaa 100644
77575--- a/include/net/inetpeer.h
77576+++ b/include/net/inetpeer.h
77577@@ -47,8 +47,8 @@ struct inet_peer {
77578 */
77579 union {
77580 struct {
77581- atomic_t rid; /* Frag reception counter */
77582- atomic_t ip_id_count; /* IP ID for the next packet */
77583+ atomic_unchecked_t rid; /* Frag reception counter */
77584+ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
77585 };
77586 struct rcu_head rcu;
77587 struct inet_peer *gc_next;
77588@@ -182,11 +182,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
77589 more++;
77590 inet_peer_refcheck(p);
77591 do {
77592- old = atomic_read(&p->ip_id_count);
77593+ old = atomic_read_unchecked(&p->ip_id_count);
77594 new = old + more;
77595 if (!new)
77596 new = 1;
77597- } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
77598+ } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
77599 return new;
77600 }
77601
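[editor's note: inet_getid() above is an ordinary compare-and-swap retry loop; switching to the `_unchecked` accessors only drops the REFCOUNT overflow trap, since IP IDs are meant to wrap, and the concurrency logic is untouched. A C11 userspace rendering of the same pattern:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic int ip_id_count;

static int get_id(int more)
{
        int old, new;
        do {
                old = atomic_load(&ip_id_count);
                new = old + more;
                if (!new)
                        new = 1;        /* the kernel skips 0 the same way */
        } while (!atomic_compare_exchange_weak(&ip_id_count, &old, new));
        return new;
}

int main(void)
{
        int a = get_id(1);
        int b = get_id(2);
        printf("%d %d\n", a, b);        /* 1 3 */
        return 0;
}
]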
77602diff --git a/include/net/ip.h b/include/net/ip.h
77603index a68f838..74518ab 100644
77604--- a/include/net/ip.h
77605+++ b/include/net/ip.h
77606@@ -202,7 +202,7 @@ extern struct local_ports {
77607 } sysctl_local_ports;
77608 extern void inet_get_local_port_range(int *low, int *high);
77609
77610-extern unsigned long *sysctl_local_reserved_ports;
77611+extern unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)];
77612 static inline int inet_is_reserved_local_port(int port)
77613 {
77614 return test_bit(port, sysctl_local_reserved_ports);
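[editor's note: replacing the pointer with a fixed-size array removes a writable kernel pointer an attacker could redirect: 65536 ports at one bit each is an 8 KiB bitmap that can be allocated statically. A userspace sketch of the same bitmap arithmetic (function names are illustrative):

#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

/* 65536 ports / 8 bits per byte = 8 KiB, statically sized */
static unsigned long reserved_ports[65536 / BITS_PER_LONG];

static void reserve_port(unsigned port)
{
        reserved_ports[port / BITS_PER_LONG] |= 1UL << (port % BITS_PER_LONG);
}

static int port_is_reserved(unsigned port)
{
        return !!(reserved_ports[port / BITS_PER_LONG] &
                  (1UL << (port % BITS_PER_LONG)));
}

int main(void)
{
        reserve_port(8080);
        printf("%d %d\n", port_is_reserved(8080), port_is_reserved(22)); /* 1 0 */
        return 0;
}
]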
77615diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
77616index e49db91..76a81de 100644
77617--- a/include/net/ip_fib.h
77618+++ b/include/net/ip_fib.h
77619@@ -167,7 +167,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
77620
77621 #define FIB_RES_SADDR(net, res) \
77622 ((FIB_RES_NH(res).nh_saddr_genid == \
77623- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
77624+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
77625 FIB_RES_NH(res).nh_saddr : \
77626 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
77627 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
77628diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
77629index 4c062cc..3562c31 100644
77630--- a/include/net/ip_vs.h
77631+++ b/include/net/ip_vs.h
77632@@ -612,7 +612,7 @@ struct ip_vs_conn {
77633 struct ip_vs_conn *control; /* Master control connection */
77634 atomic_t n_control; /* Number of controlled ones */
77635 struct ip_vs_dest *dest; /* real server */
77636- atomic_t in_pkts; /* incoming packet counter */
77637+ atomic_unchecked_t in_pkts; /* incoming packet counter */
77638
77639 /* packet transmitter for different forwarding methods. If it
77640 mangles the packet, it must return NF_DROP or better NF_STOLEN,
77641@@ -761,7 +761,7 @@ struct ip_vs_dest {
77642 __be16 port; /* port number of the server */
77643 union nf_inet_addr addr; /* IP address of the server */
77644 volatile unsigned int flags; /* dest status flags */
77645- atomic_t conn_flags; /* flags to copy to conn */
77646+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
77647 atomic_t weight; /* server weight */
77648
77649 atomic_t refcnt; /* reference counter */
77650@@ -1013,11 +1013,11 @@ struct netns_ipvs {
77651 /* ip_vs_lblc */
77652 int sysctl_lblc_expiration;
77653 struct ctl_table_header *lblc_ctl_header;
77654- struct ctl_table *lblc_ctl_table;
77655+ ctl_table_no_const *lblc_ctl_table;
77656 /* ip_vs_lblcr */
77657 int sysctl_lblcr_expiration;
77658 struct ctl_table_header *lblcr_ctl_header;
77659- struct ctl_table *lblcr_ctl_table;
77660+ ctl_table_no_const *lblcr_ctl_table;
77661 /* ip_vs_est */
77662 struct list_head est_list; /* estimator list */
77663 spinlock_t est_lock;
77664diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
77665index 80ffde3..968b0f4 100644
77666--- a/include/net/irda/ircomm_tty.h
77667+++ b/include/net/irda/ircomm_tty.h
77668@@ -35,6 +35,7 @@
77669 #include <linux/termios.h>
77670 #include <linux/timer.h>
77671 #include <linux/tty.h> /* struct tty_struct */
77672+#include <asm/local.h>
77673
77674 #include <net/irda/irias_object.h>
77675 #include <net/irda/ircomm_core.h>
77676diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
77677index 714cc9a..ea05f3e 100644
77678--- a/include/net/iucv/af_iucv.h
77679+++ b/include/net/iucv/af_iucv.h
77680@@ -149,7 +149,7 @@ struct iucv_skb_cb {
77681 struct iucv_sock_list {
77682 struct hlist_head head;
77683 rwlock_t lock;
77684- atomic_t autobind_name;
77685+ atomic_unchecked_t autobind_name;
77686 };
77687
77688 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
77689diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
77690index df83f69..9b640b8 100644
77691--- a/include/net/llc_c_ac.h
77692+++ b/include/net/llc_c_ac.h
77693@@ -87,7 +87,7 @@
77694 #define LLC_CONN_AC_STOP_SENDACK_TMR 70
77695 #define LLC_CONN_AC_START_SENDACK_TMR_IF_NOT_RUNNING 71
77696
77697-typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
77698+typedef int (* const llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
77699
77700 extern int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
77701 extern int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
77702diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
77703index 6ca3113..f8026dd 100644
77704--- a/include/net/llc_c_ev.h
77705+++ b/include/net/llc_c_ev.h
77706@@ -125,8 +125,8 @@ static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
77707 return (struct llc_conn_state_ev *)skb->cb;
77708 }
77709
77710-typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
77711-typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
77712+typedef int (* const llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
77713+typedef int (* const llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
77714
77715 extern int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
77716 extern int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
77717diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
77718index 0e79cfb..f46db31 100644
77719--- a/include/net/llc_c_st.h
77720+++ b/include/net/llc_c_st.h
77721@@ -37,7 +37,7 @@ struct llc_conn_state_trans {
77722 u8 next_state;
77723 llc_conn_ev_qfyr_t *ev_qualifiers;
77724 llc_conn_action_t *ev_actions;
77725-};
77726+} __do_const;
77727
77728 struct llc_conn_state {
77729 u8 current_state;
77730diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h
77731index 37a3bbd..55a4241 100644
77732--- a/include/net/llc_s_ac.h
77733+++ b/include/net/llc_s_ac.h
77734@@ -23,7 +23,7 @@
77735 #define SAP_ACT_TEST_IND 9
77736
77737 /* All action functions must look like this */
77738-typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
77739+typedef int (* const llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
77740
77741 extern int llc_sap_action_unitdata_ind(struct llc_sap *sap,
77742 struct sk_buff *skb);
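[editor's note: putting `const` inside the function-pointer typedef is the constify idea applied to the LLC state machines: every action and event table built from these typedefs becomes read-only. A minimal sketch (names illustrative):

#include <stdio.h>

/* 'const' inside the typedef makes every pointer of this type immutable */
typedef int (* const action_t)(int);

static int act_inc(int x) { return x + 1; }
static int act_dbl(int x) { return x * 2; }

/* the whole dispatch table lands in .rodata;
 * 'actions[0] = evil;' would be a compile error */
static action_t actions[] = { act_inc, act_dbl };

int main(void)
{
        printf("%d %d\n", actions[0](1), actions[1](3));   /* 2 6 */
        return 0;
}
]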
77743diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
77744index 567c681..cd73ac0 100644
77745--- a/include/net/llc_s_st.h
77746+++ b/include/net/llc_s_st.h
77747@@ -20,7 +20,7 @@ struct llc_sap_state_trans {
77748 llc_sap_ev_t ev;
77749 u8 next_state;
77750 llc_sap_action_t *ev_actions;
77751-};
77752+} __do_const;
77753
77754 struct llc_sap_state {
77755 u8 curr_state;
77756diff --git a/include/net/mac80211.h b/include/net/mac80211.h
77757index 885898a..cdace34 100644
77758--- a/include/net/mac80211.h
77759+++ b/include/net/mac80211.h
77760@@ -4205,7 +4205,7 @@ struct rate_control_ops {
77761 void (*add_sta_debugfs)(void *priv, void *priv_sta,
77762 struct dentry *dir);
77763 void (*remove_sta_debugfs)(void *priv, void *priv_sta);
77764-};
77765+} __do_const;
77766
77767 static inline int rate_supported(struct ieee80211_sta *sta,
77768 enum ieee80211_band band,
77769diff --git a/include/net/neighbour.h b/include/net/neighbour.h
77770index 7e748ad..5c6229b 100644
77771--- a/include/net/neighbour.h
77772+++ b/include/net/neighbour.h
77773@@ -123,7 +123,7 @@ struct neigh_ops {
77774 void (*error_report)(struct neighbour *, struct sk_buff *);
77775 int (*output)(struct neighbour *, struct sk_buff *);
77776 int (*connected_output)(struct neighbour *, struct sk_buff *);
77777-};
77778+} __do_const;
77779
77780 struct pneigh_entry {
77781 struct pneigh_entry *next;
77782diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
77783index b176978..ea169f4 100644
77784--- a/include/net/net_namespace.h
77785+++ b/include/net/net_namespace.h
77786@@ -117,7 +117,7 @@ struct net {
77787 #endif
77788 struct netns_ipvs *ipvs;
77789 struct sock *diag_nlsk;
77790- atomic_t rt_genid;
77791+ atomic_unchecked_t rt_genid;
77792 };
77793
77794 /*
77795@@ -274,7 +274,11 @@ static inline struct net *read_pnet(struct net * const *pnet)
77796 #define __net_init __init
77797 #define __net_exit __exit_refok
77798 #define __net_initdata __initdata
77799+#ifdef CONSTIFY_PLUGIN
77800 #define __net_initconst __initconst
77801+#else
77802+#define __net_initconst __initdata
77803+#endif
77804 #endif
77805
77806 struct pernet_operations {
77807@@ -284,7 +288,7 @@ struct pernet_operations {
77808 void (*exit_batch)(struct list_head *net_exit_list);
77809 int *id;
77810 size_t size;
77811-};
77812+} __do_const;
77813
77814 /*
77815 * Use these carefully. If you implement a network device and it
77816@@ -332,12 +336,12 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
77817
77818 static inline int rt_genid(struct net *net)
77819 {
77820- return atomic_read(&net->rt_genid);
77821+ return atomic_read_unchecked(&net->rt_genid);
77822 }
77823
77824 static inline void rt_genid_bump(struct net *net)
77825 {
77826- atomic_inc(&net->rt_genid);
77827+ atomic_inc_unchecked(&net->rt_genid);
77828 }
77829
77830 #endif /* __NET_NET_NAMESPACE_H */
77831diff --git a/include/net/netdma.h b/include/net/netdma.h
77832index 8ba8ce2..99b7fff 100644
77833--- a/include/net/netdma.h
77834+++ b/include/net/netdma.h
77835@@ -24,7 +24,7 @@
77836 #include <linux/dmaengine.h>
77837 #include <linux/skbuff.h>
77838
77839-int dma_skb_copy_datagram_iovec(struct dma_chan* chan,
77840+int __intentional_overflow(3,5) dma_skb_copy_datagram_iovec(struct dma_chan* chan,
77841 struct sk_buff *skb, int offset, struct iovec *to,
77842 size_t len, struct dma_pinned_list *pinned_list);
77843
77844diff --git a/include/net/netlink.h b/include/net/netlink.h
77845index 9690b0f..87aded7 100644
77846--- a/include/net/netlink.h
77847+++ b/include/net/netlink.h
77848@@ -534,7 +534,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
77849 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
77850 {
77851 if (mark)
77852- skb_trim(skb, (unsigned char *) mark - skb->data);
77853+ skb_trim(skb, (const unsigned char *) mark - skb->data);
77854 }
77855
77856 /**
77857diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
77858index c9c0c53..53f24c3 100644
77859--- a/include/net/netns/conntrack.h
77860+++ b/include/net/netns/conntrack.h
77861@@ -12,10 +12,10 @@ struct nf_conntrack_ecache;
77862 struct nf_proto_net {
77863 #ifdef CONFIG_SYSCTL
77864 struct ctl_table_header *ctl_table_header;
77865- struct ctl_table *ctl_table;
77866+ ctl_table_no_const *ctl_table;
77867 #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
77868 struct ctl_table_header *ctl_compat_header;
77869- struct ctl_table *ctl_compat_table;
77870+ ctl_table_no_const *ctl_compat_table;
77871 #endif
77872 #endif
77873 unsigned int users;
77874@@ -58,7 +58,7 @@ struct nf_ip_net {
77875 struct nf_icmp_net icmpv6;
77876 #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
77877 struct ctl_table_header *ctl_table_header;
77878- struct ctl_table *ctl_table;
77879+ ctl_table_no_const *ctl_table;
77880 #endif
77881 };
77882
77883diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
77884index 2ba9de8..47bd6c7 100644
77885--- a/include/net/netns/ipv4.h
77886+++ b/include/net/netns/ipv4.h
77887@@ -67,7 +67,7 @@ struct netns_ipv4 {
77888 kgid_t sysctl_ping_group_range[2];
77889 long sysctl_tcp_mem[3];
77890
77891- atomic_t dev_addr_genid;
77892+ atomic_unchecked_t dev_addr_genid;
77893
77894 #ifdef CONFIG_IP_MROUTE
77895 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
77896diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
77897index 005e2c2..023d340 100644
77898--- a/include/net/netns/ipv6.h
77899+++ b/include/net/netns/ipv6.h
77900@@ -71,7 +71,7 @@ struct netns_ipv6 {
77901 struct fib_rules_ops *mr6_rules_ops;
77902 #endif
77903 #endif
77904- atomic_t dev_addr_genid;
77905+ atomic_unchecked_t dev_addr_genid;
77906 };
77907
77908 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
77909diff --git a/include/net/protocol.h b/include/net/protocol.h
77910index 047c047..b9dad15 100644
77911--- a/include/net/protocol.h
77912+++ b/include/net/protocol.h
77913@@ -44,7 +44,7 @@ struct net_protocol {
77914 void (*err_handler)(struct sk_buff *skb, u32 info);
77915 unsigned int no_policy:1,
77916 netns_ok:1;
77917-};
77918+} __do_const;
77919
77920 #if IS_ENABLED(CONFIG_IPV6)
77921 struct inet6_protocol {
77922@@ -57,7 +57,7 @@ struct inet6_protocol {
77923 u8 type, u8 code, int offset,
77924 __be32 info);
77925 unsigned int flags; /* INET6_PROTO_xxx */
77926-};
77927+} __do_const;
77928
77929 #define INET6_PROTO_NOPOLICY 0x1
77930 #define INET6_PROTO_FINAL 0x2
77931diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
77932index 7026648..584cc8c 100644
77933--- a/include/net/rtnetlink.h
77934+++ b/include/net/rtnetlink.h
77935@@ -81,7 +81,7 @@ struct rtnl_link_ops {
77936 const struct net_device *dev);
77937 unsigned int (*get_num_tx_queues)(void);
77938 unsigned int (*get_num_rx_queues)(void);
77939-};
77940+} __do_const;
77941
77942 extern int __rtnl_link_register(struct rtnl_link_ops *ops);
77943 extern void __rtnl_link_unregister(struct rtnl_link_ops *ops);
77944diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
77945index cd89510..d67810f 100644
77946--- a/include/net/sctp/sctp.h
77947+++ b/include/net/sctp/sctp.h
77948@@ -330,9 +330,9 @@ do { \
77949
77950 #else /* SCTP_DEBUG */
77951
77952-#define SCTP_DEBUG_PRINTK(whatever...)
77953-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
77954-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
77955+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
77956+#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
77957+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
77958 #define SCTP_ENABLE_DEBUG
77959 #define SCTP_DISABLE_DEBUG
77960 #define SCTP_ASSERT(expr, str, func)
77961diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
77962index 2a82d13..62a31c2 100644
77963--- a/include/net/sctp/sm.h
77964+++ b/include/net/sctp/sm.h
77965@@ -87,7 +87,7 @@ typedef void (sctp_timer_event_t) (unsigned long);
77966 typedef struct {
77967 sctp_state_fn_t *fn;
77968 const char *name;
77969-} sctp_sm_table_entry_t;
77970+} __do_const sctp_sm_table_entry_t;
77971
77972 /* A naming convention of "sctp_sf_xxx" applies to all the state functions
77973 * currently in use.
77974@@ -299,7 +299,7 @@ __u32 sctp_generate_tag(const struct sctp_endpoint *);
77975 __u32 sctp_generate_tsn(const struct sctp_endpoint *);
77976
77977 /* Extern declarations for major data structures. */
77978-extern sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
77979+extern sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
77980
77981
77982 /* Get the size of a DATA chunk payload. */
77983diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
77984index 1bd4c41..9250b5b 100644
77985--- a/include/net/sctp/structs.h
77986+++ b/include/net/sctp/structs.h
77987@@ -516,7 +516,7 @@ struct sctp_pf {
77988 struct sctp_association *asoc);
77989 void (*addr_v4map) (struct sctp_sock *, union sctp_addr *);
77990 struct sctp_af *af;
77991-};
77992+} __do_const;
77993
77994
77995 /* Structure to track chunk fragments that have been acked, but peer
77996diff --git a/include/net/sock.h b/include/net/sock.h
77997index 66772cf..25bc45b 100644
77998--- a/include/net/sock.h
77999+++ b/include/net/sock.h
78000@@ -325,7 +325,7 @@ struct sock {
78001 #ifdef CONFIG_RPS
78002 __u32 sk_rxhash;
78003 #endif
78004- atomic_t sk_drops;
78005+ atomic_unchecked_t sk_drops;
78006 int sk_rcvbuf;
78007
78008 struct sk_filter __rcu *sk_filter;
78009@@ -1797,7 +1797,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
78010 }
78011
78012 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
78013- char __user *from, char *to,
78014+ char __user *from, unsigned char *to,
78015 int copy, int offset)
78016 {
78017 if (skb->ip_summed == CHECKSUM_NONE) {
78018@@ -2056,7 +2056,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
78019 }
78020 }
78021
78022-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
78023+struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
78024
78025 /**
78026 * sk_page_frag - return an appropriate page_frag
78027diff --git a/include/net/tcp.h b/include/net/tcp.h
78028index 5bba80f..8520a82 100644
78029--- a/include/net/tcp.h
78030+++ b/include/net/tcp.h
78031@@ -524,7 +524,7 @@ extern void tcp_retransmit_timer(struct sock *sk);
78032 extern void tcp_xmit_retransmit_queue(struct sock *);
78033 extern void tcp_simple_retransmit(struct sock *);
78034 extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
78035-extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
78036+extern int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
78037
78038 extern void tcp_send_probe0(struct sock *);
78039 extern void tcp_send_partial(struct sock *);
78040@@ -697,8 +697,8 @@ struct tcp_skb_cb {
78041 struct inet6_skb_parm h6;
78042 #endif
78043 } header; /* For incoming frames */
78044- __u32 seq; /* Starting sequence number */
78045- __u32 end_seq; /* SEQ + FIN + SYN + datalen */
78046+ __u32 seq __intentional_overflow(0); /* Starting sequence number */
78047+ __u32 end_seq __intentional_overflow(0); /* SEQ + FIN + SYN + datalen */
78048 __u32 when; /* used to compute rtt's */
78049 __u8 tcp_flags; /* TCP header flags. (tcp[13]) */
78050
78051@@ -712,7 +712,7 @@ struct tcp_skb_cb {
78052
78053 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
78054 /* 1 byte hole */
78055- __u32 ack_seq; /* Sequence number ACK'd */
78056+ __u32 ack_seq __intentional_overflow(0); /* Sequence number ACK'd */
78057 };
78058
78059 #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
78060diff --git a/include/net/xfrm.h b/include/net/xfrm.h
78061index 94ce082..62b278d 100644
78062--- a/include/net/xfrm.h
78063+++ b/include/net/xfrm.h
78064@@ -305,7 +305,7 @@ struct xfrm_policy_afinfo {
78065 struct net_device *dev,
78066 const struct flowi *fl);
78067 struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig);
78068-};
78069+} __do_const;
78070
78071 extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
78072 extern int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
78073@@ -341,7 +341,7 @@ struct xfrm_state_afinfo {
78074 struct sk_buff *skb);
78075 int (*transport_finish)(struct sk_buff *skb,
78076 int async);
78077-};
78078+} __do_const;
78079
78080 extern int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
78081 extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
78082@@ -424,7 +424,7 @@ struct xfrm_mode {
78083 struct module *owner;
78084 unsigned int encap;
78085 int flags;
78086-};
78087+} __do_const;
78088
78089 /* Flags for xfrm_mode. */
78090 enum {
78091@@ -521,7 +521,7 @@ struct xfrm_policy {
78092 struct timer_list timer;
78093
78094 struct flow_cache_object flo;
78095- atomic_t genid;
78096+ atomic_unchecked_t genid;
78097 u32 priority;
78098 u32 index;
78099 struct xfrm_mark mark;
78100diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
78101index 1a046b1..ee0bef0 100644
78102--- a/include/rdma/iw_cm.h
78103+++ b/include/rdma/iw_cm.h
78104@@ -122,7 +122,7 @@ struct iw_cm_verbs {
78105 int backlog);
78106
78107 int (*destroy_listen)(struct iw_cm_id *cm_id);
78108-};
78109+} __no_const;
78110
78111 /**
78112 * iw_create_cm_id - Create an IW CM identifier.
78113diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
78114index e1379b4..67eafbe 100644
78115--- a/include/scsi/libfc.h
78116+++ b/include/scsi/libfc.h
78117@@ -762,6 +762,7 @@ struct libfc_function_template {
78118 */
78119 void (*disc_stop_final) (struct fc_lport *);
78120 };
78121+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
78122
78123 /**
78124 * struct fc_disc - Discovery context
78125@@ -866,7 +867,7 @@ struct fc_lport {
78126 struct fc_vport *vport;
78127
78128 /* Operational Information */
78129- struct libfc_function_template tt;
78130+ libfc_function_template_no_const tt;
78131 u8 link_up;
78132 u8 qfull;
78133 enum fc_lport_state state;
78134diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
78135index cc64587..608f523 100644
78136--- a/include/scsi/scsi_device.h
78137+++ b/include/scsi/scsi_device.h
78138@@ -171,9 +171,9 @@ struct scsi_device {
78139 unsigned int max_device_blocked; /* what device_blocked counts down from */
78140 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
78141
78142- atomic_t iorequest_cnt;
78143- atomic_t iodone_cnt;
78144- atomic_t ioerr_cnt;
78145+ atomic_unchecked_t iorequest_cnt;
78146+ atomic_unchecked_t iodone_cnt;
78147+ atomic_unchecked_t ioerr_cnt;
78148
78149 struct device sdev_gendev,
78150 sdev_dev;
78151diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
78152index b797e8f..8e2c3aa 100644
78153--- a/include/scsi/scsi_transport_fc.h
78154+++ b/include/scsi/scsi_transport_fc.h
78155@@ -751,7 +751,8 @@ struct fc_function_template {
78156 unsigned long show_host_system_hostname:1;
78157
78158 unsigned long disable_target_scan:1;
78159-};
78160+} __do_const;
78161+typedef struct fc_function_template __no_const fc_function_template_no_const;
78162
78163
78164 /**
78165diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
78166index 9031a26..750d592 100644
78167--- a/include/sound/compress_driver.h
78168+++ b/include/sound/compress_driver.h
78169@@ -128,7 +128,7 @@ struct snd_compr_ops {
78170 struct snd_compr_caps *caps);
78171 int (*get_codec_caps) (struct snd_compr_stream *stream,
78172 struct snd_compr_codec_caps *codec);
78173-};
78174+} __no_const;
78175
78176 /**
78177 * struct snd_compr: Compressed device
78178diff --git a/include/sound/soc.h b/include/sound/soc.h
78179index 85c1522..f44bad1 100644
78180--- a/include/sound/soc.h
78181+++ b/include/sound/soc.h
78182@@ -781,7 +781,7 @@ struct snd_soc_codec_driver {
78183 /* probe ordering - for components with runtime dependencies */
78184 int probe_order;
78185 int remove_order;
78186-};
78187+} __do_const;
78188
78189 /* SoC platform interface */
78190 struct snd_soc_platform_driver {
78191@@ -827,7 +827,7 @@ struct snd_soc_platform_driver {
78192 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
78193 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
78194 int (*bespoke_trigger)(struct snd_pcm_substream *, int);
78195-};
78196+} __do_const;
78197
78198 struct snd_soc_platform {
78199 const char *name;
78200diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
78201index 4ea4f98..a63629b 100644
78202--- a/include/target/target_core_base.h
78203+++ b/include/target/target_core_base.h
78204@@ -653,7 +653,7 @@ struct se_device {
78205 spinlock_t stats_lock;
78206 /* Active commands on this virtual SE device */
78207 atomic_t simple_cmds;
78208- atomic_t dev_ordered_id;
78209+ atomic_unchecked_t dev_ordered_id;
78210 atomic_t dev_ordered_sync;
78211 atomic_t dev_qf_count;
78212 int export_count;
78213diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
78214new file mode 100644
78215index 0000000..fb634b7
78216--- /dev/null
78217+++ b/include/trace/events/fs.h
78218@@ -0,0 +1,53 @@
78219+#undef TRACE_SYSTEM
78220+#define TRACE_SYSTEM fs
78221+
78222+#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
78223+#define _TRACE_FS_H
78224+
78225+#include <linux/fs.h>
78226+#include <linux/tracepoint.h>
78227+
78228+TRACE_EVENT(do_sys_open,
78229+
78230+ TP_PROTO(const char *filename, int flags, int mode),
78231+
78232+ TP_ARGS(filename, flags, mode),
78233+
78234+ TP_STRUCT__entry(
78235+ __string( filename, filename )
78236+ __field( int, flags )
78237+ __field( int, mode )
78238+ ),
78239+
78240+ TP_fast_assign(
78241+ __assign_str(filename, filename);
78242+ __entry->flags = flags;
78243+ __entry->mode = mode;
78244+ ),
78245+
78246+ TP_printk("\"%s\" %x %o",
78247+ __get_str(filename), __entry->flags, __entry->mode)
78248+);
78249+
78250+TRACE_EVENT(open_exec,
78251+
78252+ TP_PROTO(const char *filename),
78253+
78254+ TP_ARGS(filename),
78255+
78256+ TP_STRUCT__entry(
78257+ __string( filename, filename )
78258+ ),
78259+
78260+ TP_fast_assign(
78261+ __assign_str(filename, filename);
78262+ ),
78263+
78264+ TP_printk("\"%s\"",
78265+ __get_str(filename))
78266+);
78267+
78268+#endif /* _TRACE_FS_H */
78269+
78270+/* This part must be outside protection */
78271+#include <trace/define_trace.h>
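[editor's note: TRACE_EVENT(do_sys_open, ...) expands into a trace_do_sys_open() hook for call sites plus an events/fs/do_sys_open/ control in tracefs, and the hook costs almost nothing while disabled. A userspace stand-in that mimics the enable/disable gating and the TP_printk format above (the static-key machinery is replaced by a plain flag):

#include <stdio.h>

static int event_enabled;       /* stands in for the tracepoint static key */

/* stand-in for the trace_do_sys_open() call TRACE_EVENT() generates */
static void trace_do_sys_open(const char *filename, int flags, int mode)
{
        if (event_enabled)
                printf("\"%s\" %x %o\n", filename, flags, mode);
}

int main(void)
{
        trace_do_sys_open("/etc/motd", 0, 0444);  /* disabled: silent      */
        event_enabled = 1;                        /* echo 1 > .../enable   */
        trace_do_sys_open("/etc/motd", 0, 0444);  /* "/etc/motd" 0 444     */
        return 0;
}
]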
78272diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
78273index 1c09820..7f5ec79 100644
78274--- a/include/trace/events/irq.h
78275+++ b/include/trace/events/irq.h
78276@@ -36,7 +36,7 @@ struct softirq_action;
78277 */
78278 TRACE_EVENT(irq_handler_entry,
78279
78280- TP_PROTO(int irq, struct irqaction *action),
78281+ TP_PROTO(int irq, const struct irqaction *action),
78282
78283 TP_ARGS(irq, action),
78284
78285@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
78286 */
78287 TRACE_EVENT(irq_handler_exit,
78288
78289- TP_PROTO(int irq, struct irqaction *action, int ret),
78290+ TP_PROTO(int irq, const struct irqaction *action, int ret),
78291
78292 TP_ARGS(irq, action, ret),
78293
78294diff --git a/include/uapi/linux/a.out.h b/include/uapi/linux/a.out.h
78295index 7caf44c..23c6f27 100644
78296--- a/include/uapi/linux/a.out.h
78297+++ b/include/uapi/linux/a.out.h
78298@@ -39,6 +39,14 @@ enum machine_type {
78299 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
78300 };
78301
78302+/* Constants for the N_FLAGS field */
78303+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
78304+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
78305+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
78306+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
78307+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
78308+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
78309+
78310 #if !defined (N_MAGIC)
78311 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
78312 #endif
78313diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h
78314index d876736..ccce5c0 100644
78315--- a/include/uapi/linux/byteorder/little_endian.h
78316+++ b/include/uapi/linux/byteorder/little_endian.h
78317@@ -42,51 +42,51 @@
78318
78319 static inline __le64 __cpu_to_le64p(const __u64 *p)
78320 {
78321- return (__force __le64)*p;
78322+ return (__force const __le64)*p;
78323 }
78324-static inline __u64 __le64_to_cpup(const __le64 *p)
78325+static inline __u64 __intentional_overflow(-1) __le64_to_cpup(const __le64 *p)
78326 {
78327- return (__force __u64)*p;
78328+ return (__force const __u64)*p;
78329 }
78330 static inline __le32 __cpu_to_le32p(const __u32 *p)
78331 {
78332- return (__force __le32)*p;
78333+ return (__force const __le32)*p;
78334 }
78335 static inline __u32 __le32_to_cpup(const __le32 *p)
78336 {
78337- return (__force __u32)*p;
78338+ return (__force const __u32)*p;
78339 }
78340 static inline __le16 __cpu_to_le16p(const __u16 *p)
78341 {
78342- return (__force __le16)*p;
78343+ return (__force const __le16)*p;
78344 }
78345 static inline __u16 __le16_to_cpup(const __le16 *p)
78346 {
78347- return (__force __u16)*p;
78348+ return (__force const __u16)*p;
78349 }
78350 static inline __be64 __cpu_to_be64p(const __u64 *p)
78351 {
78352- return (__force __be64)__swab64p(p);
78353+ return (__force const __be64)__swab64p(p);
78354 }
78355 static inline __u64 __be64_to_cpup(const __be64 *p)
78356 {
78357- return __swab64p((__u64 *)p);
78358+ return __swab64p((const __u64 *)p);
78359 }
78360 static inline __be32 __cpu_to_be32p(const __u32 *p)
78361 {
78362- return (__force __be32)__swab32p(p);
78363+ return (__force const __be32)__swab32p(p);
78364 }
78365-static inline __u32 __be32_to_cpup(const __be32 *p)
78366+static inline __u32 __intentional_overflow(-1) __be32_to_cpup(const __be32 *p)
78367 {
78368- return __swab32p((__u32 *)p);
78369+ return __swab32p((const __u32 *)p);
78370 }
78371 static inline __be16 __cpu_to_be16p(const __u16 *p)
78372 {
78373- return (__force __be16)__swab16p(p);
78374+ return (__force const __be16)__swab16p(p);
78375 }
78376 static inline __u16 __be16_to_cpup(const __be16 *p)
78377 {
78378- return __swab16p((__u16 *)p);
78379+ return __swab16p((const __u16 *)p);
78380 }
78381 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
78382 #define __le64_to_cpus(x) do { (void)(x); } while (0)
78383diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
78384index ef6103b..d4e65dd 100644
78385--- a/include/uapi/linux/elf.h
78386+++ b/include/uapi/linux/elf.h
78387@@ -37,6 +37,17 @@ typedef __s64 Elf64_Sxword;
78388 #define PT_GNU_EH_FRAME 0x6474e550
78389
78390 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
78391+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
78392+
78393+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
78394+
78395+/* Constants for the e_flags field */
78396+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
78397+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
78398+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
78399+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
78400+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
78401+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
78402
78403 /*
78404 * Extended Numbering
78405@@ -94,6 +105,8 @@ typedef __s64 Elf64_Sxword;
78406 #define DT_DEBUG 21
78407 #define DT_TEXTREL 22
78408 #define DT_JMPREL 23
78409+#define DT_FLAGS 30
78410+ #define DF_TEXTREL 0x00000004
78411 #define DT_ENCODING 32
78412 #define OLD_DT_LOOS 0x60000000
78413 #define DT_LOOS 0x6000000d
78414@@ -240,6 +253,19 @@ typedef struct elf64_hdr {
78415 #define PF_W 0x2
78416 #define PF_X 0x1
78417
78418+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
78419+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
78420+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
78421+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
78422+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
78423+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
78424+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
78425+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
78426+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
78427+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
78428+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
78429+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
78430+
78431 typedef struct elf32_phdr{
78432 Elf32_Word p_type;
78433 Elf32_Off p_offset;
78434@@ -332,6 +358,8 @@ typedef struct elf64_shdr {
78435 #define EI_OSABI 7
78436 #define EI_PAD 8
78437
78438+#define EI_PAX 14
78439+
78440 #define ELFMAG0 0x7f /* EI_MAG */
78441 #define ELFMAG1 'E'
78442 #define ELFMAG2 'L'
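The new PF_* constants come in enable/disable pairs so a PT_PAX_FLAGS program header can express three states per feature: explicitly on, explicitly off, or unset, which defers to the kernel's built-in default; the loader side of the patch treats a pair with both bits set as invalid. A hypothetical standalone decoder illustrating that reading (pax_decode and pax_state are illustrative names, not from the patch):

    #include <stdio.h>

    #define PF_PAGEEXEC   (1U << 4)
    #define PF_NOPAGEEXEC (1U << 5)

    enum pax_state { PAX_DEFAULT, PAX_ON, PAX_OFF, PAX_INVALID };

    static enum pax_state pax_decode(unsigned int p_flags,
                                     unsigned int on, unsigned int off)
    {
        if ((p_flags & on) && (p_flags & off))
            return PAX_INVALID;          /* contradictory pair */
        if (p_flags & off)
            return PAX_OFF;
        if (p_flags & on)
            return PAX_ON;
        return PAX_DEFAULT;              /* defer to the kernel default */
    }

    int main(void)
    {
        /* prints 2 (PAX_OFF): the explicit disable bit decides */
        printf("%d\n", pax_decode(PF_NOPAGEEXEC, PF_PAGEEXEC, PF_NOPAGEEXEC));
        return 0;
    }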
78443diff --git a/include/uapi/linux/personality.h b/include/uapi/linux/personality.h
78444index aa169c4..6a2771d 100644
78445--- a/include/uapi/linux/personality.h
78446+++ b/include/uapi/linux/personality.h
78447@@ -30,6 +30,7 @@ enum {
78448 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
78449 ADDR_NO_RANDOMIZE | \
78450 ADDR_COMPAT_LAYOUT | \
78451+ ADDR_LIMIT_3GB | \
78452 MMAP_PAGE_ZERO)
78453
78454 /*
78455diff --git a/include/uapi/linux/screen_info.h b/include/uapi/linux/screen_info.h
78456index 7530e74..e714828 100644
78457--- a/include/uapi/linux/screen_info.h
78458+++ b/include/uapi/linux/screen_info.h
78459@@ -43,7 +43,8 @@ struct screen_info {
78460 __u16 pages; /* 0x32 */
78461 __u16 vesa_attributes; /* 0x34 */
78462 __u32 capabilities; /* 0x36 */
78463- __u8 _reserved[6]; /* 0x3a */
78464+ __u16 vesapm_size; /* 0x3a */
78465+ __u8 _reserved[4]; /* 0x3c */
78466 } __attribute__((packed));
78467
78468 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
78469diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
78470index 0e011eb..82681b1 100644
78471--- a/include/uapi/linux/swab.h
78472+++ b/include/uapi/linux/swab.h
78473@@ -43,7 +43,7 @@
78474 * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
78475 */
78476
78477-static inline __attribute_const__ __u16 __fswab16(__u16 val)
78478+static inline __intentional_overflow(-1) __attribute_const__ __u16 __fswab16(__u16 val)
78479 {
78480 #ifdef __HAVE_BUILTIN_BSWAP16__
78481 return __builtin_bswap16(val);
78482@@ -54,7 +54,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
78483 #endif
78484 }
78485
78486-static inline __attribute_const__ __u32 __fswab32(__u32 val)
78487+static inline __intentional_overflow(-1) __attribute_const__ __u32 __fswab32(__u32 val)
78488 {
78489 #ifdef __HAVE_BUILTIN_BSWAP32__
78490 return __builtin_bswap32(val);
78491@@ -65,7 +65,7 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
78492 #endif
78493 }
78494
78495-static inline __attribute_const__ __u64 __fswab64(__u64 val)
78496+static inline __intentional_overflow(-1) __attribute_const__ __u64 __fswab64(__u64 val)
78497 {
78498 #ifdef __HAVE_BUILTIN_BSWAP64__
78499 return __builtin_bswap64(val);
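__intentional_overflow(), also sprinkled over the little_endian.h helpers above, is an annotation for the size_overflow gcc plugin: it exempts arithmetic that wraps by design, such as these byte-swap helpers, from the plugin's overflow instrumentation; the -1 argument reportedly designates the return value rather than a parameter. The assumed shape of the definition (the real one is in the patch's compiler-header changes):

    #ifdef SIZE_OVERFLOW_PLUGIN
    #define __intentional_overflow(...) \
            __attribute__((intentional_overflow(__VA_ARGS__)))
    #else
    #define __intentional_overflow(...)
    #endif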
78500diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
78501index 6d67213..8dab561 100644
78502--- a/include/uapi/linux/sysctl.h
78503+++ b/include/uapi/linux/sysctl.h
78504@@ -155,7 +155,11 @@ enum
78505 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
78506 };
78507
78508-
78509+#ifdef CONFIG_PAX_SOFTMODE
78510+enum {
78511+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
78512+};
78513+#endif
78514
78515 /* CTL_VM names: */
78516 enum
78517diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
78518index e4629b9..6958086 100644
78519--- a/include/uapi/linux/xattr.h
78520+++ b/include/uapi/linux/xattr.h
78521@@ -63,5 +63,9 @@
78522 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
78523 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
78524
78525+/* User namespace */
78526+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
78527+#define XATTR_PAX_FLAGS_SUFFIX "flags"
78528+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
78529
78530 #endif /* _UAPI_LINUX_XATTR_H */
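Adjacent string literals concatenate, so XATTR_NAME_PAX_FLAGS expands to "user.pax.flags", the extended attribute that userspace tooling sets on a binary to carry per-file PaX flags. A self-contained check of the expansion:

    #include <stdio.h>

    #define XATTR_USER_PREFIX      "user."
    #define XATTR_PAX_PREFIX       XATTR_USER_PREFIX "pax."
    #define XATTR_PAX_FLAGS_SUFFIX "flags"
    #define XATTR_NAME_PAX_FLAGS   XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX

    int main(void)
    {
        printf("%s\n", XATTR_NAME_PAX_FLAGS);   /* user.pax.flags */
        return 0;
    }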
78531diff --git a/include/video/udlfb.h b/include/video/udlfb.h
78532index f9466fa..f4e2b81 100644
78533--- a/include/video/udlfb.h
78534+++ b/include/video/udlfb.h
78535@@ -53,10 +53,10 @@ struct dlfb_data {
78536 u32 pseudo_palette[256];
78537 int blank_mode; /*one of FB_BLANK_ */
78538 /* blit-only rendering path metrics, exposed through sysfs */
78539- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
78540- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
78541- atomic_t bytes_sent; /* to usb, after compression including overhead */
78542- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
78543+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
78544+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
78545+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
78546+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
78547 };
78548
78549 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
78550diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
78551index 1a91850..28573f8 100644
78552--- a/include/video/uvesafb.h
78553+++ b/include/video/uvesafb.h
78554@@ -122,6 +122,7 @@ struct uvesafb_par {
78555 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
78556 u8 pmi_setpal; /* PMI for palette changes */
78557 u16 *pmi_base; /* protected mode interface location */
78558+ u8 *pmi_code; /* protected mode code location */
78559 void *pmi_start;
78560 void *pmi_pal;
78561 u8 *vbe_state_orig; /*
78562diff --git a/init/Kconfig b/init/Kconfig
78563index 2d9b831..ae4c8ac 100644
78564--- a/init/Kconfig
78565+++ b/init/Kconfig
78566@@ -1029,6 +1029,7 @@ endif # CGROUPS
78567
78568 config CHECKPOINT_RESTORE
78569 bool "Checkpoint/restore support" if EXPERT
78570+ depends on !GRKERNSEC
78571 default n
78572 help
78573 Enables additional kernel features in a sake of checkpoint/restore.
78574@@ -1516,7 +1517,7 @@ config SLUB_DEBUG
78575
78576 config COMPAT_BRK
78577 bool "Disable heap randomization"
78578- default y
78579+ default n
78580 help
78581 Randomizing heap placement makes heap exploits harder, but it
78582 also breaks ancient binaries (including anything libc5 based).
78583@@ -1779,7 +1780,7 @@ config INIT_ALL_POSSIBLE
78584 config STOP_MACHINE
78585 bool
78586 default y
78587- depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
78588+ depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
78589 help
78590 Need stop_machine() primitive.
78591
78592diff --git a/init/Makefile b/init/Makefile
78593index 7bc47ee..6da2dc7 100644
78594--- a/init/Makefile
78595+++ b/init/Makefile
78596@@ -2,6 +2,9 @@
78597 # Makefile for the linux kernel.
78598 #
78599
78600+ccflags-y := $(GCC_PLUGINS_CFLAGS)
78601+asflags-y := $(GCC_PLUGINS_AFLAGS)
78602+
78603 obj-y := main.o version.o mounts.o
78604 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
78605 obj-y += noinitramfs.o
78606diff --git a/init/do_mounts.c b/init/do_mounts.c
78607index a2b49f2..03a0e17c 100644
78608--- a/init/do_mounts.c
78609+++ b/init/do_mounts.c
78610@@ -355,11 +355,11 @@ static void __init get_fs_names(char *page)
78611 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
78612 {
78613 struct super_block *s;
78614- int err = sys_mount(name, "/root", fs, flags, data);
78615+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
78616 if (err)
78617 return err;
78618
78619- sys_chdir("/root");
78620+ sys_chdir((const char __force_user *)"/root");
78621 s = current->fs->pwd.dentry->d_sb;
78622 ROOT_DEV = s->s_dev;
78623 printk(KERN_INFO
78624@@ -480,18 +480,18 @@ void __init change_floppy(char *fmt, ...)
78625 va_start(args, fmt);
78626 vsprintf(buf, fmt, args);
78627 va_end(args);
78628- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
78629+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
78630 if (fd >= 0) {
78631 sys_ioctl(fd, FDEJECT, 0);
78632 sys_close(fd);
78633 }
78634 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
78635- fd = sys_open("/dev/console", O_RDWR, 0);
78636+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
78637 if (fd >= 0) {
78638 sys_ioctl(fd, TCGETS, (long)&termios);
78639 termios.c_lflag &= ~ICANON;
78640 sys_ioctl(fd, TCSETSF, (long)&termios);
78641- sys_read(fd, &c, 1);
78642+ sys_read(fd, (char __user *)&c, 1);
78643 termios.c_lflag |= ICANON;
78644 sys_ioctl(fd, TCSETSF, (long)&termios);
78645 sys_close(fd);
78646@@ -585,6 +585,6 @@ void __init prepare_namespace(void)
78647 mount_root();
78648 out:
78649 devtmpfs_mount("dev");
78650- sys_mount(".", "/", NULL, MS_MOVE, NULL);
78651- sys_chroot(".");
78652+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
78653+ sys_chroot((const char __force_user *)".");
78654 }
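The (char __force_user *) casts blanketing init/ here and in the following files are sparse bookkeeping rather than behavior changes: with PaX UDEREF the user/kernel pointer separation is enforced at run time, so the __user annotation becomes load-bearing, and early-boot code that legitimately hands kernel strings to syscall entry points (it runs with the kernel address limit) must force the cast explicitly. The assumed shape of the annotations, which expand to nothing outside a sparse run:

    #ifdef __CHECKER__
    # define __force_user    __force __user
    # define __force_kernel  __force __kernel
    #else
    # define __force_user
    # define __force_kernel
    #endif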
78655diff --git a/init/do_mounts.h b/init/do_mounts.h
78656index f5b978a..69dbfe8 100644
78657--- a/init/do_mounts.h
78658+++ b/init/do_mounts.h
78659@@ -15,15 +15,15 @@ extern int root_mountflags;
78660
78661 static inline int create_dev(char *name, dev_t dev)
78662 {
78663- sys_unlink(name);
78664- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
78665+ sys_unlink((char __force_user *)name);
78666+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
78667 }
78668
78669 #if BITS_PER_LONG == 32
78670 static inline u32 bstat(char *name)
78671 {
78672 struct stat64 stat;
78673- if (sys_stat64(name, &stat) != 0)
78674+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
78675 return 0;
78676 if (!S_ISBLK(stat.st_mode))
78677 return 0;
78678@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
78679 static inline u32 bstat(char *name)
78680 {
78681 struct stat stat;
78682- if (sys_newstat(name, &stat) != 0)
78683+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
78684 return 0;
78685 if (!S_ISBLK(stat.st_mode))
78686 return 0;
78687diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
78688index 3e0878e..8a9d7a0 100644
78689--- a/init/do_mounts_initrd.c
78690+++ b/init/do_mounts_initrd.c
78691@@ -37,13 +37,13 @@ static int init_linuxrc(struct subprocess_info *info, struct cred *new)
78692 {
78693 sys_unshare(CLONE_FS | CLONE_FILES);
78694 /* stdin/stdout/stderr for /linuxrc */
78695- sys_open("/dev/console", O_RDWR, 0);
78696+ sys_open((const char __force_user *)"/dev/console", O_RDWR, 0);
78697 sys_dup(0);
78698 sys_dup(0);
78699 /* move initrd over / and chdir/chroot in initrd root */
78700- sys_chdir("/root");
78701- sys_mount(".", "/", NULL, MS_MOVE, NULL);
78702- sys_chroot(".");
78703+ sys_chdir((const char __force_user *)"/root");
78704+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
78705+ sys_chroot((const char __force_user *)".");
78706 sys_setsid();
78707 return 0;
78708 }
78709@@ -59,8 +59,8 @@ static void __init handle_initrd(void)
78710 create_dev("/dev/root.old", Root_RAM0);
78711 /* mount initrd on rootfs' /root */
78712 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
78713- sys_mkdir("/old", 0700);
78714- sys_chdir("/old");
78715+ sys_mkdir((const char __force_user *)"/old", 0700);
78716+ sys_chdir((const char __force_user *)"/old");
78717
78718 /* try loading default modules from initrd */
78719 load_default_modules();
78720@@ -80,31 +80,31 @@ static void __init handle_initrd(void)
78721 current->flags &= ~PF_FREEZER_SKIP;
78722
78723 /* move initrd to rootfs' /old */
78724- sys_mount("..", ".", NULL, MS_MOVE, NULL);
78725+ sys_mount((char __force_user *)"..", (char __force_user *)".", NULL, MS_MOVE, NULL);
78726 /* switch root and cwd back to / of rootfs */
78727- sys_chroot("..");
78728+ sys_chroot((const char __force_user *)"..");
78729
78730 if (new_decode_dev(real_root_dev) == Root_RAM0) {
78731- sys_chdir("/old");
78732+ sys_chdir((const char __force_user *)"/old");
78733 return;
78734 }
78735
78736- sys_chdir("/");
78737+ sys_chdir((const char __force_user *)"/");
78738 ROOT_DEV = new_decode_dev(real_root_dev);
78739 mount_root();
78740
78741 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
78742- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
78743+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
78744 if (!error)
78745 printk("okay\n");
78746 else {
78747- int fd = sys_open("/dev/root.old", O_RDWR, 0);
78748+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
78749 if (error == -ENOENT)
78750 printk("/initrd does not exist. Ignored.\n");
78751 else
78752 printk("failed\n");
78753 printk(KERN_NOTICE "Unmounting old root\n");
78754- sys_umount("/old", MNT_DETACH);
78755+ sys_umount((char __force_user *)"/old", MNT_DETACH);
78756 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
78757 if (fd < 0) {
78758 error = fd;
78759@@ -127,11 +127,11 @@ int __init initrd_load(void)
78760 * mounted in the normal path.
78761 */
78762 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
78763- sys_unlink("/initrd.image");
78764+ sys_unlink((const char __force_user *)"/initrd.image");
78765 handle_initrd();
78766 return 1;
78767 }
78768 }
78769- sys_unlink("/initrd.image");
78770+ sys_unlink((const char __force_user *)"/initrd.image");
78771 return 0;
78772 }
78773diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
78774index 8cb6db5..d729f50 100644
78775--- a/init/do_mounts_md.c
78776+++ b/init/do_mounts_md.c
78777@@ -180,7 +180,7 @@ static void __init md_setup_drive(void)
78778 partitioned ? "_d" : "", minor,
78779 md_setup_args[ent].device_names);
78780
78781- fd = sys_open(name, 0, 0);
78782+ fd = sys_open((char __force_user *)name, 0, 0);
78783 if (fd < 0) {
78784 printk(KERN_ERR "md: open failed - cannot start "
78785 "array %s\n", name);
78786@@ -243,7 +243,7 @@ static void __init md_setup_drive(void)
78787 * array without it
78788 */
78789 sys_close(fd);
78790- fd = sys_open(name, 0, 0);
78791+ fd = sys_open((char __force_user *)name, 0, 0);
78792 sys_ioctl(fd, BLKRRPART, 0);
78793 }
78794 sys_close(fd);
78795@@ -293,7 +293,7 @@ static void __init autodetect_raid(void)
78796
78797 wait_for_device_probe();
78798
78799- fd = sys_open("/dev/md0", 0, 0);
78800+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
78801 if (fd >= 0) {
78802 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
78803 sys_close(fd);
78804diff --git a/init/init_task.c b/init/init_task.c
78805index ba0a7f36..2bcf1d5 100644
78806--- a/init/init_task.c
78807+++ b/init/init_task.c
78808@@ -22,5 +22,9 @@ EXPORT_SYMBOL(init_task);
78809 * Initial thread structure. Alignment of this is handled by a special
78810 * linker map entry.
78811 */
78812+#ifdef CONFIG_X86
78813+union thread_union init_thread_union __init_task_data;
78814+#else
78815 union thread_union init_thread_union __init_task_data =
78816 { INIT_THREAD_INFO(init_task) };
78817+#endif
78818diff --git a/init/initramfs.c b/init/initramfs.c
78819index a67ef9d..2d17ed9 100644
78820--- a/init/initramfs.c
78821+++ b/init/initramfs.c
78822@@ -84,7 +84,7 @@ static void __init free_hash(void)
78823 }
78824 }
78825
78826-static long __init do_utime(char *filename, time_t mtime)
78827+static long __init do_utime(char __force_user *filename, time_t mtime)
78828 {
78829 struct timespec t[2];
78830
78831@@ -119,7 +119,7 @@ static void __init dir_utime(void)
78832 struct dir_entry *de, *tmp;
78833 list_for_each_entry_safe(de, tmp, &dir_list, list) {
78834 list_del(&de->list);
78835- do_utime(de->name, de->mtime);
78836+ do_utime((char __force_user *)de->name, de->mtime);
78837 kfree(de->name);
78838 kfree(de);
78839 }
78840@@ -281,7 +281,7 @@ static int __init maybe_link(void)
78841 if (nlink >= 2) {
78842 char *old = find_link(major, minor, ino, mode, collected);
78843 if (old)
78844- return (sys_link(old, collected) < 0) ? -1 : 1;
78845+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
78846 }
78847 return 0;
78848 }
78849@@ -290,11 +290,11 @@ static void __init clean_path(char *path, umode_t mode)
78850 {
78851 struct stat st;
78852
78853- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
78854+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
78855 if (S_ISDIR(st.st_mode))
78856- sys_rmdir(path);
78857+ sys_rmdir((char __force_user *)path);
78858 else
78859- sys_unlink(path);
78860+ sys_unlink((char __force_user *)path);
78861 }
78862 }
78863
78864@@ -315,7 +315,7 @@ static int __init do_name(void)
78865 int openflags = O_WRONLY|O_CREAT;
78866 if (ml != 1)
78867 openflags |= O_TRUNC;
78868- wfd = sys_open(collected, openflags, mode);
78869+ wfd = sys_open((char __force_user *)collected, openflags, mode);
78870
78871 if (wfd >= 0) {
78872 sys_fchown(wfd, uid, gid);
78873@@ -327,17 +327,17 @@ static int __init do_name(void)
78874 }
78875 }
78876 } else if (S_ISDIR(mode)) {
78877- sys_mkdir(collected, mode);
78878- sys_chown(collected, uid, gid);
78879- sys_chmod(collected, mode);
78880+ sys_mkdir((char __force_user *)collected, mode);
78881+ sys_chown((char __force_user *)collected, uid, gid);
78882+ sys_chmod((char __force_user *)collected, mode);
78883 dir_add(collected, mtime);
78884 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
78885 S_ISFIFO(mode) || S_ISSOCK(mode)) {
78886 if (maybe_link() == 0) {
78887- sys_mknod(collected, mode, rdev);
78888- sys_chown(collected, uid, gid);
78889- sys_chmod(collected, mode);
78890- do_utime(collected, mtime);
78891+ sys_mknod((char __force_user *)collected, mode, rdev);
78892+ sys_chown((char __force_user *)collected, uid, gid);
78893+ sys_chmod((char __force_user *)collected, mode);
78894+ do_utime((char __force_user *)collected, mtime);
78895 }
78896 }
78897 return 0;
78898@@ -346,15 +346,15 @@ static int __init do_name(void)
78899 static int __init do_copy(void)
78900 {
78901 if (count >= body_len) {
78902- sys_write(wfd, victim, body_len);
78903+ sys_write(wfd, (char __force_user *)victim, body_len);
78904 sys_close(wfd);
78905- do_utime(vcollected, mtime);
78906+ do_utime((char __force_user *)vcollected, mtime);
78907 kfree(vcollected);
78908 eat(body_len);
78909 state = SkipIt;
78910 return 0;
78911 } else {
78912- sys_write(wfd, victim, count);
78913+ sys_write(wfd, (char __force_user *)victim, count);
78914 body_len -= count;
78915 eat(count);
78916 return 1;
78917@@ -365,9 +365,9 @@ static int __init do_symlink(void)
78918 {
78919 collected[N_ALIGN(name_len) + body_len] = '\0';
78920 clean_path(collected, 0);
78921- sys_symlink(collected + N_ALIGN(name_len), collected);
78922- sys_lchown(collected, uid, gid);
78923- do_utime(collected, mtime);
78924+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
78925+ sys_lchown((char __force_user *)collected, uid, gid);
78926+ do_utime((char __force_user *)collected, mtime);
78927 state = SkipIt;
78928 next_state = Reset;
78929 return 0;
78930@@ -583,7 +583,7 @@ static int __init populate_rootfs(void)
78931 {
78932 char *err = unpack_to_rootfs(__initramfs_start, __initramfs_size);
78933 if (err)
78934- panic(err); /* Failed to decompress INTERNAL initramfs */
78935+ panic("%s", err); /* Failed to decompress INTERNAL initramfs */
78936 if (initrd_start) {
78937 #ifdef CONFIG_BLK_DEV_RAM
78938 int fd;
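The panic("%s", err) hunk is a textbook format-string fix: err is data and must not be passed where a format string is expected. A minimal userspace demonstration of the hazard being closed:

    #include <stdio.h>

    int main(void)
    {
        const char *err = "bad magic at %s";  /* data that happens to contain '%' */

        /* printf(err); would be undefined behaviour: the stray %s
         * makes the formatter read a vararg that was never passed */
        printf("%s\n", err);                  /* safe: err is an argument */
        return 0;
    }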
78939diff --git a/init/main.c b/init/main.c
78940index 9484f4b..0eac7c3 100644
78941--- a/init/main.c
78942+++ b/init/main.c
78943@@ -100,6 +100,8 @@ static inline void mark_rodata_ro(void) { }
78944 extern void tc_init(void);
78945 #endif
78946
78947+extern void grsecurity_init(void);
78948+
78949 /*
78950 * Debug helper: via this flag we know that we are in 'early bootup code'
78951 * where only the boot processor is running with IRQ disabled. This means
78952@@ -153,6 +155,74 @@ static int __init set_reset_devices(char *str)
78953
78954 __setup("reset_devices", set_reset_devices);
78955
78956+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
78957+kgid_t grsec_proc_gid = KGIDT_INIT(CONFIG_GRKERNSEC_PROC_GID);
78958+static int __init setup_grsec_proc_gid(char *str)
78959+{
78960+ grsec_proc_gid = KGIDT_INIT(simple_strtol(str, NULL, 0));
78961+ return 1;
78962+}
78963+__setup("grsec_proc_gid=", setup_grsec_proc_gid);
78964+#endif
78965+
78966+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
78967+unsigned long pax_user_shadow_base __read_only;
78968+EXPORT_SYMBOL(pax_user_shadow_base);
78969+extern char pax_enter_kernel_user[];
78970+extern char pax_exit_kernel_user[];
78971+#endif
78972+
78973+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
78974+static int __init setup_pax_nouderef(char *str)
78975+{
78976+#ifdef CONFIG_X86_32
78977+ unsigned int cpu;
78978+ struct desc_struct *gdt;
78979+
78980+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
78981+ gdt = get_cpu_gdt_table(cpu);
78982+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
78983+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
78984+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
78985+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
78986+ }
78987+ loadsegment(ds, __KERNEL_DS);
78988+ loadsegment(es, __KERNEL_DS);
78989+ loadsegment(ss, __KERNEL_DS);
78990+#else
78991+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
78992+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
78993+ clone_pgd_mask = ~(pgdval_t)0UL;
78994+ pax_user_shadow_base = 0UL;
78995+ setup_clear_cpu_cap(X86_FEATURE_PCID);
78996+#endif
78997+
78998+ return 0;
78999+}
79000+early_param("pax_nouderef", setup_pax_nouderef);
79001+
79002+#ifdef CONFIG_X86_64
79003+static int __init setup_pax_weakuderef(char *str)
79004+{
79005+ if (clone_pgd_mask != ~(pgdval_t)0UL)
79006+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
79007+ return 1;
79008+}
79009+__setup("pax_weakuderef", setup_pax_weakuderef);
79010+#endif
79011+#endif
79012+
79013+#ifdef CONFIG_PAX_SOFTMODE
79014+int pax_softmode;
79015+
79016+static int __init setup_pax_softmode(char *str)
79017+{
79018+ get_option(&str, &pax_softmode);
79019+ return 1;
79020+}
79021+__setup("pax_softmode=", setup_pax_softmode);
79022+#endif
79023+
79024 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
79025 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
79026 static const char *panic_later, *panic_param;
79027@@ -655,8 +725,6 @@ static void __init do_ctors(void)
79028 bool initcall_debug;
79029 core_param(initcall_debug, initcall_debug, bool, 0644);
79030
79031-static char msgbuf[64];
79032-
79033 static int __init_or_module do_one_initcall_debug(initcall_t fn)
79034 {
79035 ktime_t calltime, delta, rettime;
79036@@ -679,23 +747,22 @@ int __init_or_module do_one_initcall(initcall_t fn)
79037 {
79038 int count = preempt_count();
79039 int ret;
79040+ const char *msg1 = "", *msg2 = "";
79041
79042 if (initcall_debug)
79043 ret = do_one_initcall_debug(fn);
79044 else
79045 ret = fn();
79046
79047- msgbuf[0] = 0;
79048-
79049 if (preempt_count() != count) {
79050- sprintf(msgbuf, "preemption imbalance ");
79051+ msg1 = " preemption imbalance";
79052 preempt_count() = count;
79053 }
79054 if (irqs_disabled()) {
79055- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
79056+ msg2 = " disabled interrupts";
79057 local_irq_enable();
79058 }
79059- WARN(msgbuf[0], "initcall %pF returned with %s\n", fn, msgbuf);
79060+ WARN(*msg1 || *msg2, "initcall %pF returned with%s%s\n", fn, msg1, msg2);
79061
79062 return ret;
79063 }
79064@@ -748,8 +815,14 @@ static void __init do_initcall_level(int level)
79065 level, level,
79066 &repair_env_string);
79067
79068- for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++)
79069+ for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++) {
79070 do_one_initcall(*fn);
79071+
79072+#ifdef LATENT_ENTROPY_PLUGIN
79073+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
79074+#endif
79075+
79076+ }
79077 }
79078
79079 static void __init do_initcalls(void)
79080@@ -783,8 +856,14 @@ static void __init do_pre_smp_initcalls(void)
79081 {
79082 initcall_t *fn;
79083
79084- for (fn = __initcall_start; fn < __initcall0_start; fn++)
79085+ for (fn = __initcall_start; fn < __initcall0_start; fn++) {
79086 do_one_initcall(*fn);
79087+
79088+#ifdef LATENT_ENTROPY_PLUGIN
79089+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
79090+#endif
79091+
79092+ }
79093 }
79094
79095 /*
79096@@ -802,8 +881,8 @@ static int run_init_process(const char *init_filename)
79097 {
79098 argv_init[0] = init_filename;
79099 return do_execve(init_filename,
79100- (const char __user *const __user *)argv_init,
79101- (const char __user *const __user *)envp_init);
79102+ (const char __user *const __force_user *)argv_init,
79103+ (const char __user *const __force_user *)envp_init);
79104 }
79105
79106 static noinline void __init kernel_init_freeable(void);
79107@@ -880,7 +959,7 @@ static noinline void __init kernel_init_freeable(void)
79108 do_basic_setup();
79109
79110 /* Open the /dev/console on the rootfs, this should never fail */
79111- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
79112+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
79113 pr_err("Warning: unable to open an initial console.\n");
79114
79115 (void) sys_dup(0);
79116@@ -893,11 +972,13 @@ static noinline void __init kernel_init_freeable(void)
79117 if (!ramdisk_execute_command)
79118 ramdisk_execute_command = "/init";
79119
79120- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
79121+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
79122 ramdisk_execute_command = NULL;
79123 prepare_namespace();
79124 }
79125
79126+ grsecurity_init();
79127+
79128 /*
79129 * Ok, we have completed the initial bootup, and
79130 * we're essentially up and running. Get rid of the
79131diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
79132index 130dfec..cc88451 100644
79133--- a/ipc/ipc_sysctl.c
79134+++ b/ipc/ipc_sysctl.c
79135@@ -30,7 +30,7 @@ static void *get_ipc(ctl_table *table)
79136 static int proc_ipc_dointvec(ctl_table *table, int write,
79137 void __user *buffer, size_t *lenp, loff_t *ppos)
79138 {
79139- struct ctl_table ipc_table;
79140+ ctl_table_no_const ipc_table;
79141
79142 memcpy(&ipc_table, table, sizeof(ipc_table));
79143 ipc_table.data = get_ipc(table);
79144@@ -41,7 +41,7 @@ static int proc_ipc_dointvec(ctl_table *table, int write,
79145 static int proc_ipc_dointvec_minmax(ctl_table *table, int write,
79146 void __user *buffer, size_t *lenp, loff_t *ppos)
79147 {
79148- struct ctl_table ipc_table;
79149+ ctl_table_no_const ipc_table;
79150
79151 memcpy(&ipc_table, table, sizeof(ipc_table));
79152 ipc_table.data = get_ipc(table);
79153@@ -65,7 +65,7 @@ static int proc_ipc_dointvec_minmax_orphans(ctl_table *table, int write,
79154 static int proc_ipc_callback_dointvec(ctl_table *table, int write,
79155 void __user *buffer, size_t *lenp, loff_t *ppos)
79156 {
79157- struct ctl_table ipc_table;
79158+ ctl_table_no_const ipc_table;
79159 size_t lenp_bef = *lenp;
79160 int rc;
79161
79162@@ -88,7 +88,7 @@ static int proc_ipc_callback_dointvec(ctl_table *table, int write,
79163 static int proc_ipc_doulongvec_minmax(ctl_table *table, int write,
79164 void __user *buffer, size_t *lenp, loff_t *ppos)
79165 {
79166- struct ctl_table ipc_table;
79167+ ctl_table_no_const ipc_table;
79168 memcpy(&ipc_table, table, sizeof(ipc_table));
79169 ipc_table.data = get_ipc(table);
79170
79171@@ -122,7 +122,7 @@ static void ipc_auto_callback(int val)
79172 static int proc_ipcauto_dointvec_minmax(ctl_table *table, int write,
79173 void __user *buffer, size_t *lenp, loff_t *ppos)
79174 {
79175- struct ctl_table ipc_table;
79176+ ctl_table_no_const ipc_table;
79177 size_t lenp_bef = *lenp;
79178 int oldval;
79179 int rc;
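ctl_table_no_const pairs with the constify plugin noted earlier: most ctl_table instances become const at build time, so handlers like the proc_ipc_* helpers above, which must repoint .data at a per-namespace value in a stack copy, declare that copy with a deliberately non-const twin type. The assumed definition, where __no_const is a plugin attribute that collapses to nothing when the plugin is absent:

    #ifdef CONSTIFY_PLUGIN
    # define __no_const __attribute__((no_const))
    #else
    # define __no_const
    #endif

    typedef struct ctl_table __no_const ctl_table_no_const;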
79180diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
79181index 383d638..943fdbb 100644
79182--- a/ipc/mq_sysctl.c
79183+++ b/ipc/mq_sysctl.c
79184@@ -25,7 +25,7 @@ static void *get_mq(ctl_table *table)
79185 static int proc_mq_dointvec_minmax(ctl_table *table, int write,
79186 void __user *buffer, size_t *lenp, loff_t *ppos)
79187 {
79188- struct ctl_table mq_table;
79189+ ctl_table_no_const mq_table;
79190 memcpy(&mq_table, table, sizeof(mq_table));
79191 mq_table.data = get_mq(table);
79192
79193diff --git a/ipc/mqueue.c b/ipc/mqueue.c
79194index e4e47f6..a85e0ad 100644
79195--- a/ipc/mqueue.c
79196+++ b/ipc/mqueue.c
79197@@ -278,6 +278,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
79198 mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
79199 info->attr.mq_msgsize);
79200
79201+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
79202 spin_lock(&mq_lock);
79203 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
79204 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
79205diff --git a/ipc/msg.c b/ipc/msg.c
79206index d0c6d96..69a893c 100644
79207--- a/ipc/msg.c
79208+++ b/ipc/msg.c
79209@@ -296,18 +296,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
79210 return security_msg_queue_associate(msq, msgflg);
79211 }
79212
79213+static struct ipc_ops msg_ops = {
79214+ .getnew = newque,
79215+ .associate = msg_security,
79216+ .more_checks = NULL
79217+};
79218+
79219 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
79220 {
79221 struct ipc_namespace *ns;
79222- struct ipc_ops msg_ops;
79223 struct ipc_params msg_params;
79224
79225 ns = current->nsproxy->ipc_ns;
79226
79227- msg_ops.getnew = newque;
79228- msg_ops.associate = msg_security;
79229- msg_ops.more_checks = NULL;
79230-
79231 msg_params.key = key;
79232 msg_params.flg = msgflg;
79233
79234diff --git a/ipc/sem.c b/ipc/sem.c
79235index 70480a3..f4e8262 100644
79236--- a/ipc/sem.c
79237+++ b/ipc/sem.c
79238@@ -460,10 +460,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
79239 return 0;
79240 }
79241
79242+static struct ipc_ops sem_ops = {
79243+ .getnew = newary,
79244+ .associate = sem_security,
79245+ .more_checks = sem_more_checks
79246+};
79247+
79248 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
79249 {
79250 struct ipc_namespace *ns;
79251- struct ipc_ops sem_ops;
79252 struct ipc_params sem_params;
79253
79254 ns = current->nsproxy->ipc_ns;
79255@@ -471,10 +476,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
79256 if (nsems < 0 || nsems > ns->sc_semmsl)
79257 return -EINVAL;
79258
79259- sem_ops.getnew = newary;
79260- sem_ops.associate = sem_security;
79261- sem_ops.more_checks = sem_more_checks;
79262-
79263 sem_params.key = key;
79264 sem_params.flg = semflg;
79265 sem_params.u.nsems = nsems;
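The msgget and semget hunks, and the shmget hunk below, all apply one refactor: the ipc_ops table stops being a stack variable filled in at every call and becomes a file-scope initializer, which the constify machinery can then place in read-only memory. A standalone before/after illustration using simplified, hypothetical types:

    #include <stdio.h>

    struct ipc_ops {
        int (*getnew)(int key);
        int (*associate)(int id, int flg);
    };

    static int newque(int key)               { return key + 1; }
    static int msg_security(int id, int flg) { (void)id; (void)flg; return 0; }

    /* after the refactor: initialized once, never written at run time,
     * hence eligible for read-only placement */
    static struct ipc_ops msg_ops = {
        .getnew    = newque,
        .associate = msg_security,
    };

    int main(void)
    {
        printf("%d\n", msg_ops.getnew(41));  /* 42 */
        return 0;
    }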
79266diff --git a/ipc/shm.c b/ipc/shm.c
79267index 7e199fa..180a1ca 100644
79268--- a/ipc/shm.c
79269+++ b/ipc/shm.c
79270@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
79271 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
79272 #endif
79273
79274+#ifdef CONFIG_GRKERNSEC
79275+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
79276+ const time_t shm_createtime, const kuid_t cuid,
79277+ const int shmid);
79278+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
79279+ const time_t shm_createtime);
79280+#endif
79281+
79282 void shm_init_ns(struct ipc_namespace *ns)
79283 {
79284 ns->shm_ctlmax = SHMMAX;
79285@@ -531,6 +539,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
79286 shp->shm_lprid = 0;
79287 shp->shm_atim = shp->shm_dtim = 0;
79288 shp->shm_ctim = get_seconds();
79289+#ifdef CONFIG_GRKERNSEC
79290+ {
79291+ struct timespec timeval;
79292+ do_posix_clock_monotonic_gettime(&timeval);
79293+
79294+ shp->shm_createtime = timeval.tv_sec;
79295+ }
79296+#endif
79297 shp->shm_segsz = size;
79298 shp->shm_nattch = 0;
79299 shp->shm_file = file;
79300@@ -582,18 +598,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
79301 return 0;
79302 }
79303
79304+static struct ipc_ops shm_ops = {
79305+ .getnew = newseg,
79306+ .associate = shm_security,
79307+ .more_checks = shm_more_checks
79308+};
79309+
79310 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
79311 {
79312 struct ipc_namespace *ns;
79313- struct ipc_ops shm_ops;
79314 struct ipc_params shm_params;
79315
79316 ns = current->nsproxy->ipc_ns;
79317
79318- shm_ops.getnew = newseg;
79319- shm_ops.associate = shm_security;
79320- shm_ops.more_checks = shm_more_checks;
79321-
79322 shm_params.key = key;
79323 shm_params.flg = shmflg;
79324 shm_params.u.size = size;
79325@@ -1014,6 +1031,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
79326 f_mode = FMODE_READ | FMODE_WRITE;
79327 }
79328 if (shmflg & SHM_EXEC) {
79329+
79330+#ifdef CONFIG_PAX_MPROTECT
79331+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
79332+ goto out;
79333+#endif
79334+
79335 prot |= PROT_EXEC;
79336 acc_mode |= S_IXUGO;
79337 }
79338@@ -1037,9 +1060,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
79339 if (err)
79340 goto out_unlock;
79341
79342+#ifdef CONFIG_GRKERNSEC
79343+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
79344+ shp->shm_perm.cuid, shmid) ||
79345+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
79346+ err = -EACCES;
79347+ goto out_unlock;
79348+ }
79349+#endif
79350+
79351 path = shp->shm_file->f_path;
79352 path_get(&path);
79353 shp->shm_nattch++;
79354+#ifdef CONFIG_GRKERNSEC
79355+ shp->shm_lapid = current->pid;
79356+#endif
79357 size = i_size_read(path.dentry->d_inode);
79358 shm_unlock(shp);
79359
79360diff --git a/kernel/acct.c b/kernel/acct.c
79361index 8d6e145..33e0b1e 100644
79362--- a/kernel/acct.c
79363+++ b/kernel/acct.c
79364@@ -556,7 +556,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
79365 */
79366 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
79367 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
79368- file->f_op->write(file, (char *)&ac,
79369+ file->f_op->write(file, (char __force_user *)&ac,
79370 sizeof(acct_t), &file->f_pos);
79371 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
79372 set_fs(fs);
79373diff --git a/kernel/audit.c b/kernel/audit.c
79374index 91e53d0..d9e3ec4 100644
79375--- a/kernel/audit.c
79376+++ b/kernel/audit.c
79377@@ -118,7 +118,7 @@ u32 audit_sig_sid = 0;
79378 3) suppressed due to audit_rate_limit
79379 4) suppressed due to audit_backlog_limit
79380 */
79381-static atomic_t audit_lost = ATOMIC_INIT(0);
79382+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
79383
79384 /* The netlink socket. */
79385 static struct sock *audit_sock;
79386@@ -240,7 +240,7 @@ void audit_log_lost(const char *message)
79387 unsigned long now;
79388 int print;
79389
79390- atomic_inc(&audit_lost);
79391+ atomic_inc_unchecked(&audit_lost);
79392
79393 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
79394
79395@@ -259,7 +259,7 @@ void audit_log_lost(const char *message)
79396 printk(KERN_WARNING
79397 "audit: audit_lost=%d audit_rate_limit=%d "
79398 "audit_backlog_limit=%d\n",
79399- atomic_read(&audit_lost),
79400+ atomic_read_unchecked(&audit_lost),
79401 audit_rate_limit,
79402 audit_backlog_limit);
79403 audit_panic(message);
79404@@ -664,7 +664,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
79405 status_set.pid = audit_pid;
79406 status_set.rate_limit = audit_rate_limit;
79407 status_set.backlog_limit = audit_backlog_limit;
79408- status_set.lost = atomic_read(&audit_lost);
79409+ status_set.lost = atomic_read_unchecked(&audit_lost);
79410 status_set.backlog = skb_queue_len(&audit_skb_queue);
79411 audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0,
79412 &status_set, sizeof(status_set));
79413diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
79414index 6bd4a90..0ee9eff 100644
79415--- a/kernel/auditfilter.c
79416+++ b/kernel/auditfilter.c
79417@@ -423,7 +423,7 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
79418 f->lsm_rule = NULL;
79419
79420 /* Support legacy tests for a valid loginuid */
79421- if ((f->type == AUDIT_LOGINUID) && (f->val == 4294967295)) {
79422+ if ((f->type == AUDIT_LOGINUID) && (f->val == 4294967295U)) {
79423 f->type = AUDIT_LOGINUID_SET;
79424 f->val = 0;
79425 }
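The added U suffix is an explicitness fix: an unsuffixed 4294967295 takes the first integer type that fits it, signed long on LP64 and unsigned long on ILP32, whereas 4294967295U pins the unsigned intent of the comparison against the u32 field. The type difference is easy to observe:

    #include <stdio.h>

    int main(void)
    {
        /* on LP64 this prints "8 4": unsuffixed, the constant became
         * a signed long; suffixed, it fits in unsigned int */
        printf("%zu %zu\n", sizeof(4294967295), sizeof(4294967295U));
        return 0;
    }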
79426diff --git a/kernel/auditsc.c b/kernel/auditsc.c
79427index 3c8a601..3a416f6 100644
79428--- a/kernel/auditsc.c
79429+++ b/kernel/auditsc.c
79430@@ -1956,7 +1956,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
79431 }
79432
79433 /* global counter which is incremented every time something logs in */
79434-static atomic_t session_id = ATOMIC_INIT(0);
79435+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
79436
79437 /**
79438 * audit_set_loginuid - set current task's audit_context loginuid
79439@@ -1980,7 +1980,7 @@ int audit_set_loginuid(kuid_t loginuid)
79440 return -EPERM;
79441 #endif /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
79442
79443- sessionid = atomic_inc_return(&session_id);
79444+ sessionid = atomic_inc_return_unchecked(&session_id);
79445 if (context && context->in_syscall) {
79446 struct audit_buffer *ab;
79447
79448diff --git a/kernel/capability.c b/kernel/capability.c
79449index f6c2ce5..982c0f9 100644
79450--- a/kernel/capability.c
79451+++ b/kernel/capability.c
79452@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
79453 * before modification is attempted and the application
79454 * fails.
79455 */
79456+ if (tocopy > ARRAY_SIZE(kdata))
79457+ return -EFAULT;
79458+
79459 if (copy_to_user(dataptr, kdata, tocopy
79460 * sizeof(struct __user_cap_data_struct))) {
79461 return -EFAULT;
79462@@ -303,10 +306,11 @@ bool has_ns_capability(struct task_struct *t,
79463 int ret;
79464
79465 rcu_read_lock();
79466- ret = security_capable(__task_cred(t), ns, cap);
79467+ ret = security_capable(__task_cred(t), ns, cap) == 0 &&
79468+ gr_task_is_capable(t, __task_cred(t), cap);
79469 rcu_read_unlock();
79470
79471- return (ret == 0);
79472+ return ret;
79473 }
79474
79475 /**
79476@@ -343,10 +347,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
79477 int ret;
79478
79479 rcu_read_lock();
79480- ret = security_capable_noaudit(__task_cred(t), ns, cap);
79481+ ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
79482 rcu_read_unlock();
79483
79484- return (ret == 0);
79485+ return ret;
79486 }
79487
79488 /**
79489@@ -384,7 +388,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
79490 BUG();
79491 }
79492
79493- if (security_capable(current_cred(), ns, cap) == 0) {
79494+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
79495 current->flags |= PF_SUPERPRIV;
79496 return true;
79497 }
79498@@ -392,6 +396,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
79499 }
79500 EXPORT_SYMBOL(ns_capable);
79501
79502+bool ns_capable_nolog(struct user_namespace *ns, int cap)
79503+{
79504+ if (unlikely(!cap_valid(cap))) {
79505+ printk(KERN_CRIT "capable_nolog() called with invalid cap=%u\n", cap);
79506+ BUG();
79507+ }
79508+
79509+ if (security_capable_noaudit(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
79510+ current->flags |= PF_SUPERPRIV;
79511+ return true;
79512+ }
79513+ return false;
79514+}
79515+EXPORT_SYMBOL(ns_capable_nolog);
79516+
79517 /**
79518 * file_ns_capable - Determine if the file's opener had a capability in effect
79519 * @file: The file we want to check
79520@@ -432,6 +451,12 @@ bool capable(int cap)
79521 }
79522 EXPORT_SYMBOL(capable);
79523
79524+bool capable_nolog(int cap)
79525+{
79526+ return ns_capable_nolog(&init_user_ns, cap);
79527+}
79528+EXPORT_SYMBOL(capable_nolog);
79529+
79530 /**
79531 * nsown_capable - Check superior capability to one's own user_ns
79532 * @cap: The capability in question
79533@@ -464,3 +489,10 @@ bool inode_capable(const struct inode *inode, int cap)
79534
79535 return ns_capable(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
79536 }
79537+
79538+bool inode_capable_nolog(const struct inode *inode, int cap)
79539+{
79540+ struct user_namespace *ns = current_user_ns();
79541+
79542+ return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
79543+}
79544diff --git a/kernel/cgroup.c b/kernel/cgroup.c
79545index 2e9b387..61817b1 100644
79546--- a/kernel/cgroup.c
79547+++ b/kernel/cgroup.c
79548@@ -5398,7 +5398,7 @@ static int cgroup_css_links_read(struct cgroup *cont,
79549 struct css_set *cg = link->cg;
79550 struct task_struct *task;
79551 int count = 0;
79552- seq_printf(seq, "css_set %p\n", cg);
79553+ seq_printf(seq, "css_set %pK\n", cg);
79554 list_for_each_entry(task, &cg->tasks, cg_list) {
79555 if (count++ > MAX_TASKS_SHOWN_PER_CSS) {
79556 seq_puts(seq, " ...\n");
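Switching %p to %pK makes the css_set address respect kptr_restrict: unprivileged readers of this debug file see a zeroed pointer instead of a kernel address that could help defeat KASLR. A userspace analogue of the policy (simplified; the real gate involves CAP_SYSLOG and the kptr_restrict sysctl):

    #include <stdio.h>

    static void show_ptr(const void *p, int privileged)
    {
        if (privileged)
            printf("css_set %p\n", p);
        else
            printf("css_set %p\n", (void *)0);  /* hide the address */
    }

    int main(void)
    {
        int obj;
        show_ptr(&obj, 0);   /* zeroed for the unprivileged reader */
        show_ptr(&obj, 1);   /* real address for the privileged one */
        return 0;
    }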
79557diff --git a/kernel/compat.c b/kernel/compat.c
79558index 0a09e48..f44f3f0 100644
79559--- a/kernel/compat.c
79560+++ b/kernel/compat.c
79561@@ -13,6 +13,7 @@
79562
79563 #include <linux/linkage.h>
79564 #include <linux/compat.h>
79565+#include <linux/module.h>
79566 #include <linux/errno.h>
79567 #include <linux/time.h>
79568 #include <linux/signal.h>
79569@@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
79570 mm_segment_t oldfs;
79571 long ret;
79572
79573- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
79574+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
79575 oldfs = get_fs();
79576 set_fs(KERNEL_DS);
79577 ret = hrtimer_nanosleep_restart(restart);
79578@@ -252,7 +253,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
79579 oldfs = get_fs();
79580 set_fs(KERNEL_DS);
79581 ret = hrtimer_nanosleep(&tu,
79582- rmtp ? (struct timespec __user *)&rmt : NULL,
79583+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
79584 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
79585 set_fs(oldfs);
79586
79587@@ -361,7 +362,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
79588 mm_segment_t old_fs = get_fs();
79589
79590 set_fs(KERNEL_DS);
79591- ret = sys_sigpending((old_sigset_t __user *) &s);
79592+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
79593 set_fs(old_fs);
79594 if (ret == 0)
79595 ret = put_user(s, set);
79596@@ -451,7 +452,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
79597 mm_segment_t old_fs = get_fs();
79598
79599 set_fs(KERNEL_DS);
79600- ret = sys_old_getrlimit(resource, &r);
79601+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
79602 set_fs(old_fs);
79603
79604 if (!ret) {
79605@@ -533,8 +534,8 @@ COMPAT_SYSCALL_DEFINE4(wait4,
79606 set_fs (KERNEL_DS);
79607 ret = sys_wait4(pid,
79608 (stat_addr ?
79609- (unsigned int __user *) &status : NULL),
79610- options, (struct rusage __user *) &r);
79611+ (unsigned int __force_user *) &status : NULL),
79612+ options, (struct rusage __force_user *) &r);
79613 set_fs (old_fs);
79614
79615 if (ret > 0) {
79616@@ -560,8 +561,8 @@ COMPAT_SYSCALL_DEFINE5(waitid,
79617 memset(&info, 0, sizeof(info));
79618
79619 set_fs(KERNEL_DS);
79620- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
79621- uru ? (struct rusage __user *)&ru : NULL);
79622+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
79623+ uru ? (struct rusage __force_user *)&ru : NULL);
79624 set_fs(old_fs);
79625
79626 if ((ret < 0) || (info.si_signo == 0))
79627@@ -695,8 +696,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
79628 oldfs = get_fs();
79629 set_fs(KERNEL_DS);
79630 err = sys_timer_settime(timer_id, flags,
79631- (struct itimerspec __user *) &newts,
79632- (struct itimerspec __user *) &oldts);
79633+ (struct itimerspec __force_user *) &newts,
79634+ (struct itimerspec __force_user *) &oldts);
79635 set_fs(oldfs);
79636 if (!err && old && put_compat_itimerspec(old, &oldts))
79637 return -EFAULT;
79638@@ -713,7 +714,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
79639 oldfs = get_fs();
79640 set_fs(KERNEL_DS);
79641 err = sys_timer_gettime(timer_id,
79642- (struct itimerspec __user *) &ts);
79643+ (struct itimerspec __force_user *) &ts);
79644 set_fs(oldfs);
79645 if (!err && put_compat_itimerspec(setting, &ts))
79646 return -EFAULT;
79647@@ -732,7 +733,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
79648 oldfs = get_fs();
79649 set_fs(KERNEL_DS);
79650 err = sys_clock_settime(which_clock,
79651- (struct timespec __user *) &ts);
79652+ (struct timespec __force_user *) &ts);
79653 set_fs(oldfs);
79654 return err;
79655 }
79656@@ -747,7 +748,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
79657 oldfs = get_fs();
79658 set_fs(KERNEL_DS);
79659 err = sys_clock_gettime(which_clock,
79660- (struct timespec __user *) &ts);
79661+ (struct timespec __force_user *) &ts);
79662 set_fs(oldfs);
79663 if (!err && put_compat_timespec(&ts, tp))
79664 return -EFAULT;
79665@@ -767,7 +768,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
79666
79667 oldfs = get_fs();
79668 set_fs(KERNEL_DS);
79669- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
79670+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
79671 set_fs(oldfs);
79672
79673 err = compat_put_timex(utp, &txc);
79674@@ -787,7 +788,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
79675 oldfs = get_fs();
79676 set_fs(KERNEL_DS);
79677 err = sys_clock_getres(which_clock,
79678- (struct timespec __user *) &ts);
79679+ (struct timespec __force_user *) &ts);
79680 set_fs(oldfs);
79681 if (!err && tp && put_compat_timespec(&ts, tp))
79682 return -EFAULT;
79683@@ -799,9 +800,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
79684 long err;
79685 mm_segment_t oldfs;
79686 struct timespec tu;
79687- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
79688+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
79689
79690- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
79691+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
79692 oldfs = get_fs();
79693 set_fs(KERNEL_DS);
79694 err = clock_nanosleep_restart(restart);
79695@@ -833,8 +834,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
79696 oldfs = get_fs();
79697 set_fs(KERNEL_DS);
79698 err = sys_clock_nanosleep(which_clock, flags,
79699- (struct timespec __user *) &in,
79700- (struct timespec __user *) &out);
79701+ (struct timespec __force_user *) &in,
79702+ (struct timespec __force_user *) &out);
79703 set_fs(oldfs);
79704
79705 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
79706diff --git a/kernel/configs.c b/kernel/configs.c
79707index c18b1f1..b9a0132 100644
79708--- a/kernel/configs.c
79709+++ b/kernel/configs.c
79710@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
79711 struct proc_dir_entry *entry;
79712
79713 /* create the current config file */
79714+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
79715+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
79716+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
79717+ &ikconfig_file_ops);
79718+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
79719+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
79720+ &ikconfig_file_ops);
79721+#endif
79722+#else
79723 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
79724 &ikconfig_file_ops);
79725+#endif
79726+
79727 if (!entry)
79728 return -ENOMEM;
79729
79730diff --git a/kernel/cred.c b/kernel/cred.c
79731index e0573a4..3874e41 100644
79732--- a/kernel/cred.c
79733+++ b/kernel/cred.c
79734@@ -164,6 +164,16 @@ void exit_creds(struct task_struct *tsk)
79735 validate_creds(cred);
79736 alter_cred_subscribers(cred, -1);
79737 put_cred(cred);
79738+
79739+#ifdef CONFIG_GRKERNSEC_SETXID
79740+ cred = (struct cred *) tsk->delayed_cred;
79741+ if (cred != NULL) {
79742+ tsk->delayed_cred = NULL;
79743+ validate_creds(cred);
79744+ alter_cred_subscribers(cred, -1);
79745+ put_cred(cred);
79746+ }
79747+#endif
79748 }
79749
79750 /**
79751@@ -411,7 +421,7 @@ static bool cred_cap_issubset(const struct cred *set, const struct cred *subset)
79752 * Always returns 0 thus allowing this function to be tail-called at the end
79753 * of, say, sys_setgid().
79754 */
79755-int commit_creds(struct cred *new)
79756+static int __commit_creds(struct cred *new)
79757 {
79758 struct task_struct *task = current;
79759 const struct cred *old = task->real_cred;
79760@@ -430,6 +440,8 @@ int commit_creds(struct cred *new)
79761
79762 get_cred(new); /* we will require a ref for the subj creds too */
79763
79764+ gr_set_role_label(task, new->uid, new->gid);
79765+
79766 /* dumpability changes */
79767 if (!uid_eq(old->euid, new->euid) ||
79768 !gid_eq(old->egid, new->egid) ||
79769@@ -479,6 +491,102 @@ int commit_creds(struct cred *new)
79770 put_cred(old);
79771 return 0;
79772 }
79773+#ifdef CONFIG_GRKERNSEC_SETXID
79774+extern int set_user(struct cred *new);
79775+
79776+void gr_delayed_cred_worker(void)
79777+{
79778+ const struct cred *new = current->delayed_cred;
79779+ struct cred *ncred;
79780+
79781+ current->delayed_cred = NULL;
79782+
79783+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) && new != NULL) {
79784+ // drop the reference taken via get_cred() when this cred was queued
79785+ put_cred(new);
79786+ return;
79787+ } else if (new == NULL)
79788+ return;
79789+
79790+ ncred = prepare_creds();
79791+ if (!ncred)
79792+ goto die;
79793+ // uids
79794+ ncred->uid = new->uid;
79795+ ncred->euid = new->euid;
79796+ ncred->suid = new->suid;
79797+ ncred->fsuid = new->fsuid;
79798+ // gids
79799+ ncred->gid = new->gid;
79800+ ncred->egid = new->egid;
79801+ ncred->sgid = new->sgid;
79802+ ncred->fsgid = new->fsgid;
79803+ // groups
79804+ if (set_groups(ncred, new->group_info) < 0) {
79805+ abort_creds(ncred);
79806+ goto die;
79807+ }
79808+ // caps
79809+ ncred->securebits = new->securebits;
79810+ ncred->cap_inheritable = new->cap_inheritable;
79811+ ncred->cap_permitted = new->cap_permitted;
79812+ ncred->cap_effective = new->cap_effective;
79813+ ncred->cap_bset = new->cap_bset;
79814+
79815+ if (set_user(ncred)) {
79816+ abort_creds(ncred);
79817+ goto die;
79818+ }
79819+
79820+ // drop the reference taken via get_cred() when this cred was queued
79821+ put_cred(new);
79822+
79823+ __commit_creds(ncred);
79824+ return;
79825+die:
79826+ // drop the reference taken via get_cred() when this cred was queued
79827+ put_cred(new);
79828+ do_group_exit(SIGKILL);
79829+}
79830+#endif
79831+
79832+int commit_creds(struct cred *new)
79833+{
79834+#ifdef CONFIG_GRKERNSEC_SETXID
79835+ int ret;
79836+ int schedule_it = 0;
79837+ struct task_struct *t;
79838+
79839+ /* we won't be called with tasklist_lock held for writing
79840+ and with interrupts disabled, because in that case the
79841+ cred struct is init_cred
79842+ */
79843+ if (grsec_enable_setxid && !current_is_single_threaded() &&
79844+ uid_eq(current_uid(), GLOBAL_ROOT_UID) &&
79845+ !uid_eq(new->uid, GLOBAL_ROOT_UID)) {
79846+ schedule_it = 1;
79847+ }
79848+ ret = __commit_creds(new);
79849+ if (schedule_it) {
79850+ rcu_read_lock();
79851+ read_lock(&tasklist_lock);
79852+ for (t = next_thread(current); t != current;
79853+ t = next_thread(t)) {
79854+ if (t->delayed_cred == NULL) {
79855+ t->delayed_cred = get_cred(new);
79856+ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
79857+ set_tsk_need_resched(t);
79858+ }
79859+ }
79860+ read_unlock(&tasklist_lock);
79861+ rcu_read_unlock();
79862+ }
79863+ return ret;
79864+#else
79865+ return __commit_creds(new);
79866+#endif
79867+}
79868+
79869 EXPORT_SYMBOL(commit_creds);
79870
79871 /**
79872diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
79873index 0506d44..2c20034 100644
79874--- a/kernel/debug/debug_core.c
79875+++ b/kernel/debug/debug_core.c
79876@@ -123,7 +123,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
79877 */
79878 static atomic_t masters_in_kgdb;
79879 static atomic_t slaves_in_kgdb;
79880-static atomic_t kgdb_break_tasklet_var;
79881+static atomic_unchecked_t kgdb_break_tasklet_var;
79882 atomic_t kgdb_setting_breakpoint;
79883
79884 struct task_struct *kgdb_usethread;
79885@@ -133,7 +133,7 @@ int kgdb_single_step;
79886 static pid_t kgdb_sstep_pid;
79887
79888 /* to keep track of the CPU which is doing the single stepping*/
79889-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
79890+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
79891
79892 /*
79893 * If you are debugging a problem where roundup (the collection of
79894@@ -541,7 +541,7 @@ return_normal:
79895 * kernel will only try for the value of sstep_tries before
79896 * giving up and continuing on.
79897 */
79898- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
79899+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
79900 (kgdb_info[cpu].task &&
79901 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
79902 atomic_set(&kgdb_active, -1);
79903@@ -635,8 +635,8 @@ cpu_master_loop:
79904 }
79905
79906 kgdb_restore:
79907- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
79908- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
79909+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
79910+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
79911 if (kgdb_info[sstep_cpu].task)
79912 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
79913 else
79914@@ -888,18 +888,18 @@ static void kgdb_unregister_callbacks(void)
79915 static void kgdb_tasklet_bpt(unsigned long ing)
79916 {
79917 kgdb_breakpoint();
79918- atomic_set(&kgdb_break_tasklet_var, 0);
79919+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
79920 }
79921
79922 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
79923
79924 void kgdb_schedule_breakpoint(void)
79925 {
79926- if (atomic_read(&kgdb_break_tasklet_var) ||
79927+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
79928 atomic_read(&kgdb_active) != -1 ||
79929 atomic_read(&kgdb_setting_breakpoint))
79930 return;
79931- atomic_inc(&kgdb_break_tasklet_var);
79932+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
79933 tasklet_schedule(&kgdb_tasklet_breakpoint);
79934 }
79935 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
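
The atomic_t to atomic_unchecked_t conversions above come from the PaX REFCOUNT feature: ordinary atomic counters trap on overflow to stop refcount-overflow exploits, so counters that may legitimately wrap or that are not reference counts (like kgdb_break_tasklet_var here) are switched to the unchecked variants. A rough sketch of the checked-versus-unchecked distinction, modeled with plain ints and a GCC builtin rather than the real atomic instrumentation:

/* Rough model of checked vs unchecked counters; the real feature
 * instruments the atomic ops themselves, this only shows the policy. */
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

static int checked_inc(int *v)            /* models atomic_inc() */
{
	int next;

	if (__builtin_add_overflow(*v, 1, &next)) {   /* GCC/Clang builtin */
		fprintf(stderr, "refcount overflow detected\n");
		abort();                  /* the kernel kills the offender */
	}
	return *v = next;
}

static int unchecked_inc(int *v)          /* models atomic_inc_unchecked() */
{
	/* wrap via unsigned arithmetic so the demo itself avoids UB */
	return *v = (int)((unsigned)*v + 1u);
}

int main(void)
{
	int a = INT_MAX - 1, b = INT_MAX - 1;

	unchecked_inc(&b);
	unchecked_inc(&b);                /* wraps negative, tolerated */
	printf("unchecked wrapped to %d\n", b);

	checked_inc(&a);
	checked_inc(&a);                  /* overflow: aborts here */
	return 0;
}
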
79936diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
79937index 00eb8f7..d7e3244 100644
79938--- a/kernel/debug/kdb/kdb_main.c
79939+++ b/kernel/debug/kdb/kdb_main.c
79940@@ -1974,7 +1974,7 @@ static int kdb_lsmod(int argc, const char **argv)
79941 continue;
79942
79943 kdb_printf("%-20s%8u 0x%p ", mod->name,
79944- mod->core_size, (void *)mod);
79945+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
79946 #ifdef CONFIG_MODULE_UNLOAD
79947 kdb_printf("%4ld ", module_refcount(mod));
79948 #endif
79949@@ -1984,7 +1984,7 @@ static int kdb_lsmod(int argc, const char **argv)
79950 kdb_printf(" (Loading)");
79951 else
79952 kdb_printf(" (Live)");
79953- kdb_printf(" 0x%p", mod->module_core);
79954+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
79955
79956 #ifdef CONFIG_MODULE_UNLOAD
79957 {
79958diff --git a/kernel/events/core.c b/kernel/events/core.c
79959index e76e495..cbfe63a 100644
79960--- a/kernel/events/core.c
79961+++ b/kernel/events/core.c
79962@@ -156,8 +156,15 @@ static struct srcu_struct pmus_srcu;
79963 * 0 - disallow raw tracepoint access for unpriv
79964 * 1 - disallow cpu events for unpriv
79965 * 2 - disallow kernel profiling for unpriv
79966+ * 3 - disallow all unpriv perf event use
79967 */
79968-int sysctl_perf_event_paranoid __read_mostly = 1;
79969+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
79970+int sysctl_perf_event_legitimately_concerned __read_mostly = 3;
79971+#elif defined(CONFIG_GRKERNSEC_HIDESYM)
79972+int sysctl_perf_event_legitimately_concerned __read_mostly = 2;
79973+#else
79974+int sysctl_perf_event_legitimately_concerned __read_mostly = 1;
79975+#endif
79976
79977 /* Minimum for 512 kiB + 1 user control page */
79978 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
79979@@ -184,7 +191,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
79980 return 0;
79981 }
79982
79983-static atomic64_t perf_event_id;
79984+static atomic64_unchecked_t perf_event_id;
79985
79986 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
79987 enum event_type_t event_type);
79988@@ -2747,7 +2754,7 @@ static void __perf_event_read(void *info)
79989
79990 static inline u64 perf_event_count(struct perf_event *event)
79991 {
79992- return local64_read(&event->count) + atomic64_read(&event->child_count);
79993+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
79994 }
79995
79996 static u64 perf_event_read(struct perf_event *event)
79997@@ -3093,9 +3100,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
79998 mutex_lock(&event->child_mutex);
79999 total += perf_event_read(event);
80000 *enabled += event->total_time_enabled +
80001- atomic64_read(&event->child_total_time_enabled);
80002+ atomic64_read_unchecked(&event->child_total_time_enabled);
80003 *running += event->total_time_running +
80004- atomic64_read(&event->child_total_time_running);
80005+ atomic64_read_unchecked(&event->child_total_time_running);
80006
80007 list_for_each_entry(child, &event->child_list, child_list) {
80008 total += perf_event_read(child);
80009@@ -3481,10 +3488,10 @@ void perf_event_update_userpage(struct perf_event *event)
80010 userpg->offset -= local64_read(&event->hw.prev_count);
80011
80012 userpg->time_enabled = enabled +
80013- atomic64_read(&event->child_total_time_enabled);
80014+ atomic64_read_unchecked(&event->child_total_time_enabled);
80015
80016 userpg->time_running = running +
80017- atomic64_read(&event->child_total_time_running);
80018+ atomic64_read_unchecked(&event->child_total_time_running);
80019
80020 arch_perf_update_userpage(userpg, now);
80021
80022@@ -4034,7 +4041,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
80023
80024 /* Data. */
80025 sp = perf_user_stack_pointer(regs);
80026- rem = __output_copy_user(handle, (void *) sp, dump_size);
80027+ rem = __output_copy_user(handle, (void __user *) sp, dump_size);
80028 dyn_size = dump_size - rem;
80029
80030 perf_output_skip(handle, rem);
80031@@ -4122,11 +4129,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
80032 values[n++] = perf_event_count(event);
80033 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
80034 values[n++] = enabled +
80035- atomic64_read(&event->child_total_time_enabled);
80036+ atomic64_read_unchecked(&event->child_total_time_enabled);
80037 }
80038 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
80039 values[n++] = running +
80040- atomic64_read(&event->child_total_time_running);
80041+ atomic64_read_unchecked(&event->child_total_time_running);
80042 }
80043 if (read_format & PERF_FORMAT_ID)
80044 values[n++] = primary_event_id(event);
80045@@ -4835,12 +4842,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
80046 * need to add enough zero bytes after the string to handle
80047 * the 64bit alignment we do later.
80048 */
80049- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
80050+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
80051 if (!buf) {
80052 name = strncpy(tmp, "//enomem", sizeof(tmp));
80053 goto got_name;
80054 }
80055- name = d_path(&file->f_path, buf, PATH_MAX);
80056+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
80057 if (IS_ERR(name)) {
80058 name = strncpy(tmp, "//toolong", sizeof(tmp));
80059 goto got_name;
80060@@ -6262,7 +6269,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
80061 event->parent = parent_event;
80062
80063 event->ns = get_pid_ns(task_active_pid_ns(current));
80064- event->id = atomic64_inc_return(&perf_event_id);
80065+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
80066
80067 event->state = PERF_EVENT_STATE_INACTIVE;
80068
80069@@ -6572,6 +6579,11 @@ SYSCALL_DEFINE5(perf_event_open,
80070 if (flags & ~PERF_FLAG_ALL)
80071 return -EINVAL;
80072
80073+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
80074+ if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
80075+ return -EACCES;
80076+#endif
80077+
80078 err = perf_copy_attr(attr_uptr, &attr);
80079 if (err)
80080 return err;
80081@@ -6904,10 +6916,10 @@ static void sync_child_event(struct perf_event *child_event,
80082 /*
80083 * Add back the child's count to the parent's count:
80084 */
80085- atomic64_add(child_val, &parent_event->child_count);
80086- atomic64_add(child_event->total_time_enabled,
80087+ atomic64_add_unchecked(child_val, &parent_event->child_count);
80088+ atomic64_add_unchecked(child_event->total_time_enabled,
80089 &parent_event->child_total_time_enabled);
80090- atomic64_add(child_event->total_time_running,
80091+ atomic64_add_unchecked(child_event->total_time_running,
80092 &parent_event->child_total_time_running);
80093
80094 /*
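
The perf hunks add paranoia level 3, which denies perf_event_open() to anything without CAP_SYS_ADMIN, one step beyond the vanilla maximum of 2; the C variable is renamed but the sysctl path itself is presumably left unchanged. A quick Linux-only probe of the level (the /proc path below is the stock sysctl name, assumed to still apply here):

/* Linux-only probe; the /proc path is the stock sysctl name, assumed
 * unchanged by the variable rename in the hunk above. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/perf_event_paranoid", "r");
	int level;

	if (!f || fscanf(f, "%d", &level) != 1) {
		perror("perf_event_paranoid");
		return 1;
	}
	fclose(f);
	printf("perf_event_paranoid = %d%s\n", level,
	       level >= 3 ? " (unprivileged perf_event_open -> EACCES)" : "");
	return 0;
}
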
80095diff --git a/kernel/events/internal.h b/kernel/events/internal.h
80096index ca65997..60df03d 100644
80097--- a/kernel/events/internal.h
80098+++ b/kernel/events/internal.h
80099@@ -81,10 +81,10 @@ static inline unsigned long perf_data_size(struct ring_buffer *rb)
80100 return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
80101 }
80102
80103-#define DEFINE_OUTPUT_COPY(func_name, memcpy_func) \
80104-static inline unsigned int \
80105+#define DEFINE_OUTPUT_COPY(func_name, memcpy_func, user) \
80106+static inline unsigned long \
80107 func_name(struct perf_output_handle *handle, \
80108- const void *buf, unsigned int len) \
80109+ const void user *buf, unsigned long len) \
80110 { \
80111 unsigned long size, written; \
80112 \
80113@@ -116,17 +116,17 @@ static inline int memcpy_common(void *dst, const void *src, size_t n)
80114 return n;
80115 }
80116
80117-DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
80118+DEFINE_OUTPUT_COPY(__output_copy, memcpy_common, )
80119
80120 #define MEMCPY_SKIP(dst, src, n) (n)
80121
80122-DEFINE_OUTPUT_COPY(__output_skip, MEMCPY_SKIP)
80123+DEFINE_OUTPUT_COPY(__output_skip, MEMCPY_SKIP, )
80124
80125 #ifndef arch_perf_out_copy_user
80126 #define arch_perf_out_copy_user __copy_from_user_inatomic
80127 #endif
80128
80129-DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
80130+DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user, __user)
80131
80132 /* Callchain handling */
80133 extern struct perf_callchain_entry *
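
The DEFINE_OUTPUT_COPY change threads an address-space qualifier through the macro, so __output_copy_user is generated with __user-tagged source pointers while the kernel-internal variants are not, letting sparse catch user/kernel pointer mixups. A sketch of how a qualifier can be a macro parameter; the __user definition mirrors the kernel's sparse annotation, everything else is illustrative:

/* Qualifier-as-macro-parameter sketch; bodies are stubs. */
#include <stddef.h>
#include <stdint.h>

#ifdef __CHECKER__                          /* defined when sparse runs */
# define __user __attribute__((noderef, address_space(1)))
#else
# define __user
#endif

#define DEFINE_COPY(name, qual)                                        \
static size_t name(void *dst, const void qual *src, size_t n)          \
{                                                                       \
	(void)dst; (void)src;  /* real code: memcpy vs copy_from_user */ \
	return n;                                                       \
}

DEFINE_COPY(copy_kernel, )        /* plain kernel pointers */
DEFINE_COPY(copy_user, __user)    /* sparse-tagged user pointers */

int main(void)
{
	char dst[8];
	const char *k = "kernel";
	const char __user *u = (const char __user *)(uintptr_t)0x1000;

	copy_kernel(dst, k, 6);
	copy_user(dst, u, 6);     /* swapping k and u would make sparse complain */
	return 0;
}
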
80134diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
80135index f356974..cb8c570 100644
80136--- a/kernel/events/uprobes.c
80137+++ b/kernel/events/uprobes.c
80138@@ -1556,7 +1556,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
80139 {
80140 struct page *page;
80141 uprobe_opcode_t opcode;
80142- int result;
80143+ long result;
80144
80145 pagefault_disable();
80146 result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
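
The uprobes fix widens result from int to long because __copy_from_user_inatomic() reports uncopied bytes in a long-sized value; storing it in an int silently truncates on LP64 targets. A tiny demonstration of the bug class:

/* A long long result squeezed into an int: the high bits vanish. */
#include <stdio.h>

static long long bytes_not_copied(void)
{
	return 0x100000000LL;             /* low 32 bits are all zero */
}

int main(void)
{
	int       narrow = bytes_not_copied();  /* truncates to 0 */
	long long wide   = bytes_not_copied();

	printf("int sees %d, long long sees %lld\n", narrow, wide);
	/* an "if (narrow) -> fault" check would wrongly report success */
	return 0;
}
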
80147diff --git a/kernel/exit.c b/kernel/exit.c
80148index 7bb73f9..d7978ed 100644
80149--- a/kernel/exit.c
80150+++ b/kernel/exit.c
80151@@ -172,6 +172,10 @@ void release_task(struct task_struct * p)
80152 struct task_struct *leader;
80153 int zap_leader;
80154 repeat:
80155+#ifdef CONFIG_NET
80156+ gr_del_task_from_ip_table(p);
80157+#endif
80158+
80159 /* don't need to get the RCU readlock here - the process is dead and
80160 * can't be modifying its own credentials. But shut RCU-lockdep up */
80161 rcu_read_lock();
80162@@ -340,7 +344,7 @@ int allow_signal(int sig)
80163 * know it'll be handled, so that they don't get converted to
80164 * SIGKILL or just silently dropped.
80165 */
80166- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
80167+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
80168 recalc_sigpending();
80169 spin_unlock_irq(&current->sighand->siglock);
80170 return 0;
80171@@ -709,6 +713,8 @@ void do_exit(long code)
80172 struct task_struct *tsk = current;
80173 int group_dead;
80174
80175+ set_fs(USER_DS);
80176+
80177 profile_task_exit(tsk);
80178
80179 WARN_ON(blk_needs_flush_plug(tsk));
80180@@ -725,7 +731,6 @@ void do_exit(long code)
80181 * mm_release()->clear_child_tid() from writing to a user-controlled
80182 * kernel address.
80183 */
80184- set_fs(USER_DS);
80185
80186 ptrace_event(PTRACE_EVENT_EXIT, code);
80187
80188@@ -784,6 +789,9 @@ void do_exit(long code)
80189 tsk->exit_code = code;
80190 taskstats_exit(tsk, group_dead);
80191
80192+ gr_acl_handle_psacct(tsk, code);
80193+ gr_acl_handle_exit();
80194+
80195 exit_mm(tsk);
80196
80197 if (group_dead)
80198@@ -905,7 +913,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
80199 * Take down every thread in the group. This is called by fatal signals
80200 * as well as by sys_exit_group (below).
80201 */
80202-void
80203+__noreturn void
80204 do_group_exit(int exit_code)
80205 {
80206 struct signal_struct *sig = current->signal;
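
Marking do_group_exit() __noreturn, as the exit.c hunk does, tells the compiler the function never returns, which improves dead-code elimination and lets callers omit unreachable return paths. The portable C11 spelling of the same idea:

/* C11 equivalent of the __noreturn annotation added above. */
#include <stdio.h>
#include <stdlib.h>
#include <stdnoreturn.h>

static noreturn void die(const char *msg)
{
	fprintf(stderr, "fatal: %s\n", msg);
	exit(1);                  /* must never return; compiler checks */
}

static int parse(int v)
{
	if (v < 0)
		die("negative input");
	return v * 2;             /* no unreachable-return path after die() */
}

int main(void)
{
	printf("%d\n", parse(21));
	return 0;
}
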
80207diff --git a/kernel/fork.c b/kernel/fork.c
80208index ffbc090..08ceeee 100644
80209--- a/kernel/fork.c
80210+++ b/kernel/fork.c
80211@@ -319,7 +319,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
80212 *stackend = STACK_END_MAGIC; /* for overflow detection */
80213
80214 #ifdef CONFIG_CC_STACKPROTECTOR
80215- tsk->stack_canary = get_random_int();
80216+ tsk->stack_canary = pax_get_random_long();
80217 #endif
80218
80219 /*
80220@@ -345,13 +345,81 @@ free_tsk:
80221 }
80222
80223 #ifdef CONFIG_MMU
80224+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
80225+{
80226+ struct vm_area_struct *tmp;
80227+ unsigned long charge;
80228+ struct mempolicy *pol;
80229+ struct file *file;
80230+
80231+ charge = 0;
80232+ if (mpnt->vm_flags & VM_ACCOUNT) {
80233+ unsigned long len = vma_pages(mpnt);
80234+
80235+ if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
80236+ goto fail_nomem;
80237+ charge = len;
80238+ }
80239+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
80240+ if (!tmp)
80241+ goto fail_nomem;
80242+ *tmp = *mpnt;
80243+ tmp->vm_mm = mm;
80244+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
80245+ pol = mpol_dup(vma_policy(mpnt));
80246+ if (IS_ERR(pol))
80247+ goto fail_nomem_policy;
80248+ vma_set_policy(tmp, pol);
80249+ if (anon_vma_fork(tmp, mpnt))
80250+ goto fail_nomem_anon_vma_fork;
80251+ tmp->vm_flags &= ~VM_LOCKED;
80252+ tmp->vm_next = tmp->vm_prev = NULL;
80253+ tmp->vm_mirror = NULL;
80254+ file = tmp->vm_file;
80255+ if (file) {
80256+ struct inode *inode = file_inode(file);
80257+ struct address_space *mapping = file->f_mapping;
80258+
80259+ get_file(file);
80260+ if (tmp->vm_flags & VM_DENYWRITE)
80261+ atomic_dec(&inode->i_writecount);
80262+ mutex_lock(&mapping->i_mmap_mutex);
80263+ if (tmp->vm_flags & VM_SHARED)
80264+ mapping->i_mmap_writable++;
80265+ flush_dcache_mmap_lock(mapping);
80266+ /* insert tmp into the share list, just after mpnt */
80267+ if (unlikely(tmp->vm_flags & VM_NONLINEAR))
80268+ vma_nonlinear_insert(tmp, &mapping->i_mmap_nonlinear);
80269+ else
80270+ vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap);
80271+ flush_dcache_mmap_unlock(mapping);
80272+ mutex_unlock(&mapping->i_mmap_mutex);
80273+ }
80274+
80275+ /*
80276+ * Clear hugetlb-related page reserves for children. This only
80277+ * affects MAP_PRIVATE mappings. Faults generated by the child
80278+ * are not guaranteed to succeed, even if read-only
80279+ */
80280+ if (is_vm_hugetlb_page(tmp))
80281+ reset_vma_resv_huge_pages(tmp);
80282+
80283+ return tmp;
80284+
80285+fail_nomem_anon_vma_fork:
80286+ mpol_put(pol);
80287+fail_nomem_policy:
80288+ kmem_cache_free(vm_area_cachep, tmp);
80289+fail_nomem:
80290+ vm_unacct_memory(charge);
80291+ return NULL;
80292+}
80293+
80294 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
80295 {
80296 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
80297 struct rb_node **rb_link, *rb_parent;
80298 int retval;
80299- unsigned long charge;
80300- struct mempolicy *pol;
80301
80302 uprobe_start_dup_mmap();
80303 down_write(&oldmm->mmap_sem);
80304@@ -365,8 +433,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
80305 mm->locked_vm = 0;
80306 mm->mmap = NULL;
80307 mm->mmap_cache = NULL;
80308- mm->free_area_cache = oldmm->mmap_base;
80309- mm->cached_hole_size = ~0UL;
80310+ mm->free_area_cache = oldmm->free_area_cache;
80311+ mm->cached_hole_size = oldmm->cached_hole_size;
80312 mm->map_count = 0;
80313 cpumask_clear(mm_cpumask(mm));
80314 mm->mm_rb = RB_ROOT;
80315@@ -382,57 +450,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
80316
80317 prev = NULL;
80318 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
80319- struct file *file;
80320-
80321 if (mpnt->vm_flags & VM_DONTCOPY) {
80322 vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
80323 -vma_pages(mpnt));
80324 continue;
80325 }
80326- charge = 0;
80327- if (mpnt->vm_flags & VM_ACCOUNT) {
80328- unsigned long len = vma_pages(mpnt);
80329-
80330- if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
80331- goto fail_nomem;
80332- charge = len;
80333- }
80334- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
80335- if (!tmp)
80336- goto fail_nomem;
80337- *tmp = *mpnt;
80338- INIT_LIST_HEAD(&tmp->anon_vma_chain);
80339- pol = mpol_dup(vma_policy(mpnt));
80340- retval = PTR_ERR(pol);
80341- if (IS_ERR(pol))
80342- goto fail_nomem_policy;
80343- vma_set_policy(tmp, pol);
80344- tmp->vm_mm = mm;
80345- if (anon_vma_fork(tmp, mpnt))
80346- goto fail_nomem_anon_vma_fork;
80347- tmp->vm_flags &= ~VM_LOCKED;
80348- tmp->vm_next = tmp->vm_prev = NULL;
80349- file = tmp->vm_file;
80350- if (file) {
80351- struct inode *inode = file_inode(file);
80352- struct address_space *mapping = file->f_mapping;
80353-
80354- get_file(file);
80355- if (tmp->vm_flags & VM_DENYWRITE)
80356- atomic_dec(&inode->i_writecount);
80357- mutex_lock(&mapping->i_mmap_mutex);
80358- if (tmp->vm_flags & VM_SHARED)
80359- mapping->i_mmap_writable++;
80360- flush_dcache_mmap_lock(mapping);
80361- /* insert tmp into the share list, just after mpnt */
80362- if (unlikely(tmp->vm_flags & VM_NONLINEAR))
80363- vma_nonlinear_insert(tmp,
80364- &mapping->i_mmap_nonlinear);
80365- else
80366- vma_interval_tree_insert_after(tmp, mpnt,
80367- &mapping->i_mmap);
80368- flush_dcache_mmap_unlock(mapping);
80369- mutex_unlock(&mapping->i_mmap_mutex);
80370+ tmp = dup_vma(mm, oldmm, mpnt);
80371+ if (!tmp) {
80372+ retval = -ENOMEM;
80373+ goto out;
80374 }
80375
80376 /*
80377@@ -464,6 +490,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
80378 if (retval)
80379 goto out;
80380 }
80381+
80382+#ifdef CONFIG_PAX_SEGMEXEC
80383+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
80384+ struct vm_area_struct *mpnt_m;
80385+
80386+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
80387+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
80388+
80389+ if (!mpnt->vm_mirror)
80390+ continue;
80391+
80392+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
80393+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
80394+ mpnt->vm_mirror = mpnt_m;
80395+ } else {
80396+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
80397+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
80398+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
80399+ mpnt->vm_mirror->vm_mirror = mpnt;
80400+ }
80401+ }
80402+ BUG_ON(mpnt_m);
80403+ }
80404+#endif
80405+
80406 /* a new mm has just been created */
80407 arch_dup_mmap(oldmm, mm);
80408 retval = 0;
80409@@ -473,14 +524,6 @@ out:
80410 up_write(&oldmm->mmap_sem);
80411 uprobe_end_dup_mmap();
80412 return retval;
80413-fail_nomem_anon_vma_fork:
80414- mpol_put(pol);
80415-fail_nomem_policy:
80416- kmem_cache_free(vm_area_cachep, tmp);
80417-fail_nomem:
80418- retval = -ENOMEM;
80419- vm_unacct_memory(charge);
80420- goto out;
80421 }
80422
80423 static inline int mm_alloc_pgd(struct mm_struct *mm)
80424@@ -695,8 +738,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
80425 return ERR_PTR(err);
80426
80427 mm = get_task_mm(task);
80428- if (mm && mm != current->mm &&
80429- !ptrace_may_access(task, mode)) {
80430+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
80431+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
80432 mmput(mm);
80433 mm = ERR_PTR(-EACCES);
80434 }
80435@@ -918,13 +961,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
80436 spin_unlock(&fs->lock);
80437 return -EAGAIN;
80438 }
80439- fs->users++;
80440+ atomic_inc(&fs->users);
80441 spin_unlock(&fs->lock);
80442 return 0;
80443 }
80444 tsk->fs = copy_fs_struct(fs);
80445 if (!tsk->fs)
80446 return -ENOMEM;
80447+ /* Carry through gr_chroot_dentry and is_chrooted instead
80448+ of recomputing them here. They were already copied when
80449+ the task struct was duplicated. This allows pivot_root
80450+ not to be treated as a chroot
80451+ */
80452+ //gr_set_chroot_entries(tsk, &tsk->fs->root);
80453+
80454 return 0;
80455 }
80456
80457@@ -1197,10 +1247,13 @@ static struct task_struct *copy_process(unsigned long clone_flags,
80458 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
80459 #endif
80460 retval = -EAGAIN;
80461+
80462+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
80463+
80464 if (atomic_read(&p->real_cred->user->processes) >=
80465 task_rlimit(p, RLIMIT_NPROC)) {
80466- if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
80467- p->real_cred->user != INIT_USER)
80468+ if (p->real_cred->user != INIT_USER &&
80469+ !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
80470 goto bad_fork_free;
80471 }
80472 current->flags &= ~PF_NPROC_EXCEEDED;
80473@@ -1446,6 +1499,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
80474 goto bad_fork_free_pid;
80475 }
80476
80477+ /* synchronizes with gr_set_acls()
80478+ we need to call this past the point of no return for fork()
80479+ */
80480+ gr_copy_label(p);
80481+
80482 if (clone_flags & CLONE_THREAD) {
80483 current->signal->nr_threads++;
80484 atomic_inc(&current->signal->live);
80485@@ -1529,6 +1587,8 @@ bad_fork_cleanup_count:
80486 bad_fork_free:
80487 free_task(p);
80488 fork_out:
80489+ gr_log_forkfail(retval);
80490+
80491 return ERR_PTR(retval);
80492 }
80493
80494@@ -1613,6 +1673,8 @@ long do_fork(unsigned long clone_flags,
80495 if (clone_flags & CLONE_PARENT_SETTID)
80496 put_user(nr, parent_tidptr);
80497
80498+ gr_handle_brute_check();
80499+
80500 if (clone_flags & CLONE_VFORK) {
80501 p->vfork_done = &vfork;
80502 init_completion(&vfork);
80503@@ -1729,7 +1791,7 @@ void __init proc_caches_init(void)
80504 mm_cachep = kmem_cache_create("mm_struct",
80505 sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
80506 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
80507- vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
80508+ vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC | SLAB_NO_SANITIZE);
80509 mmap_init();
80510 nsproxy_cache_init();
80511 }
80512@@ -1769,7 +1831,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
80513 return 0;
80514
80515 /* don't need lock here; in the worst case we'll do useless copy */
80516- if (fs->users == 1)
80517+ if (atomic_read(&fs->users) == 1)
80518 return 0;
80519
80520 *new_fsp = copy_fs_struct(fs);
80521@@ -1881,7 +1943,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
80522 fs = current->fs;
80523 spin_lock(&fs->lock);
80524 current->fs = new_fs;
80525- if (--fs->users)
80526+ gr_set_chroot_entries(current, &current->fs->root);
80527+ if (atomic_dec_return(&fs->users))
80528 new_fs = NULL;
80529 else
80530 new_fs = fs;
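
The dup_mmap() rework above extracts the per-VMA copying into dup_vma(), a helper that owns its goto-based error unwinding and reports failure by returning NULL, which leaves the caller's loop much flatter. A generic sketch of that extraction idiom with made-up resource types:

/* Extract-allocate-and-init-into-a-helper sketch; types are invented. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct policy { int mode; };
struct region {
	char          *name;
	struct policy *pol;
};

static struct region *dup_region(const struct region *src)
{
	struct region *tmp;

	tmp = malloc(sizeof(*tmp));
	if (!tmp)
		goto fail;
	*tmp = *src;                       /* shallow copy, like *tmp = *mpnt */

	tmp->name = strdup(src->name);
	if (!tmp->name)
		goto fail_free_tmp;

	tmp->pol = malloc(sizeof(*tmp->pol));
	if (!tmp->pol)
		goto fail_free_name;
	*tmp->pol = *src->pol;

	return tmp;

fail_free_name:                            /* unwind in reverse order */
	free(tmp->name);
fail_free_tmp:
	free(tmp);
fail:
	return NULL;
}

int main(void)
{
	struct policy p = { .mode = 1 };
	struct region src = { .name = "heap", .pol = &p };
	struct region *copy = dup_region(&src);

	if (!copy)
		return 1;                  /* caller maps NULL to -ENOMEM */
	printf("%s mode=%d\n", copy->name, copy->pol->mode);
	free(copy->pol); free(copy->name); free(copy);
	return 0;
}
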
80531diff --git a/kernel/futex.c b/kernel/futex.c
80532index 49dacfb..2ac4526 100644
80533--- a/kernel/futex.c
80534+++ b/kernel/futex.c
80535@@ -54,6 +54,7 @@
80536 #include <linux/mount.h>
80537 #include <linux/pagemap.h>
80538 #include <linux/syscalls.h>
80539+#include <linux/ptrace.h>
80540 #include <linux/signal.h>
80541 #include <linux/export.h>
80542 #include <linux/magic.h>
80543@@ -242,6 +243,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
80544 struct page *page, *page_head;
80545 int err, ro = 0;
80546
80547+#ifdef CONFIG_PAX_SEGMEXEC
80548+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
80549+ return -EFAULT;
80550+#endif
80551+
80552 /*
80553 * The futex address must be "naturally" aligned.
80554 */
80555@@ -440,7 +446,7 @@ static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
80556
80557 static int get_futex_value_locked(u32 *dest, u32 __user *from)
80558 {
80559- int ret;
80560+ unsigned long ret;
80561
80562 pagefault_disable();
80563 ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
80564@@ -2733,6 +2739,7 @@ static int __init futex_init(void)
80565 {
80566 u32 curval;
80567 int i;
80568+ mm_segment_t oldfs;
80569
80570 /*
80571 * This will fail and we want it. Some arch implementations do
80572@@ -2744,8 +2751,11 @@ static int __init futex_init(void)
80573 * implementation, the non-functional ones will return
80574 * -ENOSYS.
80575 */
80576+ oldfs = get_fs();
80577+ set_fs(USER_DS);
80578 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
80579 futex_cmpxchg_enabled = 1;
80580+ set_fs(oldfs);
80581
80582 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
80583 plist_head_init(&futex_queues[i].chain);
80584diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
80585index f9f44fd..29885e4 100644
80586--- a/kernel/futex_compat.c
80587+++ b/kernel/futex_compat.c
80588@@ -32,7 +32,7 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
80589 return 0;
80590 }
80591
80592-static void __user *futex_uaddr(struct robust_list __user *entry,
80593+static void __user __intentional_overflow(-1) *futex_uaddr(struct robust_list __user *entry,
80594 compat_long_t futex_offset)
80595 {
80596 compat_uptr_t base = ptr_to_compat(entry);
80597diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
80598index 9b22d03..6295b62 100644
80599--- a/kernel/gcov/base.c
80600+++ b/kernel/gcov/base.c
80601@@ -102,11 +102,6 @@ void gcov_enable_events(void)
80602 }
80603
80604 #ifdef CONFIG_MODULES
80605-static inline int within(void *addr, void *start, unsigned long size)
80606-{
80607- return ((addr >= start) && (addr < start + size));
80608-}
80609-
80610 /* Update list and generate events when modules are unloaded. */
80611 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
80612 void *data)
80613@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
80614 prev = NULL;
80615 /* Remove entries located in module from linked list. */
80616 for (info = gcov_info_head; info; info = info->next) {
80617- if (within(info, mod->module_core, mod->core_size)) {
80618+ if (within_module_core_rw((unsigned long)info, mod)) {
80619 if (prev)
80620 prev->next = info->next;
80621 else
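
The gcov change swaps a local within() helper for the patch's split within_module_core_rw() test, but the core operation is the same half-open range check. A sketch of that check done on uintptr_t, which avoids arithmetic on void pointers (a GCC extension the original relied on):

/* Half-open range containment on uintptr_t. */
#include <stdint.h>
#include <stdio.h>

static int within(const void *addr, const void *start, size_t size)
{
	uintptr_t a = (uintptr_t)addr, s = (uintptr_t)start;

	return a >= s && a - s < size;   /* [start, start + size) */
}

int main(void)
{
	char buf[64];

	printf("%d %d %d\n",
	       within(buf, buf, sizeof(buf)),          /* 1: first byte */
	       within(buf + 63, buf, sizeof(buf)),     /* 1: last byte  */
	       within(buf + 64, buf, sizeof(buf)));    /* 0: one past   */
	return 0;
}
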
80622diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
80623index 2288fbd..0f3941f 100644
80624--- a/kernel/hrtimer.c
80625+++ b/kernel/hrtimer.c
80626@@ -1435,7 +1435,7 @@ void hrtimer_peek_ahead_timers(void)
80627 local_irq_restore(flags);
80628 }
80629
80630-static void run_hrtimer_softirq(struct softirq_action *h)
80631+static void run_hrtimer_softirq(void)
80632 {
80633 hrtimer_peek_ahead_timers();
80634 }
80635@@ -1770,7 +1770,7 @@ static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
80636 return NOTIFY_OK;
80637 }
80638
80639-static struct notifier_block __cpuinitdata hrtimers_nb = {
80640+static struct notifier_block hrtimers_nb = {
80641 .notifier_call = hrtimer_cpu_notify,
80642 };
80643
80644diff --git a/kernel/irq_work.c b/kernel/irq_work.c
80645index 55fcce6..0e4cf34 100644
80646--- a/kernel/irq_work.c
80647+++ b/kernel/irq_work.c
80648@@ -189,12 +189,13 @@ static int irq_work_cpu_notify(struct notifier_block *self,
80649 return NOTIFY_OK;
80650 }
80651
80652-static struct notifier_block cpu_notify;
80653+static struct notifier_block cpu_notify = {
80654+ .notifier_call = irq_work_cpu_notify,
80655+ .priority = 0,
80656+};
80657
80658 static __init int irq_work_init_cpu_notifier(void)
80659 {
80660- cpu_notify.notifier_call = irq_work_cpu_notify;
80661- cpu_notify.priority = 0;
80662 register_cpu_notifier(&cpu_notify);
80663 return 0;
80664 }
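
The irq_work hunk turns two runtime field assignments into a designated initializer, so the notifier block is complete at compile time; under these patches that is what allows such objects to be made read-only. The same pattern in plain C:

/* Compile-time initialization instead of runtime field assignment. */
#include <stdio.h>

struct notifier {
	int (*call)(int event);
	int priority;
};

static int on_event(int event)
{
	printf("event %d\n", event);
	return 0;
}

/* const: the compiler may place this in .rodata, which is impossible
 * if the fields are filled in at runtime as in the old code */
static const struct notifier cpu_notify = {
	.call     = on_event,
	.priority = 0,
};

int main(void)
{
	return cpu_notify.call(42);
}
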
80665diff --git a/kernel/jump_label.c b/kernel/jump_label.c
80666index 60f48fa..7f3a770 100644
80667--- a/kernel/jump_label.c
80668+++ b/kernel/jump_label.c
80669@@ -13,6 +13,7 @@
80670 #include <linux/sort.h>
80671 #include <linux/err.h>
80672 #include <linux/static_key.h>
80673+#include <linux/mm.h>
80674
80675 #ifdef HAVE_JUMP_LABEL
80676
80677@@ -50,7 +51,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
80678
80679 size = (((unsigned long)stop - (unsigned long)start)
80680 / sizeof(struct jump_entry));
80681+ pax_open_kernel();
80682 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
80683+ pax_close_kernel();
80684 }
80685
80686 static void jump_label_update(struct static_key *key, int enable);
80687@@ -357,10 +360,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
80688 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
80689 struct jump_entry *iter;
80690
80691+ pax_open_kernel();
80692 for (iter = iter_start; iter < iter_stop; iter++) {
80693 if (within_module_init(iter->code, mod))
80694 iter->code = 0;
80695 }
80696+ pax_close_kernel();
80697 }
80698
80699 static int
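
The jump_label hunks bracket writes to otherwise read-only jump tables with pax_open_kernel()/pax_close_kernel(), which temporarily lift kernel write protection around the patching. A loose userspace analogue of that open, write, close discipline using mprotect (POSIX; the analogy, not the mechanism, is the point):

/* Userspace analogue of open-kernel / write / close-kernel. */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	/* a page-aligned, initially read-only "table" */
	char *table = mmap(NULL, pagesz, PROT_READ,
			   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (table == MAP_FAILED)
		return 1;

	if (mprotect(table, pagesz, PROT_READ | PROT_WRITE)) /* "open"  */
		return 1;
	strcpy(table, "patched");                            /* the write */
	if (mprotect(table, pagesz, PROT_READ))              /* "close" */
		return 1;

	printf("%s\n", table);       /* reads still work; writes now fault */
	munmap(table, pagesz);
	return 0;
}
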
80700diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
80701index 3127ad5..159d880 100644
80702--- a/kernel/kallsyms.c
80703+++ b/kernel/kallsyms.c
80704@@ -11,6 +11,9 @@
80705 * Changed the compression method from stem compression to "table lookup"
80706 * compression (see scripts/kallsyms.c for a more complete description)
80707 */
80708+#ifdef CONFIG_GRKERNSEC_HIDESYM
80709+#define __INCLUDED_BY_HIDESYM 1
80710+#endif
80711 #include <linux/kallsyms.h>
80712 #include <linux/module.h>
80713 #include <linux/init.h>
80714@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
80715
80716 static inline int is_kernel_inittext(unsigned long addr)
80717 {
80718+ if (system_state != SYSTEM_BOOTING)
80719+ return 0;
80720+
80721 if (addr >= (unsigned long)_sinittext
80722 && addr <= (unsigned long)_einittext)
80723 return 1;
80724 return 0;
80725 }
80726
80727+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
80728+#ifdef CONFIG_MODULES
80729+static inline int is_module_text(unsigned long addr)
80730+{
80731+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
80732+ return 1;
80733+
80734+ addr = ktla_ktva(addr);
80735+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
80736+}
80737+#else
80738+static inline int is_module_text(unsigned long addr)
80739+{
80740+ return 0;
80741+}
80742+#endif
80743+#endif
80744+
80745 static inline int is_kernel_text(unsigned long addr)
80746 {
80747 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
80748@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
80749
80750 static inline int is_kernel(unsigned long addr)
80751 {
80752+
80753+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
80754+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
80755+ return 1;
80756+
80757+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
80758+#else
80759 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
80760+#endif
80761+
80762 return 1;
80763 return in_gate_area_no_mm(addr);
80764 }
80765
80766 static int is_ksym_addr(unsigned long addr)
80767 {
80768+
80769+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
80770+ if (is_module_text(addr))
80771+ return 0;
80772+#endif
80773+
80774 if (all_var)
80775 return is_kernel(addr);
80776
80777@@ -480,7 +519,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
80778
80779 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
80780 {
80781- iter->name[0] = '\0';
80782 iter->nameoff = get_symbol_offset(new_pos);
80783 iter->pos = new_pos;
80784 }
80785@@ -528,6 +566,11 @@ static int s_show(struct seq_file *m, void *p)
80786 {
80787 struct kallsym_iter *iter = m->private;
80788
80789+#ifdef CONFIG_GRKERNSEC_HIDESYM
80790+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID))
80791+ return 0;
80792+#endif
80793+
80794 /* Some debugging symbols have no name. Ignore them. */
80795 if (!iter->name[0])
80796 return 0;
80797@@ -541,6 +584,7 @@ static int s_show(struct seq_file *m, void *p)
80798 */
80799 type = iter->exported ? toupper(iter->type) :
80800 tolower(iter->type);
80801+
80802 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
80803 type, iter->name, iter->module_name);
80804 } else
80805@@ -566,7 +610,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
80806 struct kallsym_iter *iter;
80807 int ret;
80808
80809- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
80810+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
80811 if (!iter)
80812 return -ENOMEM;
80813 reset_iter(iter, 0);
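
The kallsyms hunks hide /proc/kallsyms content from non-root readers entirely under CONFIG_GRKERNSEC_HIDESYM (s_show() returns 0 early), on top of the %pK formatting that mainline already applies under kptr_restrict. What an unprivileged reader typically observes can be checked directly (Linux-only):

/* Linux-only peek at the first few symbol lines. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/kallsyms", "r");
	char line[256];
	int n = 0;

	if (!f) {
		perror("/proc/kallsyms");
		return 1;
	}
	while (n < 5 && fgets(line, sizeof(line), f)) {
		fputs(line, stdout);  /* all-zero addresses => restricted view */
		n++;
	}
	fclose(f);
	return 0;
}
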
80814diff --git a/kernel/kcmp.c b/kernel/kcmp.c
80815index e30ac0f..3528cac 100644
80816--- a/kernel/kcmp.c
80817+++ b/kernel/kcmp.c
80818@@ -99,6 +99,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
80819 struct task_struct *task1, *task2;
80820 int ret;
80821
80822+#ifdef CONFIG_GRKERNSEC
80823+ return -ENOSYS;
80824+#endif
80825+
80826 rcu_read_lock();
80827
80828 /*
80829diff --git a/kernel/kexec.c b/kernel/kexec.c
80830index 59f7b55..4022f65 100644
80831--- a/kernel/kexec.c
80832+++ b/kernel/kexec.c
80833@@ -1041,7 +1041,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
80834 unsigned long flags)
80835 {
80836 struct compat_kexec_segment in;
80837- struct kexec_segment out, __user *ksegments;
80838+ struct kexec_segment out;
80839+ struct kexec_segment __user *ksegments;
80840 unsigned long i, result;
80841
80842 /* Don't allow clients that don't understand the native
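
The kexec fix splits "struct kexec_segment out, __user *ksegments;" into two declarations: in a combined declaration the __user qualifier and the '*' bind to individual declarators, which is easy to misread and mis-annotate. The classic form of the same pitfall:

/* Qualifiers and '*' bind per declarator in a combined declaration. */
#include <stdio.h>

int main(void)
{
	int *a, b;          /* a is int*, b is plain int, not int* */
	int x = 5;

	a = &x;
	b = x;              /* writing "b = &x" would not compile */
	printf("%d %d\n", *a, b);
	return 0;
}
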
80843diff --git a/kernel/kmod.c b/kernel/kmod.c
80844index 8241906..d625f2c 100644
80845--- a/kernel/kmod.c
80846+++ b/kernel/kmod.c
80847@@ -75,7 +75,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
80848 kfree(info->argv);
80849 }
80850
80851-static int call_modprobe(char *module_name, int wait)
80852+static int call_modprobe(char *module_name, char *module_param, int wait)
80853 {
80854 struct subprocess_info *info;
80855 static char *envp[] = {
80856@@ -85,7 +85,7 @@ static int call_modprobe(char *module_name, int wait)
80857 NULL
80858 };
80859
80860- char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
80861+ char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
80862 if (!argv)
80863 goto out;
80864
80865@@ -97,7 +97,8 @@ static int call_modprobe(char *module_name, int wait)
80866 argv[1] = "-q";
80867 argv[2] = "--";
80868 argv[3] = module_name; /* check free_modprobe_argv() */
80869- argv[4] = NULL;
80870+ argv[4] = module_param;
80871+ argv[5] = NULL;
80872
80873 info = call_usermodehelper_setup(modprobe_path, argv, envp, GFP_KERNEL,
80874 NULL, free_modprobe_argv, NULL);
80875@@ -129,9 +130,8 @@ out:
80876 * If module auto-loading support is disabled then this function
80877 * becomes a no-operation.
80878 */
80879-int __request_module(bool wait, const char *fmt, ...)
80880+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
80881 {
80882- va_list args;
80883 char module_name[MODULE_NAME_LEN];
80884 unsigned int max_modprobes;
80885 int ret;
80886@@ -147,9 +147,7 @@ int __request_module(bool wait, const char *fmt, ...)
80887 */
80888 WARN_ON_ONCE(wait && current_is_async());
80889
80890- va_start(args, fmt);
80891- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
80892- va_end(args);
80893+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
80894 if (ret >= MODULE_NAME_LEN)
80895 return -ENAMETOOLONG;
80896
80897@@ -157,6 +155,20 @@ int __request_module(bool wait, const char *fmt, ...)
80898 if (ret)
80899 return ret;
80900
80901+#ifdef CONFIG_GRKERNSEC_MODHARDEN
80902+ if (uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
80903+ /* hack to work around consolekit/udisks stupidity */
80904+ read_lock(&tasklist_lock);
80905+ if (!strcmp(current->comm, "mount") &&
80906+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
80907+ read_unlock(&tasklist_lock);
80908+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
80909+ return -EPERM;
80910+ }
80911+ read_unlock(&tasklist_lock);
80912+ }
80913+#endif
80914+
80915 /* If modprobe needs a service that is in a module, we get a recursive
80916 * loop. Limit the number of running kmod threads to max_threads/2 or
80917 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
80918@@ -185,11 +197,52 @@ int __request_module(bool wait, const char *fmt, ...)
80919
80920 trace_module_request(module_name, wait, _RET_IP_);
80921
80922- ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
80923+ ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
80924
80925 atomic_dec(&kmod_concurrent);
80926 return ret;
80927 }
80928+
80929+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
80930+{
80931+ va_list args;
80932+ int ret;
80933+
80934+ va_start(args, fmt);
80935+ ret = ____request_module(wait, module_param, fmt, args);
80936+ va_end(args);
80937+
80938+ return ret;
80939+}
80940+
80941+int __request_module(bool wait, const char *fmt, ...)
80942+{
80943+ va_list args;
80944+ int ret;
80945+
80946+#ifdef CONFIG_GRKERNSEC_MODHARDEN
80947+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
80948+ char module_param[MODULE_NAME_LEN];
80949+
80950+ memset(module_param, 0, sizeof(module_param));
80951+
80952+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", GR_GLOBAL_UID(current_uid()));
80953+
80954+ va_start(args, fmt);
80955+ ret = ____request_module(wait, module_param, fmt, args);
80956+ va_end(args);
80957+
80958+ return ret;
80959+ }
80960+#endif
80961+
80962+ va_start(args, fmt);
80963+ ret = ____request_module(wait, NULL, fmt, args);
80964+ va_end(args);
80965+
80966+ return ret;
80967+}
80968+
80969 EXPORT_SYMBOL(__request_module);
80970 #endif /* CONFIG_MODULES */
80971
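
The kmod rework above is the standard v-variant refactor: __request_module() becomes a thin variadic wrapper over ____request_module(), which takes a va_list, so MODHARDEN can add wrappers that inject an extra modprobe argument without duplicating the body. The vprintf-style forwarding pattern it relies on:

/* va_list core plus variadic wrappers. */
#include <stdarg.h>
#include <stdio.h>

static int vlog(const char *prefix, const char *fmt, va_list ap)
{
	char buf[128];

	vsnprintf(buf, sizeof(buf), fmt, ap);   /* consume ap exactly once */
	return printf("[%s] %s\n", prefix, buf);
}

static int log_info(const char *fmt, ...)
{
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = vlog("info", fmt, ap);            /* forward, don't re-start */
	va_end(ap);
	return ret;
}

static int log_err(const char *fmt, ...)
{
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = vlog("error", fmt, ap);
	va_end(ap);
	return ret;
}

int main(void)
{
	log_info("module %s", "fuse");
	log_err("module %s not found", "nope");
	return 0;
}
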
80972@@ -300,7 +353,7 @@ static int wait_for_helper(void *data)
80973 *
80974 * Thus the __user pointer cast is valid here.
80975 */
80976- sys_wait4(pid, (int __user *)&ret, 0, NULL);
80977+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
80978
80979 /*
80980 * If ret is 0, either ____call_usermodehelper failed and the
80981@@ -651,7 +704,7 @@ EXPORT_SYMBOL(call_usermodehelper);
80982 static int proc_cap_handler(struct ctl_table *table, int write,
80983 void __user *buffer, size_t *lenp, loff_t *ppos)
80984 {
80985- struct ctl_table t;
80986+ ctl_table_no_const t;
80987 unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
80988 kernel_cap_t new_cap;
80989 int err, i;
80990diff --git a/kernel/kprobes.c b/kernel/kprobes.c
80991index bddf3b2..233bf40 100644
80992--- a/kernel/kprobes.c
80993+++ b/kernel/kprobes.c
80994@@ -31,6 +31,9 @@
80995 * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
80996 * <prasanna@in.ibm.com> added function-return probes.
80997 */
80998+#ifdef CONFIG_GRKERNSEC_HIDESYM
80999+#define __INCLUDED_BY_HIDESYM 1
81000+#endif
81001 #include <linux/kprobes.h>
81002 #include <linux/hash.h>
81003 #include <linux/init.h>
81004@@ -185,7 +188,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
81005 * kernel image and loaded module images reside. This is required
81006 * so x86_64 can correctly handle the %rip-relative fixups.
81007 */
81008- kip->insns = module_alloc(PAGE_SIZE);
81009+ kip->insns = module_alloc_exec(PAGE_SIZE);
81010 if (!kip->insns) {
81011 kfree(kip);
81012 return NULL;
81013@@ -225,7 +228,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
81014 */
81015 if (!list_is_singular(&kip->list)) {
81016 list_del(&kip->list);
81017- module_free(NULL, kip->insns);
81018+ module_free_exec(NULL, kip->insns);
81019 kfree(kip);
81020 }
81021 return 1;
81022@@ -2083,7 +2086,7 @@ static int __init init_kprobes(void)
81023 {
81024 int i, err = 0;
81025 unsigned long offset = 0, size = 0;
81026- char *modname, namebuf[128];
81027+ char *modname, namebuf[KSYM_NAME_LEN];
81028 const char *symbol_name;
81029 void *addr;
81030 struct kprobe_blackpoint *kb;
81031@@ -2168,11 +2171,11 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
81032 kprobe_type = "k";
81033
81034 if (sym)
81035- seq_printf(pi, "%p %s %s+0x%x %s ",
81036+ seq_printf(pi, "%pK %s %s+0x%x %s ",
81037 p->addr, kprobe_type, sym, offset,
81038 (modname ? modname : " "));
81039 else
81040- seq_printf(pi, "%p %s %p ",
81041+ seq_printf(pi, "%pK %s %pK ",
81042 p->addr, kprobe_type, p->addr);
81043
81044 if (!pp)
81045@@ -2209,7 +2212,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
81046 const char *sym = NULL;
81047 unsigned int i = *(loff_t *) v;
81048 unsigned long offset = 0;
81049- char *modname, namebuf[128];
81050+ char *modname, namebuf[KSYM_NAME_LEN];
81051
81052 head = &kprobe_table[i];
81053 preempt_disable();
81054diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
81055index 6ada93c..dce7d5d 100644
81056--- a/kernel/ksysfs.c
81057+++ b/kernel/ksysfs.c
81058@@ -46,6 +46,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
81059 {
81060 if (count+1 > UEVENT_HELPER_PATH_LEN)
81061 return -ENOENT;
81062+ if (!capable(CAP_SYS_ADMIN))
81063+ return -EPERM;
81064 memcpy(uevent_helper, buf, count);
81065 uevent_helper[count] = '\0';
81066 if (count && uevent_helper[count-1] == '\n')
81067@@ -172,7 +174,7 @@ static ssize_t notes_read(struct file *filp, struct kobject *kobj,
81068 return count;
81069 }
81070
81071-static struct bin_attribute notes_attr = {
81072+static bin_attribute_no_const notes_attr __read_only = {
81073 .attr = {
81074 .name = "notes",
81075 .mode = S_IRUGO,
81076diff --git a/kernel/lockdep.c b/kernel/lockdep.c
81077index 1f3186b..bb7dbc6 100644
81078--- a/kernel/lockdep.c
81079+++ b/kernel/lockdep.c
81080@@ -596,6 +596,10 @@ static int static_obj(void *obj)
81081 end = (unsigned long) &_end,
81082 addr = (unsigned long) obj;
81083
81084+#ifdef CONFIG_PAX_KERNEXEC
81085+ start = ktla_ktva(start);
81086+#endif
81087+
81088 /*
81089 * static variable?
81090 */
81091@@ -736,6 +740,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
81092 if (!static_obj(lock->key)) {
81093 debug_locks_off();
81094 printk("INFO: trying to register non-static key.\n");
81095+ printk("lock:%pS key:%pS.\n", lock, lock->key);
81096 printk("the code is fine but needs lockdep annotation.\n");
81097 printk("turning off the locking correctness validator.\n");
81098 dump_stack();
81099@@ -3080,7 +3085,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
81100 if (!class)
81101 return 0;
81102 }
81103- atomic_inc((atomic_t *)&class->ops);
81104+ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
81105 if (very_verbose(class)) {
81106 printk("\nacquire class [%p] %s", class->key, class->name);
81107 if (class->name_version > 1)
81108diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
81109index b2c71c5..7b88d63 100644
81110--- a/kernel/lockdep_proc.c
81111+++ b/kernel/lockdep_proc.c
81112@@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, void *v)
81113 return 0;
81114 }
81115
81116- seq_printf(m, "%p", class->key);
81117+ seq_printf(m, "%pK", class->key);
81118 #ifdef CONFIG_DEBUG_LOCKDEP
81119 seq_printf(m, " OPS:%8ld", class->ops);
81120 #endif
81121@@ -83,7 +83,7 @@ static int l_show(struct seq_file *m, void *v)
81122
81123 list_for_each_entry(entry, &class->locks_after, entry) {
81124 if (entry->distance == 1) {
81125- seq_printf(m, " -> [%p] ", entry->class->key);
81126+ seq_printf(m, " -> [%pK] ", entry->class->key);
81127 print_name(m, entry->class);
81128 seq_puts(m, "\n");
81129 }
81130@@ -152,7 +152,7 @@ static int lc_show(struct seq_file *m, void *v)
81131 if (!class->key)
81132 continue;
81133
81134- seq_printf(m, "[%p] ", class->key);
81135+ seq_printf(m, "[%pK] ", class->key);
81136 print_name(m, class);
81137 seq_puts(m, "\n");
81138 }
81139@@ -495,7 +495,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
81140 if (!i)
81141 seq_line(m, '-', 40-namelen, namelen);
81142
81143- snprintf(ip, sizeof(ip), "[<%p>]",
81144+ snprintf(ip, sizeof(ip), "[<%pK>]",
81145 (void *)class->contention_point[i]);
81146 seq_printf(m, "%40s %14lu %29s %pS\n",
81147 name, stats->contention_point[i],
81148@@ -510,7 +510,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
81149 if (!i)
81150 seq_line(m, '-', 40-namelen, namelen);
81151
81152- snprintf(ip, sizeof(ip), "[<%p>]",
81153+ snprintf(ip, sizeof(ip), "[<%pK>]",
81154 (void *)class->contending_point[i]);
81155 seq_printf(m, "%40s %14lu %29s %pS\n",
81156 name, stats->contending_point[i],
81157diff --git a/kernel/module.c b/kernel/module.c
81158index fa53db8..6f17200 100644
81159--- a/kernel/module.c
81160+++ b/kernel/module.c
81161@@ -61,6 +61,7 @@
81162 #include <linux/pfn.h>
81163 #include <linux/bsearch.h>
81164 #include <linux/fips.h>
81165+#include <linux/grsecurity.h>
81166 #include <uapi/linux/module.h>
81167 #include "module-internal.h"
81168
81169@@ -156,7 +157,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
81170
81171 /* Bounds of module allocation, for speeding __module_address.
81172 * Protected by module_mutex. */
81173-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
81174+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
81175+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
81176
81177 int register_module_notifier(struct notifier_block * nb)
81178 {
81179@@ -323,7 +325,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
81180 return true;
81181
81182 list_for_each_entry_rcu(mod, &modules, list) {
81183- struct symsearch arr[] = {
81184+ struct symsearch modarr[] = {
81185 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
81186 NOT_GPL_ONLY, false },
81187 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
81188@@ -348,7 +350,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
81189 if (mod->state == MODULE_STATE_UNFORMED)
81190 continue;
81191
81192- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
81193+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
81194 return true;
81195 }
81196 return false;
81197@@ -485,7 +487,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
81198 static int percpu_modalloc(struct module *mod,
81199 unsigned long size, unsigned long align)
81200 {
81201- if (align > PAGE_SIZE) {
81202+ if (align-1 >= PAGE_SIZE) {
81203 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
81204 mod->name, align, PAGE_SIZE);
81205 align = PAGE_SIZE;
81206@@ -1089,7 +1091,7 @@ struct module_attribute module_uevent =
81207 static ssize_t show_coresize(struct module_attribute *mattr,
81208 struct module_kobject *mk, char *buffer)
81209 {
81210- return sprintf(buffer, "%u\n", mk->mod->core_size);
81211+ return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
81212 }
81213
81214 static struct module_attribute modinfo_coresize =
81215@@ -1098,7 +1100,7 @@ static struct module_attribute modinfo_coresize =
81216 static ssize_t show_initsize(struct module_attribute *mattr,
81217 struct module_kobject *mk, char *buffer)
81218 {
81219- return sprintf(buffer, "%u\n", mk->mod->init_size);
81220+ return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
81221 }
81222
81223 static struct module_attribute modinfo_initsize =
81224@@ -1313,7 +1315,7 @@ resolve_symbol_wait(struct module *mod,
81225 */
81226 #ifdef CONFIG_SYSFS
81227
81228-#ifdef CONFIG_KALLSYMS
81229+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
81230 static inline bool sect_empty(const Elf_Shdr *sect)
81231 {
81232 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
81233@@ -1453,7 +1455,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
81234 {
81235 unsigned int notes, loaded, i;
81236 struct module_notes_attrs *notes_attrs;
81237- struct bin_attribute *nattr;
81238+ bin_attribute_no_const *nattr;
81239
81240 /* failed to create section attributes, so can't create notes */
81241 if (!mod->sect_attrs)
81242@@ -1565,7 +1567,7 @@ static void del_usage_links(struct module *mod)
81243 static int module_add_modinfo_attrs(struct module *mod)
81244 {
81245 struct module_attribute *attr;
81246- struct module_attribute *temp_attr;
81247+ module_attribute_no_const *temp_attr;
81248 int error = 0;
81249 int i;
81250
81251@@ -1779,21 +1781,21 @@ static void set_section_ro_nx(void *base,
81252
81253 static void unset_module_core_ro_nx(struct module *mod)
81254 {
81255- set_page_attributes(mod->module_core + mod->core_text_size,
81256- mod->module_core + mod->core_size,
81257+ set_page_attributes(mod->module_core_rw,
81258+ mod->module_core_rw + mod->core_size_rw,
81259 set_memory_x);
81260- set_page_attributes(mod->module_core,
81261- mod->module_core + mod->core_ro_size,
81262+ set_page_attributes(mod->module_core_rx,
81263+ mod->module_core_rx + mod->core_size_rx,
81264 set_memory_rw);
81265 }
81266
81267 static void unset_module_init_ro_nx(struct module *mod)
81268 {
81269- set_page_attributes(mod->module_init + mod->init_text_size,
81270- mod->module_init + mod->init_size,
81271+ set_page_attributes(mod->module_init_rw,
81272+ mod->module_init_rw + mod->init_size_rw,
81273 set_memory_x);
81274- set_page_attributes(mod->module_init,
81275- mod->module_init + mod->init_ro_size,
81276+ set_page_attributes(mod->module_init_rx,
81277+ mod->module_init_rx + mod->init_size_rx,
81278 set_memory_rw);
81279 }
81280
81281@@ -1806,14 +1808,14 @@ void set_all_modules_text_rw(void)
81282 list_for_each_entry_rcu(mod, &modules, list) {
81283 if (mod->state == MODULE_STATE_UNFORMED)
81284 continue;
81285- if ((mod->module_core) && (mod->core_text_size)) {
81286- set_page_attributes(mod->module_core,
81287- mod->module_core + mod->core_text_size,
81288+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
81289+ set_page_attributes(mod->module_core_rx,
81290+ mod->module_core_rx + mod->core_size_rx,
81291 set_memory_rw);
81292 }
81293- if ((mod->module_init) && (mod->init_text_size)) {
81294- set_page_attributes(mod->module_init,
81295- mod->module_init + mod->init_text_size,
81296+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
81297+ set_page_attributes(mod->module_init_rx,
81298+ mod->module_init_rx + mod->init_size_rx,
81299 set_memory_rw);
81300 }
81301 }
81302@@ -1829,14 +1831,14 @@ void set_all_modules_text_ro(void)
81303 list_for_each_entry_rcu(mod, &modules, list) {
81304 if (mod->state == MODULE_STATE_UNFORMED)
81305 continue;
81306- if ((mod->module_core) && (mod->core_text_size)) {
81307- set_page_attributes(mod->module_core,
81308- mod->module_core + mod->core_text_size,
81309+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
81310+ set_page_attributes(mod->module_core_rx,
81311+ mod->module_core_rx + mod->core_size_rx,
81312 set_memory_ro);
81313 }
81314- if ((mod->module_init) && (mod->init_text_size)) {
81315- set_page_attributes(mod->module_init,
81316- mod->module_init + mod->init_text_size,
81317+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
81318+ set_page_attributes(mod->module_init_rx,
81319+ mod->module_init_rx + mod->init_size_rx,
81320 set_memory_ro);
81321 }
81322 }
81323@@ -1887,16 +1889,19 @@ static void free_module(struct module *mod)
81324
81325 /* This may be NULL, but that's OK */
81326 unset_module_init_ro_nx(mod);
81327- module_free(mod, mod->module_init);
81328+ module_free(mod, mod->module_init_rw);
81329+ module_free_exec(mod, mod->module_init_rx);
81330 kfree(mod->args);
81331 percpu_modfree(mod);
81332
81333 /* Free lock-classes: */
81334- lockdep_free_key_range(mod->module_core, mod->core_size);
81335+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
81336+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
81337
81338 /* Finally, free the core (containing the module structure) */
81339 unset_module_core_ro_nx(mod);
81340- module_free(mod, mod->module_core);
81341+ module_free_exec(mod, mod->module_core_rx);
81342+ module_free(mod, mod->module_core_rw);
81343
81344 #ifdef CONFIG_MPU
81345 update_protections(current->mm);
81346@@ -1966,9 +1971,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
81347 int ret = 0;
81348 const struct kernel_symbol *ksym;
81349
81350+#ifdef CONFIG_GRKERNSEC_MODHARDEN
81351+ int is_fs_load = 0;
81352+ int register_filesystem_found = 0;
81353+ char *p;
81354+
81355+ p = strstr(mod->args, "grsec_modharden_fs");
81356+ if (p) {
81357+ char *endptr = p + sizeof("grsec_modharden_fs") - 1;
81358+ /* copy \0 as well */
81359+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
81360+ is_fs_load = 1;
81361+ }
81362+#endif
81363+
81364 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
81365 const char *name = info->strtab + sym[i].st_name;
81366
81367+#ifdef CONFIG_GRKERNSEC_MODHARDEN
81368+ /* it's a real shame this will never get ripped and copied
81369+ upstream! ;(
81370+ */
81371+ if (is_fs_load && !strcmp(name, "register_filesystem"))
81372+ register_filesystem_found = 1;
81373+#endif
81374+
81375 switch (sym[i].st_shndx) {
81376 case SHN_COMMON:
81377 /* We compiled with -fno-common. These are not
81378@@ -1989,7 +2016,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
81379 ksym = resolve_symbol_wait(mod, info, name);
81380 /* Ok if resolved. */
81381 if (ksym && !IS_ERR(ksym)) {
81382+ pax_open_kernel();
81383 sym[i].st_value = ksym->value;
81384+ pax_close_kernel();
81385 break;
81386 }
81387
81388@@ -2008,11 +2037,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
81389 secbase = (unsigned long)mod_percpu(mod);
81390 else
81391 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
81392+ pax_open_kernel();
81393 sym[i].st_value += secbase;
81394+ pax_close_kernel();
81395 break;
81396 }
81397 }
81398
81399+#ifdef CONFIG_GRKERNSEC_MODHARDEN
81400+ if (is_fs_load && !register_filesystem_found) {
81401+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
81402+ ret = -EPERM;
81403+ }
81404+#endif
81405+
81406 return ret;
81407 }
81408
81409@@ -2096,22 +2134,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
81410 || s->sh_entsize != ~0UL
81411 || strstarts(sname, ".init"))
81412 continue;
81413- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
81414+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
81415+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
81416+ else
81417+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
81418 pr_debug("\t%s\n", sname);
81419 }
81420- switch (m) {
81421- case 0: /* executable */
81422- mod->core_size = debug_align(mod->core_size);
81423- mod->core_text_size = mod->core_size;
81424- break;
81425- case 1: /* RO: text and ro-data */
81426- mod->core_size = debug_align(mod->core_size);
81427- mod->core_ro_size = mod->core_size;
81428- break;
81429- case 3: /* whole core */
81430- mod->core_size = debug_align(mod->core_size);
81431- break;
81432- }
81433 }
81434
81435 pr_debug("Init section allocation order:\n");
81436@@ -2125,23 +2153,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
81437 || s->sh_entsize != ~0UL
81438 || !strstarts(sname, ".init"))
81439 continue;
81440- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
81441- | INIT_OFFSET_MASK);
81442+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
81443+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
81444+ else
81445+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
81446+ s->sh_entsize |= INIT_OFFSET_MASK;
81447 pr_debug("\t%s\n", sname);
81448 }
81449- switch (m) {
81450- case 0: /* executable */
81451- mod->init_size = debug_align(mod->init_size);
81452- mod->init_text_size = mod->init_size;
81453- break;
81454- case 1: /* RO: text and ro-data */
81455- mod->init_size = debug_align(mod->init_size);
81456- mod->init_ro_size = mod->init_size;
81457- break;
81458- case 3: /* whole init */
81459- mod->init_size = debug_align(mod->init_size);
81460- break;
81461- }
81462 }
81463 }
81464
81465@@ -2314,7 +2332,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
81466
81467 /* Put symbol section at end of init part of module. */
81468 symsect->sh_flags |= SHF_ALLOC;
81469- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
81470+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
81471 info->index.sym) | INIT_OFFSET_MASK;
81472 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
81473
81474@@ -2331,13 +2349,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
81475 }
81476
81477 /* Append room for core symbols at end of core part. */
81478- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
81479- info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
81480- mod->core_size += strtab_size;
81481+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
81482+ info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
81483+ mod->core_size_rx += strtab_size;
81484
81485 /* Put string table section at end of init part of module. */
81486 strsect->sh_flags |= SHF_ALLOC;
81487- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
81488+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
81489 info->index.str) | INIT_OFFSET_MASK;
81490 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
81491 }
81492@@ -2355,12 +2373,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
81493 /* Make sure we get permanent strtab: don't use info->strtab. */
81494 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
81495
81496+ pax_open_kernel();
81497+
81498 /* Set types up while we still have access to sections. */
81499 for (i = 0; i < mod->num_symtab; i++)
81500 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
81501
81502- mod->core_symtab = dst = mod->module_core + info->symoffs;
81503- mod->core_strtab = s = mod->module_core + info->stroffs;
81504+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
81505+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
81506 src = mod->symtab;
81507 for (ndst = i = 0; i < mod->num_symtab; i++) {
81508 if (i == 0 ||
81509@@ -2372,6 +2392,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
81510 }
81511 }
81512 mod->core_num_syms = ndst;
81513+
81514+ pax_close_kernel();
81515 }
81516 #else
81517 static inline void layout_symtab(struct module *mod, struct load_info *info)
81518@@ -2405,17 +2427,33 @@ void * __weak module_alloc(unsigned long size)
81519 return vmalloc_exec(size);
81520 }
81521
81522-static void *module_alloc_update_bounds(unsigned long size)
81523+static void *module_alloc_update_bounds_rw(unsigned long size)
81524 {
81525 void *ret = module_alloc(size);
81526
81527 if (ret) {
81528 mutex_lock(&module_mutex);
81529 /* Update module bounds. */
81530- if ((unsigned long)ret < module_addr_min)
81531- module_addr_min = (unsigned long)ret;
81532- if ((unsigned long)ret + size > module_addr_max)
81533- module_addr_max = (unsigned long)ret + size;
81534+ if ((unsigned long)ret < module_addr_min_rw)
81535+ module_addr_min_rw = (unsigned long)ret;
81536+ if ((unsigned long)ret + size > module_addr_max_rw)
81537+ module_addr_max_rw = (unsigned long)ret + size;
81538+ mutex_unlock(&module_mutex);
81539+ }
81540+ return ret;
81541+}
81542+
81543+static void *module_alloc_update_bounds_rx(unsigned long size)
81544+{
81545+ void *ret = module_alloc_exec(size);
81546+
81547+ if (ret) {
81548+ mutex_lock(&module_mutex);
81549+ /* Update module bounds. */
81550+ if ((unsigned long)ret < module_addr_min_rx)
81551+ module_addr_min_rx = (unsigned long)ret;
81552+ if ((unsigned long)ret + size > module_addr_max_rx)
81553+ module_addr_max_rx = (unsigned long)ret + size;
81554 mutex_unlock(&module_mutex);
81555 }
81556 return ret;
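
Splitting the allocator into _rw and _rx variants also splits the global module bounds into two ranges, so the fast-path containment test used later by __module_address() has to consult both. A hedged userspace model of the bookkeeping (the kernel guards the updates with module_mutex, as above):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Two independent module address ranges, as the patch splits them:
 * one spanning all RW allocations, one spanning all RX allocations. */
struct bounds { uintptr_t min, max; };

static struct bounds rw_bounds = { UINTPTR_MAX, 0 };
static struct bounds rx_bounds = { UINTPTR_MAX, 0 };

static void update_bounds(struct bounds *b, uintptr_t start, size_t size)
{
	if (start < b->min)
		b->min = start;
	if (start + size > b->max)
		b->max = start + size;
}

/* __module_address() may bail out early only when the address misses
 * both ranges, exactly as the reworked check further below does. */
static bool maybe_module_address(uintptr_t addr)
{
	return !((addr < rx_bounds.min || addr > rx_bounds.max) &&
		 (addr < rw_bounds.min || addr > rw_bounds.max));
}
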
81557@@ -2691,8 +2729,14 @@ static struct module *setup_load_info(struct load_info *info, int flags)
81558 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
81559 {
81560 const char *modmagic = get_modinfo(info, "vermagic");
81561+ const char *license = get_modinfo(info, "license");
81562 int err;
81563
81564+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
81565+ if (!license || !license_is_gpl_compatible(license))
81566+ return -ENOEXEC;
81567+#endif
81568+
81569 if (flags & MODULE_INIT_IGNORE_VERMAGIC)
81570 modmagic = NULL;
81571
81572@@ -2718,7 +2762,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
81573 }
81574
81575 /* Set up license info based on the info section */
81576- set_license(mod, get_modinfo(info, "license"));
81577+ set_license(mod, license);
81578
81579 return 0;
81580 }
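
The new KERNEXEC_PLUGIN_METHOD_OR gate reuses the kernel's existing GPL-compatibility test on the license string it now fetches once and passes to set_license() below. A rough model of license_is_gpl_compatible(); the whitelist is reproduced from memory of the 3.x sources and should be treated as illustrative:

#include <string.h>

/* Rough model of the kernel's license_is_gpl_compatible(): a strcmp()
 * against a short whitelist of MODULE_LICENSE() strings. */
static int license_is_gpl_compatible(const char *license)
{
	static const char *const ok[] = {
		"GPL", "GPL v2", "GPL and additional rights",
		"Dual BSD/GPL", "Dual MIT/GPL", "Dual MPL/GPL",
	};
	for (size_t i = 0; i < sizeof(ok) / sizeof(ok[0]); i++)
		if (strcmp(license, ok[i]) == 0)
			return 1;
	return 0;
}
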
81581@@ -2799,7 +2843,7 @@ static int move_module(struct module *mod, struct load_info *info)
81582 void *ptr;
81583
81584 /* Do the allocs. */
81585- ptr = module_alloc_update_bounds(mod->core_size);
81586+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
81587 /*
81588 * The pointer to this block is stored in the module structure
81589 * which is inside the block. Just mark it as not being a
81590@@ -2809,11 +2853,11 @@ static int move_module(struct module *mod, struct load_info *info)
81591 if (!ptr)
81592 return -ENOMEM;
81593
81594- memset(ptr, 0, mod->core_size);
81595- mod->module_core = ptr;
81596+ memset(ptr, 0, mod->core_size_rw);
81597+ mod->module_core_rw = ptr;
81598
81599- if (mod->init_size) {
81600- ptr = module_alloc_update_bounds(mod->init_size);
81601+ if (mod->init_size_rw) {
81602+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
81603 /*
81604 * The pointer to this block is stored in the module structure
81605 * which is inside the block. This block doesn't need to be
81606@@ -2822,13 +2866,45 @@ static int move_module(struct module *mod, struct load_info *info)
81607 */
81608 kmemleak_ignore(ptr);
81609 if (!ptr) {
81610- module_free(mod, mod->module_core);
81611+ module_free(mod, mod->module_core_rw);
81612 return -ENOMEM;
81613 }
81614- memset(ptr, 0, mod->init_size);
81615- mod->module_init = ptr;
81616+ memset(ptr, 0, mod->init_size_rw);
81617+ mod->module_init_rw = ptr;
81618 } else
81619- mod->module_init = NULL;
81620+ mod->module_init_rw = NULL;
81621+
81622+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
81623+ kmemleak_not_leak(ptr);
81624+ if (!ptr) {
81625+ if (mod->module_init_rw)
81626+ module_free(mod, mod->module_init_rw);
81627+ module_free(mod, mod->module_core_rw);
81628+ return -ENOMEM;
81629+ }
81630+
81631+ pax_open_kernel();
81632+ memset(ptr, 0, mod->core_size_rx);
81633+ pax_close_kernel();
81634+ mod->module_core_rx = ptr;
81635+
81636+ if (mod->init_size_rx) {
81637+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
81638+ kmemleak_ignore(ptr);
81639+ if (!ptr && mod->init_size_rx) {
81640+ module_free_exec(mod, mod->module_core_rx);
81641+ if (mod->module_init_rw)
81642+ module_free(mod, mod->module_init_rw);
81643+ module_free(mod, mod->module_core_rw);
81644+ return -ENOMEM;
81645+ }
81646+
81647+ pax_open_kernel();
81648+ memset(ptr, 0, mod->init_size_rx);
81649+ pax_close_kernel();
81650+ mod->module_init_rx = ptr;
81651+ } else
81652+ mod->module_init_rx = NULL;
81653
81654 /* Transfer each section which specifies SHF_ALLOC */
81655 pr_debug("final section addresses:\n");
81656@@ -2839,16 +2915,45 @@ static int move_module(struct module *mod, struct load_info *info)
81657 if (!(shdr->sh_flags & SHF_ALLOC))
81658 continue;
81659
81660- if (shdr->sh_entsize & INIT_OFFSET_MASK)
81661- dest = mod->module_init
81662- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
81663- else
81664- dest = mod->module_core + shdr->sh_entsize;
81665+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
81666+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
81667+ dest = mod->module_init_rw
81668+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
81669+ else
81670+ dest = mod->module_init_rx
81671+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
81672+ } else {
81673+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
81674+ dest = mod->module_core_rw + shdr->sh_entsize;
81675+ else
81676+ dest = mod->module_core_rx + shdr->sh_entsize;
81677+ }
81678+
81679+ if (shdr->sh_type != SHT_NOBITS) {
81680+
81681+#ifdef CONFIG_PAX_KERNEXEC
81682+#ifdef CONFIG_X86_64
81683+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
81684+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
81685+#endif
81686+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
81687+ pax_open_kernel();
81688+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
81689+ pax_close_kernel();
81690+ } else
81691+#endif
81692
81693- if (shdr->sh_type != SHT_NOBITS)
81694 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
81695+ }
81696 /* Update sh_addr to point to copy in image. */
81697- shdr->sh_addr = (unsigned long)dest;
81698+
81699+#ifdef CONFIG_PAX_KERNEXEC
81700+ if (shdr->sh_flags & SHF_EXECINSTR)
81701+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
81702+ else
81703+#endif
81704+
81705+ shdr->sh_addr = (unsigned long)dest;
81706 pr_debug("\t0x%lx %s\n",
81707 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
81708 }
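
pax_open_kernel()/pax_close_kernel() bracket each write into the otherwise read-only RX image; on x86 this works roughly by toggling CR0.WP, to describe it loosely. A userspace analogue of the copy-into-RX step, using mprotect() on a page-aligned destination:

#include <string.h>
#include <sys/mman.h>

/* Userspace analogue of pax_open_kernel()/memcpy()/pax_close_kernel():
 * make the page-aligned destination writable just long enough to copy
 * the section image, then drop back to read+execute. */
static int copy_into_rx(void *dst, const void *src, size_t len)
{
	if (mprotect(dst, len, PROT_READ | PROT_WRITE) != 0)
		return -1;
	memcpy(dst, src, len);
	return mprotect(dst, len, PROT_READ | PROT_EXEC);
}
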
81709@@ -2905,12 +3010,12 @@ static void flush_module_icache(const struct module *mod)
81710 * Do it before processing of module parameters, so the module
81711 * can provide parameter accessor functions of its own.
81712 */
81713- if (mod->module_init)
81714- flush_icache_range((unsigned long)mod->module_init,
81715- (unsigned long)mod->module_init
81716- + mod->init_size);
81717- flush_icache_range((unsigned long)mod->module_core,
81718- (unsigned long)mod->module_core + mod->core_size);
81719+ if (mod->module_init_rx)
81720+ flush_icache_range((unsigned long)mod->module_init_rx,
81721+ (unsigned long)mod->module_init_rx
81722+ + mod->init_size_rx);
81723+ flush_icache_range((unsigned long)mod->module_core_rx,
81724+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
81725
81726 set_fs(old_fs);
81727 }
81728@@ -2977,8 +3082,10 @@ static int alloc_module_percpu(struct module *mod, struct load_info *info)
81729 static void module_deallocate(struct module *mod, struct load_info *info)
81730 {
81731 percpu_modfree(mod);
81732- module_free(mod, mod->module_init);
81733- module_free(mod, mod->module_core);
81734+ module_free_exec(mod, mod->module_init_rx);
81735+ module_free_exec(mod, mod->module_core_rx);
81736+ module_free(mod, mod->module_init_rw);
81737+ module_free(mod, mod->module_core_rw);
81738 }
81739
81740 int __weak module_finalize(const Elf_Ehdr *hdr,
81741@@ -2991,7 +3098,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
81742 static int post_relocation(struct module *mod, const struct load_info *info)
81743 {
81744 /* Sort exception table now relocations are done. */
81745+ pax_open_kernel();
81746 sort_extable(mod->extable, mod->extable + mod->num_exentries);
81747+ pax_close_kernel();
81748
81749 /* Copy relocated percpu area over. */
81750 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
81751@@ -3045,16 +3154,16 @@ static int do_init_module(struct module *mod)
81752 MODULE_STATE_COMING, mod);
81753
81754 /* Set RO and NX regions for core */
81755- set_section_ro_nx(mod->module_core,
81756- mod->core_text_size,
81757- mod->core_ro_size,
81758- mod->core_size);
81759+ set_section_ro_nx(mod->module_core_rx,
81760+ mod->core_size_rx,
81761+ mod->core_size_rx,
81762+ mod->core_size_rx);
81763
81764 /* Set RO and NX regions for init */
81765- set_section_ro_nx(mod->module_init,
81766- mod->init_text_size,
81767- mod->init_ro_size,
81768- mod->init_size);
81769+ set_section_ro_nx(mod->module_init_rx,
81770+ mod->init_size_rx,
81771+ mod->init_size_rx,
81772+ mod->init_size_rx);
81773
81774 do_mod_ctors(mod);
81775 /* Start the module */
81776@@ -3116,11 +3225,12 @@ static int do_init_module(struct module *mod)
81777 mod->strtab = mod->core_strtab;
81778 #endif
81779 unset_module_init_ro_nx(mod);
81780- module_free(mod, mod->module_init);
81781- mod->module_init = NULL;
81782- mod->init_size = 0;
81783- mod->init_ro_size = 0;
81784- mod->init_text_size = 0;
81785+ module_free(mod, mod->module_init_rw);
81786+ module_free_exec(mod, mod->module_init_rx);
81787+ mod->module_init_rw = NULL;
81788+ mod->module_init_rx = NULL;
81789+ mod->init_size_rw = 0;
81790+ mod->init_size_rx = 0;
81791 mutex_unlock(&module_mutex);
81792 wake_up_all(&module_wq);
81793
81794@@ -3252,9 +3362,38 @@ static int load_module(struct load_info *info, const char __user *uargs,
81795 if (err)
81796 goto free_unload;
81797
81798+ /* Now copy in args */
81799+ mod->args = strndup_user(uargs, ~0UL >> 1);
81800+ if (IS_ERR(mod->args)) {
81801+ err = PTR_ERR(mod->args);
81802+ goto free_unload;
81803+ }
81804+
81805 /* Set up MODINFO_ATTR fields */
81806 setup_modinfo(mod, info);
81807
81808+#ifdef CONFIG_GRKERNSEC_MODHARDEN
81809+ {
81810+ char *p, *p2;
81811+
81812+ if (strstr(mod->args, "grsec_modharden_netdev")) {
81813+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.\n", mod->name);
81814+ err = -EPERM;
81815+ goto free_modinfo;
81816+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
81817+ p += sizeof("grsec_modharden_normal") - 1;
81818+ p2 = strstr(p, "_");
81819+ if (p2) {
81820+ *p2 = '\0';
81821+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
81822+ *p2 = '_';
81823+ }
81824+ err = -EPERM;
81825+ goto free_modinfo;
81826+ }
81827+ }
81828+#endif
81829+
81830 /* Fix up syms, so that st_value is a pointer to location. */
81831 err = simplify_symbols(mod, info);
81832 if (err < 0)
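
The sentinel strings parsed in the hunk above ("grsec_modharden_netdev", "grsec_modharden_normal<uid>_") are appended to the module arguments by the request_module() paths elsewhere in this patch; the exact uid encoding is inferred here from the parser itself. A standalone sketch of the in-place uid extraction, including the temporary NUL splice:

#include <stdio.h>
#include <string.h>

/* Sketch of the arg scan above.  The uid sits between the
 * "grsec_modharden_normal" prefix and the next '_', and is spliced
 * out with a temporary NUL so it can be printed, then restored. */
static void check_modharden_args(char *args, const char *modname)
{
	char *p, *p2;

	if (strstr(args, "grsec_modharden_netdev")) {
		fprintf(stderr, "denied netdev auto-load of %.64s\n", modname);
		return;
	}
	p = strstr(args, "grsec_modharden_normal");
	if (p) {
		p += sizeof("grsec_modharden_normal") - 1;
		p2 = strchr(p, '_');
		if (p2) {
			*p2 = '\0';		/* terminate the uid in place */
			fprintf(stderr, "denied auto-load of %.64s by uid %.9s\n",
				modname, p);
			*p2 = '_';		/* restore the args string */
		}
	}
}

int main(void)
{
	char args[] = "grsec_modharden_normal1000_ opt=1";
	check_modharden_args(args, "dummy_mod");
	return 0;
}
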
81833@@ -3270,13 +3409,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
81834
81835 flush_module_icache(mod);
81836
81837- /* Now copy in args */
81838- mod->args = strndup_user(uargs, ~0UL >> 1);
81839- if (IS_ERR(mod->args)) {
81840- err = PTR_ERR(mod->args);
81841- goto free_arch_cleanup;
81842- }
81843-
81844 dynamic_debug_setup(info->debug, info->num_debug);
81845
81846 /* Finally it's fully formed, ready to start executing. */
81847@@ -3311,11 +3443,10 @@ static int load_module(struct load_info *info, const char __user *uargs,
81848 ddebug_cleanup:
81849 dynamic_debug_remove(info->debug);
81850 synchronize_sched();
81851- kfree(mod->args);
81852- free_arch_cleanup:
81853 module_arch_cleanup(mod);
81854 free_modinfo:
81855 free_modinfo(mod);
81856+ kfree(mod->args);
81857 free_unload:
81858 module_unload_free(mod);
81859 unlink_mod:
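
Because mod->args is now allocated before setup_modinfo(), its kfree() moves from the dropped free_arch_cleanup label down into free_modinfo, keeping the unwind order the mirror image of the allocation order. A compact, compilable sketch of that ordered-unwind convention, with all names illustrative:

#include <stdlib.h>

static void *a, *b;

/* Ordered-unwind sketch: allocations run top-down, the labels free
 * bottom-up, so an allocation moved earlier (as mod->args is here)
 * gets its free moved to a later, deeper label.  On success both
 * stay live, as the kernel keeps them for the module's lifetime. */
static int do_load(void)
{
	a = malloc(16);		/* like mod->args: now the first allocation */
	if (!a)
		goto out;
	b = malloc(16);		/* like the modinfo attributes */
	if (!b)
		goto free_a;
	return 0;

free_a:
	free(a);		/* kfree(mod->args) now sits at this depth */
out:
	return -1;
}

int main(void)
{
	return do_load();
}
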
81860@@ -3398,10 +3529,16 @@ static const char *get_ksymbol(struct module *mod,
81861 unsigned long nextval;
81862
81863 /* At worse, next value is at end of module */
81864- if (within_module_init(addr, mod))
81865- nextval = (unsigned long)mod->module_init+mod->init_text_size;
81866+ if (within_module_init_rx(addr, mod))
81867+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
81868+ else if (within_module_init_rw(addr, mod))
81869+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
81870+ else if (within_module_core_rx(addr, mod))
81871+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
81872+ else if (within_module_core_rw(addr, mod))
81873+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
81874 else
81875- nextval = (unsigned long)mod->module_core+mod->core_text_size;
81876+ return NULL;
81877
81878 /* Scan for closest preceding symbol, and next symbol. (ELF
81879 starts real symbols at 1). */
81880@@ -3654,7 +3791,7 @@ static int m_show(struct seq_file *m, void *p)
81881 return 0;
81882
81883 seq_printf(m, "%s %u",
81884- mod->name, mod->init_size + mod->core_size);
81885+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
81886 print_unload_info(m, mod);
81887
81888 /* Informative for users. */
81889@@ -3663,7 +3800,7 @@ static int m_show(struct seq_file *m, void *p)
81890 mod->state == MODULE_STATE_COMING ? "Loading":
81891 "Live");
81892 /* Used by oprofile and other similar tools. */
81893- seq_printf(m, " 0x%pK", mod->module_core);
81894+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
81895
81896 /* Taints info */
81897 if (mod->taints)
81898@@ -3699,7 +3836,17 @@ static const struct file_operations proc_modules_operations = {
81899
81900 static int __init proc_modules_init(void)
81901 {
81902+#ifndef CONFIG_GRKERNSEC_HIDESYM
81903+#ifdef CONFIG_GRKERNSEC_PROC_USER
81904+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
81905+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
81906+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
81907+#else
81908 proc_create("modules", 0, NULL, &proc_modules_operations);
81909+#endif
81910+#else
81911+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
81912+#endif
81913 return 0;
81914 }
81915 module_init(proc_modules_init);
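
The #ifdef ladder above only varies the mode handed to proc_create(). Reduced to a helper; note that a mode of 0 is procfs shorthand that, for regular files in the 3.x tree, ends up as the historical world-readable 0444 (from memory of the __proc_create() defaulting):

#include <sys/stat.h>

/* Mode choice behind the #ifdef ladder for /proc/modules. */
static mode_t proc_modules_mode(int hidesym, int proc_user, int proc_usergroup)
{
	if (hidesym || proc_user)
		return S_IRUSR;			/* 0400 */
	if (proc_usergroup)
		return S_IRUSR | S_IRGRP;	/* 0440 */
	return 0;				/* procfs default (0444) */
}
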
81916@@ -3760,14 +3907,14 @@ struct module *__module_address(unsigned long addr)
81917 {
81918 struct module *mod;
81919
81920- if (addr < module_addr_min || addr > module_addr_max)
81921+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
81922+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
81923 return NULL;
81924
81925 list_for_each_entry_rcu(mod, &modules, list) {
81926 if (mod->state == MODULE_STATE_UNFORMED)
81927 continue;
81928- if (within_module_core(addr, mod)
81929- || within_module_init(addr, mod))
81930+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
81931 return mod;
81932 }
81933 return NULL;
81934@@ -3802,11 +3949,20 @@ bool is_module_text_address(unsigned long addr)
81935 */
81936 struct module *__module_text_address(unsigned long addr)
81937 {
81938- struct module *mod = __module_address(addr);
81939+ struct module *mod;
81940+
81941+#ifdef CONFIG_X86_32
81942+ addr = ktla_ktva(addr);
81943+#endif
81944+
81945+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
81946+ return NULL;
81947+
81948+ mod = __module_address(addr);
81949+
81950 if (mod) {
81951 /* Make sure it's within the text section. */
81952- if (!within(addr, mod->module_init, mod->init_text_size)
81953- && !within(addr, mod->module_core, mod->core_text_size))
81954+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
81955 mod = NULL;
81956 }
81957 return mod;
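
With the layout split, "is this a module text address" reduces to containment in one of the two RX regions, which is what the reworked __module_text_address() above now tests (after the i386 ktla_ktva() translation). A sketch of the predicates; the struct stands in for the module_core_rx/_rw and *_size_rx/_rw fields the patch adds to struct module:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct module_layout {
	uintptr_t core_rx, core_rw, init_rx, init_rw;
	size_t core_size_rx, core_size_rw, init_size_rx, init_size_rw;
};

static bool within(uintptr_t addr, uintptr_t base, size_t size)
{
	return addr >= base && addr - base < size;
}

/* "Text address" now means: inside either RX region, nowhere else. */
static bool is_module_text(uintptr_t a, const struct module_layout *m)
{
	return within(a, m->core_rx, m->core_size_rx) ||
	       within(a, m->init_rx, m->init_size_rx);
}
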
81958diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
81959index 7e3443f..b2a1e6b 100644
81960--- a/kernel/mutex-debug.c
81961+++ b/kernel/mutex-debug.c
81962@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
81963 }
81964
81965 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
81966- struct thread_info *ti)
81967+ struct task_struct *task)
81968 {
81969 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
81970
81971 /* Mark the current thread as blocked on the lock: */
81972- ti->task->blocked_on = waiter;
81973+ task->blocked_on = waiter;
81974 }
81975
81976 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
81977- struct thread_info *ti)
81978+ struct task_struct *task)
81979 {
81980 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
81981- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
81982- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
81983- ti->task->blocked_on = NULL;
81984+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
81985+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
81986+ task->blocked_on = NULL;
81987
81988 list_del_init(&waiter->list);
81989 waiter->task = NULL;
81990diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
81991index 0799fd3..d06ae3b 100644
81992--- a/kernel/mutex-debug.h
81993+++ b/kernel/mutex-debug.h
81994@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
81995 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
81996 extern void debug_mutex_add_waiter(struct mutex *lock,
81997 struct mutex_waiter *waiter,
81998- struct thread_info *ti);
81999+ struct task_struct *task);
82000 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
82001- struct thread_info *ti);
82002+ struct task_struct *task);
82003 extern void debug_mutex_unlock(struct mutex *lock);
82004 extern void debug_mutex_init(struct mutex *lock, const char *name,
82005 struct lock_class_key *key);
82006diff --git a/kernel/mutex.c b/kernel/mutex.c
82007index ad53a66..f1bf8bc 100644
82008--- a/kernel/mutex.c
82009+++ b/kernel/mutex.c
82010@@ -134,7 +134,7 @@ void mspin_lock(struct mspin_node **lock, struct mspin_node *node)
82011 node->locked = 1;
82012 return;
82013 }
82014- ACCESS_ONCE(prev->next) = node;
82015+ ACCESS_ONCE_RW(prev->next) = node;
82016 smp_wmb();
82017 /* Wait until the lock holder passes the lock down */
82018 while (!ACCESS_ONCE(node->locked))
82019@@ -155,7 +155,7 @@ static void mspin_unlock(struct mspin_node **lock, struct mspin_node *node)
82020 while (!(next = ACCESS_ONCE(node->next)))
82021 arch_mutex_cpu_relax();
82022 }
82023- ACCESS_ONCE(next->locked) = 1;
82024+ ACCESS_ONCE_RW(next->locked) = 1;
82025 smp_wmb();
82026 }
82027
82028@@ -341,7 +341,7 @@ slowpath:
82029 spin_lock_mutex(&lock->wait_lock, flags);
82030
82031 debug_mutex_lock_common(lock, &waiter);
82032- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
82033+ debug_mutex_add_waiter(lock, &waiter, task);
82034
82035 /* add waiting tasks to the end of the waitqueue (FIFO): */
82036 list_add_tail(&waiter.list, &lock->wait_list);
82037@@ -371,8 +371,7 @@ slowpath:
82038 * TASK_UNINTERRUPTIBLE case.)
82039 */
82040 if (unlikely(signal_pending_state(state, task))) {
82041- mutex_remove_waiter(lock, &waiter,
82042- task_thread_info(task));
82043+ mutex_remove_waiter(lock, &waiter, task);
82044 mutex_release(&lock->dep_map, 1, ip);
82045 spin_unlock_mutex(&lock->wait_lock, flags);
82046
82047@@ -391,7 +390,7 @@ slowpath:
82048 done:
82049 lock_acquired(&lock->dep_map, ip);
82050 /* got the lock - rejoice! */
82051- mutex_remove_waiter(lock, &waiter, current_thread_info());
82052+ mutex_remove_waiter(lock, &waiter, task);
82053 mutex_set_owner(lock);
82054
82055 /* set it to 0 if there are no waiters left: */
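
The ACCESS_ONCE_RW conversions in this file and below rely on definitions the PaX side of this patch introduces in include/linux/compiler.h; approximately:

/* Approximate definitions: the plain form gains a const qualifier, so
 * using it as an assignment target no longer compiles, while the _RW
 * form keeps the historical writable behaviour. */
#define ACCESS_ONCE(x)    (*(const volatile typeof(x) *)&(x))
#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))

The const-qualified form is how genuine write sites, such as the MCS lock handoff above, were flushed out and converted explicitly.
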
82056diff --git a/kernel/notifier.c b/kernel/notifier.c
82057index 2d5cc4c..d9ea600 100644
82058--- a/kernel/notifier.c
82059+++ b/kernel/notifier.c
82060@@ -5,6 +5,7 @@
82061 #include <linux/rcupdate.h>
82062 #include <linux/vmalloc.h>
82063 #include <linux/reboot.h>
82064+#include <linux/mm.h>
82065
82066 /*
82067 * Notifier list for kernel code which wants to be called
82068@@ -24,10 +25,12 @@ static int notifier_chain_register(struct notifier_block **nl,
82069 while ((*nl) != NULL) {
82070 if (n->priority > (*nl)->priority)
82071 break;
82072- nl = &((*nl)->next);
82073+ nl = (struct notifier_block **)&((*nl)->next);
82074 }
82075- n->next = *nl;
82076+ pax_open_kernel();
82077+ *(const void **)&n->next = *nl;
82078 rcu_assign_pointer(*nl, n);
82079+ pax_close_kernel();
82080 return 0;
82081 }
82082
82083@@ -39,10 +42,12 @@ static int notifier_chain_cond_register(struct notifier_block **nl,
82084 return 0;
82085 if (n->priority > (*nl)->priority)
82086 break;
82087- nl = &((*nl)->next);
82088+ nl = (struct notifier_block **)&((*nl)->next);
82089 }
82090- n->next = *nl;
82091+ pax_open_kernel();
82092+ *(const void **)&n->next = *nl;
82093 rcu_assign_pointer(*nl, n);
82094+ pax_close_kernel();
82095 return 0;
82096 }
82097
82098@@ -51,10 +56,12 @@ static int notifier_chain_unregister(struct notifier_block **nl,
82099 {
82100 while ((*nl) != NULL) {
82101 if ((*nl) == n) {
82102+ pax_open_kernel();
82103 rcu_assign_pointer(*nl, n->next);
82104+ pax_close_kernel();
82105 return 0;
82106 }
82107- nl = &((*nl)->next);
82108+ nl = (struct notifier_block **)&((*nl)->next);
82109 }
82110 return -ENOENT;
82111 }
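
Since PaX constifies struct notifier_block, the ->next link lives in read-only data: every store to it is bracketed by pax_open_kernel()/pax_close_kernel(), with a cast to strip the const. A userspace model of the priority-ordered insert, mirroring the cast idiom (the open/close bracketing has no direct equivalent in this sketch):

#include <stddef.h>

struct nb {
	int priority;
	const struct nb *next;
};

/* Priority-ordered insert with nominally-const nodes, mirroring the
 * "*(const void **)&n->next = *nl" idiom in the hunk above. */
static int chain_register(const struct nb **nl, struct nb *n)
{
	while (*nl != NULL) {
		if (n->priority > (*nl)->priority)
			break;
		nl = (const struct nb **)&(*nl)->next;
	}
	*(const struct nb **)&n->next = *nl;	/* the one sanctioned write */
	*nl = n;
	return 0;
}
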
82112diff --git a/kernel/panic.c b/kernel/panic.c
82113index 167ec09..0dda5f9 100644
82114--- a/kernel/panic.c
82115+++ b/kernel/panic.c
82116@@ -400,7 +400,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
82117 unsigned taint, struct slowpath_args *args)
82118 {
82119 printk(KERN_WARNING "------------[ cut here ]------------\n");
82120- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
82121+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
82122
82123 if (args)
82124 vprintk(args->fmt, args->args);
82125@@ -453,7 +453,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
82126 */
82127 void __stack_chk_fail(void)
82128 {
82129- panic("stack-protector: Kernel stack is corrupted in: %p\n",
82130+ dump_stack();
82131+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
82132 __builtin_return_address(0));
82133 }
82134 EXPORT_SYMBOL(__stack_chk_fail);
82135diff --git a/kernel/pid.c b/kernel/pid.c
82136index 0db3e79..95b9dc2 100644
82137--- a/kernel/pid.c
82138+++ b/kernel/pid.c
82139@@ -33,6 +33,7 @@
82140 #include <linux/rculist.h>
82141 #include <linux/bootmem.h>
82142 #include <linux/hash.h>
82143+#include <linux/security.h>
82144 #include <linux/pid_namespace.h>
82145 #include <linux/init_task.h>
82146 #include <linux/syscalls.h>
82147@@ -47,7 +48,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
82148
82149 int pid_max = PID_MAX_DEFAULT;
82150
82151-#define RESERVED_PIDS 300
82152+#define RESERVED_PIDS 500
82153
82154 int pid_max_min = RESERVED_PIDS + 1;
82155 int pid_max_max = PID_MAX_LIMIT;
82156@@ -442,10 +443,18 @@ EXPORT_SYMBOL(pid_task);
82157 */
82158 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
82159 {
82160+ struct task_struct *task;
82161+
82162 rcu_lockdep_assert(rcu_read_lock_held(),
82163 "find_task_by_pid_ns() needs rcu_read_lock()"
82164 " protection");
82165- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
82166+
82167+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
82168+
82169+ if (gr_pid_is_chrooted(task))
82170+ return NULL;
82171+
82172+ return task;
82173 }
82174
82175 struct task_struct *find_task_by_vpid(pid_t vnr)
82176@@ -453,6 +462,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
82177 return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
82178 }
82179
82180+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
82181+{
82182+ rcu_lockdep_assert(rcu_read_lock_held(),
82183+ "find_task_by_vpid_unrestricted() needs rcu_read_lock()"
82184+ " protection");
82185+ return pid_task(find_pid_ns(vnr, task_active_pid_ns(current)), PIDTYPE_PID);
82186+}
82187+
82188 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
82189 {
82190 struct pid *pid;
82191diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
82192index 6917e8e..9909aeb 100644
82193--- a/kernel/pid_namespace.c
82194+++ b/kernel/pid_namespace.c
82195@@ -247,7 +247,7 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write,
82196 void __user *buffer, size_t *lenp, loff_t *ppos)
82197 {
82198 struct pid_namespace *pid_ns = task_active_pid_ns(current);
82199- struct ctl_table tmp = *table;
82200+ ctl_table_no_const tmp = *table;
82201
82202 if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
82203 return -EPERM;
82204diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
82205index 42670e9..8719c2f 100644
82206--- a/kernel/posix-cpu-timers.c
82207+++ b/kernel/posix-cpu-timers.c
82208@@ -1636,14 +1636,14 @@ struct k_clock clock_posix_cpu = {
82209
82210 static __init int init_posix_cpu_timers(void)
82211 {
82212- struct k_clock process = {
82213+ static struct k_clock process = {
82214 .clock_getres = process_cpu_clock_getres,
82215 .clock_get = process_cpu_clock_get,
82216 .timer_create = process_cpu_timer_create,
82217 .nsleep = process_cpu_nsleep,
82218 .nsleep_restart = process_cpu_nsleep_restart,
82219 };
82220- struct k_clock thread = {
82221+ static struct k_clock thread = {
82222 .clock_getres = thread_cpu_clock_getres,
82223 .clock_get = thread_cpu_clock_get,
82224 .timer_create = thread_cpu_timer_create,
82225diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
82226index 424c2d4..679242f 100644
82227--- a/kernel/posix-timers.c
82228+++ b/kernel/posix-timers.c
82229@@ -43,6 +43,7 @@
82230 #include <linux/hash.h>
82231 #include <linux/posix-clock.h>
82232 #include <linux/posix-timers.h>
82233+#include <linux/grsecurity.h>
82234 #include <linux/syscalls.h>
82235 #include <linux/wait.h>
82236 #include <linux/workqueue.h>
82237@@ -122,7 +123,7 @@ static DEFINE_SPINLOCK(hash_lock);
82238 * which we beg off on and pass to do_sys_settimeofday().
82239 */
82240
82241-static struct k_clock posix_clocks[MAX_CLOCKS];
82242+static struct k_clock *posix_clocks[MAX_CLOCKS];
82243
82244 /*
82245 * These ones are defined below.
82246@@ -275,7 +276,7 @@ static int posix_get_tai(clockid_t which_clock, struct timespec *tp)
82247 */
82248 static __init int init_posix_timers(void)
82249 {
82250- struct k_clock clock_realtime = {
82251+ static struct k_clock clock_realtime = {
82252 .clock_getres = hrtimer_get_res,
82253 .clock_get = posix_clock_realtime_get,
82254 .clock_set = posix_clock_realtime_set,
82255@@ -287,7 +288,7 @@ static __init int init_posix_timers(void)
82256 .timer_get = common_timer_get,
82257 .timer_del = common_timer_del,
82258 };
82259- struct k_clock clock_monotonic = {
82260+ static struct k_clock clock_monotonic = {
82261 .clock_getres = hrtimer_get_res,
82262 .clock_get = posix_ktime_get_ts,
82263 .nsleep = common_nsleep,
82264@@ -297,19 +298,19 @@ static __init int init_posix_timers(void)
82265 .timer_get = common_timer_get,
82266 .timer_del = common_timer_del,
82267 };
82268- struct k_clock clock_monotonic_raw = {
82269+ static struct k_clock clock_monotonic_raw = {
82270 .clock_getres = hrtimer_get_res,
82271 .clock_get = posix_get_monotonic_raw,
82272 };
82273- struct k_clock clock_realtime_coarse = {
82274+ static struct k_clock clock_realtime_coarse = {
82275 .clock_getres = posix_get_coarse_res,
82276 .clock_get = posix_get_realtime_coarse,
82277 };
82278- struct k_clock clock_monotonic_coarse = {
82279+ static struct k_clock clock_monotonic_coarse = {
82280 .clock_getres = posix_get_coarse_res,
82281 .clock_get = posix_get_monotonic_coarse,
82282 };
82283- struct k_clock clock_tai = {
82284+ static struct k_clock clock_tai = {
82285 .clock_getres = hrtimer_get_res,
82286 .clock_get = posix_get_tai,
82287 .nsleep = common_nsleep,
82288@@ -319,7 +320,7 @@ static __init int init_posix_timers(void)
82289 .timer_get = common_timer_get,
82290 .timer_del = common_timer_del,
82291 };
82292- struct k_clock clock_boottime = {
82293+ static struct k_clock clock_boottime = {
82294 .clock_getres = hrtimer_get_res,
82295 .clock_get = posix_get_boottime,
82296 .nsleep = common_nsleep,
82297@@ -531,7 +532,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
82298 return;
82299 }
82300
82301- posix_clocks[clock_id] = *new_clock;
82302+ posix_clocks[clock_id] = new_clock;
82303 }
82304 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
82305
82306@@ -577,9 +578,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
82307 return (id & CLOCKFD_MASK) == CLOCKFD ?
82308 &clock_posix_dynamic : &clock_posix_cpu;
82309
82310- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
82311+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
82312 return NULL;
82313- return &posix_clocks[id];
82314+ return posix_clocks[id];
82315 }
82316
82317 static int common_timer_create(struct k_itimer *new_timer)
82318@@ -597,7 +598,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
82319 struct k_clock *kc = clockid_to_kclock(which_clock);
82320 struct k_itimer *new_timer;
82321 int error, new_timer_id;
82322- sigevent_t event;
82323+ sigevent_t event = { };
82324 int it_id_set = IT_ID_NOT_SET;
82325
82326 if (!kc)
82327@@ -1011,6 +1012,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
82328 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
82329 return -EFAULT;
82330
82331+ /* Only the CLOCK_REALTIME clock can be set; all other clocks
82332+    have their clock_set fptr set to a nosettime dummy function.
82333+    CLOCK_REALTIME has a NULL clock_set fptr, which causes it to
82334+    call common_clock_set, which calls do_sys_settimeofday, which
82335+    we hook.
82336+ */
82337+
82338 return kc->clock_set(which_clock, &new_tp);
82339 }
82340
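
Turning posix_clocks[] into an array of pointers lets every k_clock instance be a function-local static, that is, storage the constify plugin can place in a read-only section, at the price of a NULL check in the lookup. A sketch of the registry shape, with bounds and fields illustrative:

#include <stddef.h>

struct k_clock { int (*clock_getres)(int id, void *res); };

#define MAX_CLOCKS 16
static struct k_clock *posix_clocks[MAX_CLOCKS];

/* Registration stores a pointer to a static object instead of copying
 * the struct, mirroring the converted posix_timers_register_clock(). */
static void register_clock(int id, struct k_clock *kc)
{
	if (id >= 0 && id < MAX_CLOCKS)
		posix_clocks[id] = kc;
}

/* Lookup must validate the slot before chasing ->clock_getres. */
static struct k_clock *clockid_to_kclock(int id)
{
	if (id < 0 || id >= MAX_CLOCKS ||
	    !posix_clocks[id] || !posix_clocks[id]->clock_getres)
		return NULL;
	return posix_clocks[id];
}
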
82341diff --git a/kernel/power/process.c b/kernel/power/process.c
82342index 98088e0..aaf95c0 100644
82343--- a/kernel/power/process.c
82344+++ b/kernel/power/process.c
82345@@ -33,6 +33,7 @@ static int try_to_freeze_tasks(bool user_only)
82346 u64 elapsed_csecs64;
82347 unsigned int elapsed_csecs;
82348 bool wakeup = false;
82349+ bool timedout = false;
82350
82351 do_gettimeofday(&start);
82352
82353@@ -43,13 +44,20 @@ static int try_to_freeze_tasks(bool user_only)
82354
82355 while (true) {
82356 todo = 0;
82357+ if (time_after(jiffies, end_time))
82358+ timedout = true;
82359 read_lock(&tasklist_lock);
82360 do_each_thread(g, p) {
82361 if (p == current || !freeze_task(p))
82362 continue;
82363
82364- if (!freezer_should_skip(p))
82365+ if (!freezer_should_skip(p)) {
82366 todo++;
82367+ if (timedout) {
82368+ printk(KERN_ERR "Task refusing to freeze:\n");
82369+ sched_show_task(p);
82370+ }
82371+ }
82372 } while_each_thread(g, p);
82373 read_unlock(&tasklist_lock);
82374
82375@@ -58,7 +66,7 @@ static int try_to_freeze_tasks(bool user_only)
82376 todo += wq_busy;
82377 }
82378
82379- if (!todo || time_after(jiffies, end_time))
82380+ if (!todo || timedout)
82381 break;
82382
82383 if (pm_wakeup_pending()) {
82384diff --git a/kernel/printk.c b/kernel/printk.c
82385index d37d45c..ab918b3 100644
82386--- a/kernel/printk.c
82387+++ b/kernel/printk.c
82388@@ -390,6 +390,11 @@ static int check_syslog_permissions(int type, bool from_file)
82389 if (from_file && type != SYSLOG_ACTION_OPEN)
82390 return 0;
82391
82392+#ifdef CONFIG_GRKERNSEC_DMESG
82393+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
82394+ return -EPERM;
82395+#endif
82396+
82397 if (syslog_action_restricted(type)) {
82398 if (capable(CAP_SYSLOG))
82399 return 0;
82400diff --git a/kernel/profile.c b/kernel/profile.c
82401index 0bf4007..6234708 100644
82402--- a/kernel/profile.c
82403+++ b/kernel/profile.c
82404@@ -37,7 +37,7 @@ struct profile_hit {
82405 #define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
82406 #define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
82407
82408-static atomic_t *prof_buffer;
82409+static atomic_unchecked_t *prof_buffer;
82410 static unsigned long prof_len, prof_shift;
82411
82412 int prof_on __read_mostly;
82413@@ -260,7 +260,7 @@ static void profile_flip_buffers(void)
82414 hits[i].pc = 0;
82415 continue;
82416 }
82417- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
82418+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
82419 hits[i].hits = hits[i].pc = 0;
82420 }
82421 }
82422@@ -321,9 +321,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
82423 * Add the current hit(s) and flush the write-queue out
82424 * to the global buffer:
82425 */
82426- atomic_add(nr_hits, &prof_buffer[pc]);
82427+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
82428 for (i = 0; i < NR_PROFILE_HIT; ++i) {
82429- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
82430+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
82431 hits[i].pc = hits[i].hits = 0;
82432 }
82433 out:
82434@@ -398,7 +398,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
82435 {
82436 unsigned long pc;
82437 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
82438- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
82439+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
82440 }
82441 #endif /* !CONFIG_SMP */
82442
82443@@ -494,7 +494,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
82444 return -EFAULT;
82445 buf++; p++; count--; read++;
82446 }
82447- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
82448+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
82449 if (copy_to_user(buf, (void *)pnt, count))
82450 return -EFAULT;
82451 read += count;
82452@@ -525,7 +525,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
82453 }
82454 #endif
82455 profile_discard_flip_buffers();
82456- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
82457+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
82458 return count;
82459 }
82460
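
The atomic_unchecked_t conversions here, and throughout the RCU files below, mark counters that are allowed to wrap. Under PaX's REFCOUNT hardening the plain atomic_t operations trap on overflow; the _unchecked variants keep the historical wrapping semantics. An approximate userspace rendering with GCC builtins follows; the kernel versions are per-arch assembly:

/* Approximate shape of the PaX REFCOUNT escape hatch, for statistics
 * counters such as prof_buffer[] above that may legitimately wrap. */
typedef struct { int counter; } atomic_unchecked_t;

static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
{
	__atomic_fetch_add(&v->counter, i, __ATOMIC_RELAXED);
}

static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
	return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
}
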
82461diff --git a/kernel/ptrace.c b/kernel/ptrace.c
82462index 335a7ae..3bbbceb 100644
82463--- a/kernel/ptrace.c
82464+++ b/kernel/ptrace.c
82465@@ -326,7 +326,7 @@ static int ptrace_attach(struct task_struct *task, long request,
82466 if (seize)
82467 flags |= PT_SEIZED;
82468 rcu_read_lock();
82469- if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
82470+ if (ns_capable_nolog(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
82471 flags |= PT_PTRACE_CAP;
82472 rcu_read_unlock();
82473 task->ptrace = flags;
82474@@ -537,7 +537,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
82475 break;
82476 return -EIO;
82477 }
82478- if (copy_to_user(dst, buf, retval))
82479+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
82480 return -EFAULT;
82481 copied += retval;
82482 src += retval;
82483@@ -805,7 +805,7 @@ int ptrace_request(struct task_struct *child, long request,
82484 bool seized = child->ptrace & PT_SEIZED;
82485 int ret = -EIO;
82486 siginfo_t siginfo, *si;
82487- void __user *datavp = (void __user *) data;
82488+ void __user *datavp = (__force void __user *) data;
82489 unsigned long __user *datalp = datavp;
82490 unsigned long flags;
82491
82492@@ -1011,14 +1011,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
82493 goto out;
82494 }
82495
82496+ if (gr_handle_ptrace(child, request)) {
82497+ ret = -EPERM;
82498+ goto out_put_task_struct;
82499+ }
82500+
82501 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
82502 ret = ptrace_attach(child, request, addr, data);
82503 /*
82504 * Some architectures need to do book-keeping after
82505 * a ptrace attach.
82506 */
82507- if (!ret)
82508+ if (!ret) {
82509 arch_ptrace_attach(child);
82510+ gr_audit_ptrace(child);
82511+ }
82512 goto out_put_task_struct;
82513 }
82514
82515@@ -1046,7 +1053,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
82516 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
82517 if (copied != sizeof(tmp))
82518 return -EIO;
82519- return put_user(tmp, (unsigned long __user *)data);
82520+ return put_user(tmp, (__force unsigned long __user *)data);
82521 }
82522
82523 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
82524@@ -1140,7 +1147,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
82525 }
82526
82527 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
82528- compat_long_t addr, compat_long_t data)
82529+ compat_ulong_t addr, compat_ulong_t data)
82530 {
82531 struct task_struct *child;
82532 long ret;
82533@@ -1156,14 +1163,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
82534 goto out;
82535 }
82536
82537+ if (gr_handle_ptrace(child, request)) {
82538+ ret = -EPERM;
82539+ goto out_put_task_struct;
82540+ }
82541+
82542 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
82543 ret = ptrace_attach(child, request, addr, data);
82544 /*
82545 * Some architectures need to do book-keeping after
82546 * a ptrace attach.
82547 */
82548- if (!ret)
82549+ if (!ret) {
82550 arch_ptrace_attach(child);
82551+ gr_audit_ptrace(child);
82552+ }
82553 goto out_put_task_struct;
82554 }
82555
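
The ptrace_readdata() hunk earlier in this file's diff adds a belt-and-braces bound: never copy more out of the fixed stack buffer than it can hold, even though access_process_vm() should never report more than was asked for. The pattern as a standalone helper, with the buffer size illustrative:

#include <string.h>
#include <sys/types.h>

/* Clamp-or-fail on the producer's byte count before copying out of a
 * fixed buffer, as the hardened ptrace_readdata() does. */
static ssize_t copy_out(char *dst, const char buf[static 128], ssize_t retval)
{
	if (retval < 0 || retval > 128)
		return -1;	/* -EFAULT in the kernel */
	memcpy(dst, buf, (size_t)retval);
	return retval;
}
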
82556diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
82557index 48ab703..07561d4 100644
82558--- a/kernel/rcupdate.c
82559+++ b/kernel/rcupdate.c
82560@@ -439,10 +439,10 @@ int rcu_jiffies_till_stall_check(void)
82561 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
82562 */
82563 if (till_stall_check < 3) {
82564- ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
82565+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
82566 till_stall_check = 3;
82567 } else if (till_stall_check > 300) {
82568- ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
82569+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
82570 till_stall_check = 300;
82571 }
82572 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
82573diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
82574index a0714a5..2ab5e34 100644
82575--- a/kernel/rcutiny.c
82576+++ b/kernel/rcutiny.c
82577@@ -46,7 +46,7 @@
82578 struct rcu_ctrlblk;
82579 static void invoke_rcu_callbacks(void);
82580 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
82581-static void rcu_process_callbacks(struct softirq_action *unused);
82582+static void rcu_process_callbacks(void);
82583 static void __call_rcu(struct rcu_head *head,
82584 void (*func)(struct rcu_head *rcu),
82585 struct rcu_ctrlblk *rcp);
82586@@ -312,7 +312,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
82587 rcu_is_callbacks_kthread()));
82588 }
82589
82590-static void rcu_process_callbacks(struct softirq_action *unused)
82591+static void rcu_process_callbacks(void)
82592 {
82593 __rcu_process_callbacks(&rcu_sched_ctrlblk);
82594 __rcu_process_callbacks(&rcu_bh_ctrlblk);
82595diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
82596index 8a23300..4255818 100644
82597--- a/kernel/rcutiny_plugin.h
82598+++ b/kernel/rcutiny_plugin.h
82599@@ -945,7 +945,7 @@ static int rcu_kthread(void *arg)
82600 have_rcu_kthread_work = morework;
82601 local_irq_restore(flags);
82602 if (work)
82603- rcu_process_callbacks(NULL);
82604+ rcu_process_callbacks();
82605 schedule_timeout_interruptible(1); /* Leave CPU for others. */
82606 }
82607
82608diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
82609index e1f3a8c..42c94a2 100644
82610--- a/kernel/rcutorture.c
82611+++ b/kernel/rcutorture.c
82612@@ -164,12 +164,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
82613 { 0 };
82614 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
82615 { 0 };
82616-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
82617-static atomic_t n_rcu_torture_alloc;
82618-static atomic_t n_rcu_torture_alloc_fail;
82619-static atomic_t n_rcu_torture_free;
82620-static atomic_t n_rcu_torture_mberror;
82621-static atomic_t n_rcu_torture_error;
82622+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
82623+static atomic_unchecked_t n_rcu_torture_alloc;
82624+static atomic_unchecked_t n_rcu_torture_alloc_fail;
82625+static atomic_unchecked_t n_rcu_torture_free;
82626+static atomic_unchecked_t n_rcu_torture_mberror;
82627+static atomic_unchecked_t n_rcu_torture_error;
82628 static long n_rcu_torture_barrier_error;
82629 static long n_rcu_torture_boost_ktrerror;
82630 static long n_rcu_torture_boost_rterror;
82631@@ -287,11 +287,11 @@ rcu_torture_alloc(void)
82632
82633 spin_lock_bh(&rcu_torture_lock);
82634 if (list_empty(&rcu_torture_freelist)) {
82635- atomic_inc(&n_rcu_torture_alloc_fail);
82636+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
82637 spin_unlock_bh(&rcu_torture_lock);
82638 return NULL;
82639 }
82640- atomic_inc(&n_rcu_torture_alloc);
82641+ atomic_inc_unchecked(&n_rcu_torture_alloc);
82642 p = rcu_torture_freelist.next;
82643 list_del_init(p);
82644 spin_unlock_bh(&rcu_torture_lock);
82645@@ -304,7 +304,7 @@ rcu_torture_alloc(void)
82646 static void
82647 rcu_torture_free(struct rcu_torture *p)
82648 {
82649- atomic_inc(&n_rcu_torture_free);
82650+ atomic_inc_unchecked(&n_rcu_torture_free);
82651 spin_lock_bh(&rcu_torture_lock);
82652 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
82653 spin_unlock_bh(&rcu_torture_lock);
82654@@ -424,7 +424,7 @@ rcu_torture_cb(struct rcu_head *p)
82655 i = rp->rtort_pipe_count;
82656 if (i > RCU_TORTURE_PIPE_LEN)
82657 i = RCU_TORTURE_PIPE_LEN;
82658- atomic_inc(&rcu_torture_wcount[i]);
82659+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
82660 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
82661 rp->rtort_mbtest = 0;
82662 rcu_torture_free(rp);
82663@@ -472,7 +472,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
82664 i = rp->rtort_pipe_count;
82665 if (i > RCU_TORTURE_PIPE_LEN)
82666 i = RCU_TORTURE_PIPE_LEN;
82667- atomic_inc(&rcu_torture_wcount[i]);
82668+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
82669 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
82670 rp->rtort_mbtest = 0;
82671 list_del(&rp->rtort_free);
82672@@ -990,7 +990,7 @@ rcu_torture_writer(void *arg)
82673 i = old_rp->rtort_pipe_count;
82674 if (i > RCU_TORTURE_PIPE_LEN)
82675 i = RCU_TORTURE_PIPE_LEN;
82676- atomic_inc(&rcu_torture_wcount[i]);
82677+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
82678 old_rp->rtort_pipe_count++;
82679 cur_ops->deferred_free(old_rp);
82680 }
82681@@ -1076,7 +1076,7 @@ static void rcu_torture_timer(unsigned long unused)
82682 return;
82683 }
82684 if (p->rtort_mbtest == 0)
82685- atomic_inc(&n_rcu_torture_mberror);
82686+ atomic_inc_unchecked(&n_rcu_torture_mberror);
82687 spin_lock(&rand_lock);
82688 cur_ops->read_delay(&rand);
82689 n_rcu_torture_timers++;
82690@@ -1146,7 +1146,7 @@ rcu_torture_reader(void *arg)
82691 continue;
82692 }
82693 if (p->rtort_mbtest == 0)
82694- atomic_inc(&n_rcu_torture_mberror);
82695+ atomic_inc_unchecked(&n_rcu_torture_mberror);
82696 cur_ops->read_delay(&rand);
82697 preempt_disable();
82698 pipe_count = p->rtort_pipe_count;
82699@@ -1209,11 +1209,11 @@ rcu_torture_printk(char *page)
82700 rcu_torture_current,
82701 rcu_torture_current_version,
82702 list_empty(&rcu_torture_freelist),
82703- atomic_read(&n_rcu_torture_alloc),
82704- atomic_read(&n_rcu_torture_alloc_fail),
82705- atomic_read(&n_rcu_torture_free));
82706+ atomic_read_unchecked(&n_rcu_torture_alloc),
82707+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
82708+ atomic_read_unchecked(&n_rcu_torture_free));
82709 cnt += sprintf(&page[cnt], "rtmbe: %d rtbke: %ld rtbre: %ld ",
82710- atomic_read(&n_rcu_torture_mberror),
82711+ atomic_read_unchecked(&n_rcu_torture_mberror),
82712 n_rcu_torture_boost_ktrerror,
82713 n_rcu_torture_boost_rterror);
82714 cnt += sprintf(&page[cnt], "rtbf: %ld rtb: %ld nt: %ld ",
82715@@ -1232,14 +1232,14 @@ rcu_torture_printk(char *page)
82716 n_barrier_attempts,
82717 n_rcu_torture_barrier_error);
82718 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
82719- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
82720+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
82721 n_rcu_torture_barrier_error != 0 ||
82722 n_rcu_torture_boost_ktrerror != 0 ||
82723 n_rcu_torture_boost_rterror != 0 ||
82724 n_rcu_torture_boost_failure != 0 ||
82725 i > 1) {
82726 cnt += sprintf(&page[cnt], "!!! ");
82727- atomic_inc(&n_rcu_torture_error);
82728+ atomic_inc_unchecked(&n_rcu_torture_error);
82729 WARN_ON_ONCE(1);
82730 }
82731 cnt += sprintf(&page[cnt], "Reader Pipe: ");
82732@@ -1253,7 +1253,7 @@ rcu_torture_printk(char *page)
82733 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
82734 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
82735 cnt += sprintf(&page[cnt], " %d",
82736- atomic_read(&rcu_torture_wcount[i]));
82737+ atomic_read_unchecked(&rcu_torture_wcount[i]));
82738 }
82739 cnt += sprintf(&page[cnt], "\n");
82740 if (cur_ops->stats)
82741@@ -1962,7 +1962,7 @@ rcu_torture_cleanup(void)
82742
82743 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
82744
82745- if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
82746+ if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
82747 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
82748 else if (n_online_successes != n_online_attempts ||
82749 n_offline_successes != n_offline_attempts)
82750@@ -2031,18 +2031,18 @@ rcu_torture_init(void)
82751
82752 rcu_torture_current = NULL;
82753 rcu_torture_current_version = 0;
82754- atomic_set(&n_rcu_torture_alloc, 0);
82755- atomic_set(&n_rcu_torture_alloc_fail, 0);
82756- atomic_set(&n_rcu_torture_free, 0);
82757- atomic_set(&n_rcu_torture_mberror, 0);
82758- atomic_set(&n_rcu_torture_error, 0);
82759+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
82760+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
82761+ atomic_set_unchecked(&n_rcu_torture_free, 0);
82762+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
82763+ atomic_set_unchecked(&n_rcu_torture_error, 0);
82764 n_rcu_torture_barrier_error = 0;
82765 n_rcu_torture_boost_ktrerror = 0;
82766 n_rcu_torture_boost_rterror = 0;
82767 n_rcu_torture_boost_failure = 0;
82768 n_rcu_torture_boosts = 0;
82769 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
82770- atomic_set(&rcu_torture_wcount[i], 0);
82771+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
82772 for_each_possible_cpu(cpu) {
82773 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
82774 per_cpu(rcu_torture_count, cpu)[i] = 0;
82775diff --git a/kernel/rcutree.c b/kernel/rcutree.c
82776index 3538001..e379e0b 100644
82777--- a/kernel/rcutree.c
82778+++ b/kernel/rcutree.c
82779@@ -358,9 +358,9 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
82780 rcu_prepare_for_idle(smp_processor_id());
82781 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
82782 smp_mb__before_atomic_inc(); /* See above. */
82783- atomic_inc(&rdtp->dynticks);
82784+ atomic_inc_unchecked(&rdtp->dynticks);
82785 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
82786- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
82787+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
82788
82789 /*
82790 * It is illegal to enter an extended quiescent state while
82791@@ -496,10 +496,10 @@ static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
82792 int user)
82793 {
82794 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
82795- atomic_inc(&rdtp->dynticks);
82796+ atomic_inc_unchecked(&rdtp->dynticks);
82797 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
82798 smp_mb__after_atomic_inc(); /* See above. */
82799- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
82800+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
82801 rcu_cleanup_after_idle(smp_processor_id());
82802 trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
82803 if (!user && !is_idle_task(current)) {
82804@@ -638,14 +638,14 @@ void rcu_nmi_enter(void)
82805 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
82806
82807 if (rdtp->dynticks_nmi_nesting == 0 &&
82808- (atomic_read(&rdtp->dynticks) & 0x1))
82809+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
82810 return;
82811 rdtp->dynticks_nmi_nesting++;
82812 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
82813- atomic_inc(&rdtp->dynticks);
82814+ atomic_inc_unchecked(&rdtp->dynticks);
82815 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
82816 smp_mb__after_atomic_inc(); /* See above. */
82817- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
82818+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
82819 }
82820
82821 /**
82822@@ -664,9 +664,9 @@ void rcu_nmi_exit(void)
82823 return;
82824 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
82825 smp_mb__before_atomic_inc(); /* See above. */
82826- atomic_inc(&rdtp->dynticks);
82827+ atomic_inc_unchecked(&rdtp->dynticks);
82828 smp_mb__after_atomic_inc(); /* Force delay to next write. */
82829- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
82830+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
82831 }
82832
82833 /**
82834@@ -680,7 +680,7 @@ int rcu_is_cpu_idle(void)
82835 int ret;
82836
82837 preempt_disable();
82838- ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
82839+ ret = (atomic_read_unchecked(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
82840 preempt_enable();
82841 return ret;
82842 }
82843@@ -748,7 +748,7 @@ static int rcu_is_cpu_rrupt_from_idle(void)
82844 */
82845 static int dyntick_save_progress_counter(struct rcu_data *rdp)
82846 {
82847- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
82848+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
82849 return (rdp->dynticks_snap & 0x1) == 0;
82850 }
82851
82852@@ -763,7 +763,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
82853 unsigned int curr;
82854 unsigned int snap;
82855
82856- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
82857+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
82858 snap = (unsigned int)rdp->dynticks_snap;
82859
82860 /*
82861@@ -1440,9 +1440,9 @@ static int rcu_gp_init(struct rcu_state *rsp)
82862 rdp = this_cpu_ptr(rsp->rda);
82863 rcu_preempt_check_blocked_tasks(rnp);
82864 rnp->qsmask = rnp->qsmaskinit;
82865- ACCESS_ONCE(rnp->gpnum) = rsp->gpnum;
82866+ ACCESS_ONCE_RW(rnp->gpnum) = rsp->gpnum;
82867 WARN_ON_ONCE(rnp->completed != rsp->completed);
82868- ACCESS_ONCE(rnp->completed) = rsp->completed;
82869+ ACCESS_ONCE_RW(rnp->completed) = rsp->completed;
82870 if (rnp == rdp->mynode)
82871 rcu_start_gp_per_cpu(rsp, rnp, rdp);
82872 rcu_preempt_boost_start_gp(rnp);
82873@@ -1524,7 +1524,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
82874 */
82875 rcu_for_each_node_breadth_first(rsp, rnp) {
82876 raw_spin_lock_irq(&rnp->lock);
82877- ACCESS_ONCE(rnp->completed) = rsp->gpnum;
82878+ ACCESS_ONCE_RW(rnp->completed) = rsp->gpnum;
82879 rdp = this_cpu_ptr(rsp->rda);
82880 if (rnp == rdp->mynode)
82881 __rcu_process_gp_end(rsp, rnp, rdp);
82882@@ -1855,7 +1855,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
82883 rsp->qlen += rdp->qlen;
82884 rdp->n_cbs_orphaned += rdp->qlen;
82885 rdp->qlen_lazy = 0;
82886- ACCESS_ONCE(rdp->qlen) = 0;
82887+ ACCESS_ONCE_RW(rdp->qlen) = 0;
82888 }
82889
82890 /*
82891@@ -2101,7 +2101,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
82892 }
82893 smp_mb(); /* List handling before counting for rcu_barrier(). */
82894 rdp->qlen_lazy -= count_lazy;
82895- ACCESS_ONCE(rdp->qlen) -= count;
82896+ ACCESS_ONCE_RW(rdp->qlen) -= count;
82897 rdp->n_cbs_invoked += count;
82898
82899 /* Reinstate batch limit if we have worked down the excess. */
82900@@ -2295,7 +2295,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
82901 /*
82902 * Do RCU core processing for the current CPU.
82903 */
82904-static void rcu_process_callbacks(struct softirq_action *unused)
82905+static void rcu_process_callbacks(void)
82906 {
82907 struct rcu_state *rsp;
82908
82909@@ -2419,7 +2419,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
82910 local_irq_restore(flags);
82911 return;
82912 }
82913- ACCESS_ONCE(rdp->qlen)++;
82914+ ACCESS_ONCE_RW(rdp->qlen)++;
82915 if (lazy)
82916 rdp->qlen_lazy++;
82917 else
82918@@ -2628,11 +2628,11 @@ void synchronize_sched_expedited(void)
82919 * counter wrap on a 32-bit system. Quite a few more CPUs would of
82920 * course be required on a 64-bit system.
82921 */
82922- if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
82923+ if (ULONG_CMP_GE((ulong)atomic_long_read_unchecked(&rsp->expedited_start),
82924 (ulong)atomic_long_read(&rsp->expedited_done) +
82925 ULONG_MAX / 8)) {
82926 synchronize_sched();
82927- atomic_long_inc(&rsp->expedited_wrap);
82928+ atomic_long_inc_unchecked(&rsp->expedited_wrap);
82929 return;
82930 }
82931
82932@@ -2640,7 +2640,7 @@ void synchronize_sched_expedited(void)
82933 * Take a ticket. Note that atomic_inc_return() implies a
82934 * full memory barrier.
82935 */
82936- snap = atomic_long_inc_return(&rsp->expedited_start);
82937+ snap = atomic_long_inc_return_unchecked(&rsp->expedited_start);
82938 firstsnap = snap;
82939 get_online_cpus();
82940 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
82941@@ -2653,14 +2653,14 @@ void synchronize_sched_expedited(void)
82942 synchronize_sched_expedited_cpu_stop,
82943 NULL) == -EAGAIN) {
82944 put_online_cpus();
82945- atomic_long_inc(&rsp->expedited_tryfail);
82946+ atomic_long_inc_unchecked(&rsp->expedited_tryfail);
82947
82948 /* Check to see if someone else did our work for us. */
82949 s = atomic_long_read(&rsp->expedited_done);
82950 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
82951 /* ensure test happens before caller kfree */
82952 smp_mb__before_atomic_inc(); /* ^^^ */
82953- atomic_long_inc(&rsp->expedited_workdone1);
82954+ atomic_long_inc_unchecked(&rsp->expedited_workdone1);
82955 return;
82956 }
82957
82958@@ -2669,7 +2669,7 @@ void synchronize_sched_expedited(void)
82959 udelay(trycount * num_online_cpus());
82960 } else {
82961 wait_rcu_gp(call_rcu_sched);
82962- atomic_long_inc(&rsp->expedited_normal);
82963+ atomic_long_inc_unchecked(&rsp->expedited_normal);
82964 return;
82965 }
82966
82967@@ -2678,7 +2678,7 @@ void synchronize_sched_expedited(void)
82968 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
82969 /* ensure test happens before caller kfree */
82970 smp_mb__before_atomic_inc(); /* ^^^ */
82971- atomic_long_inc(&rsp->expedited_workdone2);
82972+ atomic_long_inc_unchecked(&rsp->expedited_workdone2);
82973 return;
82974 }
82975
82976@@ -2690,10 +2690,10 @@ void synchronize_sched_expedited(void)
82977 * period works for us.
82978 */
82979 get_online_cpus();
82980- snap = atomic_long_read(&rsp->expedited_start);
82981+ snap = atomic_long_read_unchecked(&rsp->expedited_start);
82982 smp_mb(); /* ensure read is before try_stop_cpus(). */
82983 }
82984- atomic_long_inc(&rsp->expedited_stoppedcpus);
82985+ atomic_long_inc_unchecked(&rsp->expedited_stoppedcpus);
82986
82987 /*
82988 * Everyone up to our most recent fetch is covered by our grace
82989@@ -2702,16 +2702,16 @@ void synchronize_sched_expedited(void)
82990 * than we did already did their update.
82991 */
82992 do {
82993- atomic_long_inc(&rsp->expedited_done_tries);
82994+ atomic_long_inc_unchecked(&rsp->expedited_done_tries);
82995 s = atomic_long_read(&rsp->expedited_done);
82996 if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
82997 /* ensure test happens before caller kfree */
82998 smp_mb__before_atomic_inc(); /* ^^^ */
82999- atomic_long_inc(&rsp->expedited_done_lost);
83000+ atomic_long_inc_unchecked(&rsp->expedited_done_lost);
83001 break;
83002 }
83003 } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
83004- atomic_long_inc(&rsp->expedited_done_exit);
83005+ atomic_long_inc_unchecked(&rsp->expedited_done_exit);
83006
83007 put_online_cpus();
83008 }
83009@@ -2893,7 +2893,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
83010 * ACCESS_ONCE() to prevent the compiler from speculating
83011 * the increment to precede the early-exit check.
83012 */
83013- ACCESS_ONCE(rsp->n_barrier_done)++;
83014+ ACCESS_ONCE_RW(rsp->n_barrier_done)++;
83015 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
83016 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
83017 smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
83018@@ -2943,7 +2943,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
83019
83020 /* Increment ->n_barrier_done to prevent duplicate work. */
83021 smp_mb(); /* Keep increment after above mechanism. */
83022- ACCESS_ONCE(rsp->n_barrier_done)++;
83023+ ACCESS_ONCE_RW(rsp->n_barrier_done)++;
83024 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
83025 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
83026 smp_mb(); /* Keep increment before caller's subsequent code. */
83027@@ -2988,10 +2988,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
83028 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
83029 init_callback_list(rdp);
83030 rdp->qlen_lazy = 0;
83031- ACCESS_ONCE(rdp->qlen) = 0;
83032+ ACCESS_ONCE_RW(rdp->qlen) = 0;
83033 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
83034 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
83035- WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
83036+ WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
83037 rdp->cpu = cpu;
83038 rdp->rsp = rsp;
83039 rcu_boot_init_nocb_percpu_data(rdp);
83040@@ -3024,8 +3024,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
83041 rdp->blimit = blimit;
83042 init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
83043 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
83044- atomic_set(&rdp->dynticks->dynticks,
83045- (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
83046+ atomic_set_unchecked(&rdp->dynticks->dynticks,
83047+ (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
83048 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
83049
83050 /* Add CPU to rcu_node bitmasks. */
83051@@ -3120,7 +3120,7 @@ static int __init rcu_spawn_gp_kthread(void)
83052 struct task_struct *t;
83053
83054 for_each_rcu_flavor(rsp) {
83055- t = kthread_run(rcu_gp_kthread, rsp, rsp->name);
83056+ t = kthread_run(rcu_gp_kthread, rsp, "%s", rsp->name);
83057 BUG_ON(IS_ERR(t));
83058 rnp = rcu_get_root(rsp);
83059 raw_spin_lock_irqsave(&rnp->lock, flags);
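
Every rcutree.c hunk above applies the same two grsecurity idioms: statistics counters whose wraparound is harmless are switched to the *_unchecked atomic variants so the PaX REFCOUNT overflow detector does not trip on them, and plain ACCESS_ONCE() writes become ACCESS_ONCE_RW() where the target may sit in memory the constify machinery treats as read-only. A minimal sketch of the unchecked-counter idea, assuming the simplified non-PaX fallback (the real typedefs live in the arch atomic headers earlier in this patch):

/* Sketch of the unchecked-counter pattern, assuming the non-PaX fallback;
 * the real definitions live in the arch atomic headers of this patch. */
typedef struct { long counter; } atomic_long_unchecked_t;

static inline long atomic_long_read_unchecked(const atomic_long_unchecked_t *v)
{
	return *(const volatile long *)&v->counter;
}

static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *v)
{
	__sync_fetch_and_add(&v->counter, 1);	/* may wrap: intentionally unchecked */
}

/* Pure event counter: wraparound is harmless, so overflow checking is waived. */
static atomic_long_unchecked_t expedited_tryfail_sketch;

static void note_tryfail(void)
{
	atomic_long_inc_unchecked(&expedited_tryfail_sketch);
}
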
83060diff --git a/kernel/rcutree.h b/kernel/rcutree.h
83061index 4df5034..5ee93f2 100644
83062--- a/kernel/rcutree.h
83063+++ b/kernel/rcutree.h
83064@@ -87,7 +87,7 @@ struct rcu_dynticks {
83065 long long dynticks_nesting; /* Track irq/process nesting level. */
83066 /* Process level is worth LLONG_MAX/2. */
83067 int dynticks_nmi_nesting; /* Track NMI nesting level. */
83068- atomic_t dynticks; /* Even value for idle, else odd. */
83069+ atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
83070 #ifdef CONFIG_RCU_FAST_NO_HZ
83071 bool all_lazy; /* Are all CPU's CBs lazy? */
83072 unsigned long nonlazy_posted;
83073@@ -414,17 +414,17 @@ struct rcu_state {
83074 /* _rcu_barrier(). */
83075 /* End of fields guarded by barrier_mutex. */
83076
83077- atomic_long_t expedited_start; /* Starting ticket. */
83078- atomic_long_t expedited_done; /* Done ticket. */
83079- atomic_long_t expedited_wrap; /* # near-wrap incidents. */
83080- atomic_long_t expedited_tryfail; /* # acquisition failures. */
83081- atomic_long_t expedited_workdone1; /* # done by others #1. */
83082- atomic_long_t expedited_workdone2; /* # done by others #2. */
83083- atomic_long_t expedited_normal; /* # fallbacks to normal. */
83084- atomic_long_t expedited_stoppedcpus; /* # successful stop_cpus. */
83085- atomic_long_t expedited_done_tries; /* # tries to update _done. */
83086- atomic_long_t expedited_done_lost; /* # times beaten to _done. */
83087- atomic_long_t expedited_done_exit; /* # times exited _done loop. */
83088+ atomic_long_unchecked_t expedited_start; /* Starting ticket. */
83089+ atomic_long_t expedited_done; /* Done ticket. */
83090+ atomic_long_unchecked_t expedited_wrap; /* # near-wrap incidents. */
83091+ atomic_long_unchecked_t expedited_tryfail; /* # acquisition failures. */
83092+ atomic_long_unchecked_t expedited_workdone1; /* # done by others #1. */
83093+ atomic_long_unchecked_t expedited_workdone2; /* # done by others #2. */
83094+ atomic_long_unchecked_t expedited_normal; /* # fallbacks to normal. */
83095+ atomic_long_unchecked_t expedited_stoppedcpus; /* # successful stop_cpus. */
83096+ atomic_long_unchecked_t expedited_done_tries; /* # tries to update _done. */
83097+ atomic_long_unchecked_t expedited_done_lost; /* # times beaten to _done. */
83098+ atomic_long_unchecked_t expedited_done_exit; /* # times exited _done loop. */
83099
83100 unsigned long jiffies_force_qs; /* Time at which to invoke */
83101 /* force_quiescent_state(). */
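
Note the asymmetry in the struct above: expedited_done stays a checked atomic_long_t while every statistic becomes atomic_long_unchecked_t. The start/done pair forms a wrap-sensitive ticket scheme, and the guard in synchronize_sched_expedited() (first hunk of that function above) falls back to a normal grace period once the start ticket runs ULONG_MAX/8 ahead of done. A worked sketch of that guard, using the ULONG_CMP_GE definition from rcupdate.h:

#include <limits.h>

/* ULONG_CMP_GE from rcupdate.h: "a >= b" in wrap-tolerant, modular terms. */
#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))

/* Nonzero when the start ticket is dangerously far ahead of the done
 * ticket, i.e. the caller should fall back to synchronize_sched(). */
static int expedited_near_wrap(unsigned long start, unsigned long done)
{
	return ULONG_CMP_GE(start, done + ULONG_MAX / 8);
}
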
83102diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
83103index 3db5a37..b395fb35 100644
83104--- a/kernel/rcutree_plugin.h
83105+++ b/kernel/rcutree_plugin.h
83106@@ -903,7 +903,7 @@ void synchronize_rcu_expedited(void)
83107
83108 /* Clean up and exit. */
83109 smp_mb(); /* ensure expedited GP seen before counter increment. */
83110- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
83111+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
83112 unlock_mb_ret:
83113 mutex_unlock(&sync_rcu_preempt_exp_mutex);
83114 mb_ret:
83115@@ -1451,7 +1451,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
83116 free_cpumask_var(cm);
83117 }
83118
83119-static struct smp_hotplug_thread rcu_cpu_thread_spec = {
83120+static struct smp_hotplug_thread rcu_cpu_thread_spec __read_only = {
83121 .store = &rcu_cpu_kthread_task,
83122 .thread_should_run = rcu_cpu_kthread_should_run,
83123 .thread_fn = rcu_cpu_kthread,
83124@@ -1916,7 +1916,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
83125 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
83126 printk(KERN_ERR "\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u %s\n",
83127 cpu, ticks_value, ticks_title,
83128- atomic_read(&rdtp->dynticks) & 0xfff,
83129+ atomic_read_unchecked(&rdtp->dynticks) & 0xfff,
83130 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
83131 rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
83132 fast_no_hz);
83133@@ -2079,7 +2079,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
83134
83135 /* Enqueue the callback on the nocb list and update counts. */
83136 old_rhpp = xchg(&rdp->nocb_tail, rhtp);
83137- ACCESS_ONCE(*old_rhpp) = rhp;
83138+ ACCESS_ONCE_RW(*old_rhpp) = rhp;
83139 atomic_long_add(rhcount, &rdp->nocb_q_count);
83140 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
83141
83142@@ -2219,12 +2219,12 @@ static int rcu_nocb_kthread(void *arg)
83143 * Extract queued callbacks, update counts, and wait
83144 * for a grace period to elapse.
83145 */
83146- ACCESS_ONCE(rdp->nocb_head) = NULL;
83147+ ACCESS_ONCE_RW(rdp->nocb_head) = NULL;
83148 tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
83149 c = atomic_long_xchg(&rdp->nocb_q_count, 0);
83150 cl = atomic_long_xchg(&rdp->nocb_q_count_lazy, 0);
83151- ACCESS_ONCE(rdp->nocb_p_count) += c;
83152- ACCESS_ONCE(rdp->nocb_p_count_lazy) += cl;
83153+ ACCESS_ONCE_RW(rdp->nocb_p_count) += c;
83154+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) += cl;
83155 rcu_nocb_wait_gp(rdp);
83156
83157 /* Each pass through the following loop invokes a callback. */
83158@@ -2246,8 +2246,8 @@ static int rcu_nocb_kthread(void *arg)
83159 list = next;
83160 }
83161 trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
83162- ACCESS_ONCE(rdp->nocb_p_count) -= c;
83163- ACCESS_ONCE(rdp->nocb_p_count_lazy) -= cl;
83164+ ACCESS_ONCE_RW(rdp->nocb_p_count) -= c;
83165+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) -= cl;
83166 rdp->n_nocbs_invoked += c;
83167 }
83168 return 0;
83169@@ -2274,7 +2274,7 @@ static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
83170 t = kthread_run(rcu_nocb_kthread, rdp,
83171 "rcuo%c/%d", rsp->abbr, cpu);
83172 BUG_ON(IS_ERR(t));
83173- ACCESS_ONCE(rdp->nocb_kthread) = t;
83174+ ACCESS_ONCE_RW(rdp->nocb_kthread) = t;
83175 }
83176 }
83177
83178diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
83179index cf6c174..a8f4b50 100644
83180--- a/kernel/rcutree_trace.c
83181+++ b/kernel/rcutree_trace.c
83182@@ -121,7 +121,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
83183 ulong2long(rdp->completed), ulong2long(rdp->gpnum),
83184 rdp->passed_quiesce, rdp->qs_pending);
83185 seq_printf(m, " dt=%d/%llx/%d df=%lu",
83186- atomic_read(&rdp->dynticks->dynticks),
83187+ atomic_read_unchecked(&rdp->dynticks->dynticks),
83188 rdp->dynticks->dynticks_nesting,
83189 rdp->dynticks->dynticks_nmi_nesting,
83190 rdp->dynticks_fqs);
83191@@ -182,17 +182,17 @@ static int show_rcuexp(struct seq_file *m, void *v)
83192 struct rcu_state *rsp = (struct rcu_state *)m->private;
83193
83194 seq_printf(m, "s=%lu d=%lu w=%lu tf=%lu wd1=%lu wd2=%lu n=%lu sc=%lu dt=%lu dl=%lu dx=%lu\n",
83195- atomic_long_read(&rsp->expedited_start),
83196+ atomic_long_read_unchecked(&rsp->expedited_start),
83197 atomic_long_read(&rsp->expedited_done),
83198- atomic_long_read(&rsp->expedited_wrap),
83199- atomic_long_read(&rsp->expedited_tryfail),
83200- atomic_long_read(&rsp->expedited_workdone1),
83201- atomic_long_read(&rsp->expedited_workdone2),
83202- atomic_long_read(&rsp->expedited_normal),
83203- atomic_long_read(&rsp->expedited_stoppedcpus),
83204- atomic_long_read(&rsp->expedited_done_tries),
83205- atomic_long_read(&rsp->expedited_done_lost),
83206- atomic_long_read(&rsp->expedited_done_exit));
83207+ atomic_long_read_unchecked(&rsp->expedited_wrap),
83208+ atomic_long_read_unchecked(&rsp->expedited_tryfail),
83209+ atomic_long_read_unchecked(&rsp->expedited_workdone1),
83210+ atomic_long_read_unchecked(&rsp->expedited_workdone2),
83211+ atomic_long_read_unchecked(&rsp->expedited_normal),
83212+ atomic_long_read_unchecked(&rsp->expedited_stoppedcpus),
83213+ atomic_long_read_unchecked(&rsp->expedited_done_tries),
83214+ atomic_long_read_unchecked(&rsp->expedited_done_lost),
83215+ atomic_long_read_unchecked(&rsp->expedited_done_exit));
83216 return 0;
83217 }
83218
83219diff --git a/kernel/resource.c b/kernel/resource.c
83220index d738698..5f8e60a 100644
83221--- a/kernel/resource.c
83222+++ b/kernel/resource.c
83223@@ -152,8 +152,18 @@ static const struct file_operations proc_iomem_operations = {
83224
83225 static int __init ioresources_init(void)
83226 {
83227+#ifdef CONFIG_GRKERNSEC_PROC_ADD
83228+#ifdef CONFIG_GRKERNSEC_PROC_USER
83229+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
83230+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
83231+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
83232+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
83233+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
83234+#endif
83235+#else
83236 proc_create("ioports", 0, NULL, &proc_ioports_operations);
83237 proc_create("iomem", 0, NULL, &proc_iomem_operations);
83238+#endif
83239 return 0;
83240 }
83241 __initcall(ioresources_init);
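
The ioresources_init() hunk tightens /proc/ioports and /proc/iomem from world-readable to 0400 under GRKERNSEC_PROC_USER or 0440 under GRKERNSEC_PROC_USERGROUP, hiding the physical memory map from unprivileged users. A condensed sketch of the same mode selection, collapsing the #ifdef ladder into one helper:

#include <linux/proc_fs.h>
#include <linux/stat.h>

/* One helper instead of duplicated proc_create() calls per config case. */
static umode_t grsec_proc_mode_sketch(void)
{
#if defined(CONFIG_GRKERNSEC_PROC_USER)
	return S_IRUSR;			/* 0400: root only */
#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
	return S_IRUSR | S_IRGRP;	/* 0440: root plus the configured group */
#else
	return 0;			/* 0 as in the unpatched call: proc's read-for-all default */
#endif
}
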
83242diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
83243index 1d96dd0..994ff19 100644
83244--- a/kernel/rtmutex-tester.c
83245+++ b/kernel/rtmutex-tester.c
83246@@ -22,7 +22,7 @@
83247 #define MAX_RT_TEST_MUTEXES 8
83248
83249 static spinlock_t rttest_lock;
83250-static atomic_t rttest_event;
83251+static atomic_unchecked_t rttest_event;
83252
83253 struct test_thread_data {
83254 int opcode;
83255@@ -63,7 +63,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
83256
83257 case RTTEST_LOCKCONT:
83258 td->mutexes[td->opdata] = 1;
83259- td->event = atomic_add_return(1, &rttest_event);
83260+ td->event = atomic_add_return_unchecked(1, &rttest_event);
83261 return 0;
83262
83263 case RTTEST_RESET:
83264@@ -76,7 +76,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
83265 return 0;
83266
83267 case RTTEST_RESETEVENT:
83268- atomic_set(&rttest_event, 0);
83269+ atomic_set_unchecked(&rttest_event, 0);
83270 return 0;
83271
83272 default:
83273@@ -93,9 +93,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
83274 return ret;
83275
83276 td->mutexes[id] = 1;
83277- td->event = atomic_add_return(1, &rttest_event);
83278+ td->event = atomic_add_return_unchecked(1, &rttest_event);
83279 rt_mutex_lock(&mutexes[id]);
83280- td->event = atomic_add_return(1, &rttest_event);
83281+ td->event = atomic_add_return_unchecked(1, &rttest_event);
83282 td->mutexes[id] = 4;
83283 return 0;
83284
83285@@ -106,9 +106,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
83286 return ret;
83287
83288 td->mutexes[id] = 1;
83289- td->event = atomic_add_return(1, &rttest_event);
83290+ td->event = atomic_add_return_unchecked(1, &rttest_event);
83291 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
83292- td->event = atomic_add_return(1, &rttest_event);
83293+ td->event = atomic_add_return_unchecked(1, &rttest_event);
83294 td->mutexes[id] = ret ? 0 : 4;
83295 return ret ? -EINTR : 0;
83296
83297@@ -117,9 +117,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
83298 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
83299 return ret;
83300
83301- td->event = atomic_add_return(1, &rttest_event);
83302+ td->event = atomic_add_return_unchecked(1, &rttest_event);
83303 rt_mutex_unlock(&mutexes[id]);
83304- td->event = atomic_add_return(1, &rttest_event);
83305+ td->event = atomic_add_return_unchecked(1, &rttest_event);
83306 td->mutexes[id] = 0;
83307 return 0;
83308
83309@@ -166,7 +166,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
83310 break;
83311
83312 td->mutexes[dat] = 2;
83313- td->event = atomic_add_return(1, &rttest_event);
83314+ td->event = atomic_add_return_unchecked(1, &rttest_event);
83315 break;
83316
83317 default:
83318@@ -186,7 +186,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
83319 return;
83320
83321 td->mutexes[dat] = 3;
83322- td->event = atomic_add_return(1, &rttest_event);
83323+ td->event = atomic_add_return_unchecked(1, &rttest_event);
83324 break;
83325
83326 case RTTEST_LOCKNOWAIT:
83327@@ -198,7 +198,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
83328 return;
83329
83330 td->mutexes[dat] = 1;
83331- td->event = atomic_add_return(1, &rttest_event);
83332+ td->event = atomic_add_return_unchecked(1, &rttest_event);
83333 return;
83334
83335 default:
83336diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
83337index 64de5f8..7735e12 100644
83338--- a/kernel/sched/auto_group.c
83339+++ b/kernel/sched/auto_group.c
83340@@ -11,7 +11,7 @@
83341
83342 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
83343 static struct autogroup autogroup_default;
83344-static atomic_t autogroup_seq_nr;
83345+static atomic_unchecked_t autogroup_seq_nr;
83346
83347 void __init autogroup_init(struct task_struct *init_task)
83348 {
83349@@ -81,7 +81,7 @@ static inline struct autogroup *autogroup_create(void)
83350
83351 kref_init(&ag->kref);
83352 init_rwsem(&ag->lock);
83353- ag->id = atomic_inc_return(&autogroup_seq_nr);
83354+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
83355 ag->tg = tg;
83356 #ifdef CONFIG_RT_GROUP_SCHED
83357 /*
83358diff --git a/kernel/sched/core.c b/kernel/sched/core.c
83359index e8b3350..d83d44e 100644
83360--- a/kernel/sched/core.c
83361+++ b/kernel/sched/core.c
83362@@ -3440,7 +3440,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
83363 * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
83364 * positive (at least 1, or number of jiffies left till timeout) if completed.
83365 */
83366-long __sched
83367+long __sched __intentional_overflow(-1)
83368 wait_for_completion_interruptible_timeout(struct completion *x,
83369 unsigned long timeout)
83370 {
83371@@ -3457,7 +3457,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
83372 *
83373 * The return value is -ERESTARTSYS if interrupted, 0 if completed.
83374 */
83375-int __sched wait_for_completion_killable(struct completion *x)
83376+int __sched __intentional_overflow(-1) wait_for_completion_killable(struct completion *x)
83377 {
83378 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
83379 if (t == -ERESTARTSYS)
83380@@ -3478,7 +3478,7 @@ EXPORT_SYMBOL(wait_for_completion_killable);
83381 * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
83382 * positive (at least 1, or number of jiffies left till timeout) if completed.
83383 */
83384-long __sched
83385+long __sched __intentional_overflow(-1)
83386 wait_for_completion_killable_timeout(struct completion *x,
83387 unsigned long timeout)
83388 {
83389@@ -3704,6 +3704,8 @@ int can_nice(const struct task_struct *p, const int nice)
83390 /* convert nice value [19,-20] to rlimit style value [1,40] */
83391 int nice_rlim = 20 - nice;
83392
83393+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
83394+
83395 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
83396 capable(CAP_SYS_NICE));
83397 }
83398@@ -3737,7 +3739,8 @@ SYSCALL_DEFINE1(nice, int, increment)
83399 if (nice > 19)
83400 nice = 19;
83401
83402- if (increment < 0 && !can_nice(current, nice))
83403+ if (increment < 0 && (!can_nice(current, nice) ||
83404+ gr_handle_chroot_nice()))
83405 return -EPERM;
83406
83407 retval = security_task_setnice(current, nice);
83408@@ -3891,6 +3894,7 @@ recheck:
83409 unsigned long rlim_rtprio =
83410 task_rlimit(p, RLIMIT_RTPRIO);
83411
83412+ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
83413 /* can't set/change the rt policy */
83414 if (policy != p->policy && !rlim_rtprio)
83415 return -EPERM;
83416@@ -4988,7 +4992,7 @@ static void migrate_tasks(unsigned int dead_cpu)
83417
83418 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
83419
83420-static struct ctl_table sd_ctl_dir[] = {
83421+static ctl_table_no_const sd_ctl_dir[] __read_only = {
83422 {
83423 .procname = "sched_domain",
83424 .mode = 0555,
83425@@ -5005,17 +5009,17 @@ static struct ctl_table sd_ctl_root[] = {
83426 {}
83427 };
83428
83429-static struct ctl_table *sd_alloc_ctl_entry(int n)
83430+static ctl_table_no_const *sd_alloc_ctl_entry(int n)
83431 {
83432- struct ctl_table *entry =
83433+ ctl_table_no_const *entry =
83434 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
83435
83436 return entry;
83437 }
83438
83439-static void sd_free_ctl_entry(struct ctl_table **tablep)
83440+static void sd_free_ctl_entry(ctl_table_no_const *tablep)
83441 {
83442- struct ctl_table *entry;
83443+ ctl_table_no_const *entry;
83444
83445 /*
83446 * In the intermediate directories, both the child directory and
83447@@ -5023,22 +5027,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
83448 * will always be set. In the lowest directory the names are
83449 * static strings and all have proc handlers.
83450 */
83451- for (entry = *tablep; entry->mode; entry++) {
83452- if (entry->child)
83453- sd_free_ctl_entry(&entry->child);
83454+ for (entry = tablep; entry->mode; entry++) {
83455+ if (entry->child) {
83456+ sd_free_ctl_entry(entry->child);
83457+ pax_open_kernel();
83458+ entry->child = NULL;
83459+ pax_close_kernel();
83460+ }
83461 if (entry->proc_handler == NULL)
83462 kfree(entry->procname);
83463 }
83464
83465- kfree(*tablep);
83466- *tablep = NULL;
83467+ kfree(tablep);
83468 }
83469
83470 static int min_load_idx = 0;
83471 static int max_load_idx = CPU_LOAD_IDX_MAX-1;
83472
83473 static void
83474-set_table_entry(struct ctl_table *entry,
83475+set_table_entry(ctl_table_no_const *entry,
83476 const char *procname, void *data, int maxlen,
83477 umode_t mode, proc_handler *proc_handler,
83478 bool load_idx)
83479@@ -5058,7 +5065,7 @@ set_table_entry(struct ctl_table *entry,
83480 static struct ctl_table *
83481 sd_alloc_ctl_domain_table(struct sched_domain *sd)
83482 {
83483- struct ctl_table *table = sd_alloc_ctl_entry(13);
83484+ ctl_table_no_const *table = sd_alloc_ctl_entry(13);
83485
83486 if (table == NULL)
83487 return NULL;
83488@@ -5093,9 +5100,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
83489 return table;
83490 }
83491
83492-static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
83493+static ctl_table_no_const *sd_alloc_ctl_cpu_table(int cpu)
83494 {
83495- struct ctl_table *entry, *table;
83496+ ctl_table_no_const *entry, *table;
83497 struct sched_domain *sd;
83498 int domain_num = 0, i;
83499 char buf[32];
83500@@ -5122,11 +5129,13 @@ static struct ctl_table_header *sd_sysctl_header;
83501 static void register_sched_domain_sysctl(void)
83502 {
83503 int i, cpu_num = num_possible_cpus();
83504- struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
83505+ ctl_table_no_const *entry = sd_alloc_ctl_entry(cpu_num + 1);
83506 char buf[32];
83507
83508 WARN_ON(sd_ctl_dir[0].child);
83509+ pax_open_kernel();
83510 sd_ctl_dir[0].child = entry;
83511+ pax_close_kernel();
83512
83513 if (entry == NULL)
83514 return;
83515@@ -5149,8 +5158,12 @@ static void unregister_sched_domain_sysctl(void)
83516 if (sd_sysctl_header)
83517 unregister_sysctl_table(sd_sysctl_header);
83518 sd_sysctl_header = NULL;
83519- if (sd_ctl_dir[0].child)
83520- sd_free_ctl_entry(&sd_ctl_dir[0].child);
83521+ if (sd_ctl_dir[0].child) {
83522+ sd_free_ctl_entry(sd_ctl_dir[0].child);
83523+ pax_open_kernel();
83524+ sd_ctl_dir[0].child = NULL;
83525+ pax_close_kernel();
83526+ }
83527 }
83528 #else
83529 static void register_sched_domain_sysctl(void)
83530@@ -5249,7 +5262,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
83531 * happens before everything else. This has to be lower priority than
83532 * the notifier in the perf_event subsystem, though.
83533 */
83534-static struct notifier_block __cpuinitdata migration_notifier = {
83535+static struct notifier_block migration_notifier = {
83536 .notifier_call = migration_call,
83537 .priority = CPU_PRI_MIGRATION,
83538 };
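
The sched_domain sysctl rework above shows the constify plugin's two escape hatches: runtime-built tables switch to the ctl_table_no_const typedef so they remain writable, while the statically placed sd_ctl_dir is left in __read_only memory and only mutated inside a pax_open_kernel()/pax_close_kernel() window (on x86 these toggle the CR0 write-protect bit). A hedged sketch of the open/close pattern, with stand-ins for the real PaX helpers:

/* Stand-ins for the real helpers defined elsewhere in this patch;
 * on x86 they clear and restore CR0.WP around the protected store. */
static inline void pax_open_kernel_stub(void)  { /* lift write protection */ }
static inline void pax_close_kernel_stub(void) { /* restore write protection */ }

struct ops_sketch { void (*handler)(void); };

/* Imagine this object living in an __read_only section. */
static struct ops_sketch protected_ops;

static void set_handler(void (*fn)(void))
{
	pax_open_kernel_stub();
	protected_ops.handler = fn;	/* the only legal write window */
	pax_close_kernel_stub();
}
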
83539diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
83540index 03b73be..9422b9f 100644
83541--- a/kernel/sched/fair.c
83542+++ b/kernel/sched/fair.c
83543@@ -831,7 +831,7 @@ void task_numa_fault(int node, int pages, bool migrated)
83544
83545 static void reset_ptenuma_scan(struct task_struct *p)
83546 {
83547- ACCESS_ONCE(p->mm->numa_scan_seq)++;
83548+ ACCESS_ONCE_RW(p->mm->numa_scan_seq)++;
83549 p->mm->numa_scan_offset = 0;
83550 }
83551
83552@@ -5687,7 +5687,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
83553 * run_rebalance_domains is triggered when needed from the scheduler tick.
83554 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
83555 */
83556-static void run_rebalance_domains(struct softirq_action *h)
83557+static void run_rebalance_domains(void)
83558 {
83559 int this_cpu = smp_processor_id();
83560 struct rq *this_rq = cpu_rq(this_cpu);
83561diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
83562index ce39224d..0e09343 100644
83563--- a/kernel/sched/sched.h
83564+++ b/kernel/sched/sched.h
83565@@ -1009,7 +1009,7 @@ struct sched_class {
83566 #ifdef CONFIG_FAIR_GROUP_SCHED
83567 void (*task_move_group) (struct task_struct *p, int on_rq);
83568 #endif
83569-};
83570+} __do_const;
83571
83572 #define sched_class_highest (&stop_sched_class)
83573 #define for_each_class(class) \
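
Tagging struct sched_class with __do_const asks the constify plugin to force every instance const into .rodata, so the scheduler's function-pointer table cannot be overwritten at runtime. Without the plugin, the effect is roughly equivalent to declaring the instances const by hand:

/* Rough hand-written equivalent of what constify produces for ops tables. */
struct sched_class_sketch {
	void (*enqueue_task)(void);
	void (*dequeue_task)(void);
};

static void enqueue_stub(void) { }
static void dequeue_stub(void) { }

/* const + static initializer: placed in read-only data by the linker. */
static const struct sched_class_sketch fair_class_sketch = {
	.enqueue_task	= enqueue_stub,
	.dequeue_task	= dequeue_stub,
};
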
83574diff --git a/kernel/signal.c b/kernel/signal.c
83575index 113411b..20d0a99 100644
83576--- a/kernel/signal.c
83577+++ b/kernel/signal.c
83578@@ -51,12 +51,12 @@ static struct kmem_cache *sigqueue_cachep;
83579
83580 int print_fatal_signals __read_mostly;
83581
83582-static void __user *sig_handler(struct task_struct *t, int sig)
83583+static __sighandler_t sig_handler(struct task_struct *t, int sig)
83584 {
83585 return t->sighand->action[sig - 1].sa.sa_handler;
83586 }
83587
83588-static int sig_handler_ignored(void __user *handler, int sig)
83589+static int sig_handler_ignored(__sighandler_t handler, int sig)
83590 {
83591 /* Is it explicitly or implicitly ignored? */
83592 return handler == SIG_IGN ||
83593@@ -65,7 +65,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
83594
83595 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
83596 {
83597- void __user *handler;
83598+ __sighandler_t handler;
83599
83600 handler = sig_handler(t, sig);
83601
83602@@ -369,6 +369,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
83603 atomic_inc(&user->sigpending);
83604 rcu_read_unlock();
83605
83606+ if (!override_rlimit)
83607+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
83608+
83609 if (override_rlimit ||
83610 atomic_read(&user->sigpending) <=
83611 task_rlimit(t, RLIMIT_SIGPENDING)) {
83612@@ -496,7 +499,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
83613
83614 int unhandled_signal(struct task_struct *tsk, int sig)
83615 {
83616- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
83617+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
83618 if (is_global_init(tsk))
83619 return 1;
83620 if (handler != SIG_IGN && handler != SIG_DFL)
83621@@ -816,6 +819,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
83622 }
83623 }
83624
83625+ /* allow glibc communication via tgkill to other threads in our
83626+ thread group */
83627+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
83628+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
83629+ && gr_handle_signal(t, sig))
83630+ return -EPERM;
83631+
83632 return security_task_kill(t, info, sig, 0);
83633 }
83634
83635@@ -1199,7 +1209,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
83636 return send_signal(sig, info, p, 1);
83637 }
83638
83639-static int
83640+int
83641 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
83642 {
83643 return send_signal(sig, info, t, 0);
83644@@ -1236,6 +1246,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
83645 unsigned long int flags;
83646 int ret, blocked, ignored;
83647 struct k_sigaction *action;
83648+ int is_unhandled = 0;
83649
83650 spin_lock_irqsave(&t->sighand->siglock, flags);
83651 action = &t->sighand->action[sig-1];
83652@@ -1250,9 +1261,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
83653 }
83654 if (action->sa.sa_handler == SIG_DFL)
83655 t->signal->flags &= ~SIGNAL_UNKILLABLE;
83656+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
83657+ is_unhandled = 1;
83658 ret = specific_send_sig_info(sig, info, t);
83659 spin_unlock_irqrestore(&t->sighand->siglock, flags);
83660
83661+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
83662+ normal operation */
83663+ if (is_unhandled) {
83664+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
83665+ gr_handle_crash(t, sig);
83666+ }
83667+
83668 return ret;
83669 }
83670
83671@@ -1319,8 +1339,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
83672 ret = check_kill_permission(sig, info, p);
83673 rcu_read_unlock();
83674
83675- if (!ret && sig)
83676+ if (!ret && sig) {
83677 ret = do_send_sig_info(sig, info, p, true);
83678+ if (!ret)
83679+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
83680+ }
83681
83682 return ret;
83683 }
83684@@ -2926,7 +2949,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
83685 int error = -ESRCH;
83686
83687 rcu_read_lock();
83688- p = find_task_by_vpid(pid);
83689+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
83690+ /* allow glibc communication via tgkill to other threads in our
83691+ thread group */
83692+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
83693+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
83694+ p = find_task_by_vpid_unrestricted(pid);
83695+ else
83696+#endif
83697+ p = find_task_by_vpid(pid);
83698 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
83699 error = check_kill_permission(sig, info, p);
83700 /*
83701@@ -3219,6 +3250,16 @@ int __save_altstack(stack_t __user *uss, unsigned long sp)
83702 __put_user(t->sas_ss_size, &uss->ss_size);
83703 }
83704
83705+#ifdef CONFIG_X86
83706+void __save_altstack_ex(stack_t __user *uss, unsigned long sp)
83707+{
83708+ struct task_struct *t = current;
83709+ put_user_ex((void __user *)t->sas_ss_sp, &uss->ss_sp);
83710+ put_user_ex(sas_ss_flags(sp), &uss->ss_flags);
83711+ put_user_ex(t->sas_ss_size, &uss->ss_size);
83712+}
83713+#endif
83714+
83715 #ifdef CONFIG_COMPAT
83716 COMPAT_SYSCALL_DEFINE2(sigaltstack,
83717 const compat_stack_t __user *, uss_ptr,
83718@@ -3240,8 +3281,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
83719 }
83720 seg = get_fs();
83721 set_fs(KERNEL_DS);
83722- ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
83723- (stack_t __force __user *) &uoss,
83724+ ret = do_sigaltstack((stack_t __force_user *) (uss_ptr ? &uss : NULL),
83725+ (stack_t __force_user *) &uoss,
83726 compat_user_stack_pointer());
83727 set_fs(seg);
83728 if (ret >= 0 && uoss_ptr) {
83729@@ -3268,6 +3309,16 @@ int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
83730 __put_user(sas_ss_flags(sp), &uss->ss_flags) |
83731 __put_user(t->sas_ss_size, &uss->ss_size);
83732 }
83733+
83734+#ifdef CONFIG_X86
83735+void __compat_save_altstack_ex(compat_stack_t __user *uss, unsigned long sp)
83736+{
83737+ struct task_struct *t = current;
83738+ put_user_ex(ptr_to_compat((void __user *)t->sas_ss_sp), &uss->ss_sp);
83739+ put_user_ex(sas_ss_flags(sp), &uss->ss_flags);
83740+ put_user_ex(t->sas_ss_size, &uss->ss_size);
83741+}
83742+#endif
83743 #endif
83744
83745 #ifdef __ARCH_WANT_SYS_SIGPENDING
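
Two things happen in signal.c: the grsecurity hooks (gr_handle_signal, gr_log_signal, gr_handle_crash) gate and log signal delivery, and __save_altstack_ex()/__compat_save_altstack_ex() are x86-only variants built from put_user_ex() so they can run inside an existing put_user_try/put_user_catch region, where faults jump to the catch label instead of returning -EFAULT per store. A sketch of such a caller (the real ones are in arch/x86/kernel/signal.c):

#include <linux/signal.h>	/* stack_t */
#include <asm/uaccess.h>	/* put_user_try/put_user_catch/put_user_ex on x86 */

/* Sketch: all three put_user_ex() stores in __save_altstack_ex() share
 * one fault target, filled into err by put_user_catch(). */
static int save_altstack_frame(stack_t __user *uss, unsigned long sp)
{
	int err = 0;

	put_user_try {
		__save_altstack_ex(uss, sp);
	} put_user_catch(err);

	return err;
}
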
83746diff --git a/kernel/smp.c b/kernel/smp.c
83747index 4dba0f7..fe9f773 100644
83748--- a/kernel/smp.c
83749+++ b/kernel/smp.c
83750@@ -73,7 +73,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
83751 return NOTIFY_OK;
83752 }
83753
83754-static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
83755+static struct notifier_block hotplug_cfd_notifier = {
83756 .notifier_call = hotplug_cfd,
83757 };
83758
83759diff --git a/kernel/smpboot.c b/kernel/smpboot.c
83760index 02fc5c9..e54c335 100644
83761--- a/kernel/smpboot.c
83762+++ b/kernel/smpboot.c
83763@@ -288,7 +288,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
83764 }
83765 smpboot_unpark_thread(plug_thread, cpu);
83766 }
83767- list_add(&plug_thread->list, &hotplug_threads);
83768+ pax_list_add(&plug_thread->list, &hotplug_threads);
83769 out:
83770 mutex_unlock(&smpboot_threads_lock);
83771 return ret;
83772@@ -305,7 +305,7 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
83773 {
83774 get_online_cpus();
83775 mutex_lock(&smpboot_threads_lock);
83776- list_del(&plug_thread->list);
83777+ pax_list_del(&plug_thread->list);
83778 smpboot_destroy_threads(plug_thread);
83779 mutex_unlock(&smpboot_threads_lock);
83780 put_online_cpus();
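
pax_list_add()/pax_list_del() exist because structures such as the __read_only smp_hotplug_thread descriptors carry list linkage that ordinary list_add()/list_del() can no longer write. The helpers wrap the standard list operations in a write-protection window (and, in the full patch, validate the neighbouring nodes first). A minimal self-contained sketch:

#include <linux/list.h>

static inline void pax_open_kernel_stub(void)  { /* lift write protection */ }
static inline void pax_close_kernel_stub(void) { /* restore write protection */ }

/* Sketch: insert a node whose list_head lives in write-protected data. */
static void pax_list_add_sketch(struct list_head *new, struct list_head *head)
{
	pax_open_kernel_stub();
	list_add(new, head);
	pax_close_kernel_stub();
}
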
83781diff --git a/kernel/softirq.c b/kernel/softirq.c
83782index 3d6833f..da6d93d 100644
83783--- a/kernel/softirq.c
83784+++ b/kernel/softirq.c
83785@@ -53,11 +53,11 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
83786 EXPORT_SYMBOL(irq_stat);
83787 #endif
83788
83789-static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
83790+static struct softirq_action softirq_vec[NR_SOFTIRQS] __read_only __aligned(PAGE_SIZE);
83791
83792 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
83793
83794-char *softirq_to_name[NR_SOFTIRQS] = {
83795+const char * const softirq_to_name[NR_SOFTIRQS] = {
83796 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
83797 "TASKLET", "SCHED", "HRTIMER", "RCU"
83798 };
83799@@ -250,7 +250,7 @@ restart:
83800 kstat_incr_softirqs_this_cpu(vec_nr);
83801
83802 trace_softirq_entry(vec_nr);
83803- h->action(h);
83804+ h->action();
83805 trace_softirq_exit(vec_nr);
83806 if (unlikely(prev_count != preempt_count())) {
83807 printk(KERN_ERR "huh, entered softirq %u %s %p"
83808@@ -405,7 +405,7 @@ void __raise_softirq_irqoff(unsigned int nr)
83809 or_softirq_pending(1UL << nr);
83810 }
83811
83812-void open_softirq(int nr, void (*action)(struct softirq_action *))
83813+void __init open_softirq(int nr, void (*action)(void))
83814 {
83815 softirq_vec[nr].action = action;
83816 }
83817@@ -461,7 +461,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
83818
83819 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
83820
83821-static void tasklet_action(struct softirq_action *a)
83822+static void tasklet_action(void)
83823 {
83824 struct tasklet_struct *list;
83825
83826@@ -496,7 +496,7 @@ static void tasklet_action(struct softirq_action *a)
83827 }
83828 }
83829
83830-static void tasklet_hi_action(struct softirq_action *a)
83831+static void tasklet_hi_action(void)
83832 {
83833 struct tasklet_struct *list;
83834
83835@@ -730,7 +730,7 @@ static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
83836 return NOTIFY_OK;
83837 }
83838
83839-static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
83840+static struct notifier_block remote_softirq_cpu_notifier = {
83841 .notifier_call = remote_softirq_cpu_notify,
83842 };
83843
83844@@ -847,11 +847,11 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
83845 return NOTIFY_OK;
83846 }
83847
83848-static struct notifier_block __cpuinitdata cpu_nfb = {
83849+static struct notifier_block cpu_nfb = {
83850 .notifier_call = cpu_callback
83851 };
83852
83853-static struct smp_hotplug_thread softirq_threads = {
83854+static struct smp_hotplug_thread softirq_threads __read_only = {
83855 .store = &ksoftirqd,
83856 .thread_should_run = ksoftirqd_should_run,
83857 .thread_fn = run_ksoftirqd,
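
The softirq changes seal the handler table: the unused struct softirq_action * parameter is dropped so every handler is a plain void (*)(void), softirq_vec becomes page-aligned and __read_only, and open_softirq() is demoted to __init. After boot nothing can register or replace a softirq handler, removing a classic write-what-where target. A sketch of the boot-time-only registration, with a hypothetical handler:

#define NR_SOFTIRQS_SKETCH 10

struct softirq_action_sketch { void (*action)(void); };

/* In the patched kernel this array sits in a read-only section post-boot. */
static struct softirq_action_sketch softirq_vec_sketch[NR_SOFTIRQS_SKETCH];

/* __init in the patched kernel: callable during boot only. */
static void open_softirq_sketch(int nr, void (*action)(void))
{
	softirq_vec_sketch[nr].action = action;
}

static void timer_softirq_sketch(void)	/* hypothetical handler */
{
	/* run expired timers */
}

static void softirq_init_sketch(void)
{
	open_softirq_sketch(0, timer_softirq_sketch);
}
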
83858diff --git a/kernel/srcu.c b/kernel/srcu.c
83859index 01d5ccb..cdcbee6 100644
83860--- a/kernel/srcu.c
83861+++ b/kernel/srcu.c
83862@@ -300,9 +300,9 @@ int __srcu_read_lock(struct srcu_struct *sp)
83863
83864 idx = ACCESS_ONCE(sp->completed) & 0x1;
83865 preempt_disable();
83866- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
83867+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
83868 smp_mb(); /* B */ /* Avoid leaking the critical section. */
83869- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
83870+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
83871 preempt_enable();
83872 return idx;
83873 }
83874diff --git a/kernel/sys.c b/kernel/sys.c
83875index 2bbd9a7..0875671 100644
83876--- a/kernel/sys.c
83877+++ b/kernel/sys.c
83878@@ -163,6 +163,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
83879 error = -EACCES;
83880 goto out;
83881 }
83882+
83883+ if (gr_handle_chroot_setpriority(p, niceval)) {
83884+ error = -EACCES;
83885+ goto out;
83886+ }
83887+
83888 no_nice = security_task_setnice(p, niceval);
83889 if (no_nice) {
83890 error = no_nice;
83891@@ -626,6 +632,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
83892 goto error;
83893 }
83894
83895+ if (gr_check_group_change(new->gid, new->egid, INVALID_GID))
83896+ goto error;
83897+
83898 if (rgid != (gid_t) -1 ||
83899 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
83900 new->sgid = new->egid;
83901@@ -661,6 +670,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
83902 old = current_cred();
83903
83904 retval = -EPERM;
83905+
83906+ if (gr_check_group_change(kgid, kgid, kgid))
83907+ goto error;
83908+
83909 if (nsown_capable(CAP_SETGID))
83910 new->gid = new->egid = new->sgid = new->fsgid = kgid;
83911 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
83912@@ -678,7 +691,7 @@ error:
83913 /*
83914 * change the user struct in a credentials set to match the new UID
83915 */
83916-static int set_user(struct cred *new)
83917+int set_user(struct cred *new)
83918 {
83919 struct user_struct *new_user;
83920
83921@@ -758,6 +771,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
83922 goto error;
83923 }
83924
83925+ if (gr_check_user_change(new->uid, new->euid, INVALID_UID))
83926+ goto error;
83927+
83928 if (!uid_eq(new->uid, old->uid)) {
83929 retval = set_user(new);
83930 if (retval < 0)
83931@@ -808,6 +824,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
83932 old = current_cred();
83933
83934 retval = -EPERM;
83935+
83936+ if (gr_check_crash_uid(kuid))
83937+ goto error;
83938+ if (gr_check_user_change(kuid, kuid, kuid))
83939+ goto error;
83940+
83941 if (nsown_capable(CAP_SETUID)) {
83942 new->suid = new->uid = kuid;
83943 if (!uid_eq(kuid, old->uid)) {
83944@@ -877,6 +899,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
83945 goto error;
83946 }
83947
83948+ if (gr_check_user_change(kruid, keuid, INVALID_UID))
83949+ goto error;
83950+
83951 if (ruid != (uid_t) -1) {
83952 new->uid = kruid;
83953 if (!uid_eq(kruid, old->uid)) {
83954@@ -959,6 +984,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
83955 goto error;
83956 }
83957
83958+ if (gr_check_group_change(krgid, kegid, INVALID_GID))
83959+ goto error;
83960+
83961 if (rgid != (gid_t) -1)
83962 new->gid = krgid;
83963 if (egid != (gid_t) -1)
83964@@ -1020,12 +1048,16 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
83965 uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
83966 nsown_capable(CAP_SETUID)) {
83967 if (!uid_eq(kuid, old->fsuid)) {
83968+ if (gr_check_user_change(INVALID_UID, INVALID_UID, kuid))
83969+ goto error;
83970+
83971 new->fsuid = kuid;
83972 if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
83973 goto change_okay;
83974 }
83975 }
83976
83977+error:
83978 abort_creds(new);
83979 return old_fsuid;
83980
83981@@ -1058,12 +1090,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
83982 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
83983 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
83984 nsown_capable(CAP_SETGID)) {
83985+ if (gr_check_group_change(INVALID_GID, INVALID_GID, kgid))
83986+ goto error;
83987+
83988 if (!gid_eq(kgid, old->fsgid)) {
83989 new->fsgid = kgid;
83990 goto change_okay;
83991 }
83992 }
83993
83994+error:
83995 abort_creds(new);
83996 return old_fsgid;
83997
83998@@ -1432,19 +1468,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
83999 return -EFAULT;
84000
84001 down_read(&uts_sem);
84002- error = __copy_to_user(&name->sysname, &utsname()->sysname,
84003+ error = __copy_to_user(name->sysname, &utsname()->sysname,
84004 __OLD_UTS_LEN);
84005 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
84006- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
84007+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
84008 __OLD_UTS_LEN);
84009 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
84010- error |= __copy_to_user(&name->release, &utsname()->release,
84011+ error |= __copy_to_user(name->release, &utsname()->release,
84012 __OLD_UTS_LEN);
84013 error |= __put_user(0, name->release + __OLD_UTS_LEN);
84014- error |= __copy_to_user(&name->version, &utsname()->version,
84015+ error |= __copy_to_user(name->version, &utsname()->version,
84016 __OLD_UTS_LEN);
84017 error |= __put_user(0, name->version + __OLD_UTS_LEN);
84018- error |= __copy_to_user(&name->machine, &utsname()->machine,
84019+ error |= __copy_to_user(name->machine, &utsname()->machine,
84020 __OLD_UTS_LEN);
84021 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
84022 up_read(&uts_sem);
84023@@ -1646,6 +1682,13 @@ int do_prlimit(struct task_struct *tsk, unsigned int resource,
84024 */
84025 new_rlim->rlim_cur = 1;
84026 }
84027+ /* Handle the case where a fork and setuid occur and then RLIMIT_NPROC
84028+ is changed to a lower value. Since tasks can be created by the same
84029+ user in between this limit change and an execve by this task, force
84030+ a recheck only for this task by setting PF_NPROC_EXCEEDED
84031+ */
84032+ if (resource == RLIMIT_NPROC && tsk->real_cred->user != INIT_USER)
84033+ tsk->flags |= PF_NPROC_EXCEEDED;
84034 }
84035 if (!retval) {
84036 if (old_rlim)
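
The do_prlimit() addition closes a time-of-check gap: if RLIMIT_NPROC is lowered after a user has already forked tasks, a later setuid+execve pair would never re-test the limit. Flagging the task with PF_NPROC_EXCEEDED forces exactly one recheck at its next execve. A worked sketch of the consumer side, with a stand-in flag value (the real check lives in the execve path):

#define PF_NPROC_EXCEEDED_SKETCH 0x00001000	/* stand-in flag bit */

/* One-shot recheck armed by do_prlimit(): returns -1 (think -EAGAIN)
 * when the user still exceeds the freshly lowered process limit. */
static int execve_nproc_recheck(unsigned int *flags,
				long nr_user_procs, long rlim_nproc)
{
	if (!(*flags & PF_NPROC_EXCEEDED_SKETCH))
		return 0;			/* nothing pending */
	*flags &= ~PF_NPROC_EXCEEDED_SKETCH;	/* consume the flag */
	return nr_user_procs > rlim_nproc ? -1 : 0;
}
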
84037diff --git a/kernel/sysctl.c b/kernel/sysctl.c
84038index 9edcf45..713c960 100644
84039--- a/kernel/sysctl.c
84040+++ b/kernel/sysctl.c
84041@@ -93,7 +93,6 @@
84042
84043
84044 #if defined(CONFIG_SYSCTL)
84045-
84046 /* External variables not in a header file. */
84047 extern int sysctl_overcommit_memory;
84048 extern int sysctl_overcommit_ratio;
84049@@ -119,18 +118,18 @@ extern int blk_iopoll_enabled;
84050
84051 /* Constants used for minimum and maximum */
84052 #ifdef CONFIG_LOCKUP_DETECTOR
84053-static int sixty = 60;
84054-static int neg_one = -1;
84055+static int sixty __read_only = 60;
84056 #endif
84057
84058-static int zero;
84059-static int __maybe_unused one = 1;
84060-static int __maybe_unused two = 2;
84061-static int __maybe_unused three = 3;
84062-static unsigned long one_ul = 1;
84063-static int one_hundred = 100;
84064+static int neg_one __read_only = -1;
84065+static int zero __read_only = 0;
84066+static int __maybe_unused one __read_only = 1;
84067+static int __maybe_unused two __read_only = 2;
84068+static int __maybe_unused three __read_only = 3;
84069+static unsigned long one_ul __read_only = 1;
84070+static int one_hundred __read_only = 100;
84071 #ifdef CONFIG_PRINTK
84072-static int ten_thousand = 10000;
84073+static int ten_thousand __read_only = 10000;
84074 #endif
84075
84076 /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
84077@@ -177,10 +176,8 @@ static int proc_taint(struct ctl_table *table, int write,
84078 void __user *buffer, size_t *lenp, loff_t *ppos);
84079 #endif
84080
84081-#ifdef CONFIG_PRINTK
84082 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
84083 void __user *buffer, size_t *lenp, loff_t *ppos);
84084-#endif
84085
84086 static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
84087 void __user *buffer, size_t *lenp, loff_t *ppos);
84088@@ -211,6 +208,8 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
84089
84090 #endif
84091
84092+extern struct ctl_table grsecurity_table[];
84093+
84094 static struct ctl_table kern_table[];
84095 static struct ctl_table vm_table[];
84096 static struct ctl_table fs_table[];
84097@@ -225,6 +224,20 @@ extern struct ctl_table epoll_table[];
84098 int sysctl_legacy_va_layout;
84099 #endif
84100
84101+#ifdef CONFIG_PAX_SOFTMODE
84102+static ctl_table pax_table[] = {
84103+ {
84104+ .procname = "softmode",
84105+ .data = &pax_softmode,
84106+ .maxlen = sizeof(unsigned int),
84107+ .mode = 0600,
84108+ .proc_handler = &proc_dointvec,
84109+ },
84110+
84111+ { }
84112+};
84113+#endif
84114+
84115 /* The default sysctl tables: */
84116
84117 static struct ctl_table sysctl_base_table[] = {
84118@@ -273,6 +286,22 @@ static int max_extfrag_threshold = 1000;
84119 #endif
84120
84121 static struct ctl_table kern_table[] = {
84122+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
84123+ {
84124+ .procname = "grsecurity",
84125+ .mode = 0500,
84126+ .child = grsecurity_table,
84127+ },
84128+#endif
84129+
84130+#ifdef CONFIG_PAX_SOFTMODE
84131+ {
84132+ .procname = "pax",
84133+ .mode = 0500,
84134+ .child = pax_table,
84135+ },
84136+#endif
84137+
84138 {
84139 .procname = "sched_child_runs_first",
84140 .data = &sysctl_sched_child_runs_first,
84141@@ -607,7 +636,7 @@ static struct ctl_table kern_table[] = {
84142 .data = &modprobe_path,
84143 .maxlen = KMOD_PATH_LEN,
84144 .mode = 0644,
84145- .proc_handler = proc_dostring,
84146+ .proc_handler = proc_dostring_modpriv,
84147 },
84148 {
84149 .procname = "modules_disabled",
84150@@ -774,16 +803,20 @@ static struct ctl_table kern_table[] = {
84151 .extra1 = &zero,
84152 .extra2 = &one,
84153 },
84154+#endif
84155 {
84156 .procname = "kptr_restrict",
84157 .data = &kptr_restrict,
84158 .maxlen = sizeof(int),
84159 .mode = 0644,
84160 .proc_handler = proc_dointvec_minmax_sysadmin,
84161+#ifdef CONFIG_GRKERNSEC_HIDESYM
84162+ .extra1 = &two,
84163+#else
84164 .extra1 = &zero,
84165+#endif
84166 .extra2 = &two,
84167 },
84168-#endif
84169 {
84170 .procname = "ngroups_max",
84171 .data = &ngroups_max,
84172@@ -1025,10 +1058,17 @@ static struct ctl_table kern_table[] = {
84173 */
84174 {
84175 .procname = "perf_event_paranoid",
84176- .data = &sysctl_perf_event_paranoid,
84177- .maxlen = sizeof(sysctl_perf_event_paranoid),
84178+ .data = &sysctl_perf_event_legitimately_concerned,
84179+ .maxlen = sizeof(sysctl_perf_event_legitimately_concerned),
84180 .mode = 0644,
84181- .proc_handler = proc_dointvec,
84182+ /* go ahead, be a hero */
84183+ .proc_handler = proc_dointvec_minmax_sysadmin,
84184+ .extra1 = &neg_one,
84185+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
84186+ .extra2 = &three,
84187+#else
84188+ .extra2 = &two,
84189+#endif
84190 },
84191 {
84192 .procname = "perf_event_mlock_kb",
84193@@ -1282,6 +1322,13 @@ static struct ctl_table vm_table[] = {
84194 .proc_handler = proc_dointvec_minmax,
84195 .extra1 = &zero,
84196 },
84197+ {
84198+ .procname = "heap_stack_gap",
84199+ .data = &sysctl_heap_stack_gap,
84200+ .maxlen = sizeof(sysctl_heap_stack_gap),
84201+ .mode = 0644,
84202+ .proc_handler = proc_doulongvec_minmax,
84203+ },
84204 #else
84205 {
84206 .procname = "nr_trim_pages",
84207@@ -1746,6 +1793,16 @@ int proc_dostring(struct ctl_table *table, int write,
84208 buffer, lenp, ppos);
84209 }
84210
84211+int proc_dostring_modpriv(struct ctl_table *table, int write,
84212+ void __user *buffer, size_t *lenp, loff_t *ppos)
84213+{
84214+ if (write && !capable(CAP_SYS_MODULE))
84215+ return -EPERM;
84216+
84217+ return _proc_do_string(table->data, table->maxlen, write,
84218+ buffer, lenp, ppos);
84219+}
84220+
84221 static size_t proc_skip_spaces(char **buf)
84222 {
84223 size_t ret;
84224@@ -1851,6 +1908,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
84225 len = strlen(tmp);
84226 if (len > *size)
84227 len = *size;
84228+ if (len > sizeof(tmp))
84229+ len = sizeof(tmp);
84230 if (copy_to_user(*buf, tmp, len))
84231 return -EFAULT;
84232 *size -= len;
84233@@ -2015,7 +2074,7 @@ int proc_dointvec(struct ctl_table *table, int write,
84234 static int proc_taint(struct ctl_table *table, int write,
84235 void __user *buffer, size_t *lenp, loff_t *ppos)
84236 {
84237- struct ctl_table t;
84238+ ctl_table_no_const t;
84239 unsigned long tmptaint = get_taint();
84240 int err;
84241
84242@@ -2043,7 +2102,6 @@ static int proc_taint(struct ctl_table *table, int write,
84243 return err;
84244 }
84245
84246-#ifdef CONFIG_PRINTK
84247 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
84248 void __user *buffer, size_t *lenp, loff_t *ppos)
84249 {
84250@@ -2052,7 +2110,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
84251
84252 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
84253 }
84254-#endif
84255
84256 struct do_proc_dointvec_minmax_conv_param {
84257 int *min;
84258@@ -2199,8 +2256,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
84259 *i = val;
84260 } else {
84261 val = convdiv * (*i) / convmul;
84262- if (!first)
84263+ if (!first) {
84264 err = proc_put_char(&buffer, &left, '\t');
84265+ if (err)
84266+ break;
84267+ }
84268 err = proc_put_long(&buffer, &left, val, false);
84269 if (err)
84270 break;
84271@@ -2592,6 +2652,12 @@ int proc_dostring(struct ctl_table *table, int write,
84272 return -ENOSYS;
84273 }
84274
84275+int proc_dostring_modpriv(struct ctl_table *table, int write,
84276+ void __user *buffer, size_t *lenp, loff_t *ppos)
84277+{
84278+ return -ENOSYS;
84279+}
84280+
84281 int proc_dointvec(struct ctl_table *table, int write,
84282 void __user *buffer, size_t *lenp, loff_t *ppos)
84283 {
84284@@ -2648,5 +2714,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
84285 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
84286 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
84287 EXPORT_SYMBOL(proc_dostring);
84288+EXPORT_SYMBOL(proc_dostring_modpriv);
84289 EXPORT_SYMBOL(proc_doulongvec_minmax);
84290 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
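
proc_dostring_modpriv() is a drop-in for proc_dostring() that rejects writes lacking CAP_SYS_MODULE; wiring it to kernel.modprobe means the modprobe path can only be retargeted by a principal that could already load arbitrary modules. The kptr_restrict hunk applies the same philosophy, pinning that sysctl's floor at 2 under GRKERNSEC_HIDESYM. A simplified sketch of the capability-gated handler (the version above calls _proc_do_string directly):

#include <linux/sysctl.h>
#include <linux/capability.h>

/* Simplified shape of the gated handler: reads stay unprivileged,
 * writes require the capability that module loading itself needs. */
static int dostring_modpriv_sketch(struct ctl_table *table, int write,
				   void __user *buffer, size_t *lenp,
				   loff_t *ppos)
{
	if (write && !capable(CAP_SYS_MODULE))
		return -EPERM;
	return proc_dostring(table, write, buffer, lenp, ppos);
}
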
84291diff --git a/kernel/taskstats.c b/kernel/taskstats.c
84292index 145bb4d..b2aa969 100644
84293--- a/kernel/taskstats.c
84294+++ b/kernel/taskstats.c
84295@@ -28,9 +28,12 @@
84296 #include <linux/fs.h>
84297 #include <linux/file.h>
84298 #include <linux/pid_namespace.h>
84299+#include <linux/grsecurity.h>
84300 #include <net/genetlink.h>
84301 #include <linux/atomic.h>
84302
84303+extern int gr_is_taskstats_denied(int pid);
84304+
84305 /*
84306 * Maximum length of a cpumask that can be specified in
84307 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
84308@@ -570,6 +573,9 @@ err:
84309
84310 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
84311 {
84312+ if (gr_is_taskstats_denied(current->pid))
84313+ return -EACCES;
84314+
84315 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
84316 return cmd_attr_register_cpumask(info);
84317 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
84318diff --git a/kernel/time.c b/kernel/time.c
84319index d3617db..c98bbe9 100644
84320--- a/kernel/time.c
84321+++ b/kernel/time.c
84322@@ -172,6 +172,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
84323 return error;
84324
84325 if (tz) {
84326+ /* we log in do_settimeofday called below, so don't log twice
84327+ */
84328+ if (!tv)
84329+ gr_log_timechange();
84330+
84331 sys_tz = *tz;
84332 update_vsyscall_tz();
84333 if (firsttime) {
84334@@ -502,7 +507,7 @@ EXPORT_SYMBOL(usecs_to_jiffies);
84335 * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec
84336 * value to a scaled second value.
84337 */
84338-unsigned long
84339+unsigned long __intentional_overflow(-1)
84340 timespec_to_jiffies(const struct timespec *value)
84341 {
84342 unsigned long sec = value->tv_sec;
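
__intentional_overflow(-1), seen here on timespec_to_jiffies() and later on schedule_timeout(), is an annotation for the size_overflow gcc plugin: it whitelists functions whose arithmetic is allowed to wrap (these rely on clamping to MAX_JIFFY_OFFSET-style limits), so the plugin neither instruments nor reports them. Roughly, the macro expands along these lines (a sketch; the guard macro name here is hypothetical):

/* Without the plugin the annotation must vanish entirely. */
#ifdef SIZE_OVERFLOW_PLUGIN_SKETCH
# define __intentional_overflow(...) \
	__attribute__((intentional_overflow(__VA_ARGS__)))
#else
# define __intentional_overflow(...)
#endif

/* Wrap-tolerant by design: callers clamp the result, so a trap on
 * overflow would be a false positive. */
static unsigned long __intentional_overflow(-1)
secs_to_ticks_sketch(unsigned long sec, unsigned long hz)
{
	return sec * hz;
}
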
84343diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
84344index f11d83b..d016d91 100644
84345--- a/kernel/time/alarmtimer.c
84346+++ b/kernel/time/alarmtimer.c
84347@@ -750,7 +750,7 @@ static int __init alarmtimer_init(void)
84348 struct platform_device *pdev;
84349 int error = 0;
84350 int i;
84351- struct k_clock alarm_clock = {
84352+ static struct k_clock alarm_clock = {
84353 .clock_getres = alarm_clock_getres,
84354 .clock_get = alarm_clock_get,
84355 .timer_create = alarm_timer_create,
84356diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
84357index baeeb5c..c22704a 100644
84358--- a/kernel/time/timekeeping.c
84359+++ b/kernel/time/timekeeping.c
84360@@ -15,6 +15,7 @@
84361 #include <linux/init.h>
84362 #include <linux/mm.h>
84363 #include <linux/sched.h>
84364+#include <linux/grsecurity.h>
84365 #include <linux/syscore_ops.h>
84366 #include <linux/clocksource.h>
84367 #include <linux/jiffies.h>
84368@@ -495,6 +496,8 @@ int do_settimeofday(const struct timespec *tv)
84369 if (!timespec_valid_strict(tv))
84370 return -EINVAL;
84371
84372+ gr_log_timechange();
84373+
84374 raw_spin_lock_irqsave(&timekeeper_lock, flags);
84375 write_seqcount_begin(&timekeeper_seq);
84376
84377diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
84378index 3bdf283..cc68d83 100644
84379--- a/kernel/time/timer_list.c
84380+++ b/kernel/time/timer_list.c
84381@@ -45,12 +45,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
84382
84383 static void print_name_offset(struct seq_file *m, void *sym)
84384 {
84385+#ifdef CONFIG_GRKERNSEC_HIDESYM
84386+ SEQ_printf(m, "<%p>", NULL);
84387+#else
84388 char symname[KSYM_NAME_LEN];
84389
84390 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
84391 SEQ_printf(m, "<%pK>", sym);
84392 else
84393 SEQ_printf(m, "%s", symname);
84394+#endif
84395 }
84396
84397 static void
84398@@ -119,7 +123,11 @@ next_one:
84399 static void
84400 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
84401 {
84402+#ifdef CONFIG_GRKERNSEC_HIDESYM
84403+ SEQ_printf(m, " .base: %p\n", NULL);
84404+#else
84405 SEQ_printf(m, " .base: %pK\n", base);
84406+#endif
84407 SEQ_printf(m, " .index: %d\n",
84408 base->index);
84409 SEQ_printf(m, " .resolution: %Lu nsecs\n",
84410@@ -355,7 +363,11 @@ static int __init init_timer_list_procfs(void)
84411 {
84412 struct proc_dir_entry *pe;
84413
84414+#ifdef CONFIG_GRKERNSEC_PROC_ADD
84415+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
84416+#else
84417 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
84418+#endif
84419 if (!pe)
84420 return -ENOMEM;
84421 return 0;
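
Under GRKERNSEC_HIDESYM the timer_list printer stops resolving kernel addresses altogether and emits a literal NULL placeholder, and init_timer_list_procfs() drops the file to 0400; the timer_stats hunks that follow do the same with 0600 plus %pK. The net effect is that timer callback addresses never reach unprivileged readers. A sketch of the hide-or-resolve decision:

#include <linux/seq_file.h>
#include <linux/kallsyms.h>

/* Sketch: print a symbol name when permitted, never a raw address. */
static void print_name_sketch(struct seq_file *m, unsigned long addr)
{
#ifdef CONFIG_GRKERNSEC_HIDESYM
	seq_printf(m, "<%p>", NULL);			/* hide everything */
#else
	char symname[KSYM_NAME_LEN];

	if (lookup_symbol_name(addr, symname) < 0)
		seq_printf(m, "<%pK>", (void *)addr);	/* masked for non-root */
	else
		seq_printf(m, "%s", symname);		/* name only */
#endif
}
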
84422diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
84423index 0b537f2..40d6c20 100644
84424--- a/kernel/time/timer_stats.c
84425+++ b/kernel/time/timer_stats.c
84426@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
84427 static unsigned long nr_entries;
84428 static struct entry entries[MAX_ENTRIES];
84429
84430-static atomic_t overflow_count;
84431+static atomic_unchecked_t overflow_count;
84432
84433 /*
84434 * The entries are in a hash-table, for fast lookup:
84435@@ -140,7 +140,7 @@ static void reset_entries(void)
84436 nr_entries = 0;
84437 memset(entries, 0, sizeof(entries));
84438 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
84439- atomic_set(&overflow_count, 0);
84440+ atomic_set_unchecked(&overflow_count, 0);
84441 }
84442
84443 static struct entry *alloc_entry(void)
84444@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
84445 if (likely(entry))
84446 entry->count++;
84447 else
84448- atomic_inc(&overflow_count);
84449+ atomic_inc_unchecked(&overflow_count);
84450
84451 out_unlock:
84452 raw_spin_unlock_irqrestore(lock, flags);
84453@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
84454
84455 static void print_name_offset(struct seq_file *m, unsigned long addr)
84456 {
84457+#ifdef CONFIG_GRKERNSEC_HIDESYM
84458+ seq_printf(m, "<%p>", NULL);
84459+#else
84460 char symname[KSYM_NAME_LEN];
84461
84462 if (lookup_symbol_name(addr, symname) < 0)
84463- seq_printf(m, "<%p>", (void *)addr);
84464+ seq_printf(m, "<%pK>", (void *)addr);
84465 else
84466 seq_printf(m, "%s", symname);
84467+#endif
84468 }
84469
84470 static int tstats_show(struct seq_file *m, void *v)
84471@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
84472
84473 seq_puts(m, "Timer Stats Version: v0.2\n");
84474 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
84475- if (atomic_read(&overflow_count))
84476+ if (atomic_read_unchecked(&overflow_count))
84477 seq_printf(m, "Overflow: %d entries\n",
84478- atomic_read(&overflow_count));
84479+ atomic_read_unchecked(&overflow_count));
84480
84481 for (i = 0; i < nr_entries; i++) {
84482 entry = entries + i;
84483@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
84484 {
84485 struct proc_dir_entry *pe;
84486
84487+#ifdef CONFIG_GRKERNSEC_PROC_ADD
84488+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
84489+#else
84490 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
84491+#endif
84492 if (!pe)
84493 return -ENOMEM;
84494 return 0;
84495diff --git a/kernel/timer.c b/kernel/timer.c
84496index 15bc1b4..32da49c 100644
84497--- a/kernel/timer.c
84498+++ b/kernel/timer.c
84499@@ -1366,7 +1366,7 @@ void update_process_times(int user_tick)
84500 /*
84501 * This function runs timers and the timer-tq in bottom half context.
84502 */
84503-static void run_timer_softirq(struct softirq_action *h)
84504+static void run_timer_softirq(void)
84505 {
84506 struct tvec_base *base = __this_cpu_read(tvec_bases);
84507
84508@@ -1429,7 +1429,7 @@ static void process_timeout(unsigned long __data)
84509 *
84510 * In all cases the return value is guaranteed to be non-negative.
84511 */
84512-signed long __sched schedule_timeout(signed long timeout)
84513+signed long __sched __intentional_overflow(-1) schedule_timeout(signed long timeout)
84514 {
84515 struct timer_list timer;
84516 unsigned long expire;
84517@@ -1635,7 +1635,7 @@ static int __cpuinit timer_cpu_notify(struct notifier_block *self,
84518 return NOTIFY_OK;
84519 }
84520
84521-static struct notifier_block __cpuinitdata timers_nb = {
84522+static struct notifier_block timers_nb = {
84523 .notifier_call = timer_cpu_notify,
84524 };
84525
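
The kernel/timer.c hunk tags schedule_timeout() with __intentional_overflow(-1), an annotation consumed by grsecurity's size_overflow gcc plugin to mark arithmetic whose wraparound is expected, so the plugin neither instruments nor flags it. Without the plugin the marker must expand to nothing; a sketch of that fallback (the macro body and the helper are assumptions for illustration — the kernel builds with -fno-strict-overflow, so the wrap is well defined there):

    #include <stdio.h>

    /* when the size_overflow plugin is absent the marker is a no-op */
    #ifndef __intentional_overflow
    #define __intentional_overflow(...)
    #endif

    static signed long __intentional_overflow(-1)
    add_expire(signed long timeout, signed long jiffies_now)
    {
        return timeout + jiffies_now;   /* may wrap; that is intended */
    }

    int main(void)
    {
        printf("%ld\n", add_expire(1000, 2000));
        return 0;
    }
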
84526diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
84527index b8b8560..75b1a09 100644
84528--- a/kernel/trace/blktrace.c
84529+++ b/kernel/trace/blktrace.c
84530@@ -317,7 +317,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
84531 struct blk_trace *bt = filp->private_data;
84532 char buf[16];
84533
84534- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
84535+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
84536
84537 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
84538 }
84539@@ -375,7 +375,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
84540 return 1;
84541
84542 bt = buf->chan->private_data;
84543- atomic_inc(&bt->dropped);
84544+ atomic_inc_unchecked(&bt->dropped);
84545 return 0;
84546 }
84547
84548@@ -476,7 +476,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
84549
84550 bt->dir = dir;
84551 bt->dev = dev;
84552- atomic_set(&bt->dropped, 0);
84553+ atomic_set_unchecked(&bt->dropped, 0);
84554
84555 ret = -EIO;
84556 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
84557diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
84558index 6c508ff..ee55a13 100644
84559--- a/kernel/trace/ftrace.c
84560+++ b/kernel/trace/ftrace.c
84561@@ -1915,12 +1915,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
84562 if (unlikely(ftrace_disabled))
84563 return 0;
84564
84565+ ret = ftrace_arch_code_modify_prepare();
84566+ FTRACE_WARN_ON(ret);
84567+ if (ret)
84568+ return 0;
84569+
84570 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
84571+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
84572 if (ret) {
84573 ftrace_bug(ret, ip);
84574- return 0;
84575 }
84576- return 1;
84577+ return ret ? 0 : 1;
84578 }
84579
84580 /*
84581@@ -3931,8 +3936,10 @@ static int ftrace_process_locs(struct module *mod,
84582 if (!count)
84583 return 0;
84584
84585+ pax_open_kernel();
84586 sort(start, count, sizeof(*start),
84587 ftrace_cmp_ips, ftrace_swap_ips);
84588+ pax_close_kernel();
84589
84590 start_pg = ftrace_allocate_pages(count);
84591 if (!start_pg)
84592@@ -4655,8 +4662,6 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
84593 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
84594
84595 static int ftrace_graph_active;
84596-static struct notifier_block ftrace_suspend_notifier;
84597-
84598 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
84599 {
84600 return 0;
84601@@ -4800,6 +4805,10 @@ ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
84602 return NOTIFY_DONE;
84603 }
84604
84605+static struct notifier_block ftrace_suspend_notifier = {
84606+ .notifier_call = ftrace_suspend_notifier_call
84607+};
84608+
84609 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
84610 trace_func_graph_ent_t entryfunc)
84611 {
84612@@ -4813,7 +4822,6 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
84613 goto out;
84614 }
84615
84616- ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
84617 register_pm_notifier(&ftrace_suspend_notifier);
84618
84619 ftrace_graph_active++;
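
The ftrace hunks replace a runtime assignment to ftrace_suspend_notifier.notifier_call with a designated initializer at the definition. Under the PaX constify plugin, structures made up of function pointers are moved into read-only memory and can no longer be written at runtime; initializing them statically sidesteps the write entirely. A standalone sketch of the pattern, with the struct layout simplified:

    #include <stddef.h>

    struct notifier_block_sketch {
        int (*notifier_call)(void *data, unsigned long state);
    };

    static int suspend_call(void *data, unsigned long state)
    {
        (void)data; (void)state;
        return 0;
    }

    /* initialized at definition: no runtime store is needed, so the
     * object could live in a read-only section */
    static struct notifier_block_sketch ftrace_suspend_notifier = {
        .notifier_call = suspend_call,
    };

    int main(void)
    {
        return ftrace_suspend_notifier.notifier_call(NULL, 0);
    }
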
84620diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
84621index e444ff8..438b8f4 100644
84622--- a/kernel/trace/ring_buffer.c
84623+++ b/kernel/trace/ring_buffer.c
84624@@ -352,9 +352,9 @@ struct buffer_data_page {
84625 */
84626 struct buffer_page {
84627 struct list_head list; /* list of buffer pages */
84628- local_t write; /* index for next write */
84629+ local_unchecked_t write; /* index for next write */
84630 unsigned read; /* index for next read */
84631- local_t entries; /* entries on this page */
84632+ local_unchecked_t entries; /* entries on this page */
84633 unsigned long real_end; /* real end of data */
84634 struct buffer_data_page *page; /* Actual data page */
84635 };
84636@@ -473,8 +473,8 @@ struct ring_buffer_per_cpu {
84637 unsigned long last_overrun;
84638 local_t entries_bytes;
84639 local_t entries;
84640- local_t overrun;
84641- local_t commit_overrun;
84642+ local_unchecked_t overrun;
84643+ local_unchecked_t commit_overrun;
84644 local_t dropped_events;
84645 local_t committing;
84646 local_t commits;
84647@@ -992,8 +992,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
84648 *
84649 * We add a counter to the write field to denote this.
84650 */
84651- old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
84652- old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
84653+ old_write = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->write);
84654+ old_entries = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->entries);
84655
84656 /*
84657 * Just make sure we have seen our old_write and synchronize
84658@@ -1021,8 +1021,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
84659 * cmpxchg to only update if an interrupt did not already
84660 * do it for us. If the cmpxchg fails, we don't care.
84661 */
84662- (void)local_cmpxchg(&next_page->write, old_write, val);
84663- (void)local_cmpxchg(&next_page->entries, old_entries, eval);
84664+ (void)local_cmpxchg_unchecked(&next_page->write, old_write, val);
84665+ (void)local_cmpxchg_unchecked(&next_page->entries, old_entries, eval);
84666
84667 /*
84668 * No need to worry about races with clearing out the commit.
84669@@ -1386,12 +1386,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
84670
84671 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
84672 {
84673- return local_read(&bpage->entries) & RB_WRITE_MASK;
84674+ return local_read_unchecked(&bpage->entries) & RB_WRITE_MASK;
84675 }
84676
84677 static inline unsigned long rb_page_write(struct buffer_page *bpage)
84678 {
84679- return local_read(&bpage->write) & RB_WRITE_MASK;
84680+ return local_read_unchecked(&bpage->write) & RB_WRITE_MASK;
84681 }
84682
84683 static int
84684@@ -1486,7 +1486,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
84685 * bytes consumed in ring buffer from here.
84686 * Increment overrun to account for the lost events.
84687 */
84688- local_add(page_entries, &cpu_buffer->overrun);
84689+ local_add_unchecked(page_entries, &cpu_buffer->overrun);
84690 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
84691 }
84692
84693@@ -2063,7 +2063,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
84694 * it is our responsibility to update
84695 * the counters.
84696 */
84697- local_add(entries, &cpu_buffer->overrun);
84698+ local_add_unchecked(entries, &cpu_buffer->overrun);
84699 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
84700
84701 /*
84702@@ -2213,7 +2213,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
84703 if (tail == BUF_PAGE_SIZE)
84704 tail_page->real_end = 0;
84705
84706- local_sub(length, &tail_page->write);
84707+ local_sub_unchecked(length, &tail_page->write);
84708 return;
84709 }
84710
84711@@ -2248,7 +2248,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
84712 rb_event_set_padding(event);
84713
84714 /* Set the write back to the previous setting */
84715- local_sub(length, &tail_page->write);
84716+ local_sub_unchecked(length, &tail_page->write);
84717 return;
84718 }
84719
84720@@ -2260,7 +2260,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
84721
84722 /* Set write to end of buffer */
84723 length = (tail + length) - BUF_PAGE_SIZE;
84724- local_sub(length, &tail_page->write);
84725+ local_sub_unchecked(length, &tail_page->write);
84726 }
84727
84728 /*
84729@@ -2286,7 +2286,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
84730 * about it.
84731 */
84732 if (unlikely(next_page == commit_page)) {
84733- local_inc(&cpu_buffer->commit_overrun);
84734+ local_inc_unchecked(&cpu_buffer->commit_overrun);
84735 goto out_reset;
84736 }
84737
84738@@ -2342,7 +2342,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
84739 cpu_buffer->tail_page) &&
84740 (cpu_buffer->commit_page ==
84741 cpu_buffer->reader_page))) {
84742- local_inc(&cpu_buffer->commit_overrun);
84743+ local_inc_unchecked(&cpu_buffer->commit_overrun);
84744 goto out_reset;
84745 }
84746 }
84747@@ -2390,7 +2390,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
84748 length += RB_LEN_TIME_EXTEND;
84749
84750 tail_page = cpu_buffer->tail_page;
84751- write = local_add_return(length, &tail_page->write);
84752+ write = local_add_return_unchecked(length, &tail_page->write);
84753
84754 /* set write to only the index of the write */
84755 write &= RB_WRITE_MASK;
84756@@ -2407,7 +2407,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
84757 kmemcheck_annotate_bitfield(event, bitfield);
84758 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
84759
84760- local_inc(&tail_page->entries);
84761+ local_inc_unchecked(&tail_page->entries);
84762
84763 /*
84764 * If this is the first commit on the page, then update
84765@@ -2440,7 +2440,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
84766
84767 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
84768 unsigned long write_mask =
84769- local_read(&bpage->write) & ~RB_WRITE_MASK;
84770+ local_read_unchecked(&bpage->write) & ~RB_WRITE_MASK;
84771 unsigned long event_length = rb_event_length(event);
84772 /*
84773 * This is on the tail page. It is possible that
84774@@ -2450,7 +2450,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
84775 */
84776 old_index += write_mask;
84777 new_index += write_mask;
84778- index = local_cmpxchg(&bpage->write, old_index, new_index);
84779+ index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index);
84780 if (index == old_index) {
84781 /* update counters */
84782 local_sub(event_length, &cpu_buffer->entries_bytes);
84783@@ -2842,7 +2842,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
84784
84785 /* Do the likely case first */
84786 if (likely(bpage->page == (void *)addr)) {
84787- local_dec(&bpage->entries);
84788+ local_dec_unchecked(&bpage->entries);
84789 return;
84790 }
84791
84792@@ -2854,7 +2854,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
84793 start = bpage;
84794 do {
84795 if (bpage->page == (void *)addr) {
84796- local_dec(&bpage->entries);
84797+ local_dec_unchecked(&bpage->entries);
84798 return;
84799 }
84800 rb_inc_page(cpu_buffer, &bpage);
84801@@ -3138,7 +3138,7 @@ static inline unsigned long
84802 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
84803 {
84804 return local_read(&cpu_buffer->entries) -
84805- (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
84806+ (local_read_unchecked(&cpu_buffer->overrun) + cpu_buffer->read);
84807 }
84808
84809 /**
84810@@ -3227,7 +3227,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
84811 return 0;
84812
84813 cpu_buffer = buffer->buffers[cpu];
84814- ret = local_read(&cpu_buffer->overrun);
84815+ ret = local_read_unchecked(&cpu_buffer->overrun);
84816
84817 return ret;
84818 }
84819@@ -3250,7 +3250,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
84820 return 0;
84821
84822 cpu_buffer = buffer->buffers[cpu];
84823- ret = local_read(&cpu_buffer->commit_overrun);
84824+ ret = local_read_unchecked(&cpu_buffer->commit_overrun);
84825
84826 return ret;
84827 }
84828@@ -3335,7 +3335,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
84829 /* if you care about this being correct, lock the buffer */
84830 for_each_buffer_cpu(buffer, cpu) {
84831 cpu_buffer = buffer->buffers[cpu];
84832- overruns += local_read(&cpu_buffer->overrun);
84833+ overruns += local_read_unchecked(&cpu_buffer->overrun);
84834 }
84835
84836 return overruns;
84837@@ -3511,8 +3511,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
84838 /*
84839 * Reset the reader page to size zero.
84840 */
84841- local_set(&cpu_buffer->reader_page->write, 0);
84842- local_set(&cpu_buffer->reader_page->entries, 0);
84843+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
84844+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
84845 local_set(&cpu_buffer->reader_page->page->commit, 0);
84846 cpu_buffer->reader_page->real_end = 0;
84847
84848@@ -3546,7 +3546,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
84849 * want to compare with the last_overrun.
84850 */
84851 smp_mb();
84852- overwrite = local_read(&(cpu_buffer->overrun));
84853+ overwrite = local_read_unchecked(&(cpu_buffer->overrun));
84854
84855 /*
84856 * Here's the tricky part.
84857@@ -4116,8 +4116,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
84858
84859 cpu_buffer->head_page
84860 = list_entry(cpu_buffer->pages, struct buffer_page, list);
84861- local_set(&cpu_buffer->head_page->write, 0);
84862- local_set(&cpu_buffer->head_page->entries, 0);
84863+ local_set_unchecked(&cpu_buffer->head_page->write, 0);
84864+ local_set_unchecked(&cpu_buffer->head_page->entries, 0);
84865 local_set(&cpu_buffer->head_page->page->commit, 0);
84866
84867 cpu_buffer->head_page->read = 0;
84868@@ -4127,14 +4127,14 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
84869
84870 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
84871 INIT_LIST_HEAD(&cpu_buffer->new_pages);
84872- local_set(&cpu_buffer->reader_page->write, 0);
84873- local_set(&cpu_buffer->reader_page->entries, 0);
84874+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
84875+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
84876 local_set(&cpu_buffer->reader_page->page->commit, 0);
84877 cpu_buffer->reader_page->read = 0;
84878
84879 local_set(&cpu_buffer->entries_bytes, 0);
84880- local_set(&cpu_buffer->overrun, 0);
84881- local_set(&cpu_buffer->commit_overrun, 0);
84882+ local_set_unchecked(&cpu_buffer->overrun, 0);
84883+ local_set_unchecked(&cpu_buffer->commit_overrun, 0);
84884 local_set(&cpu_buffer->dropped_events, 0);
84885 local_set(&cpu_buffer->entries, 0);
84886 local_set(&cpu_buffer->committing, 0);
84887@@ -4538,8 +4538,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
84888 rb_init_page(bpage);
84889 bpage = reader->page;
84890 reader->page = *data_page;
84891- local_set(&reader->write, 0);
84892- local_set(&reader->entries, 0);
84893+ local_set_unchecked(&reader->write, 0);
84894+ local_set_unchecked(&reader->entries, 0);
84895 reader->read = 0;
84896 *data_page = bpage;
84897
84898diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
84899index 06a5bce..53ad6e7 100644
84900--- a/kernel/trace/trace.c
84901+++ b/kernel/trace/trace.c
84902@@ -3347,7 +3347,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
84903 return 0;
84904 }
84905
84906-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
84907+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled)
84908 {
84909 /* do nothing if flag is already set */
84910 if (!!(trace_flags & mask) == !!enabled)
84911diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
84912index 51b4448..7be601f 100644
84913--- a/kernel/trace/trace.h
84914+++ b/kernel/trace/trace.h
84915@@ -1035,7 +1035,7 @@ extern const char *__stop___trace_bprintk_fmt[];
84916 void trace_printk_init_buffers(void);
84917 void trace_printk_start_comm(void);
84918 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
84919-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
84920+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled);
84921
84922 /*
84923 * Normal trace_printk() and friends allocates special buffers
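
The trace.c/trace.h pair widens set_tracer_flag()'s mask from unsigned int to unsigned long, presumably to match the unsigned long trace_flags word it is applied to: with the narrow type, any flag bit above 31 would be silently dropped on 64-bit targets. A self-contained illustration of that truncation (the flag value is made up):

    #include <stdio.h>

    static unsigned long trace_flags;

    static void set_flag_narrow(unsigned int mask)   /* old signature */
    {
        trace_flags |= mask;
    }

    static void set_flag_wide(unsigned long mask)    /* new signature */
    {
        trace_flags |= mask;
    }

    int main(void)
    {
        set_flag_narrow((unsigned int)(1UL << 40));  /* truncates to 0 */
        printf("narrow: %#lx\n", trace_flags);
        set_flag_wide(1UL << 40);                    /* bit survives */
        printf("wide:   %#lx\n", trace_flags);
        return 0;
    }
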
84924diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
84925index 6953263..2004e16 100644
84926--- a/kernel/trace/trace_events.c
84927+++ b/kernel/trace/trace_events.c
84928@@ -1748,10 +1748,6 @@ static LIST_HEAD(ftrace_module_file_list);
84929 struct ftrace_module_file_ops {
84930 struct list_head list;
84931 struct module *mod;
84932- struct file_operations id;
84933- struct file_operations enable;
84934- struct file_operations format;
84935- struct file_operations filter;
84936 };
84937
84938 static struct ftrace_module_file_ops *
84939@@ -1792,17 +1788,12 @@ trace_create_file_ops(struct module *mod)
84940
84941 file_ops->mod = mod;
84942
84943- file_ops->id = ftrace_event_id_fops;
84944- file_ops->id.owner = mod;
84945-
84946- file_ops->enable = ftrace_enable_fops;
84947- file_ops->enable.owner = mod;
84948-
84949- file_ops->filter = ftrace_event_filter_fops;
84950- file_ops->filter.owner = mod;
84951-
84952- file_ops->format = ftrace_event_format_fops;
84953- file_ops->format.owner = mod;
84954+ pax_open_kernel();
84955+ mod->trace_id.owner = mod;
84956+ mod->trace_enable.owner = mod;
84957+ mod->trace_filter.owner = mod;
84958+ mod->trace_format.owner = mod;
84959+ pax_close_kernel();
84960
84961 list_add(&file_ops->list, &ftrace_module_file_list);
84962
84963@@ -1895,8 +1886,8 @@ __trace_add_new_mod_event(struct ftrace_event_call *call,
84964 struct ftrace_module_file_ops *file_ops)
84965 {
84966 return __trace_add_new_event(call, tr,
84967- &file_ops->id, &file_ops->enable,
84968- &file_ops->filter, &file_ops->format);
84969+ &file_ops->mod->trace_id, &file_ops->mod->trace_enable,
84970+ &file_ops->mod->trace_filter, &file_ops->mod->trace_format);
84971 }
84972
84973 #else
84974diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
84975index a5e8f48..a9690d2 100644
84976--- a/kernel/trace/trace_mmiotrace.c
84977+++ b/kernel/trace/trace_mmiotrace.c
84978@@ -24,7 +24,7 @@ struct header_iter {
84979 static struct trace_array *mmio_trace_array;
84980 static bool overrun_detected;
84981 static unsigned long prev_overruns;
84982-static atomic_t dropped_count;
84983+static atomic_unchecked_t dropped_count;
84984
84985 static void mmio_reset_data(struct trace_array *tr)
84986 {
84987@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
84988
84989 static unsigned long count_overruns(struct trace_iterator *iter)
84990 {
84991- unsigned long cnt = atomic_xchg(&dropped_count, 0);
84992+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
84993 unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);
84994
84995 if (over > prev_overruns)
84996@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
84997 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
84998 sizeof(*entry), 0, pc);
84999 if (!event) {
85000- atomic_inc(&dropped_count);
85001+ atomic_inc_unchecked(&dropped_count);
85002 return;
85003 }
85004 entry = ring_buffer_event_data(event);
85005@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
85006 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
85007 sizeof(*entry), 0, pc);
85008 if (!event) {
85009- atomic_inc(&dropped_count);
85010+ atomic_inc_unchecked(&dropped_count);
85011 return;
85012 }
85013 entry = ring_buffer_event_data(event);
85014diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
85015index bb922d9..2a54a257 100644
85016--- a/kernel/trace/trace_output.c
85017+++ b/kernel/trace/trace_output.c
85018@@ -294,7 +294,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
85019
85020 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
85021 if (!IS_ERR(p)) {
85022- p = mangle_path(s->buffer + s->len, p, "\n");
85023+ p = mangle_path(s->buffer + s->len, p, "\n\\");
85024 if (p) {
85025 s->len = p - s->buffer;
85026 return 1;
85027@@ -893,14 +893,16 @@ int register_ftrace_event(struct trace_event *event)
85028 goto out;
85029 }
85030
85031+ pax_open_kernel();
85032 if (event->funcs->trace == NULL)
85033- event->funcs->trace = trace_nop_print;
85034+ *(void **)&event->funcs->trace = trace_nop_print;
85035 if (event->funcs->raw == NULL)
85036- event->funcs->raw = trace_nop_print;
85037+ *(void **)&event->funcs->raw = trace_nop_print;
85038 if (event->funcs->hex == NULL)
85039- event->funcs->hex = trace_nop_print;
85040+ *(void **)&event->funcs->hex = trace_nop_print;
85041 if (event->funcs->binary == NULL)
85042- event->funcs->binary = trace_nop_print;
85043+ *(void **)&event->funcs->binary = trace_nop_print;
85044+ pax_close_kernel();
85045
85046 key = event->type & (EVENT_HASHSIZE - 1);
85047
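
In the register_ftrace_event() hunk the NULL-callback defaults are installed through *(void **)&event->funcs->trace inside a pax_open_kernel()/pax_close_kernel() pair: the cast launders the store past the constify plugin's type-based checks, and the open/close pair temporarily lifts write protection on the otherwise read-only ops table (on x86 by toggling CR0.WP). A runnable userspace sketch with both primitives stubbed out; the function-pointer-to-void* conversion is the same common extension the hunk relies on.

    #include <stdio.h>

    /* no-op stand-ins for the PaX primitives */
    static void pax_open_kernel(void)  { }
    static void pax_close_kernel(void) { }

    struct trace_event_functions_sketch {
        int (*trace)(void);
    };

    static int trace_nop_print(void) { return 0; }

    int main(void)
    {
        struct trace_event_functions_sketch funcs = { .trace = NULL };

        pax_open_kernel();
        if (funcs.trace == NULL)
            /* write through a data pointer, as in the hunk above */
            *(void **)&funcs.trace = (void *)trace_nop_print;
        pax_close_kernel();

        return funcs.trace();
    }
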
85048diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
85049index b20428c..4845a10 100644
85050--- a/kernel/trace/trace_stack.c
85051+++ b/kernel/trace/trace_stack.c
85052@@ -68,7 +68,7 @@ check_stack(unsigned long ip, unsigned long *stack)
85053 return;
85054
85055 /* we do not handle interrupt stacks yet */
85056- if (!object_is_on_stack(stack))
85057+ if (!object_starts_on_stack(stack))
85058 return;
85059
85060 local_irq_save(flags);
85061diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
85062index 9064b91..1f5d2f8 100644
85063--- a/kernel/user_namespace.c
85064+++ b/kernel/user_namespace.c
85065@@ -82,6 +82,21 @@ int create_user_ns(struct cred *new)
85066 !kgid_has_mapping(parent_ns, group))
85067 return -EPERM;
85068
85069+#ifdef CONFIG_GRKERNSEC
85070+ /*
85071+ * This doesn't really inspire confidence:
85072+ * http://marc.info/?l=linux-kernel&m=135543612731939&w=2
85073+ * http://marc.info/?l=linux-kernel&m=135545831607095&w=2
85074+ * Increases kernel attack surface in areas developers
85075+ * previously cared little about ("low importance due
85076+ * to requiring "root" capability")
85077+ * To be removed when this code receives *proper* review
85078+ */
85079+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
85080+ !capable(CAP_SETGID))
85081+ return -EPERM;
85082+#endif
85083+
85084 ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
85085 if (!ns)
85086 return -ENOMEM;
85087@@ -862,7 +877,7 @@ static int userns_install(struct nsproxy *nsproxy, void *ns)
85088 if (atomic_read(&current->mm->mm_users) > 1)
85089 return -EINVAL;
85090
85091- if (current->fs->users != 1)
85092+ if (atomic_read(&current->fs->users) != 1)
85093 return -EINVAL;
85094
85095 if (!ns_capable(user_ns, CAP_SYS_ADMIN))
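
The create_user_ns() hunk refuses unprivileged user-namespace creation outright: unless the caller already holds CAP_SYS_ADMIN, CAP_SETUID and CAP_SETGID, it returns -EPERM, closing off the attack surface the two marc.info threads in the comment point at. A sketch of the gate with a stubbed capable(); the capability numbers are the real ones from linux/capability.h.

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    enum { CAP_SETGID = 6, CAP_SETUID = 7, CAP_SYS_ADMIN = 21 };

    /* stub: pretend the caller holds every capability */
    static bool capable(int cap) { (void)cap; return true; }

    static int create_user_ns_sketch(void)
    {
        if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
            !capable(CAP_SETGID))
            return -EPERM;
        /* ... allocate and set up the namespace ... */
        return 0;
    }

    int main(void)
    {
        printf("create_user_ns: %d\n", create_user_ns_sketch());
        return 0;
    }
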
85096diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
85097index 4f69f9a..7c6f8f8 100644
85098--- a/kernel/utsname_sysctl.c
85099+++ b/kernel/utsname_sysctl.c
85100@@ -47,7 +47,7 @@ static void put_uts(ctl_table *table, int write, void *which)
85101 static int proc_do_uts_string(ctl_table *table, int write,
85102 void __user *buffer, size_t *lenp, loff_t *ppos)
85103 {
85104- struct ctl_table uts_table;
85105+ ctl_table_no_const uts_table;
85106 int r;
85107 memcpy(&uts_table, table, sizeof(uts_table));
85108 uts_table.data = get_uts(table, write);
85109diff --git a/kernel/watchdog.c b/kernel/watchdog.c
85110index 05039e3..17490c7 100644
85111--- a/kernel/watchdog.c
85112+++ b/kernel/watchdog.c
85113@@ -531,7 +531,7 @@ int proc_dowatchdog(struct ctl_table *table, int write,
85114 }
85115 #endif /* CONFIG_SYSCTL */
85116
85117-static struct smp_hotplug_thread watchdog_threads = {
85118+static struct smp_hotplug_thread watchdog_threads __read_only = {
85119 .store = &softlockup_watchdog,
85120 .thread_should_run = watchdog_should_run,
85121 .thread_fn = watchdog,
85122diff --git a/kernel/workqueue.c b/kernel/workqueue.c
85123index 6f01921..139869b 100644
85124--- a/kernel/workqueue.c
85125+++ b/kernel/workqueue.c
85126@@ -4596,7 +4596,7 @@ static void rebind_workers(struct worker_pool *pool)
85127 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
85128 worker_flags |= WORKER_REBOUND;
85129 worker_flags &= ~WORKER_UNBOUND;
85130- ACCESS_ONCE(worker->flags) = worker_flags;
85131+ ACCESS_ONCE_RW(worker->flags) = worker_flags;
85132 }
85133
85134 spin_unlock_irq(&pool->lock);
85135diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
85136index 74fdc5c..3310593 100644
85137--- a/lib/Kconfig.debug
85138+++ b/lib/Kconfig.debug
85139@@ -549,7 +549,7 @@ config DEBUG_MUTEXES
85140
85141 config DEBUG_LOCK_ALLOC
85142 bool "Lock debugging: detect incorrect freeing of live locks"
85143- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
85144+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
85145 select DEBUG_SPINLOCK
85146 select DEBUG_MUTEXES
85147 select LOCKDEP
85148@@ -563,7 +563,7 @@ config DEBUG_LOCK_ALLOC
85149
85150 config PROVE_LOCKING
85151 bool "Lock debugging: prove locking correctness"
85152- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
85153+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
85154 select LOCKDEP
85155 select DEBUG_SPINLOCK
85156 select DEBUG_MUTEXES
85157@@ -614,7 +614,7 @@ config LOCKDEP
85158
85159 config LOCK_STAT
85160 bool "Lock usage statistics"
85161- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
85162+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
85163 select LOCKDEP
85164 select DEBUG_SPINLOCK
85165 select DEBUG_MUTEXES
85166@@ -1282,6 +1282,7 @@ config LATENCYTOP
85167 depends on DEBUG_KERNEL
85168 depends on STACKTRACE_SUPPORT
85169 depends on PROC_FS
85170+ depends on !GRKERNSEC_HIDESYM
85171 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
85172 select KALLSYMS
85173 select KALLSYMS_ALL
85174@@ -1298,7 +1299,7 @@ config ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
85175 config DEBUG_STRICT_USER_COPY_CHECKS
85176 bool "Strict user copy size checks"
85177 depends on ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
85178- depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
85179+ depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
85180 help
85181 Enabling this option turns a certain set of sanity checks for user
85182 copy operations into compile time failures.
85183@@ -1328,7 +1329,7 @@ config INTERVAL_TREE_TEST
85184
85185 config PROVIDE_OHCI1394_DMA_INIT
85186 bool "Remote debugging over FireWire early on boot"
85187- depends on PCI && X86
85188+ depends on PCI && X86 && !GRKERNSEC
85189 help
85190 If you want to debug problems which hang or crash the kernel early
85191 on boot and the crashing machine has a FireWire port, you can use
85192@@ -1357,7 +1358,7 @@ config PROVIDE_OHCI1394_DMA_INIT
85193
85194 config FIREWIRE_OHCI_REMOTE_DMA
85195 bool "Remote debugging over FireWire with firewire-ohci"
85196- depends on FIREWIRE_OHCI
85197+ depends on FIREWIRE_OHCI && !GRKERNSEC
85198 help
85199 This option lets you use the FireWire bus for remote debugging
85200 with help of the firewire-ohci driver. It enables unfiltered
85201diff --git a/lib/Makefile b/lib/Makefile
85202index c55a037..fb46e3b 100644
85203--- a/lib/Makefile
85204+++ b/lib/Makefile
85205@@ -50,7 +50,7 @@ obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
85206
85207 obj-$(CONFIG_BTREE) += btree.o
85208 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
85209-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
85210+obj-y += list_debug.o
85211 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
85212
85213 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
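
The Makefile change (obj-$(CONFIG_DEBUG_LIST) becomes obj-y) builds list_debug.o unconditionally; as the list_debug.c diff further below shows, the original debug checks are then fenced off with #ifdef CONFIG_DEBUG_LIST inside the file, while the new pax_list_* helpers stay available in every configuration. The pattern in miniature (the config macro is assumed, not defined here):

    #include <stdio.h>

    /* always compiled; only the optional checks are conditional */
    static int list_add_sketch(void)
    {
    #ifdef CONFIG_DEBUG_LIST
        puts("corruption checks active");
    #endif
        /* unconditional part of the helper */
        return 0;
    }

    int main(void) { return list_add_sketch(); }
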
85214diff --git a/lib/bitmap.c b/lib/bitmap.c
85215index 06f7e4f..f3cf2b0 100644
85216--- a/lib/bitmap.c
85217+++ b/lib/bitmap.c
85218@@ -422,7 +422,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
85219 {
85220 int c, old_c, totaldigits, ndigits, nchunks, nbits;
85221 u32 chunk;
85222- const char __user __force *ubuf = (const char __user __force *)buf;
85223+ const char __user *ubuf = (const char __force_user *)buf;
85224
85225 bitmap_zero(maskp, nmaskbits);
85226
85227@@ -507,7 +507,7 @@ int bitmap_parse_user(const char __user *ubuf,
85228 {
85229 if (!access_ok(VERIFY_READ, ubuf, ulen))
85230 return -EFAULT;
85231- return __bitmap_parse((const char __force *)ubuf,
85232+ return __bitmap_parse((const char __force_kernel *)ubuf,
85233 ulen, 1, maskp, nmaskbits);
85234
85235 }
85236@@ -598,7 +598,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
85237 {
85238 unsigned a, b;
85239 int c, old_c, totaldigits;
85240- const char __user __force *ubuf = (const char __user __force *)buf;
85241+ const char __user *ubuf = (const char __force_user *)buf;
85242 int exp_digit, in_range;
85243
85244 totaldigits = c = 0;
85245@@ -698,7 +698,7 @@ int bitmap_parselist_user(const char __user *ubuf,
85246 {
85247 if (!access_ok(VERIFY_READ, ubuf, ulen))
85248 return -EFAULT;
85249- return __bitmap_parselist((const char __force *)ubuf,
85250+ return __bitmap_parselist((const char __force_kernel *)ubuf,
85251 ulen, 1, maskp, nmaskbits);
85252 }
85253 EXPORT_SYMBOL(bitmap_parselist_user);
85254diff --git a/lib/bug.c b/lib/bug.c
85255index 1686034..a9c00c8 100644
85256--- a/lib/bug.c
85257+++ b/lib/bug.c
85258@@ -134,6 +134,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
85259 return BUG_TRAP_TYPE_NONE;
85260
85261 bug = find_bug(bugaddr);
85262+ if (!bug)
85263+ return BUG_TRAP_TYPE_NONE;
85264
85265 file = NULL;
85266 line = 0;
85267diff --git a/lib/debugobjects.c b/lib/debugobjects.c
85268index 37061ed..da83f48 100644
85269--- a/lib/debugobjects.c
85270+++ b/lib/debugobjects.c
85271@@ -286,7 +286,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
85272 if (limit > 4)
85273 return;
85274
85275- is_on_stack = object_is_on_stack(addr);
85276+ is_on_stack = object_starts_on_stack(addr);
85277 if (is_on_stack == onstack)
85278 return;
85279
85280diff --git a/lib/devres.c b/lib/devres.c
85281index 8235331..5881053 100644
85282--- a/lib/devres.c
85283+++ b/lib/devres.c
85284@@ -81,7 +81,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
85285 void devm_iounmap(struct device *dev, void __iomem *addr)
85286 {
85287 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
85288- (void *)addr));
85289+ (void __force *)addr));
85290 iounmap(addr);
85291 }
85292 EXPORT_SYMBOL(devm_iounmap);
85293@@ -224,7 +224,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
85294 {
85295 ioport_unmap(addr);
85296 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
85297- devm_ioport_map_match, (void *)addr));
85298+ devm_ioport_map_match, (void __force *)addr));
85299 }
85300 EXPORT_SYMBOL(devm_ioport_unmap);
85301 #endif /* CONFIG_HAS_IOPORT */
85302diff --git a/lib/div64.c b/lib/div64.c
85303index a163b6c..9618fa5 100644
85304--- a/lib/div64.c
85305+++ b/lib/div64.c
85306@@ -59,7 +59,7 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
85307 EXPORT_SYMBOL(__div64_32);
85308
85309 #ifndef div_s64_rem
85310-s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
85311+s64 __intentional_overflow(-1) div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
85312 {
85313 u64 quotient;
85314
85315@@ -90,7 +90,7 @@ EXPORT_SYMBOL(div_s64_rem);
85316 * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
85317 */
85318 #ifndef div64_u64
85319-u64 div64_u64(u64 dividend, u64 divisor)
85320+u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
85321 {
85322 u32 high = divisor >> 32;
85323 u64 quot;
85324diff --git a/lib/dma-debug.c b/lib/dma-debug.c
85325index d87a17a..ac0d79a 100644
85326--- a/lib/dma-debug.c
85327+++ b/lib/dma-debug.c
85328@@ -768,7 +768,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
85329
85330 void dma_debug_add_bus(struct bus_type *bus)
85331 {
85332- struct notifier_block *nb;
85333+ notifier_block_no_const *nb;
85334
85335 if (global_disable)
85336 return;
85337@@ -945,7 +945,7 @@ static void check_unmap(struct dma_debug_entry *ref)
85338
85339 static void check_for_stack(struct device *dev, void *addr)
85340 {
85341- if (object_is_on_stack(addr))
85342+ if (object_starts_on_stack(addr))
85343 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
85344 "stack [addr=%p]\n", addr);
85345 }
85346diff --git a/lib/inflate.c b/lib/inflate.c
85347index 013a761..c28f3fc 100644
85348--- a/lib/inflate.c
85349+++ b/lib/inflate.c
85350@@ -269,7 +269,7 @@ static void free(void *where)
85351 malloc_ptr = free_mem_ptr;
85352 }
85353 #else
85354-#define malloc(a) kmalloc(a, GFP_KERNEL)
85355+#define malloc(a) kmalloc((a), GFP_KERNEL)
85356 #define free(a) kfree(a)
85357 #endif
85358
85359diff --git a/lib/ioremap.c b/lib/ioremap.c
85360index 0c9216c..863bd89 100644
85361--- a/lib/ioremap.c
85362+++ b/lib/ioremap.c
85363@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
85364 unsigned long next;
85365
85366 phys_addr -= addr;
85367- pmd = pmd_alloc(&init_mm, pud, addr);
85368+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
85369 if (!pmd)
85370 return -ENOMEM;
85371 do {
85372@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
85373 unsigned long next;
85374
85375 phys_addr -= addr;
85376- pud = pud_alloc(&init_mm, pgd, addr);
85377+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
85378 if (!pud)
85379 return -ENOMEM;
85380 do {
85381diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
85382index bd2bea9..6b3c95e 100644
85383--- a/lib/is_single_threaded.c
85384+++ b/lib/is_single_threaded.c
85385@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
85386 struct task_struct *p, *t;
85387 bool ret;
85388
85389+ if (!mm)
85390+ return true;
85391+
85392 if (atomic_read(&task->signal->live) != 1)
85393 return false;
85394
85395diff --git a/lib/kobject.c b/lib/kobject.c
85396index b7e29a6..2f3ca75 100644
85397--- a/lib/kobject.c
85398+++ b/lib/kobject.c
85399@@ -805,7 +805,7 @@ static struct kset *kset_create(const char *name,
85400 kset = kzalloc(sizeof(*kset), GFP_KERNEL);
85401 if (!kset)
85402 return NULL;
85403- retval = kobject_set_name(&kset->kobj, name);
85404+ retval = kobject_set_name(&kset->kobj, "%s", name);
85405 if (retval) {
85406 kfree(kset);
85407 return NULL;
85408@@ -859,9 +859,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
85409
85410
85411 static DEFINE_SPINLOCK(kobj_ns_type_lock);
85412-static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
85413+static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __read_only;
85414
85415-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
85416+int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
85417 {
85418 enum kobj_ns_type type = ops->type;
85419 int error;
85420diff --git a/lib/list_debug.c b/lib/list_debug.c
85421index c24c2f7..06e070b 100644
85422--- a/lib/list_debug.c
85423+++ b/lib/list_debug.c
85424@@ -11,7 +11,9 @@
85425 #include <linux/bug.h>
85426 #include <linux/kernel.h>
85427 #include <linux/rculist.h>
85428+#include <linux/mm.h>
85429
85430+#ifdef CONFIG_DEBUG_LIST
85431 /*
85432 * Insert a new entry between two known consecutive entries.
85433 *
85434@@ -19,21 +21,32 @@
85435 * the prev/next entries already!
85436 */
85437
85438-void __list_add(struct list_head *new,
85439- struct list_head *prev,
85440- struct list_head *next)
85441+static bool __list_add_debug(struct list_head *new,
85442+ struct list_head *prev,
85443+ struct list_head *next)
85444 {
85445- WARN(next->prev != prev,
85446+ if (WARN(next->prev != prev,
85447 "list_add corruption. next->prev should be "
85448 "prev (%p), but was %p. (next=%p).\n",
85449- prev, next->prev, next);
85450- WARN(prev->next != next,
85451+ prev, next->prev, next) ||
85452+ WARN(prev->next != next,
85453 "list_add corruption. prev->next should be "
85454 "next (%p), but was %p. (prev=%p).\n",
85455- next, prev->next, prev);
85456- WARN(new == prev || new == next,
85457- "list_add double add: new=%p, prev=%p, next=%p.\n",
85458- new, prev, next);
85459+ next, prev->next, prev) ||
85460+ WARN(new == prev || new == next,
85461+ "list_add double add: new=%p, prev=%p, next=%p.\n",
85462+ new, prev, next))
85463+ return false;
85464+ return true;
85465+}
85466+
85467+void __list_add(struct list_head *new,
85468+ struct list_head *prev,
85469+ struct list_head *next)
85470+{
85471+ if (!__list_add_debug(new, prev, next))
85472+ return;
85473+
85474 next->prev = new;
85475 new->next = next;
85476 new->prev = prev;
85477@@ -41,7 +54,7 @@ void __list_add(struct list_head *new,
85478 }
85479 EXPORT_SYMBOL(__list_add);
85480
85481-void __list_del_entry(struct list_head *entry)
85482+static bool __list_del_entry_debug(struct list_head *entry)
85483 {
85484 struct list_head *prev, *next;
85485
85486@@ -60,9 +73,16 @@ void __list_del_entry(struct list_head *entry)
85487 WARN(next->prev != entry,
85488 "list_del corruption. next->prev should be %p, "
85489 "but was %p\n", entry, next->prev))
85490+ return false;
85491+ return true;
85492+}
85493+
85494+void __list_del_entry(struct list_head *entry)
85495+{
85496+ if (!__list_del_entry_debug(entry))
85497 return;
85498
85499- __list_del(prev, next);
85500+ __list_del(entry->prev, entry->next);
85501 }
85502 EXPORT_SYMBOL(__list_del_entry);
85503
85504@@ -86,15 +106,85 @@ EXPORT_SYMBOL(list_del);
85505 void __list_add_rcu(struct list_head *new,
85506 struct list_head *prev, struct list_head *next)
85507 {
85508- WARN(next->prev != prev,
85509- "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
85510- prev, next->prev, next);
85511- WARN(prev->next != next,
85512- "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
85513- next, prev->next, prev);
85514+ if (!__list_add_debug(new, prev, next))
85515+ return;
85516+
85517 new->next = next;
85518 new->prev = prev;
85519 rcu_assign_pointer(list_next_rcu(prev), new);
85520 next->prev = new;
85521 }
85522 EXPORT_SYMBOL(__list_add_rcu);
85523+#endif
85524+
85525+void __pax_list_add(struct list_head *new, struct list_head *prev, struct list_head *next)
85526+{
85527+#ifdef CONFIG_DEBUG_LIST
85528+ if (!__list_add_debug(new, prev, next))
85529+ return;
85530+#endif
85531+
85532+ pax_open_kernel();
85533+ next->prev = new;
85534+ new->next = next;
85535+ new->prev = prev;
85536+ prev->next = new;
85537+ pax_close_kernel();
85538+}
85539+EXPORT_SYMBOL(__pax_list_add);
85540+
85541+void pax_list_del(struct list_head *entry)
85542+{
85543+#ifdef CONFIG_DEBUG_LIST
85544+ if (!__list_del_entry_debug(entry))
85545+ return;
85546+#endif
85547+
85548+ pax_open_kernel();
85549+ __list_del(entry->prev, entry->next);
85550+ entry->next = LIST_POISON1;
85551+ entry->prev = LIST_POISON2;
85552+ pax_close_kernel();
85553+}
85554+EXPORT_SYMBOL(pax_list_del);
85555+
85556+void pax_list_del_init(struct list_head *entry)
85557+{
85558+ pax_open_kernel();
85559+ __list_del(entry->prev, entry->next);
85560+ INIT_LIST_HEAD(entry);
85561+ pax_close_kernel();
85562+}
85563+EXPORT_SYMBOL(pax_list_del_init);
85564+
85565+void __pax_list_add_rcu(struct list_head *new,
85566+ struct list_head *prev, struct list_head *next)
85567+{
85568+#ifdef CONFIG_DEBUG_LIST
85569+ if (!__list_add_debug(new, prev, next))
85570+ return;
85571+#endif
85572+
85573+ pax_open_kernel();
85574+ new->next = next;
85575+ new->prev = prev;
85576+ rcu_assign_pointer(list_next_rcu(prev), new);
85577+ next->prev = new;
85578+ pax_close_kernel();
85579+}
85580+EXPORT_SYMBOL(__pax_list_add_rcu);
85581+
85582+void pax_list_del_rcu(struct list_head *entry)
85583+{
85584+#ifdef CONFIG_DEBUG_LIST
85585+ if (!__list_del_entry_debug(entry))
85586+ return;
85587+#endif
85588+
85589+ pax_open_kernel();
85590+ __list_del(entry->prev, entry->next);
85591+ entry->next = LIST_POISON1;
85592+ entry->prev = LIST_POISON2;
85593+ pax_close_kernel();
85594+}
85595+EXPORT_SYMBOL(pax_list_del_rcu);
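
Two things change in list_debug.c: the corruption checks are factored into boolean helpers so a corrupted list_add/list_del is refused rather than performed after the WARN, and pax_list_* variants are added that do the pointer writes between pax_open_kernel()/pax_close_kernel() so lists embedded in read-only data can still be maintained. A compact sketch of the refuse-on-corruption half, with the types simplified:

    #include <stdbool.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    static bool __list_add_ok(struct list_head *entry,
                              struct list_head *prev, struct list_head *next)
    {
        if (next->prev != prev || prev->next != next ||
            entry == prev || entry == next) {
            fprintf(stderr, "list_add corruption, insertion refused\n");
            return false;
        }
        return true;
    }

    static void list_add_sketch(struct list_head *entry, struct list_head *head)
    {
        if (!__list_add_ok(entry, head, head->next))
            return;                       /* refuse instead of corrupting */
        head->next->prev = entry;
        entry->next = head->next;
        entry->prev = head;
        head->next = entry;
    }

    int main(void)
    {
        struct list_head head = { &head, &head }, a;
        list_add_sketch(&a, &head);       /* fine */
        list_add_sketch(&a, &head);       /* double add: refused */
        return 0;
    }
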
85596diff --git a/lib/radix-tree.c b/lib/radix-tree.c
85597index e796429..6e38f9f 100644
85598--- a/lib/radix-tree.c
85599+++ b/lib/radix-tree.c
85600@@ -92,7 +92,7 @@ struct radix_tree_preload {
85601 int nr;
85602 struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
85603 };
85604-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
85605+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
85606
85607 static inline void *ptr_to_indirect(void *ptr)
85608 {
85609diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
85610index bb2b201..46abaf9 100644
85611--- a/lib/strncpy_from_user.c
85612+++ b/lib/strncpy_from_user.c
85613@@ -21,7 +21,7 @@
85614 */
85615 static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
85616 {
85617- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
85618+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
85619 long res = 0;
85620
85621 /*
85622diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
85623index a28df52..3d55877 100644
85624--- a/lib/strnlen_user.c
85625+++ b/lib/strnlen_user.c
85626@@ -26,7 +26,7 @@
85627 */
85628 static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
85629 {
85630- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
85631+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
85632 long align, res = 0;
85633 unsigned long c;
85634
85635diff --git a/lib/swiotlb.c b/lib/swiotlb.c
85636index d23762e..e21eab2 100644
85637--- a/lib/swiotlb.c
85638+++ b/lib/swiotlb.c
85639@@ -664,7 +664,7 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent);
85640
85641 void
85642 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
85643- dma_addr_t dev_addr)
85644+ dma_addr_t dev_addr, struct dma_attrs *attrs)
85645 {
85646 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
85647
85648diff --git a/lib/usercopy.c b/lib/usercopy.c
85649index 4f5b1dd..7cab418 100644
85650--- a/lib/usercopy.c
85651+++ b/lib/usercopy.c
85652@@ -7,3 +7,9 @@ void copy_from_user_overflow(void)
85653 WARN(1, "Buffer overflow detected!\n");
85654 }
85655 EXPORT_SYMBOL(copy_from_user_overflow);
85656+
85657+void copy_to_user_overflow(void)
85658+{
85659+ WARN(1, "Buffer overflow detected!\n");
85660+}
85661+EXPORT_SYMBOL(copy_to_user_overflow);
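
copy_to_user_overflow() is the companion of the copy_from_user_overflow() above it: a reporting hook landed in when a user-copy length fails its object-size check at runtime, warning loudly instead of proceeding. A userspace analogue of that refuse-and-report shape; the helper names are stand-ins, not the kernel's entry points.

    #include <stdio.h>
    #include <string.h>

    static void copy_to_user_overflow_sketch(void)
    {
        fprintf(stderr, "Buffer overflow detected!\n");
    }

    static long checked_copy(void *dst, size_t dst_size,
                             const void *src, size_t n)
    {
        if (n > dst_size) {
            copy_to_user_overflow_sketch();
            return -1;                    /* refuse the oversized copy */
        }
        memcpy(dst, src, n);
        return 0;
    }

    int main(void)
    {
        char buf[8];
        checked_copy(buf, sizeof(buf), "fits", 5);
        checked_copy(buf, sizeof(buf), "definitely does not fit", 24);
        return 0;
    }
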
85662diff --git a/lib/vsprintf.c b/lib/vsprintf.c
85663index e149c64..24aa71a 100644
85664--- a/lib/vsprintf.c
85665+++ b/lib/vsprintf.c
85666@@ -16,6 +16,9 @@
85667 * - scnprintf and vscnprintf
85668 */
85669
85670+#ifdef CONFIG_GRKERNSEC_HIDESYM
85671+#define __INCLUDED_BY_HIDESYM 1
85672+#endif
85673 #include <stdarg.h>
85674 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
85675 #include <linux/types.h>
85676@@ -981,7 +984,11 @@ char *netdev_feature_string(char *buf, char *end, const u8 *addr,
85677 return number(buf, end, *(const netdev_features_t *)addr, spec);
85678 }
85679
85680+#ifdef CONFIG_GRKERNSEC_HIDESYM
85681+int kptr_restrict __read_mostly = 2;
85682+#else
85683 int kptr_restrict __read_mostly;
85684+#endif
85685
85686 /*
85687 * Show a '%p' thing. A kernel extension is that the '%p' is followed
85688@@ -994,6 +1001,7 @@ int kptr_restrict __read_mostly;
85689 * - 'f' For simple symbolic function names without offset
85690 * - 'S' For symbolic direct pointers with offset
85691 * - 's' For symbolic direct pointers without offset
85692+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
85693 * - '[FfSs]R' as above with __builtin_extract_return_addr() translation
85694 * - 'B' For backtraced symbolic direct pointers with offset
85695 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
85696@@ -1052,12 +1060,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
85697
85698 if (!ptr && *fmt != 'K') {
85699 /*
85700- * Print (null) with the same width as a pointer so it makes
85701+ * Print (nil) with the same width as a pointer so it makes
85702 * tabular output look nice.
85703 */
85704 if (spec.field_width == -1)
85705 spec.field_width = default_width;
85706- return string(buf, end, "(null)", spec);
85707+ return string(buf, end, "(nil)", spec);
85708 }
85709
85710 switch (*fmt) {
85711@@ -1067,6 +1075,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
85712 /* Fallthrough */
85713 case 'S':
85714 case 's':
85715+#ifdef CONFIG_GRKERNSEC_HIDESYM
85716+ break;
85717+#else
85718+ return symbol_string(buf, end, ptr, spec, fmt);
85719+#endif
85720+ case 'A':
85721 case 'B':
85722 return symbol_string(buf, end, ptr, spec, fmt);
85723 case 'R':
85724@@ -1107,6 +1121,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
85725 va_end(va);
85726 return buf;
85727 }
85728+ case 'P':
85729+ break;
85730 case 'K':
85731 /*
85732 * %pK cannot be used in IRQ context because its test
85733@@ -1136,6 +1152,21 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
85734 return number(buf, end,
85735 (unsigned long long) *((phys_addr_t *)ptr), spec);
85736 }
85737+
85738+#ifdef CONFIG_GRKERNSEC_HIDESYM
85739+ /* 'P' = approved pointers to copy to userland,
85740+ as in the /proc/kallsyms case, as we make it display nothing
85741+ for non-root users, and the real contents for root users
85742+ Also ignore 'K' pointers, since we force their NULLing for non-root users
85743+ above
85744+ */
85745+ if ((unsigned long)ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'K' && is_usercopy_object(buf)) {
85746+ printk(KERN_ALERT "grsec: kernel infoleak detected! Please report this log to spender@grsecurity.net.\n");
85747+ dump_stack();
85748+ ptr = NULL;
85749+ }
85750+#endif
85751+
85752 spec.flags |= SMALL;
85753 if (spec.field_width == -1) {
85754 spec.field_width = default_width;
85755@@ -1857,11 +1888,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
85756 typeof(type) value; \
85757 if (sizeof(type) == 8) { \
85758 args = PTR_ALIGN(args, sizeof(u32)); \
85759- *(u32 *)&value = *(u32 *)args; \
85760- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
85761+ *(u32 *)&value = *(const u32 *)args; \
85762+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
85763 } else { \
85764 args = PTR_ALIGN(args, sizeof(type)); \
85765- value = *(typeof(type) *)args; \
85766+ value = *(const typeof(type) *)args; \
85767 } \
85768 args += sizeof(type); \
85769 value; \
85770@@ -1924,7 +1955,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
85771 case FORMAT_TYPE_STR: {
85772 const char *str_arg = args;
85773 args += strlen(str_arg) + 1;
85774- str = string(str, end, (char *)str_arg, spec);
85775+ str = string(str, end, str_arg, spec);
85776 break;
85777 }
85778
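
The vsprintf changes pull several HIDESYM pieces together: kptr_restrict defaults to 2 (so %pK censors for everyone, root included), '%pA' becomes the whitelisted symbolic format, '%pP' marks pointers approved to reach userland, and a catch-all check NULLs any plain %p whose value looks like a kernel address and is headed for a user-visible buffer. A sketch of the kptr_restrict gate alone, simplified to a boolean decision; the real kernel consults CAP_SYSLOG at level 1, which is reduced to a flag argument here.

    #include <stdio.h>

    #define CONFIG_GRKERNSEC_HIDESYM 1     /* assumed enabled */

    #ifdef CONFIG_GRKERNSEC_HIDESYM
    static int kptr_restrict = 2;          /* censor %pK for everyone */
    #else
    static int kptr_restrict = 0;          /* print raw pointers */
    #endif

    static void print_pK(const void *ptr, int caller_has_cap_syslog)
    {
        if (kptr_restrict == 2 ||
            (kptr_restrict == 1 && !caller_has_cap_syslog))
            ptr = NULL;                    /* shown as a zeroed pointer */
        printf("%p\n", ptr);
    }

    int main(void)
    {
        int obj;
        print_pK(&obj, 1);                 /* censored: restrict == 2 */
        return 0;
    }
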
85779diff --git a/localversion-grsec b/localversion-grsec
85780new file mode 100644
85781index 0000000..7cd6065
85782--- /dev/null
85783+++ b/localversion-grsec
85784@@ -0,0 +1 @@
85785+-grsec
85786diff --git a/mm/Kconfig b/mm/Kconfig
85787index e742d06..c56fdd8 100644
85788--- a/mm/Kconfig
85789+++ b/mm/Kconfig
85790@@ -317,10 +317,10 @@ config KSM
85791 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
85792
85793 config DEFAULT_MMAP_MIN_ADDR
85794- int "Low address space to protect from user allocation"
85795+ int "Low address space to protect from user allocation"
85796 depends on MMU
85797- default 4096
85798- help
85799+ default 65536
85800+ help
85801 This is the portion of low virtual memory which should be protected
85802 from userspace allocation. Keeping a user from writing to low pages
85803 can help reduce the impact of kernel NULL pointer bugs.
85804@@ -351,7 +351,7 @@ config MEMORY_FAILURE
85805
85806 config HWPOISON_INJECT
85807 tristate "HWPoison pages injector"
85808- depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
85809+ depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
85810 select PROC_PAGE_MONITOR
85811
85812 config NOMMU_INITIAL_TRIM_EXCESS
85813diff --git a/mm/backing-dev.c b/mm/backing-dev.c
85814index 5025174..9d67dcd 100644
85815--- a/mm/backing-dev.c
85816+++ b/mm/backing-dev.c
85817@@ -12,7 +12,7 @@
85818 #include <linux/device.h>
85819 #include <trace/events/writeback.h>
85820
85821-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
85822+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
85823
85824 struct backing_dev_info default_backing_dev_info = {
85825 .name = "default",
85826@@ -515,7 +515,6 @@ EXPORT_SYMBOL(bdi_destroy);
85827 int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
85828 unsigned int cap)
85829 {
85830- char tmp[32];
85831 int err;
85832
85833 bdi->name = name;
85834@@ -524,8 +523,7 @@ int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
85835 if (err)
85836 return err;
85837
85838- sprintf(tmp, "%.28s%s", name, "-%d");
85839- err = bdi_register(bdi, NULL, tmp, atomic_long_inc_return(&bdi_seq));
85840+ err = bdi_register(bdi, NULL, "%.28s-%ld", name, atomic_long_inc_return_unchecked(&bdi_seq));
85841 if (err) {
85842 bdi_destroy(bdi);
85843 return err;
85844diff --git a/mm/filemap.c b/mm/filemap.c
85845index 7905fe7..f59502b 100644
85846--- a/mm/filemap.c
85847+++ b/mm/filemap.c
85848@@ -1766,7 +1766,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
85849 struct address_space *mapping = file->f_mapping;
85850
85851 if (!mapping->a_ops->readpage)
85852- return -ENOEXEC;
85853+ return -ENODEV;
85854 file_accessed(file);
85855 vma->vm_ops = &generic_file_vm_ops;
85856 return 0;
85857@@ -1948,7 +1948,7 @@ static size_t __iovec_copy_from_user_inatomic(char *vaddr,
85858
85859 while (bytes) {
85860 char __user *buf = iov->iov_base + base;
85861- int copy = min(bytes, iov->iov_len - base);
85862+ size_t copy = min(bytes, iov->iov_len - base);
85863
85864 base = 0;
85865 left = __copy_from_user_inatomic(vaddr, buf, copy);
85866@@ -1977,7 +1977,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
85867 BUG_ON(!in_atomic());
85868 kaddr = kmap_atomic(page);
85869 if (likely(i->nr_segs == 1)) {
85870- int left;
85871+ size_t left;
85872 char __user *buf = i->iov->iov_base + i->iov_offset;
85873 left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
85874 copied = bytes - left;
85875@@ -2005,7 +2005,7 @@ size_t iov_iter_copy_from_user(struct page *page,
85876
85877 kaddr = kmap(page);
85878 if (likely(i->nr_segs == 1)) {
85879- int left;
85880+ size_t left;
85881 char __user *buf = i->iov->iov_base + i->iov_offset;
85882 left = __copy_from_user(kaddr + offset, buf, bytes);
85883 copied = bytes - left;
85884@@ -2035,7 +2035,7 @@ void iov_iter_advance(struct iov_iter *i, size_t bytes)
85885 * zero-length segments (without overruning the iovec).
85886 */
85887 while (bytes || unlikely(i->count && !iov->iov_len)) {
85888- int copy;
85889+ size_t copy;
85890
85891 copy = min(bytes, iov->iov_len - base);
85892 BUG_ON(!i->count || i->count < copy);
85893@@ -2106,6 +2106,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
85894 *pos = i_size_read(inode);
85895
85896 if (limit != RLIM_INFINITY) {
85897+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
85898 if (*pos >= limit) {
85899 send_sig(SIGXFSZ, current, 0);
85900 return -EFBIG;
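
Besides the gr_learn_resource() hook, the filemap.c hunks widen several copy-length locals from int to size_t. __copy_from_user_inatomic() takes and returns unsigned long counts; funnelling them through an int invites truncation and sign surprises once a segment can exceed 2 GiB on 64-bit. A two-line demonstration of that truncation (the length is invented; the exact wrapped value is implementation-defined but negative on common LP64 ABIs):

    #include <stdio.h>

    int main(void)
    {
        size_t bytes = 3UL << 30;          /* 3 GiB segment length */
        int narrowed = (int)bytes;         /* wraps negative on LP64 */

        printf("size_t: %zu, int: %d\n", bytes, narrowed);
        return 0;
    }
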
85901diff --git a/mm/fremap.c b/mm/fremap.c
85902index 87da359..3f41cb1 100644
85903--- a/mm/fremap.c
85904+++ b/mm/fremap.c
85905@@ -158,6 +158,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
85906 retry:
85907 vma = find_vma(mm, start);
85908
85909+#ifdef CONFIG_PAX_SEGMEXEC
85910+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
85911+ goto out;
85912+#endif
85913+
85914 /*
85915 * Make sure the vma is shared, that it supports prefaulting,
85916 * and that the remapped range is valid and fully within
85917diff --git a/mm/highmem.c b/mm/highmem.c
85918index b32b70c..e512eb0 100644
85919--- a/mm/highmem.c
85920+++ b/mm/highmem.c
85921@@ -138,8 +138,9 @@ static void flush_all_zero_pkmaps(void)
85922 * So no dangers, even with speculative execution.
85923 */
85924 page = pte_page(pkmap_page_table[i]);
85925+ pax_open_kernel();
85926 pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
85927-
85928+ pax_close_kernel();
85929 set_page_address(page, NULL);
85930 need_flush = 1;
85931 }
85932@@ -198,9 +199,11 @@ start:
85933 }
85934 }
85935 vaddr = PKMAP_ADDR(last_pkmap_nr);
85936+
85937+ pax_open_kernel();
85938 set_pte_at(&init_mm, vaddr,
85939 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
85940-
85941+ pax_close_kernel();
85942 pkmap_count[last_pkmap_nr] = 1;
85943 set_page_address(page, (void *)vaddr);
85944
85945diff --git a/mm/hugetlb.c b/mm/hugetlb.c
85946index 7c5eb85..5c01c2f 100644
85947--- a/mm/hugetlb.c
85948+++ b/mm/hugetlb.c
85949@@ -2022,15 +2022,17 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
85950 struct hstate *h = &default_hstate;
85951 unsigned long tmp;
85952 int ret;
85953+ ctl_table_no_const hugetlb_table;
85954
85955 tmp = h->max_huge_pages;
85956
85957 if (write && h->order >= MAX_ORDER)
85958 return -EINVAL;
85959
85960- table->data = &tmp;
85961- table->maxlen = sizeof(unsigned long);
85962- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
85963+ hugetlb_table = *table;
85964+ hugetlb_table.data = &tmp;
85965+ hugetlb_table.maxlen = sizeof(unsigned long);
85966+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
85967 if (ret)
85968 goto out;
85969
85970@@ -2087,15 +2089,17 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
85971 struct hstate *h = &default_hstate;
85972 unsigned long tmp;
85973 int ret;
85974+ ctl_table_no_const hugetlb_table;
85975
85976 tmp = h->nr_overcommit_huge_pages;
85977
85978 if (write && h->order >= MAX_ORDER)
85979 return -EINVAL;
85980
85981- table->data = &tmp;
85982- table->maxlen = sizeof(unsigned long);
85983- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
85984+ hugetlb_table = *table;
85985+ hugetlb_table.data = &tmp;
85986+ hugetlb_table.maxlen = sizeof(unsigned long);
85987+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
85988 if (ret)
85989 goto out;
85990
85991@@ -2545,6 +2549,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
85992 return 1;
85993 }
85994
85995+#ifdef CONFIG_PAX_SEGMEXEC
85996+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
85997+{
85998+ struct mm_struct *mm = vma->vm_mm;
85999+ struct vm_area_struct *vma_m;
86000+ unsigned long address_m;
86001+ pte_t *ptep_m;
86002+
86003+ vma_m = pax_find_mirror_vma(vma);
86004+ if (!vma_m)
86005+ return;
86006+
86007+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
86008+ address_m = address + SEGMEXEC_TASK_SIZE;
86009+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
86010+ get_page(page_m);
86011+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
86012+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
86013+}
86014+#endif
86015+
86016 /*
86017 * Hugetlb_cow() should be called with page lock of the original hugepage held.
86018 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
86019@@ -2663,6 +2688,11 @@ retry_avoidcopy:
86020 make_huge_pte(vma, new_page, 1));
86021 page_remove_rmap(old_page);
86022 hugepage_add_new_anon_rmap(new_page, vma, address);
86023+
86024+#ifdef CONFIG_PAX_SEGMEXEC
86025+ pax_mirror_huge_pte(vma, address, new_page);
86026+#endif
86027+
86028 /* Make the old page be freed below */
86029 new_page = old_page;
86030 }
86031@@ -2821,6 +2851,10 @@ retry:
86032 && (vma->vm_flags & VM_SHARED)));
86033 set_huge_pte_at(mm, address, ptep, new_pte);
86034
86035+#ifdef CONFIG_PAX_SEGMEXEC
86036+ pax_mirror_huge_pte(vma, address, page);
86037+#endif
86038+
86039 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
86040 /* Optimization, do the COW without a second fault */
86041 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
86042@@ -2850,6 +2884,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
86043 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
86044 struct hstate *h = hstate_vma(vma);
86045
86046+#ifdef CONFIG_PAX_SEGMEXEC
86047+ struct vm_area_struct *vma_m;
86048+#endif
86049+
86050 address &= huge_page_mask(h);
86051
86052 ptep = huge_pte_offset(mm, address);
86053@@ -2863,6 +2901,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
86054 VM_FAULT_SET_HINDEX(hstate_index(h));
86055 }
86056
86057+#ifdef CONFIG_PAX_SEGMEXEC
86058+ vma_m = pax_find_mirror_vma(vma);
86059+ if (vma_m) {
86060+ unsigned long address_m;
86061+
86062+ if (vma->vm_start > vma_m->vm_start) {
86063+ address_m = address;
86064+ address -= SEGMEXEC_TASK_SIZE;
86065+ vma = vma_m;
86066+ h = hstate_vma(vma);
86067+ } else
86068+ address_m = address + SEGMEXEC_TASK_SIZE;
86069+
86070+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
86071+ return VM_FAULT_OOM;
86072+ address_m &= HPAGE_MASK;
86073+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
86074+ }
86075+#endif
86076+
86077 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
86078 if (!ptep)
86079 return VM_FAULT_OOM;
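The two sysctl hunks above are the constification idiom used throughout this patch: the shared sysctl tables become const, so a handler that needs a scratch ->data pointer copies the table to the stack and mutates only the copy. The rest of the hugetlb diff wires the SEGMEXEC mirror into huge-page faults (see the notes on mm/madvise.c and mm/memory.c below). A minimal sketch of the sysctl idiom, assuming the patch's ctl_table_no_const typedef (a struct ctl_table with the constification dropped); the handler name is illustrative:

#include <linux/sysctl.h>

static int example_ulong_handler(struct ctl_table *table, int write,
				 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	ctl_table_no_const tmp_table = *table;	/* writable stack copy */
	unsigned long tmp = 0;

	tmp_table.data = &tmp;			/* scratch value, not the const template */
	tmp_table.maxlen = sizeof(unsigned long);
	return proc_doulongvec_minmax(&tmp_table, write, buffer, lenp, ppos);
}

The original table stays read-only for its whole lifetime; only the stack copy is ever written.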
86080diff --git a/mm/internal.h b/mm/internal.h
86081index 8562de0..92b2073 100644
86082--- a/mm/internal.h
86083+++ b/mm/internal.h
86084@@ -100,6 +100,7 @@ extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
86085 * in mm/page_alloc.c
86086 */
86087 extern void __free_pages_bootmem(struct page *page, unsigned int order);
86088+extern void free_compound_page(struct page *page);
86089 extern void prep_compound_page(struct page *page, unsigned long order);
86090 #ifdef CONFIG_MEMORY_FAILURE
86091 extern bool is_free_buddy_page(struct page *page);
86092@@ -355,7 +356,7 @@ extern u32 hwpoison_filter_enable;
86093
86094 extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
86095 unsigned long, unsigned long,
86096- unsigned long, unsigned long);
86097+ unsigned long, unsigned long) __intentional_overflow(-1);
86098
86099 extern void set_pageblock_order(void);
86100 unsigned long reclaim_clean_pages_from_list(struct zone *zone,
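The __intentional_overflow(-1) marker on vm_mmap_pgoff is consumed by the size_overflow GCC plugin shipped with this patch; it exempts the function from the plugin's integer-overflow instrumentation. Without the plugin the marker should compile away to nothing, roughly as sketched below (an assumed shape; the real definition lives in the patch's compiler header changes):

/* assumed shape: the attribute only means something to the size_overflow plugin */
#ifdef SIZE_OVERFLOW_PLUGIN
#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
#else
#define __intentional_overflow(...)
#endif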
86101diff --git a/mm/kmemleak.c b/mm/kmemleak.c
86102index c8d7f31..2dbeffd 100644
86103--- a/mm/kmemleak.c
86104+++ b/mm/kmemleak.c
86105@@ -363,7 +363,7 @@ static void print_unreferenced(struct seq_file *seq,
86106
86107 for (i = 0; i < object->trace_len; i++) {
86108 void *ptr = (void *)object->trace[i];
86109- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
86110+ seq_printf(seq, " [<%pP>] %pA\n", ptr, ptr);
86111 }
86112 }
86113
86114@@ -1851,7 +1851,7 @@ static int __init kmemleak_late_init(void)
86115 return -ENOMEM;
86116 }
86117
86118- dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
86119+ dentry = debugfs_create_file("kmemleak", S_IRUSR, NULL, NULL,
86120 &kmemleak_fops);
86121 if (!dentry)
86122 pr_warning("Failed to create the debugfs kmemleak file\n");
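Two small hardenings to kmemleak: the backtrace switches to %pP/%pA, printk extensions added elsewhere in this patch so the HIDESYM pointer-censoring logic can handle this trusted report specially (exact semantics are in the patch's lib/vsprintf.c changes), and the debugfs node drops from world-readable S_IRUGO to root-only S_IRUSR, which is what keeps the pointer-laden output acceptable. A sketch of the narrowed registration, using the stock debugfs API:

#include <linux/debugfs.h>
#include <linux/fs.h>

static struct dentry *example_register(const struct file_operations *fops)
{
	/* 0400: the leak report exposes kernel addresses, so root only */
	return debugfs_create_file("kmemleak", S_IRUSR, NULL, NULL, fops);
}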
86123diff --git a/mm/maccess.c b/mm/maccess.c
86124index d53adf9..03a24bf 100644
86125--- a/mm/maccess.c
86126+++ b/mm/maccess.c
86127@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
86128 set_fs(KERNEL_DS);
86129 pagefault_disable();
86130 ret = __copy_from_user_inatomic(dst,
86131- (__force const void __user *)src, size);
86132+ (const void __force_user *)src, size);
86133 pagefault_enable();
86134 set_fs(old_fs);
86135
86136@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
86137
86138 set_fs(KERNEL_DS);
86139 pagefault_disable();
86140- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
86141+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
86142 pagefault_enable();
86143 set_fs(old_fs);
86144
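__force_user is this patch's sparse annotation (effectively a combined __force __user), so its stricter address-space checking still accepts the deliberate kernel-pointer cast inside the temporary KERNEL_DS window. The probe pattern these two hunks touch, sketched with the patch's annotation assumed available:

#include <linux/uaccess.h>

/* sketch: read kernel memory that may legitimately fault */
static long example_probe_read(void *dst, const void *src, size_t size)
{
	mm_segment_t old_fs = get_fs();
	long ret;

	set_fs(KERNEL_DS);	/* user-copy helpers now accept kernel addresses */
	pagefault_disable();	/* a fault returns nonzero instead of oopsing */
	ret = __copy_from_user_inatomic(dst, (const void __force_user *)src, size);
	pagefault_enable();
	set_fs(old_fs);

	return ret ? -EFAULT : 0;
}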
86145diff --git a/mm/madvise.c b/mm/madvise.c
86146index 7055883..aafb1ed 100644
86147--- a/mm/madvise.c
86148+++ b/mm/madvise.c
86149@@ -51,6 +51,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
86150 pgoff_t pgoff;
86151 unsigned long new_flags = vma->vm_flags;
86152
86153+#ifdef CONFIG_PAX_SEGMEXEC
86154+ struct vm_area_struct *vma_m;
86155+#endif
86156+
86157 switch (behavior) {
86158 case MADV_NORMAL:
86159 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
86160@@ -126,6 +130,13 @@ success:
86161 /*
86162 * vm_flags is protected by the mmap_sem held in write mode.
86163 */
86164+
86165+#ifdef CONFIG_PAX_SEGMEXEC
86166+ vma_m = pax_find_mirror_vma(vma);
86167+ if (vma_m)
86168+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
86169+#endif
86170+
86171 vma->vm_flags = new_flags;
86172
86173 out:
86174@@ -274,6 +285,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
86175 struct vm_area_struct ** prev,
86176 unsigned long start, unsigned long end)
86177 {
86178+
86179+#ifdef CONFIG_PAX_SEGMEXEC
86180+ struct vm_area_struct *vma_m;
86181+#endif
86182+
86183 *prev = vma;
86184 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
86185 return -EINVAL;
86186@@ -286,6 +302,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
86187 zap_page_range(vma, start, end - start, &details);
86188 } else
86189 zap_page_range(vma, start, end - start, NULL);
86190+
86191+#ifdef CONFIG_PAX_SEGMEXEC
86192+ vma_m = pax_find_mirror_vma(vma);
86193+ if (vma_m) {
86194+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
86195+ struct zap_details details = {
86196+ .nonlinear_vma = vma_m,
86197+ .last_index = ULONG_MAX,
86198+ };
86199+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
86200+ } else
86201+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
86202+ }
86203+#endif
86204+
86205 return 0;
86206 }
86207
86208@@ -485,6 +516,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
86209 if (end < start)
86210 return error;
86211
86212+#ifdef CONFIG_PAX_SEGMEXEC
86213+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
86214+ if (end > SEGMEXEC_TASK_SIZE)
86215+ return error;
86216+ } else
86217+#endif
86218+
86219+ if (end > TASK_SIZE)
86220+ return error;
86221+
86222 error = 0;
86223 if (end == start)
86224 return error;
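These madvise changes follow from the SEGMEXEC design used throughout the patch: the i386 address space is split in half and every executable mapping gets a mirror vma at +SEGMEXEC_TASK_SIZE, so flag changes and MADV_DONTNEED zaps must be replayed on the mirror, and user-supplied ranges are bounded by the halved task size (or TASK_SIZE otherwise). The address relationship, sketched with the i386 value assumed; other layouts differ:

/* sketch: SEGMEXEC splits the 3GB i386 user space into two 1.5GB halves */
#define EXAMPLE_SEGMEXEC_TASK_SIZE (0xC0000000UL / 2)

static unsigned long example_mirror_of(unsigned long addr)
{
	/* a data mapping at addr has its executable twin 1.5GB higher */
	return addr + EXAMPLE_SEGMEXEC_TASK_SIZE;
}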
86225diff --git a/mm/memory-failure.c b/mm/memory-failure.c
86226index ceb0c7f..b2b8e94 100644
86227--- a/mm/memory-failure.c
86228+++ b/mm/memory-failure.c
86229@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
86230
86231 int sysctl_memory_failure_recovery __read_mostly = 1;
86232
86233-atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
86234+atomic_long_unchecked_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
86235
86236 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
86237
86238@@ -202,7 +202,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
86239 pfn, t->comm, t->pid);
86240 si.si_signo = SIGBUS;
86241 si.si_errno = 0;
86242- si.si_addr = (void *)addr;
86243+ si.si_addr = (void __user *)addr;
86244 #ifdef __ARCH_SI_TRAPNO
86245 si.si_trapno = trapno;
86246 #endif
86247@@ -760,7 +760,7 @@ static struct page_state {
86248 unsigned long res;
86249 char *msg;
86250 int (*action)(struct page *p, unsigned long pfn);
86251-} error_states[] = {
86252+} __do_const error_states[] = {
86253 { reserved, reserved, "reserved kernel", me_kernel },
86254 /*
86255 * free pages are specially detected outside this table:
86256@@ -1051,7 +1051,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
86257 nr_pages = 1 << compound_order(hpage);
86258 else /* normal page or thp */
86259 nr_pages = 1;
86260- atomic_long_add(nr_pages, &num_poisoned_pages);
86261+ atomic_long_add_unchecked(nr_pages, &num_poisoned_pages);
86262
86263 /*
86264 * We need/can do nothing about count=0 pages.
86265@@ -1081,7 +1081,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
86266 if (!PageHWPoison(hpage)
86267 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
86268 || (p != hpage && TestSetPageHWPoison(hpage))) {
86269- atomic_long_sub(nr_pages, &num_poisoned_pages);
86270+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
86271 return 0;
86272 }
86273 set_page_hwpoison_huge_page(hpage);
86274@@ -1148,7 +1148,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
86275 }
86276 if (hwpoison_filter(p)) {
86277 if (TestClearPageHWPoison(p))
86278- atomic_long_sub(nr_pages, &num_poisoned_pages);
86279+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
86280 unlock_page(hpage);
86281 put_page(hpage);
86282 return 0;
86283@@ -1350,7 +1350,7 @@ int unpoison_memory(unsigned long pfn)
86284 return 0;
86285 }
86286 if (TestClearPageHWPoison(p))
86287- atomic_long_sub(nr_pages, &num_poisoned_pages);
86288+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
86289 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
86290 return 0;
86291 }
86292@@ -1364,7 +1364,7 @@ int unpoison_memory(unsigned long pfn)
86293 */
86294 if (TestClearPageHWPoison(page)) {
86295 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
86296- atomic_long_sub(nr_pages, &num_poisoned_pages);
86297+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
86298 freeit = 1;
86299 if (PageHuge(page))
86300 clear_page_hwpoison_huge_page(page);
86301@@ -1491,7 +1491,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
86302 } else {
86303 set_page_hwpoison_huge_page(hpage);
86304 dequeue_hwpoisoned_huge_page(hpage);
86305- atomic_long_add(1 << compound_trans_order(hpage),
86306+ atomic_long_add_unchecked(1 << compound_trans_order(hpage),
86307 &num_poisoned_pages);
86308 }
86309 /* keep elevated page count for bad page */
86310@@ -1552,11 +1552,11 @@ int soft_offline_page(struct page *page, int flags)
86311 if (PageHuge(page)) {
86312 set_page_hwpoison_huge_page(hpage);
86313 dequeue_hwpoisoned_huge_page(hpage);
86314- atomic_long_add(1 << compound_trans_order(hpage),
86315+ atomic_long_add_unchecked(1 << compound_trans_order(hpage),
86316 &num_poisoned_pages);
86317 } else {
86318 SetPageHWPoison(page);
86319- atomic_long_inc(&num_poisoned_pages);
86320+ atomic_long_inc_unchecked(&num_poisoned_pages);
86321 }
86322 }
86323 /* keep elevated page count for bad page */
86324@@ -1596,7 +1596,7 @@ static int __soft_offline_page(struct page *page, int flags)
86325 put_page(page);
86326 pr_info("soft_offline: %#lx: invalidated\n", pfn);
86327 SetPageHWPoison(page);
86328- atomic_long_inc(&num_poisoned_pages);
86329+ atomic_long_inc_unchecked(&num_poisoned_pages);
86330 return 0;
86331 }
86332
86333@@ -1626,7 +1626,7 @@ static int __soft_offline_page(struct page *page, int flags)
86334 ret = -EIO;
86335 } else {
86336 SetPageHWPoison(page);
86337- atomic_long_inc(&num_poisoned_pages);
86338+ atomic_long_inc_unchecked(&num_poisoned_pages);
86339 }
86340 } else {
86341 pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
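num_poisoned_pages is a pure statistic, so it moves to atomic_long_unchecked_t: under PAX_REFCOUNT, ordinary atomics trap on overflow to stop reference-count exploits, and counters where wrap-around is harmless use the _unchecked variants instead. When PAX_REFCOUNT is disabled the unchecked type is expected to alias the plain one, roughly as follows (assumed fallback shape):

#include <linux/atomic.h>

#ifndef CONFIG_PAX_REFCOUNT
typedef atomic_long_t atomic_long_unchecked_t;
#define atomic_long_inc_unchecked(v)	atomic_long_inc(v)
#define atomic_long_add_unchecked(i, v)	atomic_long_add(i, v)
#define atomic_long_sub_unchecked(i, v)	atomic_long_sub(i, v)
#endif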
86342diff --git a/mm/memory.c b/mm/memory.c
86343index 5a35443..7c0340f 100644
86344--- a/mm/memory.c
86345+++ b/mm/memory.c
86346@@ -428,6 +428,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
86347 free_pte_range(tlb, pmd, addr);
86348 } while (pmd++, addr = next, addr != end);
86349
86350+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
86351 start &= PUD_MASK;
86352 if (start < floor)
86353 return;
86354@@ -442,6 +443,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
86355 pmd = pmd_offset(pud, start);
86356 pud_clear(pud);
86357 pmd_free_tlb(tlb, pmd, start);
86358+#endif
86359+
86360 }
86361
86362 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
86363@@ -461,6 +464,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
86364 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
86365 } while (pud++, addr = next, addr != end);
86366
86367+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
86368 start &= PGDIR_MASK;
86369 if (start < floor)
86370 return;
86371@@ -475,6 +479,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
86372 pud = pud_offset(pgd, start);
86373 pgd_clear(pgd);
86374 pud_free_tlb(tlb, pud, start);
86375+#endif
86376+
86377 }
86378
86379 /*
86380@@ -1644,12 +1650,6 @@ no_page_table:
86381 return page;
86382 }
86383
86384-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
86385-{
86386- return stack_guard_page_start(vma, addr) ||
86387- stack_guard_page_end(vma, addr+PAGE_SIZE);
86388-}
86389-
86390 /**
86391 * __get_user_pages() - pin user pages in memory
86392 * @tsk: task_struct of target task
86393@@ -1736,10 +1736,10 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
86394
86395 i = 0;
86396
86397- do {
86398+ while (nr_pages) {
86399 struct vm_area_struct *vma;
86400
86401- vma = find_extend_vma(mm, start);
86402+ vma = find_vma(mm, start);
86403 if (!vma && in_gate_area(mm, start)) {
86404 unsigned long pg = start & PAGE_MASK;
86405 pgd_t *pgd;
86406@@ -1788,7 +1788,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
86407 goto next_page;
86408 }
86409
86410- if (!vma ||
86411+ if (!vma || start < vma->vm_start ||
86412 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
86413 !(vm_flags & vma->vm_flags))
86414 return i ? : -EFAULT;
86415@@ -1817,11 +1817,6 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
86416 int ret;
86417 unsigned int fault_flags = 0;
86418
86419- /* For mlock, just skip the stack guard page. */
86420- if (foll_flags & FOLL_MLOCK) {
86421- if (stack_guard_page(vma, start))
86422- goto next_page;
86423- }
86424 if (foll_flags & FOLL_WRITE)
86425 fault_flags |= FAULT_FLAG_WRITE;
86426 if (nonblocking)
86427@@ -1901,7 +1896,7 @@ next_page:
86428 start += page_increm * PAGE_SIZE;
86429 nr_pages -= page_increm;
86430 } while (nr_pages && start < vma->vm_end);
86431- } while (nr_pages);
86432+ }
86433 return i;
86434 }
86435 EXPORT_SYMBOL(__get_user_pages);
86436@@ -2108,6 +2103,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
86437 page_add_file_rmap(page);
86438 set_pte_at(mm, addr, pte, mk_pte(page, prot));
86439
86440+#ifdef CONFIG_PAX_SEGMEXEC
86441+ pax_mirror_file_pte(vma, addr, page, ptl);
86442+#endif
86443+
86444 retval = 0;
86445 pte_unmap_unlock(pte, ptl);
86446 return retval;
86447@@ -2152,9 +2151,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
86448 if (!page_count(page))
86449 return -EINVAL;
86450 if (!(vma->vm_flags & VM_MIXEDMAP)) {
86451+
86452+#ifdef CONFIG_PAX_SEGMEXEC
86453+ struct vm_area_struct *vma_m;
86454+#endif
86455+
86456 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
86457 BUG_ON(vma->vm_flags & VM_PFNMAP);
86458 vma->vm_flags |= VM_MIXEDMAP;
86459+
86460+#ifdef CONFIG_PAX_SEGMEXEC
86461+ vma_m = pax_find_mirror_vma(vma);
86462+ if (vma_m)
86463+ vma_m->vm_flags |= VM_MIXEDMAP;
86464+#endif
86465+
86466 }
86467 return insert_page(vma, addr, page, vma->vm_page_prot);
86468 }
86469@@ -2237,6 +2248,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
86470 unsigned long pfn)
86471 {
86472 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
86473+ BUG_ON(vma->vm_mirror);
86474
86475 if (addr < vma->vm_start || addr >= vma->vm_end)
86476 return -EFAULT;
86477@@ -2484,7 +2496,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
86478
86479 BUG_ON(pud_huge(*pud));
86480
86481- pmd = pmd_alloc(mm, pud, addr);
86482+ pmd = (mm == &init_mm) ?
86483+ pmd_alloc_kernel(mm, pud, addr) :
86484+ pmd_alloc(mm, pud, addr);
86485 if (!pmd)
86486 return -ENOMEM;
86487 do {
86488@@ -2504,7 +2518,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
86489 unsigned long next;
86490 int err;
86491
86492- pud = pud_alloc(mm, pgd, addr);
86493+ pud = (mm == &init_mm) ?
86494+ pud_alloc_kernel(mm, pgd, addr) :
86495+ pud_alloc(mm, pgd, addr);
86496 if (!pud)
86497 return -ENOMEM;
86498 do {
86499@@ -2592,6 +2608,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
86500 copy_user_highpage(dst, src, va, vma);
86501 }
86502
86503+#ifdef CONFIG_PAX_SEGMEXEC
86504+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
86505+{
86506+ struct mm_struct *mm = vma->vm_mm;
86507+ spinlock_t *ptl;
86508+ pte_t *pte, entry;
86509+
86510+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
86511+ entry = *pte;
86512+ if (!pte_present(entry)) {
86513+ if (!pte_none(entry)) {
86514+ BUG_ON(pte_file(entry));
86515+ free_swap_and_cache(pte_to_swp_entry(entry));
86516+ pte_clear_not_present_full(mm, address, pte, 0);
86517+ }
86518+ } else {
86519+ struct page *page;
86520+
86521+ flush_cache_page(vma, address, pte_pfn(entry));
86522+ entry = ptep_clear_flush(vma, address, pte);
86523+ BUG_ON(pte_dirty(entry));
86524+ page = vm_normal_page(vma, address, entry);
86525+ if (page) {
86526+ update_hiwater_rss(mm);
86527+ if (PageAnon(page))
86528+ dec_mm_counter_fast(mm, MM_ANONPAGES);
86529+ else
86530+ dec_mm_counter_fast(mm, MM_FILEPAGES);
86531+ page_remove_rmap(page);
86532+ page_cache_release(page);
86533+ }
86534+ }
86535+ pte_unmap_unlock(pte, ptl);
86536+}
86537+
86538+/* PaX: if the vma is mirrored, synchronize the mirror's PTE.
86539+ *
86540+ * The ptl of the lower mapped page is held on entry and is not released on exit
86541+ * or inside, to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc.).
86542+ */
86543+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
86544+{
86545+ struct mm_struct *mm = vma->vm_mm;
86546+ unsigned long address_m;
86547+ spinlock_t *ptl_m;
86548+ struct vm_area_struct *vma_m;
86549+ pmd_t *pmd_m;
86550+ pte_t *pte_m, entry_m;
86551+
86552+ BUG_ON(!page_m || !PageAnon(page_m));
86553+
86554+ vma_m = pax_find_mirror_vma(vma);
86555+ if (!vma_m)
86556+ return;
86557+
86558+ BUG_ON(!PageLocked(page_m));
86559+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
86560+ address_m = address + SEGMEXEC_TASK_SIZE;
86561+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
86562+ pte_m = pte_offset_map(pmd_m, address_m);
86563+ ptl_m = pte_lockptr(mm, pmd_m);
86564+ if (ptl != ptl_m) {
86565+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
86566+ if (!pte_none(*pte_m))
86567+ goto out;
86568+ }
86569+
86570+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
86571+ page_cache_get(page_m);
86572+ page_add_anon_rmap(page_m, vma_m, address_m);
86573+ inc_mm_counter_fast(mm, MM_ANONPAGES);
86574+ set_pte_at(mm, address_m, pte_m, entry_m);
86575+ update_mmu_cache(vma_m, address_m, pte_m);
86576+out:
86577+ if (ptl != ptl_m)
86578+ spin_unlock(ptl_m);
86579+ pte_unmap(pte_m);
86580+ unlock_page(page_m);
86581+}
86582+
86583+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
86584+{
86585+ struct mm_struct *mm = vma->vm_mm;
86586+ unsigned long address_m;
86587+ spinlock_t *ptl_m;
86588+ struct vm_area_struct *vma_m;
86589+ pmd_t *pmd_m;
86590+ pte_t *pte_m, entry_m;
86591+
86592+ BUG_ON(!page_m || PageAnon(page_m));
86593+
86594+ vma_m = pax_find_mirror_vma(vma);
86595+ if (!vma_m)
86596+ return;
86597+
86598+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
86599+ address_m = address + SEGMEXEC_TASK_SIZE;
86600+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
86601+ pte_m = pte_offset_map(pmd_m, address_m);
86602+ ptl_m = pte_lockptr(mm, pmd_m);
86603+ if (ptl != ptl_m) {
86604+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
86605+ if (!pte_none(*pte_m))
86606+ goto out;
86607+ }
86608+
86609+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
86610+ page_cache_get(page_m);
86611+ page_add_file_rmap(page_m);
86612+ inc_mm_counter_fast(mm, MM_FILEPAGES);
86613+ set_pte_at(mm, address_m, pte_m, entry_m);
86614+ update_mmu_cache(vma_m, address_m, pte_m);
86615+out:
86616+ if (ptl != ptl_m)
86617+ spin_unlock(ptl_m);
86618+ pte_unmap(pte_m);
86619+}
86620+
86621+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
86622+{
86623+ struct mm_struct *mm = vma->vm_mm;
86624+ unsigned long address_m;
86625+ spinlock_t *ptl_m;
86626+ struct vm_area_struct *vma_m;
86627+ pmd_t *pmd_m;
86628+ pte_t *pte_m, entry_m;
86629+
86630+ vma_m = pax_find_mirror_vma(vma);
86631+ if (!vma_m)
86632+ return;
86633+
86634+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
86635+ address_m = address + SEGMEXEC_TASK_SIZE;
86636+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
86637+ pte_m = pte_offset_map(pmd_m, address_m);
86638+ ptl_m = pte_lockptr(mm, pmd_m);
86639+ if (ptl != ptl_m) {
86640+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
86641+ if (!pte_none(*pte_m))
86642+ goto out;
86643+ }
86644+
86645+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
86646+ set_pte_at(mm, address_m, pte_m, entry_m);
86647+out:
86648+ if (ptl != ptl_m)
86649+ spin_unlock(ptl_m);
86650+ pte_unmap(pte_m);
86651+}
86652+
86653+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
86654+{
86655+ struct page *page_m;
86656+ pte_t entry;
86657+
86658+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
86659+ goto out;
86660+
86661+ entry = *pte;
86662+ page_m = vm_normal_page(vma, address, entry);
86663+ if (!page_m)
86664+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
86665+ else if (PageAnon(page_m)) {
86666+ if (pax_find_mirror_vma(vma)) {
86667+ pte_unmap_unlock(pte, ptl);
86668+ lock_page(page_m);
86669+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
86670+ if (pte_same(entry, *pte))
86671+ pax_mirror_anon_pte(vma, address, page_m, ptl);
86672+ else
86673+ unlock_page(page_m);
86674+ }
86675+ } else
86676+ pax_mirror_file_pte(vma, address, page_m, ptl);
86677+
86678+out:
86679+ pte_unmap_unlock(pte, ptl);
86680+}
86681+#endif
86682+
86683 /*
86684 * This routine handles present pages, when users try to write
86685 * to a shared page. It is done by copying the page to a new address
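The helpers added above keep the SEGMEXEC mirror's page tables in sync: pax_mirror_anon_pte and pax_mirror_file_pte install a second PTE for the same page at address + SEGMEXEC_TASK_SIZE, pax_mirror_pfn_pte handles raw pfn mappings, and pax_unmap_mirror_pte tears a mirror entry down. All three installers share one locking skeleton, condensed here from the code above; since the mirror PTE may live under a different page-table lock, it is taken nested to stay deadlock-free:

/* sketch: nested-lock skeleton shared by the pax_mirror_*_pte helpers */
#include <linux/mm.h>

static void example_mirror_skeleton(struct mm_struct *mm, unsigned long address_m,
				    pmd_t *pmd_m, spinlock_t *ptl, pte_t entry_m)
{
	pte_t *pte_m = pte_offset_map(pmd_m, address_m);
	spinlock_t *ptl_m = pte_lockptr(mm, pmd_m);

	if (ptl != ptl_m) {
		/* mirror PTE lives under a different lock: nest it */
		spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
		if (!pte_none(*pte_m))
			goto out;	/* already populated, nothing to do */
	}
	set_pte_at(mm, address_m, pte_m, entry_m);
out:
	if (ptl != ptl_m)
		spin_unlock(ptl_m);
	pte_unmap(pte_m);
}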
86686@@ -2808,6 +3004,12 @@ gotten:
86687 */
86688 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
86689 if (likely(pte_same(*page_table, orig_pte))) {
86690+
86691+#ifdef CONFIG_PAX_SEGMEXEC
86692+ if (pax_find_mirror_vma(vma))
86693+ BUG_ON(!trylock_page(new_page));
86694+#endif
86695+
86696 if (old_page) {
86697 if (!PageAnon(old_page)) {
86698 dec_mm_counter_fast(mm, MM_FILEPAGES);
86699@@ -2859,6 +3061,10 @@ gotten:
86700 page_remove_rmap(old_page);
86701 }
86702
86703+#ifdef CONFIG_PAX_SEGMEXEC
86704+ pax_mirror_anon_pte(vma, address, new_page, ptl);
86705+#endif
86706+
86707 /* Free the old page.. */
86708 new_page = old_page;
86709 ret |= VM_FAULT_WRITE;
86710@@ -3134,6 +3340,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
86711 swap_free(entry);
86712 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
86713 try_to_free_swap(page);
86714+
86715+#ifdef CONFIG_PAX_SEGMEXEC
86716+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
86717+#endif
86718+
86719 unlock_page(page);
86720 if (page != swapcache) {
86721 /*
86722@@ -3157,6 +3368,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
86723
86724 /* No need to invalidate - it was non-present before */
86725 update_mmu_cache(vma, address, page_table);
86726+
86727+#ifdef CONFIG_PAX_SEGMEXEC
86728+ pax_mirror_anon_pte(vma, address, page, ptl);
86729+#endif
86730+
86731 unlock:
86732 pte_unmap_unlock(page_table, ptl);
86733 out:
86734@@ -3176,40 +3392,6 @@ out_release:
86735 }
86736
86737 /*
86738- * This is like a special single-page "expand_{down|up}wards()",
86739- * except we must first make sure that 'address{-|+}PAGE_SIZE'
86740- * doesn't hit another vma.
86741- */
86742-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
86743-{
86744- address &= PAGE_MASK;
86745- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
86746- struct vm_area_struct *prev = vma->vm_prev;
86747-
86748- /*
86749- * Is there a mapping abutting this one below?
86750- *
86751- * That's only ok if it's the same stack mapping
86752- * that has gotten split..
86753- */
86754- if (prev && prev->vm_end == address)
86755- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
86756-
86757- expand_downwards(vma, address - PAGE_SIZE);
86758- }
86759- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
86760- struct vm_area_struct *next = vma->vm_next;
86761-
86762- /* As VM_GROWSDOWN but s/below/above/ */
86763- if (next && next->vm_start == address + PAGE_SIZE)
86764- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
86765-
86766- expand_upwards(vma, address + PAGE_SIZE);
86767- }
86768- return 0;
86769-}
86770-
86771-/*
86772 * We enter with non-exclusive mmap_sem (to exclude vma changes,
86773 * but allow concurrent faults), and pte mapped but not yet locked.
86774 * We return with mmap_sem still held, but pte unmapped and unlocked.
86775@@ -3218,27 +3400,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
86776 unsigned long address, pte_t *page_table, pmd_t *pmd,
86777 unsigned int flags)
86778 {
86779- struct page *page;
86780+ struct page *page = NULL;
86781 spinlock_t *ptl;
86782 pte_t entry;
86783
86784- pte_unmap(page_table);
86785-
86786- /* Check if we need to add a guard page to the stack */
86787- if (check_stack_guard_page(vma, address) < 0)
86788- return VM_FAULT_SIGBUS;
86789-
86790- /* Use the zero-page for reads */
86791 if (!(flags & FAULT_FLAG_WRITE)) {
86792 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
86793 vma->vm_page_prot));
86794- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
86795+ ptl = pte_lockptr(mm, pmd);
86796+ spin_lock(ptl);
86797 if (!pte_none(*page_table))
86798 goto unlock;
86799 goto setpte;
86800 }
86801
86802 /* Allocate our own private page. */
86803+ pte_unmap(page_table);
86804+
86805 if (unlikely(anon_vma_prepare(vma)))
86806 goto oom;
86807 page = alloc_zeroed_user_highpage_movable(vma, address);
86808@@ -3262,6 +3440,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
86809 if (!pte_none(*page_table))
86810 goto release;
86811
86812+#ifdef CONFIG_PAX_SEGMEXEC
86813+ if (pax_find_mirror_vma(vma))
86814+ BUG_ON(!trylock_page(page));
86815+#endif
86816+
86817 inc_mm_counter_fast(mm, MM_ANONPAGES);
86818 page_add_new_anon_rmap(page, vma, address);
86819 setpte:
86820@@ -3269,6 +3452,12 @@ setpte:
86821
86822 /* No need to invalidate - it was non-present before */
86823 update_mmu_cache(vma, address, page_table);
86824+
86825+#ifdef CONFIG_PAX_SEGMEXEC
86826+ if (page)
86827+ pax_mirror_anon_pte(vma, address, page, ptl);
86828+#endif
86829+
86830 unlock:
86831 pte_unmap_unlock(page_table, ptl);
86832 return 0;
86833@@ -3412,6 +3601,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
86834 */
86835 /* Only go through if we didn't race with anybody else... */
86836 if (likely(pte_same(*page_table, orig_pte))) {
86837+
86838+#ifdef CONFIG_PAX_SEGMEXEC
86839+ if (anon && pax_find_mirror_vma(vma))
86840+ BUG_ON(!trylock_page(page));
86841+#endif
86842+
86843 flush_icache_page(vma, page);
86844 entry = mk_pte(page, vma->vm_page_prot);
86845 if (flags & FAULT_FLAG_WRITE)
86846@@ -3431,6 +3626,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
86847
86848 /* no need to invalidate: a not-present page won't be cached */
86849 update_mmu_cache(vma, address, page_table);
86850+
86851+#ifdef CONFIG_PAX_SEGMEXEC
86852+ if (anon)
86853+ pax_mirror_anon_pte(vma, address, page, ptl);
86854+ else
86855+ pax_mirror_file_pte(vma, address, page, ptl);
86856+#endif
86857+
86858 } else {
86859 if (cow_page)
86860 mem_cgroup_uncharge_page(cow_page);
86861@@ -3752,6 +3955,12 @@ int handle_pte_fault(struct mm_struct *mm,
86862 if (flags & FAULT_FLAG_WRITE)
86863 flush_tlb_fix_spurious_fault(vma, address);
86864 }
86865+
86866+#ifdef CONFIG_PAX_SEGMEXEC
86867+ pax_mirror_pte(vma, address, pte, pmd, ptl);
86868+ return 0;
86869+#endif
86870+
86871 unlock:
86872 pte_unmap_unlock(pte, ptl);
86873 return 0;
86874@@ -3768,6 +3977,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
86875 pmd_t *pmd;
86876 pte_t *pte;
86877
86878+#ifdef CONFIG_PAX_SEGMEXEC
86879+ struct vm_area_struct *vma_m;
86880+#endif
86881+
86882 __set_current_state(TASK_RUNNING);
86883
86884 count_vm_event(PGFAULT);
86885@@ -3779,6 +3992,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
86886 if (unlikely(is_vm_hugetlb_page(vma)))
86887 return hugetlb_fault(mm, vma, address, flags);
86888
86889+#ifdef CONFIG_PAX_SEGMEXEC
86890+ vma_m = pax_find_mirror_vma(vma);
86891+ if (vma_m) {
86892+ unsigned long address_m;
86893+ pgd_t *pgd_m;
86894+ pud_t *pud_m;
86895+ pmd_t *pmd_m;
86896+
86897+ if (vma->vm_start > vma_m->vm_start) {
86898+ address_m = address;
86899+ address -= SEGMEXEC_TASK_SIZE;
86900+ vma = vma_m;
86901+ } else
86902+ address_m = address + SEGMEXEC_TASK_SIZE;
86903+
86904+ pgd_m = pgd_offset(mm, address_m);
86905+ pud_m = pud_alloc(mm, pgd_m, address_m);
86906+ if (!pud_m)
86907+ return VM_FAULT_OOM;
86908+ pmd_m = pmd_alloc(mm, pud_m, address_m);
86909+ if (!pmd_m)
86910+ return VM_FAULT_OOM;
86911+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
86912+ return VM_FAULT_OOM;
86913+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
86914+ }
86915+#endif
86916+
86917 retry:
86918 pgd = pgd_offset(mm, address);
86919 pud = pud_alloc(mm, pgd, address);
86920@@ -3877,6 +4118,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
86921 spin_unlock(&mm->page_table_lock);
86922 return 0;
86923 }
86924+
86925+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
86926+{
86927+ pud_t *new = pud_alloc_one(mm, address);
86928+ if (!new)
86929+ return -ENOMEM;
86930+
86931+ smp_wmb(); /* See comment in __pte_alloc */
86932+
86933+ spin_lock(&mm->page_table_lock);
86934+ if (pgd_present(*pgd)) /* Another has populated it */
86935+ pud_free(mm, new);
86936+ else
86937+ pgd_populate_kernel(mm, pgd, new);
86938+ spin_unlock(&mm->page_table_lock);
86939+ return 0;
86940+}
86941 #endif /* __PAGETABLE_PUD_FOLDED */
86942
86943 #ifndef __PAGETABLE_PMD_FOLDED
86944@@ -3907,6 +4165,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
86945 spin_unlock(&mm->page_table_lock);
86946 return 0;
86947 }
86948+
86949+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
86950+{
86951+ pmd_t *new = pmd_alloc_one(mm, address);
86952+ if (!new)
86953+ return -ENOMEM;
86954+
86955+ smp_wmb(); /* See comment in __pte_alloc */
86956+
86957+ spin_lock(&mm->page_table_lock);
86958+#ifndef __ARCH_HAS_4LEVEL_HACK
86959+ if (pud_present(*pud)) /* Another has populated it */
86960+ pmd_free(mm, new);
86961+ else
86962+ pud_populate_kernel(mm, pud, new);
86963+#else
86964+ if (pgd_present(*pud)) /* Another has populated it */
86965+ pmd_free(mm, new);
86966+ else
86967+ pgd_populate_kernel(mm, pud, new);
86968+#endif /* __ARCH_HAS_4LEVEL_HACK */
86969+ spin_unlock(&mm->page_table_lock);
86970+ return 0;
86971+}
86972 #endif /* __PAGETABLE_PMD_FOLDED */
86973
86974 #if !defined(__HAVE_ARCH_GATE_AREA)
86975@@ -3920,7 +4202,7 @@ static int __init gate_vma_init(void)
86976 gate_vma.vm_start = FIXADDR_USER_START;
86977 gate_vma.vm_end = FIXADDR_USER_END;
86978 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
86979- gate_vma.vm_page_prot = __P101;
86980+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
86981
86982 return 0;
86983 }
86984@@ -4054,8 +4336,8 @@ out:
86985 return ret;
86986 }
86987
86988-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
86989- void *buf, int len, int write)
86990+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
86991+ void *buf, size_t len, int write)
86992 {
86993 resource_size_t phys_addr;
86994 unsigned long prot = 0;
86995@@ -4080,8 +4362,8 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
86996 * Access another process' address space as given in mm. If non-NULL, use the
86997 * given task for page fault accounting.
86998 */
86999-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
87000- unsigned long addr, void *buf, int len, int write)
87001+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
87002+ unsigned long addr, void *buf, size_t len, int write)
87003 {
87004 struct vm_area_struct *vma;
87005 void *old_buf = buf;
87006@@ -4089,7 +4371,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
87007 down_read(&mm->mmap_sem);
87008 /* ignore errors, just check how much was successfully transferred */
87009 while (len) {
87010- int bytes, ret, offset;
87011+ ssize_t bytes, ret, offset;
87012 void *maddr;
87013 struct page *page = NULL;
87014
87015@@ -4148,8 +4430,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
87016 *
87017 * The caller must hold a reference on @mm.
87018 */
87019-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
87020- void *buf, int len, int write)
87021+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
87022+ void *buf, size_t len, int write)
87023 {
87024 return __access_remote_vm(NULL, mm, addr, buf, len, write);
87025 }
87026@@ -4159,11 +4441,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
87027 * Source/target buffer must be kernel space,
87028 * Do not walk the page table directly, use get_user_pages
87029 */
87030-int access_process_vm(struct task_struct *tsk, unsigned long addr,
87031- void *buf, int len, int write)
87032+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr,
87033+ void *buf, size_t len, int write)
87034 {
87035 struct mm_struct *mm;
87036- int ret;
87037+ ssize_t ret;
87038
87039 mm = get_task_mm(tsk);
87040 if (!mm)
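The remaining memory.c hunks do three things: the stack guard-page machinery (check_stack_guard_page, the FOLL_MLOCK skip, find_extend_vma in __get_user_pages) is removed in favor of the sysctl_heap_stack_gap enforcement added in mm/mmap.c below; gate_vma derives its protection through vm_get_page_prot() instead of the raw __P101 entry so the PaX overrides apply to it too; and the access_process_vm/access_remote_vm family is widened from int to ssize_t/size_t lengths so a large request can neither truncate nor go negative. A small runnable illustration of the truncation hazard the widening removes:

#include <stdio.h>
#include <sys/types.h>

int main(void)
{
	size_t len = 0x100000000UL;		/* 4GB request, assumes a 64-bit host */
	int as_int = (int)len;			/* truncates to 0 */
	ssize_t as_ssize = (ssize_t)len;	/* value preserved */

	printf("int: %d  ssize_t: %zd\n", as_int, as_ssize);
	return 0;
}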
87041diff --git a/mm/mempolicy.c b/mm/mempolicy.c
87042index 4baf12e..5497066 100644
87043--- a/mm/mempolicy.c
87044+++ b/mm/mempolicy.c
87045@@ -708,6 +708,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
87046 unsigned long vmstart;
87047 unsigned long vmend;
87048
87049+#ifdef CONFIG_PAX_SEGMEXEC
87050+ struct vm_area_struct *vma_m;
87051+#endif
87052+
87053 vma = find_vma(mm, start);
87054 if (!vma || vma->vm_start > start)
87055 return -EFAULT;
87056@@ -751,6 +755,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
87057 err = vma_replace_policy(vma, new_pol);
87058 if (err)
87059 goto out;
87060+
87061+#ifdef CONFIG_PAX_SEGMEXEC
87062+ vma_m = pax_find_mirror_vma(vma);
87063+ if (vma_m) {
87064+ err = vma_replace_policy(vma_m, new_pol);
87065+ if (err)
87066+ goto out;
87067+ }
87068+#endif
87069+
87070 }
87071
87072 out:
87073@@ -1206,6 +1220,17 @@ static long do_mbind(unsigned long start, unsigned long len,
87074
87075 if (end < start)
87076 return -EINVAL;
87077+
87078+#ifdef CONFIG_PAX_SEGMEXEC
87079+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
87080+ if (end > SEGMEXEC_TASK_SIZE)
87081+ return -EINVAL;
87082+ } else
87083+#endif
87084+
87085+ if (end > TASK_SIZE)
87086+ return -EINVAL;
87087+
87088 if (end == start)
87089 return 0;
87090
87091@@ -1434,8 +1459,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
87092 */
87093 tcred = __task_cred(task);
87094 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
87095- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
87096- !capable(CAP_SYS_NICE)) {
87097+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
87098 rcu_read_unlock();
87099 err = -EPERM;
87100 goto out_put;
87101@@ -1466,6 +1490,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
87102 goto out;
87103 }
87104
87105+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
87106+ if (mm != current->mm &&
87107+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
87108+ mmput(mm);
87109+ err = -EPERM;
87110+ goto out;
87111+ }
87112+#endif
87113+
87114 err = do_migrate_pages(mm, old, new,
87115 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
87116
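The migrate_pages credential hunk (and the identical one in mm/migrate.c just below) removes the clause that let a mere real-uid match authorize the call, so acting on another process now requires an euid/suid relationship or CAP_SYS_NICE; the GRKERNSEC_PROC_MEMMAP block additionally refuses cross-process migration when the target relies on RANDMMAP or SEGMEXEC, keeping its memory layout opaque. The tightened predicate, restated from the diff:

#include <stdbool.h>

/* sketch: who may migrate the target's pages after this change */
static bool example_may_migrate(unsigned int euid, unsigned int uid,
				unsigned int tsuid, unsigned int tuid,
				bool has_cap_sys_nice)
{
	/* uid == tuid alone no longer grants access */
	return euid == tsuid || euid == tuid || uid == tsuid || has_cap_sys_nice;
}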
87117diff --git a/mm/migrate.c b/mm/migrate.c
87118index 6f0c244..6d1ae32 100644
87119--- a/mm/migrate.c
87120+++ b/mm/migrate.c
87121@@ -1399,8 +1399,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
87122 */
87123 tcred = __task_cred(task);
87124 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
87125- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
87126- !capable(CAP_SYS_NICE)) {
87127+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
87128 rcu_read_unlock();
87129 err = -EPERM;
87130 goto out;
87131diff --git a/mm/mlock.c b/mm/mlock.c
87132index 79b7cf7..9944291 100644
87133--- a/mm/mlock.c
87134+++ b/mm/mlock.c
87135@@ -13,6 +13,7 @@
87136 #include <linux/pagemap.h>
87137 #include <linux/mempolicy.h>
87138 #include <linux/syscalls.h>
87139+#include <linux/security.h>
87140 #include <linux/sched.h>
87141 #include <linux/export.h>
87142 #include <linux/rmap.h>
87143@@ -334,7 +335,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
87144 {
87145 unsigned long nstart, end, tmp;
87146 struct vm_area_struct * vma, * prev;
87147- int error;
87148+ int error = 0;
87149
87150 VM_BUG_ON(start & ~PAGE_MASK);
87151 VM_BUG_ON(len != PAGE_ALIGN(len));
87152@@ -343,6 +344,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
87153 return -EINVAL;
87154 if (end == start)
87155 return 0;
87156+ if (end > TASK_SIZE)
87157+ return -EINVAL;
87158+
87159 vma = find_vma(current->mm, start);
87160 if (!vma || vma->vm_start > start)
87161 return -ENOMEM;
87162@@ -354,6 +358,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
87163 for (nstart = start ; ; ) {
87164 vm_flags_t newflags;
87165
87166+#ifdef CONFIG_PAX_SEGMEXEC
87167+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
87168+ break;
87169+#endif
87170+
87171 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
87172
87173 newflags = vma->vm_flags & ~VM_LOCKED;
87174@@ -466,6 +475,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
87175 lock_limit >>= PAGE_SHIFT;
87176
87177 /* check against resource limits */
87178+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
87179 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
87180 error = do_mlock(start, len, 1);
87181 up_write(&current->mm->mmap_sem);
87182@@ -500,6 +510,11 @@ static int do_mlockall(int flags)
87183 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
87184 vm_flags_t newflags;
87185
87186+#ifdef CONFIG_PAX_SEGMEXEC
87187+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
87188+ break;
87189+#endif
87190+
87191 newflags = vma->vm_flags & ~VM_LOCKED;
87192 if (flags & MCL_CURRENT)
87193 newflags |= VM_LOCKED;
87194@@ -532,6 +547,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
87195 lock_limit >>= PAGE_SHIFT;
87196
87197 ret = -ENOMEM;
87198+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
87199 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
87200 capable(CAP_IPC_LOCK))
87201 ret = do_mlockall(flags);
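The mlock changes add an explicit end > TASK_SIZE bound, a SEGMEXEC cutoff so the mirror half of the address space is never mlocked directly, and gr_learn_resource() hooks that feed grsecurity's RBAC learning mode the RLIMIT_MEMLOCK demand a policy would need to permit. A sketch of the limit arithmetic around the hook, with gr_learn_resource assumed to be the patch-provided recorder:

#include <linux/mm.h>
#include <linux/sched.h>

/* sketch: the RLIMIT_MEMLOCK bookkeeping around do_mlock() */
static int example_mlock_check(unsigned long start, size_t len)
{
	unsigned long locked, lock_limit;

	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	locked = (len >> PAGE_SHIFT) + current->mm->locked_vm;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	/* learning mode records the demand before enforcement (grsec hook) */
	gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);

	return (locked <= lock_limit || capable(CAP_IPC_LOCK)) ? 0 : -ENOMEM;
}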
87202diff --git a/mm/mmap.c b/mm/mmap.c
87203index 8d25fdc..bfb7626 100644
87204--- a/mm/mmap.c
87205+++ b/mm/mmap.c
87206@@ -36,6 +36,7 @@
87207 #include <linux/sched/sysctl.h>
87208 #include <linux/notifier.h>
87209 #include <linux/memory.h>
87210+#include <linux/random.h>
87211
87212 #include <asm/uaccess.h>
87213 #include <asm/cacheflush.h>
87214@@ -52,6 +53,16 @@
87215 #define arch_rebalance_pgtables(addr, len) (addr)
87216 #endif
87217
87218+static inline void verify_mm_writelocked(struct mm_struct *mm)
87219+{
87220+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
87221+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
87222+ up_read(&mm->mmap_sem);
87223+ BUG();
87224+ }
87225+#endif
87226+}
87227+
87228 static void unmap_region(struct mm_struct *mm,
87229 struct vm_area_struct *vma, struct vm_area_struct *prev,
87230 unsigned long start, unsigned long end);
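verify_mm_writelocked is a cheap assertion: if down_read_trylock succeeds, nobody held mmap_sem for writing, which DEBUG_VM/PAX builds treat as a bug. mmap_region calls it later in this file before touching the vma tree. A usage sketch (the helper is file-local to mm/mmap.c):

#include <linux/mm_types.h>
#include <linux/rwsem.h>

/* sketch: assert write ownership before mutating the vma tree */
static void example_mutate_mappings(struct mm_struct *mm)
{
	down_write(&mm->mmap_sem);
	verify_mm_writelocked(mm);	/* read-trylock succeeding => no writer => BUG() */
	/* ... safe to insert/remove vmas here ... */
	up_write(&mm->mmap_sem);
}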
87231@@ -71,16 +82,25 @@ static void unmap_region(struct mm_struct *mm,
87232 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
87233 *
87234 */
87235-pgprot_t protection_map[16] = {
87236+pgprot_t protection_map[16] __read_only = {
87237 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
87238 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
87239 };
87240
87241-pgprot_t vm_get_page_prot(unsigned long vm_flags)
87242+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
87243 {
87244- return __pgprot(pgprot_val(protection_map[vm_flags &
87245+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
87246 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
87247 pgprot_val(arch_vm_get_page_prot(vm_flags)));
87248+
87249+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
87250+ if (!(__supported_pte_mask & _PAGE_NX) &&
87251+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
87252+ (vm_flags & (VM_READ | VM_WRITE)))
87253+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
87254+#endif
87255+
87256+ return prot;
87257 }
87258 EXPORT_SYMBOL(vm_get_page_prot);
87259
87260@@ -89,6 +109,7 @@ int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
87261 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
87262 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
87263 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
87264+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
87265 /*
87266 * Make sure vm_committed_as in one cacheline and not cacheline shared with
87267 * other variables. It can be updated by several CPUs frequently.
87268@@ -247,6 +268,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
87269 struct vm_area_struct *next = vma->vm_next;
87270
87271 might_sleep();
87272+ BUG_ON(vma->vm_mirror);
87273 if (vma->vm_ops && vma->vm_ops->close)
87274 vma->vm_ops->close(vma);
87275 if (vma->vm_file)
87276@@ -291,6 +313,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
87277 * not page aligned -Ram Gupta
87278 */
87279 rlim = rlimit(RLIMIT_DATA);
87280+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
87281 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
87282 (mm->end_data - mm->start_data) > rlim)
87283 goto out;
87284@@ -933,6 +956,12 @@ static int
87285 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
87286 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
87287 {
87288+
87289+#ifdef CONFIG_PAX_SEGMEXEC
87290+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
87291+ return 0;
87292+#endif
87293+
87294 if (is_mergeable_vma(vma, file, vm_flags) &&
87295 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
87296 if (vma->vm_pgoff == vm_pgoff)
87297@@ -952,6 +981,12 @@ static int
87298 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
87299 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
87300 {
87301+
87302+#ifdef CONFIG_PAX_SEGMEXEC
87303+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
87304+ return 0;
87305+#endif
87306+
87307 if (is_mergeable_vma(vma, file, vm_flags) &&
87308 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
87309 pgoff_t vm_pglen;
87310@@ -994,13 +1029,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
87311 struct vm_area_struct *vma_merge(struct mm_struct *mm,
87312 struct vm_area_struct *prev, unsigned long addr,
87313 unsigned long end, unsigned long vm_flags,
87314- struct anon_vma *anon_vma, struct file *file,
87315+ struct anon_vma *anon_vma, struct file *file,
87316 pgoff_t pgoff, struct mempolicy *policy)
87317 {
87318 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
87319 struct vm_area_struct *area, *next;
87320 int err;
87321
87322+#ifdef CONFIG_PAX_SEGMEXEC
87323+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
87324+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
87325+
87326+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
87327+#endif
87328+
87329 /*
87330 * We later require that vma->vm_flags == vm_flags,
87331 * so this tests vma->vm_flags & VM_SPECIAL, too.
87332@@ -1016,6 +1058,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
87333 if (next && next->vm_end == end) /* cases 6, 7, 8 */
87334 next = next->vm_next;
87335
87336+#ifdef CONFIG_PAX_SEGMEXEC
87337+ if (prev)
87338+ prev_m = pax_find_mirror_vma(prev);
87339+ if (area)
87340+ area_m = pax_find_mirror_vma(area);
87341+ if (next)
87342+ next_m = pax_find_mirror_vma(next);
87343+#endif
87344+
87345 /*
87346 * Can it merge with the predecessor?
87347 */
87348@@ -1035,9 +1086,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
87349 /* cases 1, 6 */
87350 err = vma_adjust(prev, prev->vm_start,
87351 next->vm_end, prev->vm_pgoff, NULL);
87352- } else /* cases 2, 5, 7 */
87353+
87354+#ifdef CONFIG_PAX_SEGMEXEC
87355+ if (!err && prev_m)
87356+ err = vma_adjust(prev_m, prev_m->vm_start,
87357+ next_m->vm_end, prev_m->vm_pgoff, NULL);
87358+#endif
87359+
87360+ } else { /* cases 2, 5, 7 */
87361 err = vma_adjust(prev, prev->vm_start,
87362 end, prev->vm_pgoff, NULL);
87363+
87364+#ifdef CONFIG_PAX_SEGMEXEC
87365+ if (!err && prev_m)
87366+ err = vma_adjust(prev_m, prev_m->vm_start,
87367+ end_m, prev_m->vm_pgoff, NULL);
87368+#endif
87369+
87370+ }
87371 if (err)
87372 return NULL;
87373 khugepaged_enter_vma_merge(prev);
87374@@ -1051,12 +1117,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
87375 mpol_equal(policy, vma_policy(next)) &&
87376 can_vma_merge_before(next, vm_flags,
87377 anon_vma, file, pgoff+pglen)) {
87378- if (prev && addr < prev->vm_end) /* case 4 */
87379+ if (prev && addr < prev->vm_end) { /* case 4 */
87380 err = vma_adjust(prev, prev->vm_start,
87381 addr, prev->vm_pgoff, NULL);
87382- else /* cases 3, 8 */
87383+
87384+#ifdef CONFIG_PAX_SEGMEXEC
87385+ if (!err && prev_m)
87386+ err = vma_adjust(prev_m, prev_m->vm_start,
87387+ addr_m, prev_m->vm_pgoff, NULL);
87388+#endif
87389+
87390+ } else { /* cases 3, 8 */
87391 err = vma_adjust(area, addr, next->vm_end,
87392 next->vm_pgoff - pglen, NULL);
87393+
87394+#ifdef CONFIG_PAX_SEGMEXEC
87395+ if (!err && area_m)
87396+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
87397+ next_m->vm_pgoff - pglen, NULL);
87398+#endif
87399+
87400+ }
87401 if (err)
87402 return NULL;
87403 khugepaged_enter_vma_merge(area);
87404@@ -1165,8 +1246,10 @@ none:
87405 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
87406 struct file *file, long pages)
87407 {
87408- const unsigned long stack_flags
87409- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
87410+
87411+#ifdef CONFIG_PAX_RANDMMAP
87412+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
87413+#endif
87414
87415 mm->total_vm += pages;
87416
87417@@ -1174,7 +1257,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
87418 mm->shared_vm += pages;
87419 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
87420 mm->exec_vm += pages;
87421- } else if (flags & stack_flags)
87422+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
87423 mm->stack_vm += pages;
87424 }
87425 #endif /* CONFIG_PROC_FS */
87426@@ -1213,7 +1296,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
87427 * (the exception is when the underlying filesystem is noexec
87428 * mounted, in which case we dont add PROT_EXEC.)
87429 */
87430- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
87431+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
87432 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
87433 prot |= PROT_EXEC;
87434
87435@@ -1239,7 +1322,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
87436 /* Obtain the address to map to. we verify (or select) it and ensure
87437 * that it represents a valid section of the address space.
87438 */
87439- addr = get_unmapped_area(file, addr, len, pgoff, flags);
87440+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
87441 if (addr & ~PAGE_MASK)
87442 return addr;
87443
87444@@ -1250,6 +1333,43 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
87445 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
87446 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
87447
87448+#ifdef CONFIG_PAX_MPROTECT
87449+ if (mm->pax_flags & MF_PAX_MPROTECT) {
87450+
87451+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
87452+ if (file && !pgoff && (vm_flags & VM_EXEC) && mm->binfmt &&
87453+ mm->binfmt->handle_mmap)
87454+ mm->binfmt->handle_mmap(file);
87455+#endif
87456+
87457+#ifndef CONFIG_PAX_MPROTECT_COMPAT
87458+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
87459+ gr_log_rwxmmap(file);
87460+
87461+#ifdef CONFIG_PAX_EMUPLT
87462+ vm_flags &= ~VM_EXEC;
87463+#else
87464+ return -EPERM;
87465+#endif
87466+
87467+ }
87468+
87469+ if (!(vm_flags & VM_EXEC))
87470+ vm_flags &= ~VM_MAYEXEC;
87471+#else
87472+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
87473+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
87474+#endif
87475+ else
87476+ vm_flags &= ~VM_MAYWRITE;
87477+ }
87478+#endif
87479+
87480+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
87481+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
87482+ vm_flags &= ~VM_PAGEEXEC;
87483+#endif
87484+
87485 if (flags & MAP_LOCKED)
87486 if (!can_do_mlock())
87487 return -EPERM;
87488@@ -1261,6 +1381,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
87489 locked += mm->locked_vm;
87490 lock_limit = rlimit(RLIMIT_MEMLOCK);
87491 lock_limit >>= PAGE_SHIFT;
87492+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
87493 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
87494 return -EAGAIN;
87495 }
87496@@ -1341,6 +1462,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
87497 vm_flags |= VM_NORESERVE;
87498 }
87499
87500+ if (!gr_acl_handle_mmap(file, prot))
87501+ return -EACCES;
87502+
87503 addr = mmap_region(file, addr, len, vm_flags, pgoff);
87504 if (!IS_ERR_VALUE(addr) &&
87505 ((vm_flags & VM_LOCKED) ||
87506@@ -1432,7 +1556,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
87507 vm_flags_t vm_flags = vma->vm_flags;
87508
87509 /* If it was private or non-writable, the write bit is already clear */
87510- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
87511+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
87512 return 0;
87513
87514 /* The backer wishes to know when pages are first written to? */
87515@@ -1480,7 +1604,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
87516 unsigned long charged = 0;
87517 struct inode *inode = file ? file_inode(file) : NULL;
87518
87519+#ifdef CONFIG_PAX_SEGMEXEC
87520+ struct vm_area_struct *vma_m = NULL;
87521+#endif
87522+
87523+ /*
87524+ * mm->mmap_sem must be held for writing to protect against another
87525+ * thread changing the mappings while we may sleep.
87526+ */
87527+ verify_mm_writelocked(mm);
87528+
87529 /* Check against address space limit. */
87530+
87531+#ifdef CONFIG_PAX_RANDMMAP
87532+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (vm_flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
87533+#endif
87534+
87535 if (!may_expand_vm(mm, len >> PAGE_SHIFT)) {
87536 unsigned long nr_pages;
87537
87538@@ -1499,11 +1638,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
87539
87540 /* Clear old maps */
87541 error = -ENOMEM;
87542-munmap_back:
87543 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
87544 if (do_munmap(mm, addr, len))
87545 return -ENOMEM;
87546- goto munmap_back;
87547+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
87548 }
87549
87550 /*
87551@@ -1534,6 +1672,16 @@ munmap_back:
87552 goto unacct_error;
87553 }
87554
87555+#ifdef CONFIG_PAX_SEGMEXEC
87556+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
87557+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
87558+ if (!vma_m) {
87559+ error = -ENOMEM;
87560+ goto free_vma;
87561+ }
87562+ }
87563+#endif
87564+
87565 vma->vm_mm = mm;
87566 vma->vm_start = addr;
87567 vma->vm_end = addr + len;
87568@@ -1558,6 +1706,13 @@ munmap_back:
87569 if (error)
87570 goto unmap_and_free_vma;
87571
87572+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
87573+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
87574+ vma->vm_flags |= VM_PAGEEXEC;
87575+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
87576+ }
87577+#endif
87578+
87579 /* Can addr have changed??
87580 *
87581 * Answer: Yes, several device drivers can do it in their
87582@@ -1596,6 +1751,11 @@ munmap_back:
87583 vma_link(mm, vma, prev, rb_link, rb_parent);
87584 file = vma->vm_file;
87585
87586+#ifdef CONFIG_PAX_SEGMEXEC
87587+ if (vma_m)
87588+ BUG_ON(pax_mirror_vma(vma_m, vma));
87589+#endif
87590+
87591 /* Once vma denies write, undo our temporary denial count */
87592 if (correct_wcount)
87593 atomic_inc(&inode->i_writecount);
87594@@ -1603,6 +1763,7 @@ out:
87595 perf_event_mmap(vma);
87596
87597 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
87598+ track_exec_limit(mm, addr, addr + len, vm_flags);
87599 if (vm_flags & VM_LOCKED) {
87600 if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
87601 vma == get_gate_vma(current->mm)))
87602@@ -1626,6 +1787,12 @@ unmap_and_free_vma:
87603 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
87604 charged = 0;
87605 free_vma:
87606+
87607+#ifdef CONFIG_PAX_SEGMEXEC
87608+ if (vma_m)
87609+ kmem_cache_free(vm_area_cachep, vma_m);
87610+#endif
87611+
87612 kmem_cache_free(vm_area_cachep, vma);
87613 unacct_error:
87614 if (charged)
87615@@ -1633,7 +1800,63 @@ unacct_error:
87616 return error;
87617 }
87618
87619-unsigned long unmapped_area(struct vm_unmapped_area_info *info)
87620+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
87621+unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
87622+{
87623+ if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
87624+ return ((prandom_u32() & 0xFF) + 1) << PAGE_SHIFT;
87625+
87626+ return 0;
87627+}
87628+#endif
87629+
87630+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset)
87631+{
87632+ if (!vma) {
87633+#ifdef CONFIG_STACK_GROWSUP
87634+ if (addr > sysctl_heap_stack_gap)
87635+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
87636+ else
87637+ vma = find_vma(current->mm, 0);
87638+ if (vma && (vma->vm_flags & VM_GROWSUP))
87639+ return false;
87640+#endif
87641+ return true;
87642+ }
87643+
87644+ if (addr + len > vma->vm_start)
87645+ return false;
87646+
87647+ if (vma->vm_flags & VM_GROWSDOWN)
87648+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
87649+#ifdef CONFIG_STACK_GROWSUP
87650+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
87651+ return addr - vma->vm_prev->vm_end >= sysctl_heap_stack_gap;
87652+#endif
87653+ else if (offset)
87654+ return offset <= vma->vm_start - addr - len;
87655+
87656+ return true;
87657+}
87658+
87659+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
87660+{
87661+ if (vma->vm_start < len)
87662+ return -ENOMEM;
87663+
87664+ if (!(vma->vm_flags & VM_GROWSDOWN)) {
87665+ if (offset <= vma->vm_start - len)
87666+ return vma->vm_start - len - offset;
87667+ else
87668+ return -ENOMEM;
87669+ }
87670+
87671+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
87672+ return vma->vm_start - len - sysctl_heap_stack_gap;
87673+ return -ENOMEM;
87674+}
87675+
87676+unsigned long unmapped_area(const struct vm_unmapped_area_info *info)
87677 {
87678 /*
87679 * We implement the search by looking for an rbtree node that
87680@@ -1681,11 +1904,29 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
87681 }
87682 }
87683
87684- gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
87685+ gap_start = vma->vm_prev ? vma->vm_prev->vm_end: 0;
87686 check_current:
87687 /* Check if current node has a suitable gap */
87688 if (gap_start > high_limit)
87689 return -ENOMEM;
87690+
87691+ if (gap_end - gap_start > info->threadstack_offset)
87692+ gap_start += info->threadstack_offset;
87693+ else
87694+ gap_start = gap_end;
87695+
87696+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
87697+ if (gap_end - gap_start > sysctl_heap_stack_gap)
87698+ gap_start += sysctl_heap_stack_gap;
87699+ else
87700+ gap_start = gap_end;
87701+ }
87702+ if (vma->vm_flags & VM_GROWSDOWN) {
87703+ if (gap_end - gap_start > sysctl_heap_stack_gap)
87704+ gap_end -= sysctl_heap_stack_gap;
87705+ else
87706+ gap_end = gap_start;
87707+ }
87708 if (gap_end >= low_limit && gap_end - gap_start >= length)
87709 goto found;
87710
87711@@ -1735,7 +1976,7 @@ found:
87712 return gap_start;
87713 }
87714
87715-unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
87716+unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info)
87717 {
87718 struct mm_struct *mm = current->mm;
87719 struct vm_area_struct *vma;
87720@@ -1789,6 +2030,24 @@ check_current:
87721 gap_end = vma->vm_start;
87722 if (gap_end < low_limit)
87723 return -ENOMEM;
87724+
87725+ if (gap_end - gap_start > info->threadstack_offset)
87726+ gap_end -= info->threadstack_offset;
87727+ else
87728+ gap_end = gap_start;
87729+
87730+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
87731+ if (gap_end - gap_start > sysctl_heap_stack_gap)
87732+ gap_start += sysctl_heap_stack_gap;
87733+ else
87734+ gap_start = gap_end;
87735+ }
87736+ if (vma->vm_flags & VM_GROWSDOWN) {
87737+ if (gap_end - gap_start > sysctl_heap_stack_gap)
87738+ gap_end -= sysctl_heap_stack_gap;
87739+ else
87740+ gap_end = gap_start;
87741+ }
87742 if (gap_start <= high_limit && gap_end - gap_start >= length)
87743 goto found;
87744
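Both allocator variants shrink each raw gap before comparing it against the requested length: the thread-stack offset is carved off first, then sysctl_heap_stack_gap is reserved beside an upward-growing predecessor and again beside a downward-growing stack, so a window that cannot absorb every reservation collapses to zero and is skipped. A hedged restatement of the bottom-up trimming, with illustrative names:

/* Illustrative restatement of the bottom-up trimming above; the
 * top-down variant mirrors this from the other end of the window. */
static unsigned long trim_gap_bottom_up(unsigned long gap_start,
					unsigned long gap_end,
					unsigned long ts_offset,
					int prev_grows_up,
					int this_grows_down,
					unsigned long gap)
{
	if (gap_end - gap_start > ts_offset)
		gap_start += ts_offset;      /* leave room for the offset */
	else
		gap_start = gap_end;         /* window collapses to empty */

	if (prev_grows_up) {
		if (gap_end - gap_start > gap)
			gap_start += gap;    /* stay clear of the stack below */
		else
			gap_start = gap_end;
	}
	if (this_grows_down) {
		if (gap_end - gap_start > gap)
			gap_end -= gap;      /* stay clear of the stack above */
		else
			gap_end = gap_start;
	}
	return gap_end - gap_start;          /* usable length that remains */
}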
87745@@ -1852,6 +2111,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
87746 struct mm_struct *mm = current->mm;
87747 struct vm_area_struct *vma;
87748 struct vm_unmapped_area_info info;
87749+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
87750
87751 if (len > TASK_SIZE)
87752 return -ENOMEM;
87753@@ -1859,29 +2119,45 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
87754 if (flags & MAP_FIXED)
87755 return addr;
87756
87757+#ifdef CONFIG_PAX_RANDMMAP
87758+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
87759+#endif
87760+
87761 if (addr) {
87762 addr = PAGE_ALIGN(addr);
87763 vma = find_vma(mm, addr);
87764- if (TASK_SIZE - len >= addr &&
87765- (!vma || addr + len <= vma->vm_start))
87766+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
87767 return addr;
87768 }
87769
87770 info.flags = 0;
87771 info.length = len;
87772 info.low_limit = TASK_UNMAPPED_BASE;
87773+
87774+#ifdef CONFIG_PAX_RANDMMAP
87775+ if (mm->pax_flags & MF_PAX_RANDMMAP)
87776+ info.low_limit += mm->delta_mmap;
87777+#endif
87778+
87779 info.high_limit = TASK_SIZE;
87780 info.align_mask = 0;
87781+ info.threadstack_offset = offset;
87782 return vm_unmapped_area(&info);
87783 }
87784 #endif
87785
87786 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
87787 {
87788+
87789+#ifdef CONFIG_PAX_SEGMEXEC
87790+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
87791+ return;
87792+#endif
87793+
87794 /*
87795 * Is this a new hole at the lowest possible address?
87796 */
87797- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache)
87798+ if (addr >= mm->mmap_base && addr < mm->free_area_cache)
87799 mm->free_area_cache = addr;
87800 }
87801
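Two related changes sit in this hunk: under MF_PAX_RANDMMAP the caller's hint is honoured only when randomization is off (the #ifdef gates the whole `if (addr)` block) and the search floor is raised by the per-mm mm->delta_mmap, while arch_unmap_area() now compares against mm->mmap_base so the randomized base remains the effective floor for the free-area cache. A toy illustration of the shifted floor; both constants below are invented for the example:

/* Toy numbers only: the effect of the RANDMMAP delta on the search
 * floor. The real delta_mmap is chosen per-mm at exec time. */
#include <stdio.h>

int main(void)
{
	unsigned long task_unmapped_base = 0x40000000UL;
	unsigned long delta_mmap = 0x01a3f000UL;   /* page-aligned random delta */

	printf("low_limit without RANDMMAP: %#lx\n", task_unmapped_base);
	printf("low_limit with    RANDMMAP: %#lx\n",
	       task_unmapped_base + delta_mmap);
	return 0;
}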
87802@@ -1899,6 +2175,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
87803 struct mm_struct *mm = current->mm;
87804 unsigned long addr = addr0;
87805 struct vm_unmapped_area_info info;
87806+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
87807
87808 /* requested length too big for entire address space */
87809 if (len > TASK_SIZE)
87810@@ -1907,12 +2184,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
87811 if (flags & MAP_FIXED)
87812 return addr;
87813
87814+#ifdef CONFIG_PAX_RANDMMAP
87815+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
87816+#endif
87817+
87818 /* requesting a specific address */
87819 if (addr) {
87820 addr = PAGE_ALIGN(addr);
87821 vma = find_vma(mm, addr);
87822- if (TASK_SIZE - len >= addr &&
87823- (!vma || addr + len <= vma->vm_start))
87824+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
87825 return addr;
87826 }
87827
87828@@ -1921,6 +2201,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
87829 info.low_limit = PAGE_SIZE;
87830 info.high_limit = mm->mmap_base;
87831 info.align_mask = 0;
87832+ info.threadstack_offset = offset;
87833 addr = vm_unmapped_area(&info);
87834
87835 /*
87836@@ -1933,6 +2214,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
87837 VM_BUG_ON(addr != -ENOMEM);
87838 info.flags = 0;
87839 info.low_limit = TASK_UNMAPPED_BASE;
87840+
87841+#ifdef CONFIG_PAX_RANDMMAP
87842+ if (mm->pax_flags & MF_PAX_RANDMMAP)
87843+ info.low_limit += mm->delta_mmap;
87844+#endif
87845+
87846 info.high_limit = TASK_SIZE;
87847 addr = vm_unmapped_area(&info);
87848 }
87849@@ -1943,6 +2230,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
87850
87851 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
87852 {
87853+
87854+#ifdef CONFIG_PAX_SEGMEXEC
87855+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
87856+ return;
87857+#endif
87858+
87859 /*
87860 * Is this a new hole at the highest possible address?
87861 */
87862@@ -1950,8 +2243,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
87863 mm->free_area_cache = addr;
87864
87865 /* dont allow allocations above current base */
87866- if (mm->free_area_cache > mm->mmap_base)
87867+ if (mm->free_area_cache > mm->mmap_base) {
87868 mm->free_area_cache = mm->mmap_base;
87869+ mm->cached_hole_size = ~0UL;
87870+ }
87871 }
87872
87873 unsigned long
87874@@ -2047,6 +2342,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
87875 return vma;
87876 }
87877
87878+#ifdef CONFIG_PAX_SEGMEXEC
87879+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
87880+{
87881+ struct vm_area_struct *vma_m;
87882+
87883+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
87884+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
87885+ BUG_ON(vma->vm_mirror);
87886+ return NULL;
87887+ }
87888+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
87889+ vma_m = vma->vm_mirror;
87890+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
87891+ BUG_ON(vma->vm_file != vma_m->vm_file);
87892+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
87893+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
87894+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
87895+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
87896+ return vma_m;
87897+}
87898+#endif
87899+
87900 /*
87901 * Verify that the stack growth is acceptable and
87902 * update accounting. This is shared with both the
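pax_find_mirror_vma() allocates nothing; it resolves vma->vm_mirror and asserts the pairing invariants the rest of the SEGMEXEC code depends on: same file, same size, same pgoff, compatible anon_vma, and flags that differ at most in writability and accounting bits. Those invariants restated over a stand-in structure (the file and anon_vma checks are left out and the mask is illustrative):

/* Pairing invariants from the BUG_ON()s above, over a stub type. */
#include <assert.h>

struct mirror_stub {
	unsigned long vm_start, vm_end, vm_pgoff, vm_flags;
	struct mirror_stub *vm_mirror;
};

/* stands in for VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED */
#define MIRROR_MAY_DIFFER 0x0fUL

static void check_mirror_pair(const struct mirror_stub *vma,
			      const struct mirror_stub *vma_m)
{
	assert(vma->vm_mirror == vma_m && vma_m->vm_mirror == vma);
	assert(vma->vm_end - vma->vm_start == vma_m->vm_end - vma_m->vm_start);
	assert(vma->vm_pgoff == vma_m->vm_pgoff);
	/* only writability/accounting bits may differ across the pair */
	assert(((vma->vm_flags ^ vma_m->vm_flags) & ~MIRROR_MAY_DIFFER) == 0);
}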
87903@@ -2063,6 +2380,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
87904 return -ENOMEM;
87905
87906 /* Stack limit test */
87907+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
87908 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
87909 return -ENOMEM;
87910
87911@@ -2073,6 +2391,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
87912 locked = mm->locked_vm + grow;
87913 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
87914 limit >>= PAGE_SHIFT;
87915+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
87916 if (locked > limit && !capable(CAP_IPC_LOCK))
87917 return -ENOMEM;
87918 }
87919@@ -2102,37 +2421,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
87920 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
87921 * vma is the last one with address > vma->vm_end. Have to extend vma.
87922 */
87923+#ifndef CONFIG_IA64
87924+static
87925+#endif
87926 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
87927 {
87928 int error;
87929+ bool locknext;
87930
87931 if (!(vma->vm_flags & VM_GROWSUP))
87932 return -EFAULT;
87933
87934+ /* Also guard against wrapping around to address 0. */
87935+ if (address < PAGE_ALIGN(address+1))
87936+ address = PAGE_ALIGN(address+1);
87937+ else
87938+ return -ENOMEM;
87939+
87940 /*
87941 * We must make sure the anon_vma is allocated
87942 * so that the anon_vma locking is not a noop.
87943 */
87944 if (unlikely(anon_vma_prepare(vma)))
87945 return -ENOMEM;
87946+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
87947+ if (locknext && anon_vma_prepare(vma->vm_next))
87948+ return -ENOMEM;
87949 vma_lock_anon_vma(vma);
87950+ if (locknext)
87951+ vma_lock_anon_vma(vma->vm_next);
87952
87953 /*
87954 * vma->vm_start/vm_end cannot change under us because the caller
87955 * is required to hold the mmap_sem in read mode. We need the
87956- * anon_vma lock to serialize against concurrent expand_stacks.
87957- * Also guard against wrapping around to address 0.
87958+ * anon_vma locks to serialize against concurrent expand_stacks
87959+ * and expand_upwards.
87960 */
87961- if (address < PAGE_ALIGN(address+4))
87962- address = PAGE_ALIGN(address+4);
87963- else {
87964- vma_unlock_anon_vma(vma);
87965- return -ENOMEM;
87966- }
87967 error = 0;
87968
87969 /* Somebody else might have raced and expanded it already */
87970- if (address > vma->vm_end) {
87971+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
87972+ error = -ENOMEM;
87973+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
87974 unsigned long size, grow;
87975
87976 size = address - vma->vm_start;
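expand_upwards() gains two guards: the wrap-around check moves ahead of any locking (now aligning address + 1 rather than address + 4), and when the next vma is a downward-growing stack its anon_vma is locked as well, so two stacks growing toward each other serialize on both locks instead of racing into the same hole. Growth is then refused outright if it would leave less than sysctl_heap_stack_gap before an accessible neighbour; a sketch of that test with illustrative names:

/* Sketch of the refusal condition above: growth up to `address` is
 * rejected when an accessible next vma would end up closer than the
 * configured gap. Pass next_rwx_flags == 0 when there is no next
 * vma or it is inaccessible. */
static int upward_growth_blocked(unsigned long address,
				 unsigned long next_start,
				 unsigned long next_rwx_flags,
				 unsigned long heap_stack_gap)
{
	return next_rwx_flags != 0 && next_start - address < heap_stack_gap;
}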
87977@@ -2167,6 +2497,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
87978 }
87979 }
87980 }
87981+ if (locknext)
87982+ vma_unlock_anon_vma(vma->vm_next);
87983 vma_unlock_anon_vma(vma);
87984 khugepaged_enter_vma_merge(vma);
87985 validate_mm(vma->vm_mm);
87986@@ -2181,6 +2513,8 @@ int expand_downwards(struct vm_area_struct *vma,
87987 unsigned long address)
87988 {
87989 int error;
87990+ bool lockprev = false;
87991+ struct vm_area_struct *prev;
87992
87993 /*
87994 * We must make sure the anon_vma is allocated
87995@@ -2194,6 +2528,15 @@ int expand_downwards(struct vm_area_struct *vma,
87996 if (error)
87997 return error;
87998
87999+ prev = vma->vm_prev;
88000+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
88001+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
88002+#endif
88003+ if (lockprev && anon_vma_prepare(prev))
88004+ return -ENOMEM;
88005+ if (lockprev)
88006+ vma_lock_anon_vma(prev);
88007+
88008 vma_lock_anon_vma(vma);
88009
88010 /*
88011@@ -2203,9 +2546,17 @@ int expand_downwards(struct vm_area_struct *vma,
88012 */
88013
88014 /* Somebody else might have raced and expanded it already */
88015- if (address < vma->vm_start) {
88016+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
88017+ error = -ENOMEM;
88018+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
88019 unsigned long size, grow;
88020
88021+#ifdef CONFIG_PAX_SEGMEXEC
88022+ struct vm_area_struct *vma_m;
88023+
88024+ vma_m = pax_find_mirror_vma(vma);
88025+#endif
88026+
88027 size = vma->vm_end - address;
88028 grow = (vma->vm_start - address) >> PAGE_SHIFT;
88029
88030@@ -2230,13 +2581,27 @@ int expand_downwards(struct vm_area_struct *vma,
88031 vma->vm_pgoff -= grow;
88032 anon_vma_interval_tree_post_update_vma(vma);
88033 vma_gap_update(vma);
88034+
88035+#ifdef CONFIG_PAX_SEGMEXEC
88036+ if (vma_m) {
88037+ anon_vma_interval_tree_pre_update_vma(vma_m);
88038+ vma_m->vm_start -= grow << PAGE_SHIFT;
88039+ vma_m->vm_pgoff -= grow;
88040+ anon_vma_interval_tree_post_update_vma(vma_m);
88041+ vma_gap_update(vma_m);
88042+ }
88043+#endif
88044+
88045 spin_unlock(&vma->vm_mm->page_table_lock);
88046
88047+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
88048 perf_event_mmap(vma);
88049 }
88050 }
88051 }
88052 vma_unlock_anon_vma(vma);
88053+ if (lockprev)
88054+ vma_unlock_anon_vma(prev);
88055 khugepaged_enter_vma_merge(vma);
88056 validate_mm(vma->vm_mm);
88057 return error;
88058@@ -2334,6 +2699,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
88059 do {
88060 long nrpages = vma_pages(vma);
88061
88062+#ifdef CONFIG_PAX_SEGMEXEC
88063+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
88064+ vma = remove_vma(vma);
88065+ continue;
88066+ }
88067+#endif
88068+
88069 if (vma->vm_flags & VM_ACCOUNT)
88070 nr_accounted += nrpages;
88071 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
88072@@ -2379,6 +2751,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
88073 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
88074 vma->vm_prev = NULL;
88075 do {
88076+
88077+#ifdef CONFIG_PAX_SEGMEXEC
88078+ if (vma->vm_mirror) {
88079+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
88080+ vma->vm_mirror->vm_mirror = NULL;
88081+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
88082+ vma->vm_mirror = NULL;
88083+ }
88084+#endif
88085+
88086 vma_rb_erase(vma, &mm->mm_rb);
88087 mm->map_count--;
88088 tail_vma = vma;
88089@@ -2410,14 +2792,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
88090 struct vm_area_struct *new;
88091 int err = -ENOMEM;
88092
88093+#ifdef CONFIG_PAX_SEGMEXEC
88094+ struct vm_area_struct *vma_m, *new_m = NULL;
88095+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
88096+#endif
88097+
88098 if (is_vm_hugetlb_page(vma) && (addr &
88099 ~(huge_page_mask(hstate_vma(vma)))))
88100 return -EINVAL;
88101
88102+#ifdef CONFIG_PAX_SEGMEXEC
88103+ vma_m = pax_find_mirror_vma(vma);
88104+#endif
88105+
88106 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
88107 if (!new)
88108 goto out_err;
88109
88110+#ifdef CONFIG_PAX_SEGMEXEC
88111+ if (vma_m) {
88112+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
88113+ if (!new_m) {
88114+ kmem_cache_free(vm_area_cachep, new);
88115+ goto out_err;
88116+ }
88117+ }
88118+#endif
88119+
88120 /* most fields are the same, copy all, and then fixup */
88121 *new = *vma;
88122
88123@@ -2430,6 +2831,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
88124 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
88125 }
88126
88127+#ifdef CONFIG_PAX_SEGMEXEC
88128+ if (vma_m) {
88129+ *new_m = *vma_m;
88130+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
88131+ new_m->vm_mirror = new;
88132+ new->vm_mirror = new_m;
88133+
88134+ if (new_below)
88135+ new_m->vm_end = addr_m;
88136+ else {
88137+ new_m->vm_start = addr_m;
88138+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
88139+ }
88140+ }
88141+#endif
88142+
88143 pol = mpol_dup(vma_policy(vma));
88144 if (IS_ERR(pol)) {
88145 err = PTR_ERR(pol);
88146@@ -2452,6 +2869,36 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
88147 else
88148 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
88149
88150+#ifdef CONFIG_PAX_SEGMEXEC
88151+ if (!err && vma_m) {
88152+ if (anon_vma_clone(new_m, vma_m))
88153+ goto out_free_mpol;
88154+
88155+ mpol_get(pol);
88156+ vma_set_policy(new_m, pol);
88157+
88158+ if (new_m->vm_file)
88159+ get_file(new_m->vm_file);
88160+
88161+ if (new_m->vm_ops && new_m->vm_ops->open)
88162+ new_m->vm_ops->open(new_m);
88163+
88164+ if (new_below)
88165+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
88166+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
88167+ else
88168+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
88169+
88170+ if (err) {
88171+ if (new_m->vm_ops && new_m->vm_ops->close)
88172+ new_m->vm_ops->close(new_m);
88173+ if (new_m->vm_file)
88174+ fput(new_m->vm_file);
88175+ mpol_put(pol);
88176+ }
88177+ }
88178+#endif
88179+
88180 /* Success. */
88181 if (!err)
88182 return 0;
88183@@ -2461,10 +2908,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
88184 new->vm_ops->close(new);
88185 if (new->vm_file)
88186 fput(new->vm_file);
88187- unlink_anon_vmas(new);
88188 out_free_mpol:
88189 mpol_put(pol);
88190 out_free_vma:
88191+
88192+#ifdef CONFIG_PAX_SEGMEXEC
88193+ if (new_m) {
88194+ unlink_anon_vmas(new_m);
88195+ kmem_cache_free(vm_area_cachep, new_m);
88196+ }
88197+#endif
88198+
88199+ unlink_anon_vmas(new);
88200 kmem_cache_free(vm_area_cachep, new);
88201 out_err:
88202 return err;
88203@@ -2477,6 +2932,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
88204 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
88205 unsigned long addr, int new_below)
88206 {
88207+
88208+#ifdef CONFIG_PAX_SEGMEXEC
88209+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
88210+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
88211+ if (mm->map_count >= sysctl_max_map_count-1)
88212+ return -ENOMEM;
88213+ } else
88214+#endif
88215+
88216 if (mm->map_count >= sysctl_max_map_count)
88217 return -ENOMEM;
88218
88219@@ -2488,11 +2952,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
88220 * work. This now handles partial unmappings.
88221 * Jeremy Fitzhardinge <jeremy@goop.org>
88222 */
88223+#ifdef CONFIG_PAX_SEGMEXEC
88224 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
88225 {
88226+ int ret = __do_munmap(mm, start, len);
88227+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
88228+ return ret;
88229+
88230+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
88231+}
88232+
88233+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
88234+#else
88235+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
88236+#endif
88237+{
88238 unsigned long end;
88239 struct vm_area_struct *vma, *prev, *last;
88240
88241+ /*
88242+ * mm->mmap_sem is required to protect against another thread
88243+ * changing the mappings in case we sleep.
88244+ */
88245+ verify_mm_writelocked(mm);
88246+
88247 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
88248 return -EINVAL;
88249
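Under SEGMEXEC every executable mapping can have a shadow SEGMEXEC_TASK_SIZE higher, so do_munmap() becomes a thin wrapper that unmaps the requested range and then the mirrored range; call sites are unchanged. A sketch of the wrapper pattern with stand-in types:

/* Sketch of the double-unmap pattern above. The real __do_munmap()
 * also takes the mm and runs under a held mmap_sem, and the split
 * constant is architecture-defined; everything here is illustrative. */
#define SEGMEXEC_SPLIT (1UL << 31)           /* illustrative 2 GiB split */

/* stub standing in for __do_munmap(); always succeeds here */
static int unmap_one(unsigned long start, unsigned long len)
{
	(void)start;
	(void)len;
	return 0;
}

static int unmap_mirrored(int segmexec, unsigned long start, unsigned long len)
{
	int ret = unmap_one(start, len);

	if (ret || !segmexec)
		return ret;
	/* drop the shadow mapping in the upper half as well */
	return unmap_one(start + SEGMEXEC_SPLIT, len);
}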
88250@@ -2567,6 +3050,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
88251 /* Fix up all other VM information */
88252 remove_vma_list(mm, vma);
88253
88254+ track_exec_limit(mm, start, end, 0UL);
88255+
88256 return 0;
88257 }
88258
88259@@ -2575,6 +3060,13 @@ int vm_munmap(unsigned long start, size_t len)
88260 int ret;
88261 struct mm_struct *mm = current->mm;
88262
88263+
88264+#ifdef CONFIG_PAX_SEGMEXEC
88265+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
88266+ (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
88267+ return -EINVAL;
88268+#endif
88269+
88270 down_write(&mm->mmap_sem);
88271 ret = do_munmap(mm, start, len);
88272 up_write(&mm->mmap_sem);
88273@@ -2588,16 +3080,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
88274 return vm_munmap(addr, len);
88275 }
88276
88277-static inline void verify_mm_writelocked(struct mm_struct *mm)
88278-{
88279-#ifdef CONFIG_DEBUG_VM
88280- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
88281- WARN_ON(1);
88282- up_read(&mm->mmap_sem);
88283- }
88284-#endif
88285-}
88286-
88287 /*
88288 * this is really a simplified "do_mmap". it only handles
88289 * anonymous maps. eventually we may be able to do some
88290@@ -2611,6 +3093,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
88291 struct rb_node ** rb_link, * rb_parent;
88292 pgoff_t pgoff = addr >> PAGE_SHIFT;
88293 int error;
88294+ unsigned long charged;
88295
88296 len = PAGE_ALIGN(len);
88297 if (!len)
88298@@ -2618,16 +3101,30 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
88299
88300 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
88301
88302+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
88303+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
88304+ flags &= ~VM_EXEC;
88305+
88306+#ifdef CONFIG_PAX_MPROTECT
88307+ if (mm->pax_flags & MF_PAX_MPROTECT)
88308+ flags &= ~VM_MAYEXEC;
88309+#endif
88310+
88311+ }
88312+#endif
88313+
88314 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
88315 if (error & ~PAGE_MASK)
88316 return error;
88317
88318+ charged = len >> PAGE_SHIFT;
88319+
88320 /*
88321 * mlock MCL_FUTURE?
88322 */
88323 if (mm->def_flags & VM_LOCKED) {
88324 unsigned long locked, lock_limit;
88325- locked = len >> PAGE_SHIFT;
88326+ locked = charged;
88327 locked += mm->locked_vm;
88328 lock_limit = rlimit(RLIMIT_MEMLOCK);
88329 lock_limit >>= PAGE_SHIFT;
88330@@ -2644,21 +3141,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
88331 /*
88332 * Clear old maps. this also does some error checking for us
88333 */
88334- munmap_back:
88335 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
88336 if (do_munmap(mm, addr, len))
88337 return -ENOMEM;
88338- goto munmap_back;
88339+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
88340 }
88341
88342 /* Check against address space limits *after* clearing old maps... */
88343- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
88344+ if (!may_expand_vm(mm, charged))
88345 return -ENOMEM;
88346
88347 if (mm->map_count > sysctl_max_map_count)
88348 return -ENOMEM;
88349
88350- if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
88351+ if (security_vm_enough_memory_mm(mm, charged))
88352 return -ENOMEM;
88353
88354 /* Can we just expand an old private anonymous mapping? */
88355@@ -2672,7 +3168,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
88356 */
88357 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
88358 if (!vma) {
88359- vm_unacct_memory(len >> PAGE_SHIFT);
88360+ vm_unacct_memory(charged);
88361 return -ENOMEM;
88362 }
88363
88364@@ -2686,9 +3182,10 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
88365 vma_link(mm, vma, prev, rb_link, rb_parent);
88366 out:
88367 perf_event_mmap(vma);
88368- mm->total_vm += len >> PAGE_SHIFT;
88369+ mm->total_vm += charged;
88370 if (flags & VM_LOCKED)
88371- mm->locked_vm += (len >> PAGE_SHIFT);
88372+ mm->locked_vm += charged;
88373+ track_exec_limit(mm, addr, addr + len, flags);
88374 return addr;
88375 }
88376
88377@@ -2750,6 +3247,7 @@ void exit_mmap(struct mm_struct *mm)
88378 while (vma) {
88379 if (vma->vm_flags & VM_ACCOUNT)
88380 nr_accounted += vma_pages(vma);
88381+ vma->vm_mirror = NULL;
88382 vma = remove_vma(vma);
88383 }
88384 vm_unacct_memory(nr_accounted);
88385@@ -2766,6 +3264,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
88386 struct vm_area_struct *prev;
88387 struct rb_node **rb_link, *rb_parent;
88388
88389+#ifdef CONFIG_PAX_SEGMEXEC
88390+ struct vm_area_struct *vma_m = NULL;
88391+#endif
88392+
88393+ if (security_mmap_addr(vma->vm_start))
88394+ return -EPERM;
88395+
88396 /*
88397 * The vm_pgoff of a purely anonymous vma should be irrelevant
88398 * until its first write fault, when page's anon_vma and index
88399@@ -2789,7 +3294,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
88400 security_vm_enough_memory_mm(mm, vma_pages(vma)))
88401 return -ENOMEM;
88402
88403+#ifdef CONFIG_PAX_SEGMEXEC
88404+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
88405+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
88406+ if (!vma_m)
88407+ return -ENOMEM;
88408+ }
88409+#endif
88410+
88411 vma_link(mm, vma, prev, rb_link, rb_parent);
88412+
88413+#ifdef CONFIG_PAX_SEGMEXEC
88414+ if (vma_m)
88415+ BUG_ON(pax_mirror_vma(vma_m, vma));
88416+#endif
88417+
88418 return 0;
88419 }
88420
88421@@ -2809,6 +3328,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
88422 struct mempolicy *pol;
88423 bool faulted_in_anon_vma = true;
88424
88425+ BUG_ON(vma->vm_mirror);
88426+
88427 /*
88428 * If anonymous vma has not yet been faulted, update new pgoff
88429 * to match new location, to increase its chance of merging.
88430@@ -2875,6 +3396,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
88431 return NULL;
88432 }
88433
88434+#ifdef CONFIG_PAX_SEGMEXEC
88435+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
88436+{
88437+ struct vm_area_struct *prev_m;
88438+ struct rb_node **rb_link_m, *rb_parent_m;
88439+ struct mempolicy *pol_m;
88440+
88441+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
88442+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
88443+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
88444+ *vma_m = *vma;
88445+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
88446+ if (anon_vma_clone(vma_m, vma))
88447+ return -ENOMEM;
88448+ pol_m = vma_policy(vma_m);
88449+ mpol_get(pol_m);
88450+ vma_set_policy(vma_m, pol_m);
88451+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
88452+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
88453+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
88454+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
88455+ if (vma_m->vm_file)
88456+ get_file(vma_m->vm_file);
88457+ if (vma_m->vm_ops && vma_m->vm_ops->open)
88458+ vma_m->vm_ops->open(vma_m);
88459+ BUG_ON(find_vma_links(vma->vm_mm, vma_m->vm_start, vma_m->vm_end, &prev_m, &rb_link_m, &rb_parent_m));
88460+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
88461+ vma_m->vm_mirror = vma;
88462+ vma->vm_mirror = vma_m;
88463+ return 0;
88464+}
88465+#endif
88466+
88467 /*
88468 * Return true if the calling process may expand its vm space by the passed
88469 * number of pages
88470@@ -2886,6 +3440,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
88471
88472 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
88473
88474+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
88475 if (cur + npages > lim)
88476 return 0;
88477 return 1;
88478@@ -2956,6 +3511,22 @@ int install_special_mapping(struct mm_struct *mm,
88479 vma->vm_start = addr;
88480 vma->vm_end = addr + len;
88481
88482+#ifdef CONFIG_PAX_MPROTECT
88483+ if (mm->pax_flags & MF_PAX_MPROTECT) {
88484+#ifndef CONFIG_PAX_MPROTECT_COMPAT
88485+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
88486+ return -EPERM;
88487+ if (!(vm_flags & VM_EXEC))
88488+ vm_flags &= ~VM_MAYEXEC;
88489+#else
88490+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
88491+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
88492+#endif
88493+ else
88494+ vm_flags &= ~VM_MAYWRITE;
88495+ }
88496+#endif
88497+
88498 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
88499 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
88500
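The MPROTECT block above enforces W^X on special mappings. In the default mode a request that is simultaneously writable and executable is rejected with -EPERM, a non-executable one loses VM_MAYEXEC, and an executable one loses VM_MAYWRITE; in MPROTECT_COMPAT mode nothing fails, but anything that is not exec-only is silently stripped of its exec bits. Both policies restated with illustrative flag values:

/* Sketch of the two W^X policies above; flag values are illustrative. */
#define F_WRITE    0x1UL
#define F_EXEC     0x2UL
#define F_MAYWRITE 0x4UL
#define F_MAYEXEC  0x8UL

/* default (strict) mode: refuse W+X, then drop the unusable "may" bit */
static int wx_strict(unsigned long *flags)
{
	if ((*flags & (F_WRITE | F_EXEC)) == (F_WRITE | F_EXEC))
		return -1;                   /* -EPERM in the patch */
	if (!(*flags & F_EXEC))
		*flags &= ~F_MAYEXEC;        /* may never become executable */
	else
		*flags &= ~F_MAYWRITE;       /* may never become writable */
	return 0;
}

/* MPROTECT_COMPAT mode: silently demote anything that is not exec-only */
static void wx_compat(unsigned long *flags)
{
	if ((*flags & (F_WRITE | F_EXEC)) != F_EXEC)
		*flags &= ~(F_EXEC | F_MAYEXEC);
	else
		*flags &= ~F_MAYWRITE;
}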
88501diff --git a/mm/mprotect.c b/mm/mprotect.c
88502index 94722a4..e661e29 100644
88503--- a/mm/mprotect.c
88504+++ b/mm/mprotect.c
88505@@ -23,10 +23,18 @@
88506 #include <linux/mmu_notifier.h>
88507 #include <linux/migrate.h>
88508 #include <linux/perf_event.h>
88509+#include <linux/sched/sysctl.h>
88510+
88511+#ifdef CONFIG_PAX_MPROTECT
88512+#include <linux/elf.h>
88513+#include <linux/binfmts.h>
88514+#endif
88515+
88516 #include <asm/uaccess.h>
88517 #include <asm/pgtable.h>
88518 #include <asm/cacheflush.h>
88519 #include <asm/tlbflush.h>
88520+#include <asm/mmu_context.h>
88521
88522 #ifndef pgprot_modify
88523 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
88524@@ -233,6 +241,48 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
88525 return pages;
88526 }
88527
88528+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
88529+/* called while holding the mmap semaphore for writing, except for stack expansion */
88530+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
88531+{
88532+ unsigned long oldlimit, newlimit = 0UL;
88533+
88534+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
88535+ return;
88536+
88537+ spin_lock(&mm->page_table_lock);
88538+ oldlimit = mm->context.user_cs_limit;
88539+ if ((prot & VM_EXEC) && oldlimit < end)
88540+ /* USER_CS limit moved up */
88541+ newlimit = end;
88542+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
88543+ /* USER_CS limit moved down */
88544+ newlimit = start;
88545+
88546+ if (newlimit) {
88547+ mm->context.user_cs_limit = newlimit;
88548+
88549+#ifdef CONFIG_SMP
88550+ wmb();
88551+ cpus_clear(mm->context.cpu_user_cs_mask);
88552+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
88553+#endif
88554+
88555+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
88556+ }
88557+ spin_unlock(&mm->page_table_lock);
88558+ if (newlimit == end) {
88559+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
88560+
88561+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
88562+ if (is_vm_hugetlb_page(vma))
88563+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
88564+ else
88565+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma), 0);
88566+ }
88567+}
88568+#endif
88569+
88570 int
88571 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
88572 unsigned long start, unsigned long end, unsigned long newflags)
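track_exec_limit() keeps mm->context.user_cs_limit at the end of the highest executable mapping so the USER_CS segment limit can emulate NX on processors without the hardware bit; the limit moves up when an executable range ends beyond it and down when the range containing it loses exec. The update rule restated as a pure function, assumptions noted in the comment:

/* Restatement of the limit-update rule above as a pure function;
 * in the patch this runs under mm->page_table_lock, and a raised
 * limit also triggers reprotection of the newly covered range. */
static unsigned long new_cs_limit(unsigned long oldlimit,
				  unsigned long start, unsigned long end,
				  int exec)
{
	if (exec && oldlimit < end)
		return end;     /* exec mapping grew past the limit: raise it */
	if (!exec && start < oldlimit && oldlimit <= end)
		return start;   /* range holding the limit lost exec: lower it */
	return 0;               /* 0 means: leave the limit untouched */
}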
88573@@ -245,11 +295,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
88574 int error;
88575 int dirty_accountable = 0;
88576
88577+#ifdef CONFIG_PAX_SEGMEXEC
88578+ struct vm_area_struct *vma_m = NULL;
88579+ unsigned long start_m, end_m;
88580+
88581+ start_m = start + SEGMEXEC_TASK_SIZE;
88582+ end_m = end + SEGMEXEC_TASK_SIZE;
88583+#endif
88584+
88585 if (newflags == oldflags) {
88586 *pprev = vma;
88587 return 0;
88588 }
88589
88590+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
88591+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
88592+
88593+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
88594+ return -ENOMEM;
88595+
88596+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
88597+ return -ENOMEM;
88598+ }
88599+
88600 /*
88601 * If we make a private mapping writable we increase our commit;
88602 * but (without finer accounting) cannot reduce our commit if we
88603@@ -266,6 +334,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
88604 }
88605 }
88606
88607+#ifdef CONFIG_PAX_SEGMEXEC
88608+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
88609+ if (start != vma->vm_start) {
88610+ error = split_vma(mm, vma, start, 1);
88611+ if (error)
88612+ goto fail;
88613+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
88614+ *pprev = (*pprev)->vm_next;
88615+ }
88616+
88617+ if (end != vma->vm_end) {
88618+ error = split_vma(mm, vma, end, 0);
88619+ if (error)
88620+ goto fail;
88621+ }
88622+
88623+ if (pax_find_mirror_vma(vma)) {
88624+ error = __do_munmap(mm, start_m, end_m - start_m);
88625+ if (error)
88626+ goto fail;
88627+ } else {
88628+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
88629+ if (!vma_m) {
88630+ error = -ENOMEM;
88631+ goto fail;
88632+ }
88633+ vma->vm_flags = newflags;
88634+ error = pax_mirror_vma(vma_m, vma);
88635+ if (error) {
88636+ vma->vm_flags = oldflags;
88637+ goto fail;
88638+ }
88639+ }
88640+ }
88641+#endif
88642+
88643 /*
88644 * First try to merge with previous and/or next vma.
88645 */
88646@@ -296,9 +400,21 @@ success:
88647 * vm_flags and vm_page_prot are protected by the mmap_sem
88648 * held in write mode.
88649 */
88650+
88651+#ifdef CONFIG_PAX_SEGMEXEC
88652+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
88653+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
88654+#endif
88655+
88656 vma->vm_flags = newflags;
88657+
88658+#ifdef CONFIG_PAX_MPROTECT
88659+ if (mm->binfmt && mm->binfmt->handle_mprotect)
88660+ mm->binfmt->handle_mprotect(vma, newflags);
88661+#endif
88662+
88663 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
88664- vm_get_page_prot(newflags));
88665+ vm_get_page_prot(vma->vm_flags));
88666
88667 if (vma_wants_writenotify(vma)) {
88668 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
88669@@ -337,6 +453,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
88670 end = start + len;
88671 if (end <= start)
88672 return -ENOMEM;
88673+
88674+#ifdef CONFIG_PAX_SEGMEXEC
88675+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
88676+ if (end > SEGMEXEC_TASK_SIZE)
88677+ return -EINVAL;
88678+ } else
88679+#endif
88680+
88681+ if (end > TASK_SIZE)
88682+ return -EINVAL;
88683+
88684 if (!arch_validate_prot(prot))
88685 return -EINVAL;
88686
88687@@ -344,7 +471,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
88688 /*
88689 * Does the application expect PROT_READ to imply PROT_EXEC:
88690 */
88691- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
88692+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
88693 prot |= PROT_EXEC;
88694
88695 vm_flags = calc_vm_prot_bits(prot);
88696@@ -376,6 +503,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
88697 if (start > vma->vm_start)
88698 prev = vma;
88699
88700+#ifdef CONFIG_PAX_MPROTECT
88701+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
88702+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
88703+#endif
88704+
88705 for (nstart = start ; ; ) {
88706 unsigned long newflags;
88707
88708@@ -386,6 +518,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
88709
88710 /* newflags >> 4 shift VM_MAY% in place of VM_% */
88711 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
88712+ if (prot & (PROT_WRITE | PROT_EXEC))
88713+ gr_log_rwxmprotect(vma);
88714+
88715+ error = -EACCES;
88716+ goto out;
88717+ }
88718+
88719+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
88720 error = -EACCES;
88721 goto out;
88722 }
88723@@ -400,6 +540,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
88724 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
88725 if (error)
88726 goto out;
88727+
88728+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
88729+
88730 nstart = tmp;
88731
88732 if (nstart < prev->vm_end)
88733diff --git a/mm/mremap.c b/mm/mremap.c
88734index 463a257..c0c7a92 100644
88735--- a/mm/mremap.c
88736+++ b/mm/mremap.c
88737@@ -126,6 +126,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
88738 continue;
88739 pte = ptep_get_and_clear(mm, old_addr, old_pte);
88740 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
88741+
88742+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
88743+ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
88744+ pte = pte_exprotect(pte);
88745+#endif
88746+
88747 set_pte_at(mm, new_addr, new_pte, pte);
88748 }
88749
88750@@ -318,6 +324,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
88751 if (is_vm_hugetlb_page(vma))
88752 goto Einval;
88753
88754+#ifdef CONFIG_PAX_SEGMEXEC
88755+ if (pax_find_mirror_vma(vma))
88756+ goto Einval;
88757+#endif
88758+
88759 /* We can't remap across vm area boundaries */
88760 if (old_len > vma->vm_end - addr)
88761 goto Efault;
88762@@ -373,20 +384,25 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
88763 unsigned long ret = -EINVAL;
88764 unsigned long charged = 0;
88765 unsigned long map_flags;
88766+ unsigned long pax_task_size = TASK_SIZE;
88767
88768 if (new_addr & ~PAGE_MASK)
88769 goto out;
88770
88771- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
88772+#ifdef CONFIG_PAX_SEGMEXEC
88773+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
88774+ pax_task_size = SEGMEXEC_TASK_SIZE;
88775+#endif
88776+
88777+ pax_task_size -= PAGE_SIZE;
88778+
88779+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
88780 goto out;
88781
88782 /* Check if the location we're moving into overlaps the
88783 * old location at all, and fail if it does.
88784 */
88785- if ((new_addr <= addr) && (new_addr+new_len) > addr)
88786- goto out;
88787-
88788- if ((addr <= new_addr) && (addr+old_len) > new_addr)
88789+ if (addr + old_len > new_addr && new_addr + new_len > addr)
88790 goto out;
88791
88792 ret = do_munmap(mm, new_addr, new_len);
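The rewritten overlap test folds the two one-sided comparisons into the standard interval-intersection predicate: [a, a+la) and [b, b+lb) overlap exactly when a + la > b and b + lb > a. A tiny self-check of that predicate:

/* Quick sanity check of the folded overlap predicate. */
#include <assert.h>

static int ranges_overlap(unsigned long a, unsigned long la,
			  unsigned long b, unsigned long lb)
{
	return a + la > b && b + lb > a;
}

int main(void)
{
	assert( ranges_overlap(0x1000, 0x2000, 0x2000, 0x1000));  /* crosses into b */
	assert(!ranges_overlap(0x1000, 0x1000, 0x2000, 0x1000));  /* merely adjacent */
	assert( ranges_overlap(0x1000, 0x4000, 0x2000, 0x1000));  /* b inside a */
	return 0;
}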
88793@@ -455,6 +471,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
88794 unsigned long ret = -EINVAL;
88795 unsigned long charged = 0;
88796 bool locked = false;
88797+ unsigned long pax_task_size = TASK_SIZE;
88798
88799 down_write(&current->mm->mmap_sem);
88800
88801@@ -475,6 +492,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
88802 if (!new_len)
88803 goto out;
88804
88805+#ifdef CONFIG_PAX_SEGMEXEC
88806+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
88807+ pax_task_size = SEGMEXEC_TASK_SIZE;
88808+#endif
88809+
88810+ pax_task_size -= PAGE_SIZE;
88811+
88812+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
88813+ old_len > pax_task_size || addr > pax_task_size-old_len)
88814+ goto out;
88815+
88816 if (flags & MREMAP_FIXED) {
88817 if (flags & MREMAP_MAYMOVE)
88818 ret = mremap_to(addr, old_len, new_addr, new_len,
88819@@ -524,6 +552,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
88820 new_addr = addr;
88821 }
88822 ret = addr;
88823+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
88824 goto out;
88825 }
88826 }
88827@@ -547,7 +576,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
88828 goto out;
88829 }
88830
88831+ map_flags = vma->vm_flags;
88832 ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
88833+ if (!(ret & ~PAGE_MASK)) {
88834+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
88835+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
88836+ }
88837 }
88838 out:
88839 if (ret & ~PAGE_MASK)
88840diff --git a/mm/nommu.c b/mm/nommu.c
88841index 298884d..5f74980 100644
88842--- a/mm/nommu.c
88843+++ b/mm/nommu.c
88844@@ -65,7 +65,6 @@ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
88845 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
88846 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
88847 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
88848-int heap_stack_gap = 0;
88849
88850 atomic_long_t mmap_pages_allocated;
88851
88852@@ -842,15 +841,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
88853 EXPORT_SYMBOL(find_vma);
88854
88855 /*
88856- * find a VMA
88857- * - we don't extend stack VMAs under NOMMU conditions
88858- */
88859-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
88860-{
88861- return find_vma(mm, addr);
88862-}
88863-
88864-/*
88865 * expand a stack to a given address
88866 * - not supported under NOMMU conditions
88867 */
88868@@ -1561,6 +1551,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
88869
88870 /* most fields are the same, copy all, and then fixup */
88871 *new = *vma;
88872+ INIT_LIST_HEAD(&new->anon_vma_chain);
88873 *region = *vma->vm_region;
88874 new->vm_region = region;
88875
88876@@ -1995,8 +1986,8 @@ int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
88877 }
88878 EXPORT_SYMBOL(generic_file_remap_pages);
88879
88880-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
88881- unsigned long addr, void *buf, int len, int write)
88882+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
88883+ unsigned long addr, void *buf, size_t len, int write)
88884 {
88885 struct vm_area_struct *vma;
88886
88887@@ -2037,8 +2028,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
88888 *
88889 * The caller must hold a reference on @mm.
88890 */
88891-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
88892- void *buf, int len, int write)
88893+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
88894+ void *buf, size_t len, int write)
88895 {
88896 return __access_remote_vm(NULL, mm, addr, buf, len, write);
88897 }
88898@@ -2047,7 +2038,7 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
88899 * Access another process' address space.
88900 * - source/target buffer must be kernel space
88901 */
88902-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
88903+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write)
88904 {
88905 struct mm_struct *mm;
88906
88907diff --git a/mm/page-writeback.c b/mm/page-writeback.c
88908index 4514ad7..92eaa1c 100644
88909--- a/mm/page-writeback.c
88910+++ b/mm/page-writeback.c
88911@@ -659,7 +659,7 @@ unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty)
88912 * card's bdi_dirty may rush to many times higher than bdi_setpoint.
88913 * - the bdi dirty thresh drops quickly due to change of JBOD workload
88914 */
88915-static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
88916+static unsigned long __intentional_overflow(-1) bdi_position_ratio(struct backing_dev_info *bdi,
88917 unsigned long thresh,
88918 unsigned long bg_thresh,
88919 unsigned long dirty,
88920@@ -1634,7 +1634,7 @@ ratelimit_handler(struct notifier_block *self, unsigned long action,
88921 }
88922 }
88923
88924-static struct notifier_block __cpuinitdata ratelimit_nb = {
88925+static struct notifier_block ratelimit_nb = {
88926 .notifier_call = ratelimit_handler,
88927 .next = NULL,
88928 };
88929diff --git a/mm/page_alloc.c b/mm/page_alloc.c
88930index 2ee0fd3..6e2edfb 100644
88931--- a/mm/page_alloc.c
88932+++ b/mm/page_alloc.c
88933@@ -60,6 +60,7 @@
88934 #include <linux/page-debug-flags.h>
88935 #include <linux/hugetlb.h>
88936 #include <linux/sched/rt.h>
88937+#include <linux/random.h>
88938
88939 #include <asm/tlbflush.h>
88940 #include <asm/div64.h>
88941@@ -345,7 +346,7 @@ out:
88942 * This usage means that zero-order pages may not be compound.
88943 */
88944
88945-static void free_compound_page(struct page *page)
88946+void free_compound_page(struct page *page)
88947 {
88948 __free_pages_ok(page, compound_order(page));
88949 }
88950@@ -702,6 +703,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
88951 int i;
88952 int bad = 0;
88953
88954+#ifdef CONFIG_PAX_MEMORY_SANITIZE
88955+ unsigned long index = 1UL << order;
88956+#endif
88957+
88958 trace_mm_page_free(page, order);
88959 kmemcheck_free_shadow(page, order);
88960
88961@@ -717,6 +722,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
88962 debug_check_no_obj_freed(page_address(page),
88963 PAGE_SIZE << order);
88964 }
88965+
88966+#ifdef CONFIG_PAX_MEMORY_SANITIZE
88967+ for (; index; --index)
88968+ sanitize_highpage(page + index - 1);
88969+#endif
88970+
88971 arch_free_page(page, order);
88972 kernel_map_pages(page, 1 << order, 0);
88973
88974@@ -739,6 +750,19 @@ static void __free_pages_ok(struct page *page, unsigned int order)
88975 local_irq_restore(flags);
88976 }
88977
88978+#ifdef CONFIG_PAX_LATENT_ENTROPY
88979+bool __meminitdata extra_latent_entropy;
88980+
88981+static int __init setup_pax_extra_latent_entropy(char *str)
88982+{
88983+ extra_latent_entropy = true;
88984+ return 0;
88985+}
88986+early_param("pax_extra_latent_entropy", setup_pax_extra_latent_entropy);
88987+
88988+volatile u64 latent_entropy;
88989+#endif
88990+
88991 /*
88992 * Read access to zone->managed_pages is safe because it's unsigned long,
88993 * but we still need to serialize writers. Currently all callers of
88994@@ -761,6 +785,19 @@ void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
88995 set_page_count(p, 0);
88996 }
88997
88998+#ifdef CONFIG_PAX_LATENT_ENTROPY
88999+ if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) {
89000+ u64 hash = 0;
89001+ size_t index, end = PAGE_SIZE * nr_pages / sizeof hash;
89002+ const u64 *data = lowmem_page_address(page);
89003+
89004+ for (index = 0; index < end; index++)
89005+ hash ^= hash + data[index];
89006+ latent_entropy ^= hash;
89007+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
89008+ }
89009+#endif
89010+
89011 page_zone(page)->managed_pages += 1 << order;
89012 set_page_refcounted(page);
89013 __free_pages(page, order);
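With pax_extra_latent_entropy on the command line, the contents of low boot-time pages are folded into a running hash and credited to the entropy pool via add_device_randomness(). The fold is deliberately cheap; a standalone restatement:

/* Standalone restatement of the fold above: XOR-accumulate the
 * 64-bit words of a page into a running hash. `data` stands in for
 * lowmem_page_address(page); the mix matches the patch line for line. */
#include <stdint.h>
#include <stddef.h>

static uint64_t fold_page(const uint64_t *data, size_t nwords, uint64_t seed)
{
	uint64_t hash = 0;
	size_t i;

	for (i = 0; i < nwords; i++)
		hash ^= hash + data[i];
	return seed ^ hash;          /* the new latent_entropy value */
}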
89014@@ -870,8 +907,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
89015 arch_alloc_page(page, order);
89016 kernel_map_pages(page, 1 << order, 1);
89017
89018+#ifndef CONFIG_PAX_MEMORY_SANITIZE
89019 if (gfp_flags & __GFP_ZERO)
89020 prep_zero_page(page, order, gfp_flags);
89021+#endif
89022
89023 if (order && (gfp_flags & __GFP_COMP))
89024 prep_compound_page(page, order);
89025diff --git a/mm/page_io.c b/mm/page_io.c
89026index a8a3ef4..7260a60 100644
89027--- a/mm/page_io.c
89028+++ b/mm/page_io.c
89029@@ -214,7 +214,7 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
89030 struct file *swap_file = sis->swap_file;
89031 struct address_space *mapping = swap_file->f_mapping;
89032 struct iovec iov = {
89033- .iov_base = kmap(page),
89034+ .iov_base = (void __force_user *)kmap(page),
89035 .iov_len = PAGE_SIZE,
89036 };
89037
89038diff --git a/mm/percpu.c b/mm/percpu.c
89039index 8c8e08f..73a5cda 100644
89040--- a/mm/percpu.c
89041+++ b/mm/percpu.c
89042@@ -122,7 +122,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
89043 static unsigned int pcpu_high_unit_cpu __read_mostly;
89044
89045 /* the address of the first chunk which starts with the kernel static area */
89046-void *pcpu_base_addr __read_mostly;
89047+void *pcpu_base_addr __read_only;
89048 EXPORT_SYMBOL_GPL(pcpu_base_addr);
89049
89050 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
89051diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
89052index fd26d04..0cea1b0 100644
89053--- a/mm/process_vm_access.c
89054+++ b/mm/process_vm_access.c
89055@@ -13,6 +13,7 @@
89056 #include <linux/uio.h>
89057 #include <linux/sched.h>
89058 #include <linux/highmem.h>
89059+#include <linux/security.h>
89060 #include <linux/ptrace.h>
89061 #include <linux/slab.h>
89062 #include <linux/syscalls.h>
89063@@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
89064 size_t iov_l_curr_offset = 0;
89065 ssize_t iov_len;
89066
89067+ return -ENOSYS; /* PaX: until properly audited */
89068+
89069 /*
89070 * Work out how many pages of struct pages we're going to need
89071 * when eventually calling get_user_pages
89072 */
89073 for (i = 0; i < riovcnt; i++) {
89074 iov_len = rvec[i].iov_len;
89075- if (iov_len > 0) {
89076- nr_pages_iov = ((unsigned long)rvec[i].iov_base
89077- + iov_len)
89078- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
89079- / PAGE_SIZE + 1;
89080- nr_pages = max(nr_pages, nr_pages_iov);
89081- }
89082+ if (iov_len <= 0)
89083+ continue;
89084+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
89085+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
89086+ nr_pages = max(nr_pages, nr_pages_iov);
89087 }
89088
89089 if (nr_pages == 0)
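The reflowed loop computes, per iovec, how many page-struct slots a later get_user_pages() call could need: last page index minus first page index plus one. A worked example, assuming 4 KiB pages:

/* Worked example of the page-count formula above. Note it is a
 * worst-case bound: when base + len is exactly page-aligned it
 * counts one page too many, which is harmless for sizing the array. */
#include <assert.h>

#define PAGE_SZ 4096UL

static unsigned long pages_spanned(unsigned long base, unsigned long len)
{
	return (base + len) / PAGE_SZ - base / PAGE_SZ + 1;
}

int main(void)
{
	assert(pages_spanned(0x1000, 1) == 1);       /* single byte, one page */
	assert(pages_spanned(0x1fff, 2) == 2);       /* straddles a boundary */
	assert(pages_spanned(0x1000, 0x2000) == 3);  /* aligned end: bound of 3, 2 touched */
	return 0;
}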
89090@@ -298,6 +299,11 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
89091 goto free_proc_pages;
89092 }
89093
89094+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
89095+ rc = -EPERM;
89096+ goto put_task_struct;
89097+ }
89098+
89099 mm = mm_access(task, PTRACE_MODE_ATTACH);
89100 if (!mm || IS_ERR(mm)) {
89101 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
89102diff --git a/mm/rmap.c b/mm/rmap.c
89103index 6280da8..b5c090e 100644
89104--- a/mm/rmap.c
89105+++ b/mm/rmap.c
89106@@ -163,6 +163,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
89107 struct anon_vma *anon_vma = vma->anon_vma;
89108 struct anon_vma_chain *avc;
89109
89110+#ifdef CONFIG_PAX_SEGMEXEC
89111+ struct anon_vma_chain *avc_m = NULL;
89112+#endif
89113+
89114 might_sleep();
89115 if (unlikely(!anon_vma)) {
89116 struct mm_struct *mm = vma->vm_mm;
89117@@ -172,6 +176,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
89118 if (!avc)
89119 goto out_enomem;
89120
89121+#ifdef CONFIG_PAX_SEGMEXEC
89122+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
89123+ if (!avc_m)
89124+ goto out_enomem_free_avc;
89125+#endif
89126+
89127 anon_vma = find_mergeable_anon_vma(vma);
89128 allocated = NULL;
89129 if (!anon_vma) {
89130@@ -185,6 +195,18 @@ int anon_vma_prepare(struct vm_area_struct *vma)
89131 /* page_table_lock to protect against threads */
89132 spin_lock(&mm->page_table_lock);
89133 if (likely(!vma->anon_vma)) {
89134+
89135+#ifdef CONFIG_PAX_SEGMEXEC
89136+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
89137+
89138+ if (vma_m) {
89139+ BUG_ON(vma_m->anon_vma);
89140+ vma_m->anon_vma = anon_vma;
89141+ anon_vma_chain_link(vma_m, avc_m, anon_vma);
89142+ avc_m = NULL;
89143+ }
89144+#endif
89145+
89146 vma->anon_vma = anon_vma;
89147 anon_vma_chain_link(vma, avc, anon_vma);
89148 allocated = NULL;
89149@@ -195,12 +217,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
89150
89151 if (unlikely(allocated))
89152 put_anon_vma(allocated);
89153+
89154+#ifdef CONFIG_PAX_SEGMEXEC
89155+ if (unlikely(avc_m))
89156+ anon_vma_chain_free(avc_m);
89157+#endif
89158+
89159 if (unlikely(avc))
89160 anon_vma_chain_free(avc);
89161 }
89162 return 0;
89163
89164 out_enomem_free_avc:
89165+
89166+#ifdef CONFIG_PAX_SEGMEXEC
89167+ if (avc_m)
89168+ anon_vma_chain_free(avc_m);
89169+#endif
89170+
89171 anon_vma_chain_free(avc);
89172 out_enomem:
89173 return -ENOMEM;
89174@@ -236,7 +270,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
89175 * Attach the anon_vmas from src to dst.
89176 * Returns 0 on success, -ENOMEM on failure.
89177 */
89178-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
89179+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
89180 {
89181 struct anon_vma_chain *avc, *pavc;
89182 struct anon_vma *root = NULL;
89183@@ -269,7 +303,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
89184 * the corresponding VMA in the parent process is attached to.
89185 * Returns 0 on success, non-zero on failure.
89186 */
89187-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
89188+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
89189 {
89190 struct anon_vma_chain *avc;
89191 struct anon_vma *anon_vma;
89192@@ -373,8 +407,10 @@ static void anon_vma_ctor(void *data)
89193 void __init anon_vma_init(void)
89194 {
89195 anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
89196- 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
89197- anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
89198+ 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_NO_SANITIZE,
89199+ anon_vma_ctor);
89200+ anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
89201+ SLAB_PANIC|SLAB_NO_SANITIZE);
89202 }
89203
89204 /*
89205diff --git a/mm/shmem.c b/mm/shmem.c
89206index 5e6a842..b41916e 100644
89207--- a/mm/shmem.c
89208+++ b/mm/shmem.c
89209@@ -33,7 +33,7 @@
89210 #include <linux/swap.h>
89211 #include <linux/aio.h>
89212
89213-static struct vfsmount *shm_mnt;
89214+struct vfsmount *shm_mnt;
89215
89216 #ifdef CONFIG_SHMEM
89217 /*
89218@@ -77,7 +77,7 @@ static struct vfsmount *shm_mnt;
89219 #define BOGO_DIRENT_SIZE 20
89220
89221 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
89222-#define SHORT_SYMLINK_LEN 128
89223+#define SHORT_SYMLINK_LEN 64
89224
89225 /*
89226 * shmem_fallocate and shmem_writepage communicate via inode->i_private
89227@@ -2203,6 +2203,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
89228 static int shmem_xattr_validate(const char *name)
89229 {
89230 struct { const char *prefix; size_t len; } arr[] = {
89231+
89232+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
89233+ { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
89234+#endif
89235+
89236 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
89237 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
89238 };
89239@@ -2258,6 +2263,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
89240 if (err)
89241 return err;
89242
89243+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
89244+ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
89245+ if (strcmp(name, XATTR_NAME_PAX_FLAGS))
89246+ return -EOPNOTSUPP;
89247+ if (size > 8)
89248+ return -EINVAL;
89249+ }
89250+#endif
89251+
89252 return simple_xattr_set(&info->xattrs, name, value, size, flags);
89253 }
89254
89255@@ -2570,8 +2584,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
89256 int err = -ENOMEM;
89257
89258 /* Round up to L1_CACHE_BYTES to resist false sharing */
89259- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
89260- L1_CACHE_BYTES), GFP_KERNEL);
89261+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
89262 if (!sbinfo)
89263 return -ENOMEM;
89264
89265diff --git a/mm/slab.c b/mm/slab.c
89266index bd88411..2d46fd6 100644
89267--- a/mm/slab.c
89268+++ b/mm/slab.c
89269@@ -366,10 +366,12 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
89270 if ((x)->max_freeable < i) \
89271 (x)->max_freeable = i; \
89272 } while (0)
89273-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
89274-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
89275-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
89276-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
89277+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
89278+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
89279+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
89280+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
89281+#define STATS_INC_SANITIZED(x) atomic_inc_unchecked(&(x)->sanitized)
89282+#define STATS_INC_NOT_SANITIZED(x) atomic_inc_unchecked(&(x)->not_sanitized)
89283 #else
89284 #define STATS_INC_ACTIVE(x) do { } while (0)
89285 #define STATS_DEC_ACTIVE(x) do { } while (0)
89286@@ -386,6 +388,8 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
89287 #define STATS_INC_ALLOCMISS(x) do { } while (0)
89288 #define STATS_INC_FREEHIT(x) do { } while (0)
89289 #define STATS_INC_FREEMISS(x) do { } while (0)
89290+#define STATS_INC_SANITIZED(x) do { } while (0)
89291+#define STATS_INC_NOT_SANITIZED(x) do { } while (0)
89292 #endif
89293
89294 #if DEBUG
89295@@ -477,7 +481,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
89296 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
89297 */
89298 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
89299- const struct slab *slab, void *obj)
89300+ const struct slab *slab, const void *obj)
89301 {
89302 u32 offset = (obj - slab->s_mem);
89303 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
89304@@ -1384,7 +1388,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
89305 return notifier_from_errno(err);
89306 }
89307
89308-static struct notifier_block __cpuinitdata cpucache_notifier = {
89309+static struct notifier_block cpucache_notifier = {
89310 &cpuup_callback, NULL, 0
89311 };
89312
89313@@ -1565,12 +1569,12 @@ void __init kmem_cache_init(void)
89314 */
89315
89316 kmalloc_caches[INDEX_AC] = create_kmalloc_cache("kmalloc-ac",
89317- kmalloc_size(INDEX_AC), ARCH_KMALLOC_FLAGS);
89318+ kmalloc_size(INDEX_AC), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
89319
89320 if (INDEX_AC != INDEX_NODE)
89321 kmalloc_caches[INDEX_NODE] =
89322 create_kmalloc_cache("kmalloc-node",
89323- kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
89324+ kmalloc_size(INDEX_NODE), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
89325
89326 slab_early_init = 0;
89327
89328@@ -3583,6 +3587,21 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
89329 struct array_cache *ac = cpu_cache_get(cachep);
89330
89331 check_irq_off();
89332+
89333+#ifdef CONFIG_PAX_MEMORY_SANITIZE
89334+ if (pax_sanitize_slab) {
89335+ if (!(cachep->flags & (SLAB_POISON | SLAB_NO_SANITIZE))) {
89336+ memset(objp, PAX_MEMORY_SANITIZE_VALUE, cachep->object_size);
89337+
89338+ if (cachep->ctor)
89339+ cachep->ctor(objp);
89340+
89341+ STATS_INC_SANITIZED(cachep);
89342+ } else
89343+ STATS_INC_NOT_SANITIZED(cachep);
89344+ }
89345+#endif
89346+
89347 kmemleak_free_recursive(objp, cachep->flags);
89348 objp = cache_free_debugcheck(cachep, objp, caller);
89349
89350@@ -3800,6 +3819,7 @@ void kfree(const void *objp)
89351
89352 if (unlikely(ZERO_OR_NULL_PTR(objp)))
89353 return;
89354+ VM_BUG_ON(!virt_addr_valid(objp));
89355 local_irq_save(flags);
89356 kfree_debugcheck(objp);
89357 c = virt_to_cache(objp);
89358@@ -4241,14 +4261,22 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
89359 }
89360 /* cpu stats */
89361 {
89362- unsigned long allochit = atomic_read(&cachep->allochit);
89363- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
89364- unsigned long freehit = atomic_read(&cachep->freehit);
89365- unsigned long freemiss = atomic_read(&cachep->freemiss);
89366+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
89367+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
89368+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
89369+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
89370
89371 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
89372 allochit, allocmiss, freehit, freemiss);
89373 }
89374+#ifdef CONFIG_PAX_MEMORY_SANITIZE
89375+ {
89376+ unsigned long sanitized = atomic_read_unchecked(&cachep->sanitized);
89377+ unsigned long not_sanitized = atomic_read_unchecked(&cachep->not_sanitized);
89378+
89379+ seq_printf(m, " : pax %6lu %6lu", sanitized, not_sanitized);
89380+ }
89381+#endif
89382 #endif
89383 }
89384
89385@@ -4476,13 +4504,71 @@ static const struct file_operations proc_slabstats_operations = {
89386 static int __init slab_proc_init(void)
89387 {
89388 #ifdef CONFIG_DEBUG_SLAB_LEAK
89389- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
89390+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
89391 #endif
89392 return 0;
89393 }
89394 module_init(slab_proc_init);
89395 #endif
89396
89397+bool is_usercopy_object(const void *ptr)
89398+{
89399+ struct page *page;
89400+ struct kmem_cache *cachep;
89401+
89402+ if (ZERO_OR_NULL_PTR(ptr))
89403+ return false;
89404+
89405+ if (!slab_is_available())
89406+ return false;
89407+
89408+ if (!virt_addr_valid(ptr))
89409+ return false;
89410+
89411+ page = virt_to_head_page(ptr);
89412+
89413+ if (!PageSlab(page))
89414+ return false;
89415+
89416+ cachep = page->slab_cache;
89417+ return cachep->flags & SLAB_USERCOPY;
89418+}
89419+
89420+#ifdef CONFIG_PAX_USERCOPY
89421+const char *check_heap_object(const void *ptr, unsigned long n)
89422+{
89423+ struct page *page;
89424+ struct kmem_cache *cachep;
89425+ struct slab *slabp;
89426+ unsigned int objnr;
89427+ unsigned long offset;
89428+
89429+ if (ZERO_OR_NULL_PTR(ptr))
89430+ return "<null>";
89431+
89432+ if (!virt_addr_valid(ptr))
89433+ return NULL;
89434+
89435+ page = virt_to_head_page(ptr);
89436+
89437+ if (!PageSlab(page))
89438+ return NULL;
89439+
89440+ cachep = page->slab_cache;
89441+ if (!(cachep->flags & SLAB_USERCOPY))
89442+ return cachep->name;
89443+
89444+ slabp = page->slab_page;
89445+ objnr = obj_to_index(cachep, slabp, ptr);
89446+ BUG_ON(objnr >= cachep->num);
89447+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
89448+ if (offset <= cachep->object_size && n <= cachep->object_size - offset)
89449+ return NULL;
89450+
89451+ return cachep->name;
89452+}
89453+#endif
89454+
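Taken together, the two helpers added above are the SLAB side of PaX USERCOPY: a copy to or from userland is only permitted when the pointer lies in a cache whitelisted with SLAB_USERCOPY and the requested length stays inside a single object (SLAB locates the object via obj_to_index()/index_to_obj(); the SLUB variant later in this patch reduces to a modulo). A minimal userspace sketch of that bounds test, assuming an illustrative back-to-back object layout — names here are not kernel API:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct fake_cache {
	size_t object_size;	/* usable payload bytes per object */
};

/* true when [ptr, ptr + n) stays inside the single object containing ptr */
static bool copy_within_object(const struct fake_cache *c,
			       const char *base, const char *ptr, size_t n)
{
	size_t offset = (size_t)(ptr - base) % c->object_size;

	return offset <= c->object_size && n <= c->object_size - offset;
}

int main(void)
{
	struct fake_cache c = { .object_size = 64 };
	char slab[256];

	printf("%d\n", copy_within_object(&c, slab, slab + 10, 32)); /* 1: fits  */
	printf("%d\n", copy_within_object(&c, slab, slab + 60, 32)); /* 0: spans */
	return 0;
}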
89455 /**
89456 * ksize - get the actual amount of memory allocated for a given object
89457 * @objp: Pointer to the object
89458diff --git a/mm/slab.h b/mm/slab.h
89459index f96b49e..db1d204 100644
89460--- a/mm/slab.h
89461+++ b/mm/slab.h
89462@@ -32,6 +32,15 @@ extern struct list_head slab_caches;
89463 /* The slab cache that manages slab cache information */
89464 extern struct kmem_cache *kmem_cache;
89465
89466+#ifdef CONFIG_PAX_MEMORY_SANITIZE
89467+#ifdef CONFIG_X86_64
89468+#define PAX_MEMORY_SANITIZE_VALUE '\xfe'
89469+#else
89470+#define PAX_MEMORY_SANITIZE_VALUE '\xff'
89471+#endif
89472+extern bool pax_sanitize_slab;
89473+#endif
89474+
89475 unsigned long calculate_alignment(unsigned long flags,
89476 unsigned long align, unsigned long size);
89477
89478@@ -67,7 +76,8 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
89479
89480 /* Legal flag mask for kmem_cache_create(), for various configurations */
89481 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
89482- SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
89483+ SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS | \
89484+ SLAB_USERCOPY | SLAB_NO_SANITIZE)
89485
89486 #if defined(CONFIG_DEBUG_SLAB)
89487 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
89488@@ -229,6 +239,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
89489 return s;
89490
89491 page = virt_to_head_page(x);
89492+
89493+ BUG_ON(!PageSlab(page));
89494+
89495 cachep = page->slab_cache;
89496 if (slab_equal_or_root(cachep, s))
89497 return cachep;
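PAX_MEMORY_SANITIZE_VALUE defined in the slab.h hunk above is the byte written over freed objects; the distinct 0xfe fill on x86_64 is presumably chosen so that a pointer-sized read of the pattern yields a non-canonical address and faults on dereference (an inference, not stated in the patch). The __cache_free() hunk earlier shows the sequence: wipe the payload, then re-run the constructor so constructed caches keep their invariants. A standalone model of that sequence, with a hypothetical object type:

#include <stdio.h>
#include <string.h>

#define PAX_MEMORY_SANITIZE_VALUE '\xfe'	/* x86_64 value from the hunk */

struct obj {
	int refs;
	char buf[28];
};

static void obj_ctor(void *p)		/* re-establish invariants after the wipe */
{
	((struct obj *)p)->refs = 0;
}

static void sanitizing_free(struct obj *o)
{
	/* wipe the payload so stale data cannot leak through a use-after-free */
	memset(o, PAX_MEMORY_SANITIZE_VALUE, sizeof(*o));
	obj_ctor(o);			/* constructed caches must stay constructed */
}

int main(void)
{
	struct obj o = { .refs = 3, .buf = "secret" };

	sanitizing_free(&o);
	printf("refs=%d buf[0]=0x%02x\n", o.refs, (unsigned char)o.buf[0]);
	return 0;
}
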
89498diff --git a/mm/slab_common.c b/mm/slab_common.c
89499index 2d41450..4efe6ee 100644
89500--- a/mm/slab_common.c
89501+++ b/mm/slab_common.c
89502@@ -22,11 +22,22 @@
89503
89504 #include "slab.h"
89505
89506-enum slab_state slab_state;
89507+enum slab_state slab_state __read_only;
89508 LIST_HEAD(slab_caches);
89509 DEFINE_MUTEX(slab_mutex);
89510 struct kmem_cache *kmem_cache;
89511
89512+#ifdef CONFIG_PAX_MEMORY_SANITIZE
89513+bool pax_sanitize_slab __read_only = true;
89514+static int __init pax_sanitize_slab_setup(char *str)
89515+{
89516+ pax_sanitize_slab = !!simple_strtol(str, NULL, 0);
89517+ printk("%sabled PaX slab sanitization\n", pax_sanitize_slab ? "En" : "Dis");
89518+ return 1;
89519+}
89520+__setup("pax_sanitize_slab=", pax_sanitize_slab_setup);
89521+#endif
89522+
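The __setup() hook above makes sanitization a boot-time toggle: pax_sanitize_slab=0 on the kernel command line disables it, any nonzero value keeps the default-on behaviour. A userspace model of the parser, with strtol standing in for the kernel's simple_strtol:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static bool pax_sanitize_slab = true;	/* default on, as in the hunk */

static int pax_sanitize_slab_setup(const char *str)
{
	pax_sanitize_slab = !!strtol(str, NULL, 0);	/* simple_strtol stand-in */
	printf("%sabled PaX slab sanitization\n", pax_sanitize_slab ? "En" : "Dis");
	return 1;
}

int main(void)
{
	/* equivalent of booting with pax_sanitize_slab=0 on the command line */
	pax_sanitize_slab_setup("0");
	return 0;
}
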
89523 #ifdef CONFIG_DEBUG_VM
89524 static int kmem_cache_sanity_check(struct mem_cgroup *memcg, const char *name,
89525 size_t size)
89526@@ -209,7 +220,7 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
89527
89528 err = __kmem_cache_create(s, flags);
89529 if (!err) {
89530- s->refcount = 1;
89531+ atomic_set(&s->refcount, 1);
89532 list_add(&s->list, &slab_caches);
89533 memcg_cache_list_add(memcg, s);
89534 } else {
89535@@ -255,8 +266,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
89536
89537 get_online_cpus();
89538 mutex_lock(&slab_mutex);
89539- s->refcount--;
89540- if (!s->refcount) {
89541+ if (atomic_dec_and_test(&s->refcount)) {
89542 list_del(&s->list);
89543
89544 if (!__kmem_cache_shutdown(s)) {
89545@@ -302,7 +312,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
89546 panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
89547 name, size, err);
89548
89549- s->refcount = -1; /* Exempt from merging for now */
89550+ atomic_set(&s->refcount, -1); /* Exempt from merging for now */
89551 }
89552
89553 struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
89554@@ -315,7 +325,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
89555
89556 create_boot_cache(s, name, size, flags);
89557 list_add(&s->list, &slab_caches);
89558- s->refcount = 1;
89559+ atomic_set(&s->refcount, 1);
89560 return s;
89561 }
89562
89563@@ -327,6 +337,11 @@ struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
89564 EXPORT_SYMBOL(kmalloc_dma_caches);
89565 #endif
89566
89567+#ifdef CONFIG_PAX_USERCOPY_SLABS
89568+struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
89569+EXPORT_SYMBOL(kmalloc_usercopy_caches);
89570+#endif
89571+
89572 /*
89573 * Conversion table for small slabs sizes / 8 to the index in the
89574 * kmalloc array. This is necessary for slabs < 192 since we have non power
89575@@ -391,6 +406,13 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
89576 return kmalloc_dma_caches[index];
89577
89578 #endif
89579+
89580+#ifdef CONFIG_PAX_USERCOPY_SLABS
89581+ if (unlikely((flags & GFP_USERCOPY)))
89582+ return kmalloc_usercopy_caches[index];
89583+
89584+#endif
89585+
89586 return kmalloc_caches[index];
89587 }
89588
89589@@ -447,7 +469,7 @@ void __init create_kmalloc_caches(unsigned long flags)
89590 for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
89591 if (!kmalloc_caches[i]) {
89592 kmalloc_caches[i] = create_kmalloc_cache(NULL,
89593- 1 << i, flags);
89594+ 1 << i, SLAB_USERCOPY | flags);
89595 }
89596
89597 /*
89598@@ -456,10 +478,10 @@ void __init create_kmalloc_caches(unsigned long flags)
89599 * earlier power of two caches
89600 */
89601 if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
89602- kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);
89603+ kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, SLAB_USERCOPY | flags);
89604
89605 if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
89606- kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags);
89607+ kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, SLAB_USERCOPY | flags);
89608 }
89609
89610 /* Kmalloc array is now usable */
89611@@ -492,6 +514,23 @@ void __init create_kmalloc_caches(unsigned long flags)
89612 }
89613 }
89614 #endif
89615+
89616+#ifdef CONFIG_PAX_USERCOPY_SLABS
89617+ for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
89618+ struct kmem_cache *s = kmalloc_caches[i];
89619+
89620+ if (s) {
89621+ int size = kmalloc_size(i);
89622+ char *n = kasprintf(GFP_NOWAIT,
89623+ "usercopy-kmalloc-%d", size);
89624+
89625+ BUG_ON(!n);
89626+ kmalloc_usercopy_caches[i] = create_kmalloc_cache(n,
89627+ size, SLAB_USERCOPY | flags);
89628+ }
89629+ }
89630+#endif
89631+
89632 }
89633 #endif /* !CONFIG_SLOB */
89634
89635@@ -516,6 +555,9 @@ void print_slabinfo_header(struct seq_file *m)
89636 seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
89637 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
89638 seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
89639+#ifdef CONFIG_PAX_MEMORY_SANITIZE
89640+ seq_puts(m, " : pax <sanitized> <not_sanitized>");
89641+#endif
89642 #endif
89643 seq_putc(m, '\n');
89644 }
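The slab_common.c changes split the kmalloc arrays by domain: allocations flagged GFP_USERCOPY are served from the dedicated usercopy-kmalloc-* caches created at the end of create_kmalloc_caches(), selected per size class in kmalloc_slab(). A toy model of that routing — the table contents and the flag bit are illustrative, only the kmalloc_usercopy_caches name comes from the hunk:

#include <stdio.h>

#define SHIFT_HIGH   13
#define GFP_USERCOPY 0x1u	/* stand-in flag bit, not the kernel value */

static const char *kmalloc_caches[SHIFT_HIGH + 1];
static const char *kmalloc_usercopy_caches[SHIFT_HIGH + 1];

/* pick the cache array by size-class index and allocation domain */
static const char *kmalloc_slab(unsigned int index, unsigned int flags)
{
	if (flags & GFP_USERCOPY)
		return kmalloc_usercopy_caches[index];
	return kmalloc_caches[index];
}

int main(void)
{
	kmalloc_caches[5] = "kmalloc-32";
	kmalloc_usercopy_caches[5] = "usercopy-kmalloc-32";

	printf("%s\n", kmalloc_slab(5, 0));		/* kmalloc-32 */
	printf("%s\n", kmalloc_slab(5, GFP_USERCOPY));	/* usercopy-kmalloc-32 */
	return 0;
}
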
89645diff --git a/mm/slob.c b/mm/slob.c
89646index eeed4a0..bb0e9ab 100644
89647--- a/mm/slob.c
89648+++ b/mm/slob.c
89649@@ -157,7 +157,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
89650 /*
89651 * Return the size of a slob block.
89652 */
89653-static slobidx_t slob_units(slob_t *s)
89654+static slobidx_t slob_units(const slob_t *s)
89655 {
89656 if (s->units > 0)
89657 return s->units;
89658@@ -167,7 +167,7 @@ static slobidx_t slob_units(slob_t *s)
89659 /*
89660 * Return the next free slob block pointer after this one.
89661 */
89662-static slob_t *slob_next(slob_t *s)
89663+static slob_t *slob_next(const slob_t *s)
89664 {
89665 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
89666 slobidx_t next;
89667@@ -182,14 +182,14 @@ static slob_t *slob_next(slob_t *s)
89668 /*
89669 * Returns true if s is the last free block in its page.
89670 */
89671-static int slob_last(slob_t *s)
89672+static int slob_last(const slob_t *s)
89673 {
89674 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
89675 }
89676
89677-static void *slob_new_pages(gfp_t gfp, int order, int node)
89678+static struct page *slob_new_pages(gfp_t gfp, unsigned int order, int node)
89679 {
89680- void *page;
89681+ struct page *page;
89682
89683 #ifdef CONFIG_NUMA
89684 if (node != NUMA_NO_NODE)
89685@@ -201,14 +201,18 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
89686 if (!page)
89687 return NULL;
89688
89689- return page_address(page);
89690+ __SetPageSlab(page);
89691+ return page;
89692 }
89693
89694-static void slob_free_pages(void *b, int order)
89695+static void slob_free_pages(struct page *sp, int order)
89696 {
89697 if (current->reclaim_state)
89698 current->reclaim_state->reclaimed_slab += 1 << order;
89699- free_pages((unsigned long)b, order);
89700+ __ClearPageSlab(sp);
89701+ page_mapcount_reset(sp);
89702+ sp->private = 0;
89703+ __free_pages(sp, order);
89704 }
89705
89706 /*
89707@@ -313,15 +317,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
89708
89709 /* Not enough space: must allocate a new page */
89710 if (!b) {
89711- b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
89712- if (!b)
89713+ sp = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
89714+ if (!sp)
89715 return NULL;
89716- sp = virt_to_page(b);
89717- __SetPageSlab(sp);
89718+ b = page_address(sp);
89719
89720 spin_lock_irqsave(&slob_lock, flags);
89721 sp->units = SLOB_UNITS(PAGE_SIZE);
89722 sp->freelist = b;
89723+ sp->private = 0;
89724 INIT_LIST_HEAD(&sp->list);
89725 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
89726 set_slob_page_free(sp, slob_list);
89727@@ -359,12 +363,15 @@ static void slob_free(void *block, int size)
89728 if (slob_page_free(sp))
89729 clear_slob_page_free(sp);
89730 spin_unlock_irqrestore(&slob_lock, flags);
89731- __ClearPageSlab(sp);
89732- page_mapcount_reset(sp);
89733- slob_free_pages(b, 0);
89734+ slob_free_pages(sp, 0);
89735 return;
89736 }
89737
89738+#ifdef CONFIG_PAX_MEMORY_SANITIZE
89739+ if (pax_sanitize_slab)
89740+ memset(block, PAX_MEMORY_SANITIZE_VALUE, size);
89741+#endif
89742+
89743 if (!slob_page_free(sp)) {
89744 /* This slob page is about to become partially free. Easy! */
89745 sp->units = units;
89746@@ -424,11 +431,10 @@ out:
89747 */
89748
89749 static __always_inline void *
89750-__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
89751+__do_kmalloc_node_align(size_t size, gfp_t gfp, int node, unsigned long caller, int align)
89752 {
89753- unsigned int *m;
89754- int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
89755- void *ret;
89756+ slob_t *m;
89757+ void *ret = NULL;
89758
89759 gfp &= gfp_allowed_mask;
89760
89761@@ -442,23 +448,41 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
89762
89763 if (!m)
89764 return NULL;
89765- *m = size;
89766+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
89767+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
89768+ m[0].units = size;
89769+ m[1].units = align;
89770 ret = (void *)m + align;
89771
89772 trace_kmalloc_node(caller, ret,
89773 size, size + align, gfp, node);
89774 } else {
89775 unsigned int order = get_order(size);
89776+ struct page *page;
89777
89778 if (likely(order))
89779 gfp |= __GFP_COMP;
89780- ret = slob_new_pages(gfp, order, node);
89781+ page = slob_new_pages(gfp, order, node);
89782+ if (page) {
89783+ ret = page_address(page);
89784+ page->private = size;
89785+ }
89786
89787 trace_kmalloc_node(caller, ret,
89788 size, PAGE_SIZE << order, gfp, node);
89789 }
89790
89791- kmemleak_alloc(ret, size, 1, gfp);
89792+ return ret;
89793+}
89794+
89795+static __always_inline void *
89796+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
89797+{
89798+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
89799+ void *ret = __do_kmalloc_node_align(size, gfp, node, caller, align);
89800+
89801+ if (!ZERO_OR_NULL_PTR(ret))
89802+ kmemleak_alloc(ret, size, 1, gfp);
89803 return ret;
89804 }
89805
89806@@ -493,34 +517,112 @@ void kfree(const void *block)
89807 return;
89808 kmemleak_free(block);
89809
89810+ VM_BUG_ON(!virt_addr_valid(block));
89811 sp = virt_to_page(block);
89812- if (PageSlab(sp)) {
89813+ VM_BUG_ON(!PageSlab(sp));
89814+ if (!sp->private) {
89815 int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
89816- unsigned int *m = (unsigned int *)(block - align);
89817- slob_free(m, *m + align);
89818- } else
89819+ slob_t *m = (slob_t *)(block - align);
89820+ slob_free(m, m[0].units + align);
89821+ } else {
89822+ __ClearPageSlab(sp);
89823+ page_mapcount_reset(sp);
89824+ sp->private = 0;
89825 __free_pages(sp, compound_order(sp));
89826+ }
89827 }
89828 EXPORT_SYMBOL(kfree);
89829
89830+bool is_usercopy_object(const void *ptr)
89831+{
89832+ if (!slab_is_available())
89833+ return false;
89834+
89835+ // PAX: TODO
89836+
89837+ return false;
89838+}
89839+
89840+#ifdef CONFIG_PAX_USERCOPY
89841+const char *check_heap_object(const void *ptr, unsigned long n)
89842+{
89843+ struct page *page;
89844+ const slob_t *free;
89845+ const void *base;
89846+ unsigned long flags;
89847+
89848+ if (ZERO_OR_NULL_PTR(ptr))
89849+ return "<null>";
89850+
89851+ if (!virt_addr_valid(ptr))
89852+ return NULL;
89853+
89854+ page = virt_to_head_page(ptr);
89855+ if (!PageSlab(page))
89856+ return NULL;
89857+
89858+ if (page->private) {
89859+ base = page;
89860+ if (base <= ptr && n <= page->private - (ptr - base))
89861+ return NULL;
89862+ return "<slob>";
89863+ }
89864+
89865+ /* some tricky double walking to find the chunk */
89866+ spin_lock_irqsave(&slob_lock, flags);
89867+ base = (void *)((unsigned long)ptr & PAGE_MASK);
89868+ free = page->freelist;
89869+
89870+ while (!slob_last(free) && (void *)free <= ptr) {
89871+ base = free + slob_units(free);
89872+ free = slob_next(free);
89873+ }
89874+
89875+ while (base < (void *)free) {
89876+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
89877+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
89878+ int offset;
89879+
89880+ if (ptr < base + align)
89881+ break;
89882+
89883+ offset = ptr - base - align;
89884+ if (offset >= m) {
89885+ base += size;
89886+ continue;
89887+ }
89888+
89889+ if (n > m - offset)
89890+ break;
89891+
89892+ spin_unlock_irqrestore(&slob_lock, flags);
89893+ return NULL;
89894+ }
89895+
89896+ spin_unlock_irqrestore(&slob_lock, flags);
89897+ return "<slob>";
89898+}
89899+#endif
89900+
89901 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
89902 size_t ksize(const void *block)
89903 {
89904 struct page *sp;
89905 int align;
89906- unsigned int *m;
89907+ slob_t *m;
89908
89909 BUG_ON(!block);
89910 if (unlikely(block == ZERO_SIZE_PTR))
89911 return 0;
89912
89913 sp = virt_to_page(block);
89914- if (unlikely(!PageSlab(sp)))
89915- return PAGE_SIZE << compound_order(sp);
89916+ VM_BUG_ON(!PageSlab(sp));
89917+ if (sp->private)
89918+ return sp->private;
89919
89920 align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
89921- m = (unsigned int *)(block - align);
89922- return SLOB_UNITS(*m) * SLOB_UNIT;
89923+ m = (slob_t *)(block - align);
89924+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
89925 }
89926 EXPORT_SYMBOL(ksize);
89927
89928@@ -536,23 +638,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
89929
89930 void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
89931 {
89932- void *b;
89933+ void *b = NULL;
89934
89935 flags &= gfp_allowed_mask;
89936
89937 lockdep_trace_alloc(flags);
89938
89939+#ifdef CONFIG_PAX_USERCOPY_SLABS
89940+ b = __do_kmalloc_node_align(c->size, flags, node, _RET_IP_, c->align);
89941+#else
89942 if (c->size < PAGE_SIZE) {
89943 b = slob_alloc(c->size, flags, c->align, node);
89944 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
89945 SLOB_UNITS(c->size) * SLOB_UNIT,
89946 flags, node);
89947 } else {
89948- b = slob_new_pages(flags, get_order(c->size), node);
89949+ struct page *sp;
89950+
89951+ sp = slob_new_pages(flags, get_order(c->size), node);
89952+ if (sp) {
89953+ b = page_address(sp);
89954+ sp->private = c->size;
89955+ }
89956 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
89957 PAGE_SIZE << get_order(c->size),
89958 flags, node);
89959 }
89960+#endif
89961
89962 if (c->ctor)
89963 c->ctor(b);
89964@@ -564,10 +676,14 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
89965
89966 static void __kmem_cache_free(void *b, int size)
89967 {
89968- if (size < PAGE_SIZE)
89969+ struct page *sp;
89970+
89971+ sp = virt_to_page(b);
89972+ BUG_ON(!PageSlab(sp));
89973+ if (!sp->private)
89974 slob_free(b, size);
89975 else
89976- slob_free_pages(b, get_order(size));
89977+ slob_free_pages(sp, get_order(size));
89978 }
89979
89980 static void kmem_rcu_free(struct rcu_head *head)
89981@@ -580,17 +696,31 @@ static void kmem_rcu_free(struct rcu_head *head)
89982
89983 void kmem_cache_free(struct kmem_cache *c, void *b)
89984 {
89985+ int size = c->size;
89986+
89987+#ifdef CONFIG_PAX_USERCOPY_SLABS
89988+ if (size + c->align < PAGE_SIZE) {
89989+ size += c->align;
89990+ b -= c->align;
89991+ }
89992+#endif
89993+
89994 kmemleak_free_recursive(b, c->flags);
89995 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
89996 struct slob_rcu *slob_rcu;
89997- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
89998- slob_rcu->size = c->size;
89999+ slob_rcu = b + (size - sizeof(struct slob_rcu));
90000+ slob_rcu->size = size;
90001 call_rcu(&slob_rcu->head, kmem_rcu_free);
90002 } else {
90003- __kmem_cache_free(b, c->size);
90004+ __kmem_cache_free(b, size);
90005 }
90006
90007+#ifdef CONFIG_PAX_USERCOPY_SLABS
90008+ trace_kfree(_RET_IP_, b);
90009+#else
90010 trace_kmem_cache_free(_RET_IP_, b);
90011+#endif
90012+
90013 }
90014 EXPORT_SYMBOL(kmem_cache_free);
90015
90016diff --git a/mm/slub.c b/mm/slub.c
90017index 57707f0..7857bd3 100644
90018--- a/mm/slub.c
90019+++ b/mm/slub.c
90020@@ -198,7 +198,7 @@ struct track {
90021
90022 enum track_item { TRACK_ALLOC, TRACK_FREE };
90023
90024-#ifdef CONFIG_SYSFS
90025+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
90026 static int sysfs_slab_add(struct kmem_cache *);
90027 static int sysfs_slab_alias(struct kmem_cache *, const char *);
90028 static void sysfs_slab_remove(struct kmem_cache *);
90029@@ -519,7 +519,7 @@ static void print_track(const char *s, struct track *t)
90030 if (!t->addr)
90031 return;
90032
90033- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
90034+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
90035 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
90036 #ifdef CONFIG_STACKTRACE
90037 {
90038@@ -2594,6 +2594,14 @@ static __always_inline void slab_free(struct kmem_cache *s,
90039
90040 slab_free_hook(s, x);
90041
90042+#ifdef CONFIG_PAX_MEMORY_SANITIZE
90043+ if (pax_sanitize_slab && !(s->flags & SLAB_NO_SANITIZE)) {
90044+ memset(x, PAX_MEMORY_SANITIZE_VALUE, s->object_size);
90045+ if (s->ctor)
90046+ s->ctor(x);
90047+ }
90048+#endif
90049+
90050 redo:
90051 /*
90052 * Determine the currently cpus per cpu slab.
90053@@ -2661,7 +2669,7 @@ static int slub_min_objects;
90054 * Merge control. If this is set then no merging of slab caches will occur.
90055 * (Could be removed. This was introduced to pacify the merge skeptics.)
90056 */
90057-static int slub_nomerge;
90058+static int slub_nomerge = 1;
90059
90060 /*
90061 * Calculate the order of allocation given an slab object size.
90062@@ -2938,6 +2946,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
90063 s->inuse = size;
90064
90065 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
90066+#ifdef CONFIG_PAX_MEMORY_SANITIZE
90067+ (pax_sanitize_slab && !(flags & SLAB_NO_SANITIZE)) ||
90068+#endif
90069 s->ctor)) {
90070 /*
90071 * Relocate free pointer after the object if it is not
90072@@ -3283,6 +3294,59 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
90073 EXPORT_SYMBOL(__kmalloc_node);
90074 #endif
90075
90076+bool is_usercopy_object(const void *ptr)
90077+{
90078+ struct page *page;
90079+ struct kmem_cache *s;
90080+
90081+ if (ZERO_OR_NULL_PTR(ptr))
90082+ return false;
90083+
90084+ if (!slab_is_available())
90085+ return false;
90086+
90087+ if (!virt_addr_valid(ptr))
90088+ return false;
90089+
90090+ page = virt_to_head_page(ptr);
90091+
90092+ if (!PageSlab(page))
90093+ return false;
90094+
90095+ s = page->slab_cache;
90096+ return s->flags & SLAB_USERCOPY;
90097+}
90098+
90099+#ifdef CONFIG_PAX_USERCOPY
90100+const char *check_heap_object(const void *ptr, unsigned long n)
90101+{
90102+ struct page *page;
90103+ struct kmem_cache *s;
90104+ unsigned long offset;
90105+
90106+ if (ZERO_OR_NULL_PTR(ptr))
90107+ return "<null>";
90108+
90109+ if (!virt_addr_valid(ptr))
90110+ return NULL;
90111+
90112+ page = virt_to_head_page(ptr);
90113+
90114+ if (!PageSlab(page))
90115+ return NULL;
90116+
90117+ s = page->slab_cache;
90118+ if (!(s->flags & SLAB_USERCOPY))
90119+ return s->name;
90120+
90121+ offset = (ptr - page_address(page)) % s->size;
90122+ if (offset <= s->object_size && n <= s->object_size - offset)
90123+ return NULL;
90124+
90125+ return s->name;
90126+}
90127+#endif
90128+
90129 size_t ksize(const void *object)
90130 {
90131 struct page *page;
90132@@ -3347,6 +3411,7 @@ void kfree(const void *x)
90133 if (unlikely(ZERO_OR_NULL_PTR(x)))
90134 return;
90135
90136+ VM_BUG_ON(!virt_addr_valid(x));
90137 page = virt_to_head_page(x);
90138 if (unlikely(!PageSlab(page))) {
90139 BUG_ON(!PageCompound(page));
90140@@ -3652,7 +3717,7 @@ static int slab_unmergeable(struct kmem_cache *s)
90141 /*
90142 * We may have set a slab to be unmergeable during bootstrap.
90143 */
90144- if (s->refcount < 0)
90145+ if (atomic_read(&s->refcount) < 0)
90146 return 1;
90147
90148 return 0;
90149@@ -3710,7 +3775,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
90150
90151 s = find_mergeable(memcg, size, align, flags, name, ctor);
90152 if (s) {
90153- s->refcount++;
90154+ atomic_inc(&s->refcount);
90155 /*
90156 * Adjust the object sizes so that we clear
90157 * the complete object on kzalloc.
90158@@ -3719,7 +3784,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
90159 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
90160
90161 if (sysfs_slab_alias(s, name)) {
90162- s->refcount--;
90163+ atomic_dec(&s->refcount);
90164 s = NULL;
90165 }
90166 }
90167@@ -3781,7 +3846,7 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
90168 return NOTIFY_OK;
90169 }
90170
90171-static struct notifier_block __cpuinitdata slab_notifier = {
90172+static struct notifier_block slab_notifier = {
90173 .notifier_call = slab_cpuup_callback
90174 };
90175
90176@@ -3839,7 +3904,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
90177 }
90178 #endif
90179
90180-#ifdef CONFIG_SYSFS
90181+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
90182 static int count_inuse(struct page *page)
90183 {
90184 return page->inuse;
90185@@ -4226,12 +4291,12 @@ static void resiliency_test(void)
90186 validate_slab_cache(kmalloc_caches[9]);
90187 }
90188 #else
90189-#ifdef CONFIG_SYSFS
90190+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
90191 static void resiliency_test(void) {};
90192 #endif
90193 #endif
90194
90195-#ifdef CONFIG_SYSFS
90196+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
90197 enum slab_stat_type {
90198 SL_ALL, /* All slabs */
90199 SL_PARTIAL, /* Only partially allocated slabs */
90200@@ -4475,7 +4540,7 @@ SLAB_ATTR_RO(ctor);
90201
90202 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
90203 {
90204- return sprintf(buf, "%d\n", s->refcount - 1);
90205+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
90206 }
90207 SLAB_ATTR_RO(aliases);
90208
90209@@ -4563,6 +4628,14 @@ static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
90210 SLAB_ATTR_RO(cache_dma);
90211 #endif
90212
90213+#ifdef CONFIG_PAX_USERCOPY_SLABS
90214+static ssize_t usercopy_show(struct kmem_cache *s, char *buf)
90215+{
90216+ return sprintf(buf, "%d\n", !!(s->flags & SLAB_USERCOPY));
90217+}
90218+SLAB_ATTR_RO(usercopy);
90219+#endif
90220+
90221 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
90222 {
90223 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
90224@@ -4897,6 +4970,9 @@ static struct attribute *slab_attrs[] = {
90225 #ifdef CONFIG_ZONE_DMA
90226 &cache_dma_attr.attr,
90227 #endif
90228+#ifdef CONFIG_PAX_USERCOPY_SLABS
90229+ &usercopy_attr.attr,
90230+#endif
90231 #ifdef CONFIG_NUMA
90232 &remote_node_defrag_ratio_attr.attr,
90233 #endif
90234@@ -5128,6 +5204,7 @@ static char *create_unique_id(struct kmem_cache *s)
90235 return name;
90236 }
90237
90238+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
90239 static int sysfs_slab_add(struct kmem_cache *s)
90240 {
90241 int err;
90242@@ -5151,7 +5228,7 @@ static int sysfs_slab_add(struct kmem_cache *s)
90243 }
90244
90245 s->kobj.kset = slab_kset;
90246- err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
90247+ err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
90248 if (err) {
90249 kobject_put(&s->kobj);
90250 return err;
90251@@ -5185,6 +5262,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
90252 kobject_del(&s->kobj);
90253 kobject_put(&s->kobj);
90254 }
90255+#endif
90256
90257 /*
90258 * Need to buffer aliases during bootup until sysfs becomes
90259@@ -5198,6 +5276,7 @@ struct saved_alias {
90260
90261 static struct saved_alias *alias_list;
90262
90263+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
90264 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
90265 {
90266 struct saved_alias *al;
90267@@ -5220,6 +5299,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
90268 alias_list = al;
90269 return 0;
90270 }
90271+#endif
90272
90273 static int __init slab_sysfs_init(void)
90274 {
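Across slab_common.c and slub.c the plain s->refcount integer becomes an atomic_t, so cache aliasing and destruction can no longer race on the counter (note kmem_cache_destroy's switch to atomic_dec_and_test earlier). A C11-atomics model of the same conversion:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct cache_model {
	atomic_int refcount;	/* was a plain int before the conversion */
};

static void cache_get(struct cache_model *s)
{
	atomic_fetch_add(&s->refcount, 1);	/* was: s->refcount++ */
}

/* true when the last reference was dropped; atomic_dec_and_test analogue */
static bool cache_put(struct cache_model *s)
{
	return atomic_fetch_sub(&s->refcount, 1) == 1;
}

int main(void)
{
	struct cache_model s;

	atomic_init(&s.refcount, 1);
	cache_get(&s);
	printf("%d\n", cache_put(&s));	/* 0: still referenced  */
	printf("%d\n", cache_put(&s));	/* 1: safe to tear down */
	return 0;
}
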
90275diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
90276index 27eeab3..7c3f7f2 100644
90277--- a/mm/sparse-vmemmap.c
90278+++ b/mm/sparse-vmemmap.c
90279@@ -130,7 +130,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
90280 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
90281 if (!p)
90282 return NULL;
90283- pud_populate(&init_mm, pud, p);
90284+ pud_populate_kernel(&init_mm, pud, p);
90285 }
90286 return pud;
90287 }
90288@@ -142,7 +142,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
90289 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
90290 if (!p)
90291 return NULL;
90292- pgd_populate(&init_mm, pgd, p);
90293+ pgd_populate_kernel(&init_mm, pgd, p);
90294 }
90295 return pgd;
90296 }
90297diff --git a/mm/sparse.c b/mm/sparse.c
90298index 1c91f0d3..485470a 100644
90299--- a/mm/sparse.c
90300+++ b/mm/sparse.c
90301@@ -761,7 +761,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
90302
90303 for (i = 0; i < PAGES_PER_SECTION; i++) {
90304 if (PageHWPoison(&memmap[i])) {
90305- atomic_long_sub(1, &num_poisoned_pages);
90306+ atomic_long_sub_unchecked(1, &num_poisoned_pages);
90307 ClearPageHWPoison(&memmap[i]);
90308 }
90309 }
90310diff --git a/mm/swap.c b/mm/swap.c
90311index dfd7d71..ccdf688 100644
90312--- a/mm/swap.c
90313+++ b/mm/swap.c
90314@@ -31,6 +31,7 @@
90315 #include <linux/memcontrol.h>
90316 #include <linux/gfp.h>
90317 #include <linux/uio.h>
90318+#include <linux/hugetlb.h>
90319
90320 #include "internal.h"
90321
90322@@ -73,6 +74,8 @@ static void __put_compound_page(struct page *page)
90323
90324 __page_cache_release(page);
90325 dtor = get_compound_page_dtor(page);
90326+ if (!PageHuge(page))
90327+ BUG_ON(dtor != free_compound_page);
90328 (*dtor)(page);
90329 }
90330
90331diff --git a/mm/swapfile.c b/mm/swapfile.c
90332index 746af55b..7ac94ae 100644
90333--- a/mm/swapfile.c
90334+++ b/mm/swapfile.c
90335@@ -66,7 +66,7 @@ static DEFINE_MUTEX(swapon_mutex);
90336
90337 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
90338 /* Activity counter to indicate that a swapon or swapoff has occurred */
90339-static atomic_t proc_poll_event = ATOMIC_INIT(0);
90340+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
90341
90342 static inline unsigned char swap_count(unsigned char ent)
90343 {
90344@@ -1684,7 +1684,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
90345 }
90346 filp_close(swap_file, NULL);
90347 err = 0;
90348- atomic_inc(&proc_poll_event);
90349+ atomic_inc_unchecked(&proc_poll_event);
90350 wake_up_interruptible(&proc_poll_wait);
90351
90352 out_dput:
90353@@ -1701,8 +1701,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
90354
90355 poll_wait(file, &proc_poll_wait, wait);
90356
90357- if (seq->poll_event != atomic_read(&proc_poll_event)) {
90358- seq->poll_event = atomic_read(&proc_poll_event);
90359+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
90360+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
90361 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
90362 }
90363
90364@@ -1800,7 +1800,7 @@ static int swaps_open(struct inode *inode, struct file *file)
90365 return ret;
90366
90367 seq = file->private_data;
90368- seq->poll_event = atomic_read(&proc_poll_event);
90369+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
90370 return 0;
90371 }
90372
90373@@ -2143,7 +2143,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
90374 (frontswap_map) ? "FS" : "");
90375
90376 mutex_unlock(&swapon_mutex);
90377- atomic_inc(&proc_poll_event);
90378+ atomic_inc_unchecked(&proc_poll_event);
90379 wake_up_interruptible(&proc_poll_wait);
90380
90381 if (S_ISREG(inode->i_mode))
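proc_poll_event is one of many counters this patch converts to atomic_unchecked_t: under PaX's REFCOUNT hardening, ordinary atomic_t operations trap on signed overflow to stop reference-count exploits, so counters that may legitimately grow or wrap with no security consequence — pure event and statistics counters like this one, vm_stat below, or the ATM and batman-adv sequence numbers further down — are moved to the unchecked variants. A userspace illustration of the two behaviours, with assert standing in for the kernel's trap:

#include <assert.h>
#include <limits.h>
#include <stdio.h>

/* checked flavour: an increment that would overflow traps
 * (assert here, a CPU trap under PaX REFCOUNT in the kernel) */
static int checked_inc(int v)
{
	assert(v != INT_MAX);
	return v + 1;
}

/* unchecked flavour: wraparound is harmless for a pure event counter */
static unsigned int unchecked_inc(unsigned int v)
{
	return v + 1;
}

int main(void)
{
	printf("%u\n", unchecked_inc(UINT_MAX));	/* wraps to 0 */
	printf("%d\n", checked_inc(41));		/* 42 */
	return 0;
}
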
90382diff --git a/mm/util.c b/mm/util.c
90383index ab1424d..7c5bd5a 100644
90384--- a/mm/util.c
90385+++ b/mm/util.c
90386@@ -294,6 +294,12 @@ done:
90387 void arch_pick_mmap_layout(struct mm_struct *mm)
90388 {
90389 mm->mmap_base = TASK_UNMAPPED_BASE;
90390+
90391+#ifdef CONFIG_PAX_RANDMMAP
90392+ if (mm->pax_flags & MF_PAX_RANDMMAP)
90393+ mm->mmap_base += mm->delta_mmap;
90394+#endif
90395+
90396 mm->get_unmapped_area = arch_get_unmapped_area;
90397 mm->unmap_area = arch_unmap_area;
90398 }
90399diff --git a/mm/vmalloc.c b/mm/vmalloc.c
90400index d365724..6cae7c2 100644
90401--- a/mm/vmalloc.c
90402+++ b/mm/vmalloc.c
90403@@ -59,8 +59,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
90404
90405 pte = pte_offset_kernel(pmd, addr);
90406 do {
90407- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
90408- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
90409+
90410+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
90411+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
90412+ BUG_ON(!pte_exec(*pte));
90413+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
90414+ continue;
90415+ }
90416+#endif
90417+
90418+ {
90419+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
90420+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
90421+ }
90422 } while (pte++, addr += PAGE_SIZE, addr != end);
90423 }
90424
90425@@ -120,16 +131,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
90426 pte = pte_alloc_kernel(pmd, addr);
90427 if (!pte)
90428 return -ENOMEM;
90429+
90430+ pax_open_kernel();
90431 do {
90432 struct page *page = pages[*nr];
90433
90434- if (WARN_ON(!pte_none(*pte)))
90435+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
90436+ if (pgprot_val(prot) & _PAGE_NX)
90437+#endif
90438+
90439+ if (!pte_none(*pte)) {
90440+ pax_close_kernel();
90441+ WARN_ON(1);
90442 return -EBUSY;
90443- if (WARN_ON(!page))
90444+ }
90445+ if (!page) {
90446+ pax_close_kernel();
90447+ WARN_ON(1);
90448 return -ENOMEM;
90449+ }
90450 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
90451 (*nr)++;
90452 } while (pte++, addr += PAGE_SIZE, addr != end);
90453+ pax_close_kernel();
90454 return 0;
90455 }
90456
90457@@ -139,7 +163,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
90458 pmd_t *pmd;
90459 unsigned long next;
90460
90461- pmd = pmd_alloc(&init_mm, pud, addr);
90462+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
90463 if (!pmd)
90464 return -ENOMEM;
90465 do {
90466@@ -156,7 +180,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
90467 pud_t *pud;
90468 unsigned long next;
90469
90470- pud = pud_alloc(&init_mm, pgd, addr);
90471+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
90472 if (!pud)
90473 return -ENOMEM;
90474 do {
90475@@ -216,6 +240,12 @@ int is_vmalloc_or_module_addr(const void *x)
90476 if (addr >= MODULES_VADDR && addr < MODULES_END)
90477 return 1;
90478 #endif
90479+
90480+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
90481+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
90482+ return 1;
90483+#endif
90484+
90485 return is_vmalloc_addr(x);
90486 }
90487
90488@@ -236,8 +266,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
90489
90490 if (!pgd_none(*pgd)) {
90491 pud_t *pud = pud_offset(pgd, addr);
90492+#ifdef CONFIG_X86
90493+ if (!pud_large(*pud))
90494+#endif
90495 if (!pud_none(*pud)) {
90496 pmd_t *pmd = pmd_offset(pud, addr);
90497+#ifdef CONFIG_X86
90498+ if (!pmd_large(*pmd))
90499+#endif
90500 if (!pmd_none(*pmd)) {
90501 pte_t *ptep, pte;
90502
90503@@ -339,7 +375,7 @@ static void purge_vmap_area_lazy(void);
90504 * Allocate a region of KVA of the specified size and alignment, within the
90505 * vstart and vend.
90506 */
90507-static struct vmap_area *alloc_vmap_area(unsigned long size,
90508+static __size_overflow(1) struct vmap_area *alloc_vmap_area(unsigned long size,
90509 unsigned long align,
90510 unsigned long vstart, unsigned long vend,
90511 int node, gfp_t gfp_mask)
90512@@ -1337,6 +1373,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
90513 struct vm_struct *area;
90514
90515 BUG_ON(in_interrupt());
90516+
90517+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
90518+ if (flags & VM_KERNEXEC) {
90519+ if (start != VMALLOC_START || end != VMALLOC_END)
90520+ return NULL;
90521+ start = (unsigned long)MODULES_EXEC_VADDR;
90522+ end = (unsigned long)MODULES_EXEC_END;
90523+ }
90524+#endif
90525+
90526 if (flags & VM_IOREMAP) {
90527 int bit = fls(size);
90528
90529@@ -1581,6 +1627,11 @@ void *vmap(struct page **pages, unsigned int count,
90530 if (count > totalram_pages)
90531 return NULL;
90532
90533+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
90534+ if (!(pgprot_val(prot) & _PAGE_NX))
90535+ flags |= VM_KERNEXEC;
90536+#endif
90537+
90538 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
90539 __builtin_return_address(0));
90540 if (!area)
90541@@ -1682,6 +1733,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
90542 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
90543 goto fail;
90544
90545+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
90546+ if (!(pgprot_val(prot) & _PAGE_NX))
90547+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
90548+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
90549+ else
90550+#endif
90551+
90552 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
90553 start, end, node, gfp_mask, caller);
90554 if (!area)
90555@@ -1858,10 +1916,9 @@ EXPORT_SYMBOL(vzalloc_node);
90556 * For tight control over page level allocator and protection flags
90557 * use __vmalloc() instead.
90558 */
90559-
90560 void *vmalloc_exec(unsigned long size)
90561 {
90562- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
90563+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
90564 NUMA_NO_NODE, __builtin_return_address(0));
90565 }
90566
90567@@ -2168,6 +2225,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
90568 unsigned long uaddr = vma->vm_start;
90569 unsigned long usize = vma->vm_end - vma->vm_start;
90570
90571+ BUG_ON(vma->vm_mirror);
90572+
90573 if ((PAGE_SIZE-1) & (unsigned long)addr)
90574 return -EINVAL;
90575
90576@@ -2629,7 +2688,11 @@ static int s_show(struct seq_file *m, void *p)
90577 v->addr, v->addr + v->size, v->size);
90578
90579 if (v->caller)
90580+#ifdef CONFIG_GRKERNSEC_HIDESYM
90581+ seq_printf(m, " %pK", v->caller);
90582+#else
90583 seq_printf(m, " %pS", v->caller);
90584+#endif
90585
90586 if (v->nr_pages)
90587 seq_printf(m, " pages=%d", v->nr_pages);
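The vmalloc.c hunks wire vmalloc into KERNEXEC: on x86, requests for executable mappings (a prot without _PAGE_NX, e.g. vmalloc_exec()) get the VM_KERNEXEC flag and are carved out of the MODULES_EXEC_VADDR..MODULES_EXEC_END window instead of the general vmalloc range, keeping the rest of vmalloc space non-executable. A toy router over purely illustrative addresses:

#include <stdbool.h>
#include <stdio.h>

/* illustrative addresses only */
#define VMALLOC_START      0xa0000000UL
#define VMALLOC_END        0xc0000000UL
#define MODULES_EXEC_VADDR 0xc0000000UL
#define MODULES_EXEC_END   0xc8000000UL

struct range { unsigned long start, end; };

/* executable requests are confined to the dedicated module window */
static struct range pick_range(bool executable)
{
	if (executable)
		return (struct range){ MODULES_EXEC_VADDR, MODULES_EXEC_END };
	return (struct range){ VMALLOC_START, VMALLOC_END };
}

int main(void)
{
	struct range r = pick_range(true);

	printf("exec mappings land in [%#lx, %#lx)\n", r.start, r.end);
	return 0;
}
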
90588diff --git a/mm/vmstat.c b/mm/vmstat.c
90589index f42745e..62f8346 100644
90590--- a/mm/vmstat.c
90591+++ b/mm/vmstat.c
90592@@ -76,7 +76,7 @@ void vm_events_fold_cpu(int cpu)
90593 *
90594 * vm_stat contains the global counters
90595 */
90596-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
90597+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
90598 EXPORT_SYMBOL(vm_stat);
90599
90600 #ifdef CONFIG_SMP
90601@@ -452,7 +452,7 @@ void refresh_cpu_vm_stats(int cpu)
90602 v = p->vm_stat_diff[i];
90603 p->vm_stat_diff[i] = 0;
90604 local_irq_restore(flags);
90605- atomic_long_add(v, &zone->vm_stat[i]);
90606+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
90607 global_diff[i] += v;
90608 #ifdef CONFIG_NUMA
90609 /* 3 seconds idle till flush */
90610@@ -490,7 +490,7 @@ void refresh_cpu_vm_stats(int cpu)
90611
90612 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
90613 if (global_diff[i])
90614- atomic_long_add(global_diff[i], &vm_stat[i]);
90615+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
90616 }
90617
90618 /*
90619@@ -505,8 +505,8 @@ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
90620 if (pset->vm_stat_diff[i]) {
90621 int v = pset->vm_stat_diff[i];
90622 pset->vm_stat_diff[i] = 0;
90623- atomic_long_add(v, &zone->vm_stat[i]);
90624- atomic_long_add(v, &vm_stat[i]);
90625+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
90626+ atomic_long_add_unchecked(v, &vm_stat[i]);
90627 }
90628 }
90629 #endif
90630@@ -1226,7 +1226,7 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
90631 return NOTIFY_OK;
90632 }
90633
90634-static struct notifier_block __cpuinitdata vmstat_notifier =
90635+static struct notifier_block vmstat_notifier =
90636 { &vmstat_cpuup_callback, NULL, 0 };
90637 #endif
90638
90639@@ -1241,10 +1241,20 @@ static int __init setup_vmstat(void)
90640 start_cpu_timer(cpu);
90641 #endif
90642 #ifdef CONFIG_PROC_FS
90643- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
90644- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
90645- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
90646- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
90647+ {
90648+ mode_t gr_mode = S_IRUGO;
90649+#ifdef CONFIG_GRKERNSEC_PROC_ADD
90650+ gr_mode = S_IRUSR;
90651+#endif
90652+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
90653+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
90654+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
90655+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
90656+#else
90657+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
90658+#endif
90659+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
90660+ }
90661 #endif
90662 return 0;
90663 }
90664diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
90665index 9424f37..6aabf19 100644
90666--- a/net/8021q/vlan.c
90667+++ b/net/8021q/vlan.c
90668@@ -469,7 +469,7 @@ out:
90669 return NOTIFY_DONE;
90670 }
90671
90672-static struct notifier_block vlan_notifier_block __read_mostly = {
90673+static struct notifier_block vlan_notifier_block = {
90674 .notifier_call = vlan_device_event,
90675 };
90676
90677@@ -544,8 +544,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
90678 err = -EPERM;
90679 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
90680 break;
90681- if ((args.u.name_type >= 0) &&
90682- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
90683+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
90684 struct vlan_net *vn;
90685
90686 vn = net_generic(net, vlan_net_id);
90687diff --git a/net/9p/mod.c b/net/9p/mod.c
90688index 6ab36ae..6f1841b 100644
90689--- a/net/9p/mod.c
90690+++ b/net/9p/mod.c
90691@@ -84,7 +84,7 @@ static LIST_HEAD(v9fs_trans_list);
90692 void v9fs_register_trans(struct p9_trans_module *m)
90693 {
90694 spin_lock(&v9fs_trans_lock);
90695- list_add_tail(&m->list, &v9fs_trans_list);
90696+ pax_list_add_tail((struct list_head *)&m->list, &v9fs_trans_list);
90697 spin_unlock(&v9fs_trans_lock);
90698 }
90699 EXPORT_SYMBOL(v9fs_register_trans);
90700@@ -97,7 +97,7 @@ EXPORT_SYMBOL(v9fs_register_trans);
90701 void v9fs_unregister_trans(struct p9_trans_module *m)
90702 {
90703 spin_lock(&v9fs_trans_lock);
90704- list_del_init(&m->list);
90705+ pax_list_del_init((struct list_head *)&m->list);
90706 spin_unlock(&v9fs_trans_lock);
90707 }
90708 EXPORT_SYMBOL(v9fs_unregister_trans);
90709diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
90710index 02efb25..41541a9 100644
90711--- a/net/9p/trans_fd.c
90712+++ b/net/9p/trans_fd.c
90713@@ -425,7 +425,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
90714 oldfs = get_fs();
90715 set_fs(get_ds());
90716 /* The cast to a user pointer is valid due to the set_fs() */
90717- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
90718+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
90719 set_fs(oldfs);
90720
90721 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
90722diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
90723index 876fbe8..8bbea9f 100644
90724--- a/net/atm/atm_misc.c
90725+++ b/net/atm/atm_misc.c
90726@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
90727 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
90728 return 1;
90729 atm_return(vcc, truesize);
90730- atomic_inc(&vcc->stats->rx_drop);
90731+ atomic_inc_unchecked(&vcc->stats->rx_drop);
90732 return 0;
90733 }
90734 EXPORT_SYMBOL(atm_charge);
90735@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
90736 }
90737 }
90738 atm_return(vcc, guess);
90739- atomic_inc(&vcc->stats->rx_drop);
90740+ atomic_inc_unchecked(&vcc->stats->rx_drop);
90741 return NULL;
90742 }
90743 EXPORT_SYMBOL(atm_alloc_charge);
90744@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
90745
90746 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
90747 {
90748-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
90749+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
90750 __SONET_ITEMS
90751 #undef __HANDLE_ITEM
90752 }
90753@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
90754
90755 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
90756 {
90757-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
90758+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
90759 __SONET_ITEMS
90760 #undef __HANDLE_ITEM
90761 }
90762diff --git a/net/atm/lec.h b/net/atm/lec.h
90763index 4149db1..f2ab682 100644
90764--- a/net/atm/lec.h
90765+++ b/net/atm/lec.h
90766@@ -48,7 +48,7 @@ struct lane2_ops {
90767 const u8 *tlvs, u32 sizeoftlvs);
90768 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
90769 const u8 *tlvs, u32 sizeoftlvs);
90770-};
90771+} __no_const;
90772
90773 /*
90774 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
90775diff --git a/net/atm/proc.c b/net/atm/proc.c
90776index bbb6461..cf04016 100644
90777--- a/net/atm/proc.c
90778+++ b/net/atm/proc.c
90779@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
90780 const struct k_atm_aal_stats *stats)
90781 {
90782 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
90783- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
90784- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
90785- atomic_read(&stats->rx_drop));
90786+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
90787+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
90788+ atomic_read_unchecked(&stats->rx_drop));
90789 }
90790
90791 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
90792diff --git a/net/atm/resources.c b/net/atm/resources.c
90793index 0447d5d..3cf4728 100644
90794--- a/net/atm/resources.c
90795+++ b/net/atm/resources.c
90796@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
90797 static void copy_aal_stats(struct k_atm_aal_stats *from,
90798 struct atm_aal_stats *to)
90799 {
90800-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
90801+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
90802 __AAL_STAT_ITEMS
90803 #undef __HANDLE_ITEM
90804 }
90805@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
90806 static void subtract_aal_stats(struct k_atm_aal_stats *from,
90807 struct atm_aal_stats *to)
90808 {
90809-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
90810+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
90811 __AAL_STAT_ITEMS
90812 #undef __HANDLE_ITEM
90813 }
90814diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
90815index d5744b7..506bae3 100644
90816--- a/net/ax25/sysctl_net_ax25.c
90817+++ b/net/ax25/sysctl_net_ax25.c
90818@@ -152,7 +152,7 @@ int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
90819 {
90820 char path[sizeof("net/ax25/") + IFNAMSIZ];
90821 int k;
90822- struct ctl_table *table;
90823+ ctl_table_no_const *table;
90824
90825 table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
90826 if (!table)
90827diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
90828index f680ee1..97e3542 100644
90829--- a/net/batman-adv/bat_iv_ogm.c
90830+++ b/net/batman-adv/bat_iv_ogm.c
90831@@ -79,7 +79,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
90832
90833 /* randomize initial seqno to avoid collision */
90834 get_random_bytes(&random_seqno, sizeof(random_seqno));
90835- atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
90836+ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, random_seqno);
90837
90838 hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
90839 ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
90840@@ -627,9 +627,9 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
90841 batadv_ogm_packet = (struct batadv_ogm_packet *)(*ogm_buff);
90842
90843 /* change sequence number to network order */
90844- seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
90845+ seqno = (uint32_t)atomic_read_unchecked(&hard_iface->bat_iv.ogm_seqno);
90846 batadv_ogm_packet->seqno = htonl(seqno);
90847- atomic_inc(&hard_iface->bat_iv.ogm_seqno);
90848+ atomic_inc_unchecked(&hard_iface->bat_iv.ogm_seqno);
90849
90850 batadv_ogm_packet->ttvn = atomic_read(&bat_priv->tt.vn);
90851 batadv_ogm_packet->tt_crc = htons(bat_priv->tt.local_crc);
90852@@ -1037,7 +1037,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
90853 return;
90854
90855 /* could be changed by schedule_own_packet() */
90856- if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno);
90857+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->bat_iv.ogm_seqno);
90858
90859 if (batadv_ogm_packet->flags & BATADV_DIRECTLINK)
90860 has_directlink_flag = 1;
90861diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
90862index de27b31..7058bfe 100644
90863--- a/net/batman-adv/bridge_loop_avoidance.c
90864+++ b/net/batman-adv/bridge_loop_avoidance.c
90865@@ -1522,6 +1522,8 @@ out:
90866 * in these cases, the skb is further handled by this function and
90867 * returns 1, otherwise it returns 0 and the caller shall further
90868 * process the skb.
90869+ *
90870+ * This call might reallocate skb data.
90871 */
90872 int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid)
90873 {
90874diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
90875index f105219..7614af3 100644
90876--- a/net/batman-adv/gateway_client.c
90877+++ b/net/batman-adv/gateway_client.c
90878@@ -508,6 +508,7 @@ out:
90879 return 0;
90880 }
90881
90882+/* this call might reallocate skb data */
90883 static bool batadv_is_type_dhcprequest(struct sk_buff *skb, int header_len)
90884 {
90885 int ret = false;
90886@@ -568,6 +569,7 @@ out:
90887 return ret;
90888 }
90889
90890+/* this call might reallocate skb data */
90891 bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
90892 {
90893 struct ethhdr *ethhdr;
90894@@ -619,6 +621,12 @@ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
90895
90896 if (!pskb_may_pull(skb, *header_len + sizeof(*udphdr)))
90897 return false;
90898+
90899+ /* skb->data might have been reallocated by pskb_may_pull() */
90900+ ethhdr = (struct ethhdr *)skb->data;
90901+ if (ntohs(ethhdr->h_proto) == ETH_P_8021Q)
90902+ ethhdr = (struct ethhdr *)(skb->data + VLAN_HLEN);
90903+
90904 udphdr = (struct udphdr *)(skb->data + *header_len);
90905 *header_len += sizeof(*udphdr);
90906
90907@@ -634,12 +642,14 @@ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
90908 return true;
90909 }
90910
90911+/* this call might reallocate skb data */
90912 bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
90913- struct sk_buff *skb, struct ethhdr *ethhdr)
90914+ struct sk_buff *skb)
90915 {
90916 struct batadv_neigh_node *neigh_curr = NULL, *neigh_old = NULL;
90917 struct batadv_orig_node *orig_dst_node = NULL;
90918 struct batadv_gw_node *curr_gw = NULL;
90919+ struct ethhdr *ethhdr;
90920 bool ret, out_of_range = false;
90921 unsigned int header_len = 0;
90922 uint8_t curr_tq_avg;
90923@@ -648,6 +658,7 @@ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
90924 if (!ret)
90925 goto out;
90926
90927+ ethhdr = (struct ethhdr *)skb->data;
90928 orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
90929 ethhdr->h_dest);
90930 if (!orig_dst_node)
90931diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h
90932index 039902d..1037d75 100644
90933--- a/net/batman-adv/gateway_client.h
90934+++ b/net/batman-adv/gateway_client.h
90935@@ -34,7 +34,6 @@ void batadv_gw_node_delete(struct batadv_priv *bat_priv,
90936 void batadv_gw_node_purge(struct batadv_priv *bat_priv);
90937 int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset);
90938 bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len);
90939-bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
90940- struct sk_buff *skb, struct ethhdr *ethhdr);
90941+bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, struct sk_buff *skb);
90942
90943 #endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */
90944diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
90945index 522243a..b48c0ef 100644
90946--- a/net/batman-adv/hard-interface.c
90947+++ b/net/batman-adv/hard-interface.c
90948@@ -401,7 +401,7 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
90949 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
90950 dev_add_pack(&hard_iface->batman_adv_ptype);
90951
90952- atomic_set(&hard_iface->frag_seqno, 1);
90953+ atomic_set_unchecked(&hard_iface->frag_seqno, 1);
90954 batadv_info(hard_iface->soft_iface, "Adding interface: %s\n",
90955 hard_iface->net_dev->name);
90956
90957@@ -550,7 +550,7 @@ batadv_hardif_add_interface(struct net_device *net_dev)
90958 /* This can't be called via a bat_priv callback because
90959 * we have no bat_priv yet.
90960 */
90961- atomic_set(&hard_iface->bat_iv.ogm_seqno, 1);
90962+ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, 1);
90963 hard_iface->bat_iv.ogm_buff = NULL;
90964
90965 return hard_iface;
90966diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
90967index 819dfb0..226bacd 100644
90968--- a/net/batman-adv/soft-interface.c
90969+++ b/net/batman-adv/soft-interface.c
90970@@ -180,6 +180,9 @@ static int batadv_interface_tx(struct sk_buff *skb,
90971 if (batadv_bla_tx(bat_priv, skb, vid))
90972 goto dropped;
90973
90974+ /* skb->data might have been reallocated by batadv_bla_tx() */
90975+ ethhdr = (struct ethhdr *)skb->data;
90976+
90977 /* Register the client MAC in the transtable */
90978 if (!is_multicast_ether_addr(ethhdr->h_source))
90979 batadv_tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif);
90980@@ -220,6 +223,10 @@ static int batadv_interface_tx(struct sk_buff *skb,
90981 default:
90982 break;
90983 }
90984+
90985+ /* reminder: ethhdr might have become unusable from here on
90986+ * (batadv_gw_is_dhcp_target() might have reallocated skb data)
90987+ */
90988 }
90989
90990 /* ethernet packet should be broadcasted */
90991@@ -253,7 +260,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
90992 primary_if->net_dev->dev_addr, ETH_ALEN);
90993
90994 /* set broadcast sequence number */
90995- seqno = atomic_inc_return(&bat_priv->bcast_seqno);
90996+ seqno = atomic_inc_return_unchecked(&bat_priv->bcast_seqno);
90997 bcast_packet->seqno = htonl(seqno);
90998
90999 batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);
91000@@ -266,7 +273,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
91001 /* unicast packet */
91002 } else {
91003 if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_OFF) {
91004- ret = batadv_gw_out_of_range(bat_priv, skb, ethhdr);
91005+ ret = batadv_gw_out_of_range(bat_priv, skb);
91006 if (ret)
91007 goto dropped;
91008 }
91009@@ -472,7 +479,7 @@ static int batadv_softif_init_late(struct net_device *dev)
91010 atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
91011
91012 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
91013- atomic_set(&bat_priv->bcast_seqno, 1);
91014+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
91015 atomic_set(&bat_priv->tt.vn, 0);
91016 atomic_set(&bat_priv->tt.local_changes, 0);
91017 atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
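
[annotation] The two soft-interface.c comment/reload hunks are an upstream correctness fix rather than hardening: batadv_bla_tx() and batadv_gw_is_dhcp_target() may call pskb_expand_head(), which moves skb->data and leaves any header pointer computed earlier dangling. The general shape of the hazard and the fix, with may_reallocate() as a hypothetical stand-in for the batman-adv helpers:

    #include <linux/etherdevice.h>
    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    static bool may_reallocate(struct sk_buff *skb); /* hypothetical helper
                                                      * that can call
                                                      * pskb_expand_head() */

    static int tx_sketch(struct sk_buff *skb)
    {
        struct ethhdr *ethhdr = (struct ethhdr *)skb->data;

        if (may_reallocate(skb))
            return NET_XMIT_DROP;

        /* skb->data may have moved above: refresh the cached pointer
         * before dereferencing it again */
        ethhdr = (struct ethhdr *)skb->data;

        return is_multicast_ether_addr(ethhdr->h_source) ?
               NET_XMIT_DROP : NET_XMIT_SUCCESS;
    }
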
91018diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
91019index aba8364..50fcbb8 100644
91020--- a/net/batman-adv/types.h
91021+++ b/net/batman-adv/types.h
91022@@ -51,7 +51,7 @@
91023 struct batadv_hard_iface_bat_iv {
91024 unsigned char *ogm_buff;
91025 int ogm_buff_len;
91026- atomic_t ogm_seqno;
91027+ atomic_unchecked_t ogm_seqno;
91028 };
91029
91030 /**
91031@@ -75,7 +75,7 @@ struct batadv_hard_iface {
91032 int16_t if_num;
91033 char if_status;
91034 struct net_device *net_dev;
91035- atomic_t frag_seqno;
91036+ atomic_unchecked_t frag_seqno;
91037 struct kobject *hardif_obj;
91038 atomic_t refcount;
91039 struct packet_type batman_adv_ptype;
91040@@ -558,7 +558,7 @@ struct batadv_priv {
91041 #ifdef CONFIG_BATMAN_ADV_DEBUG
91042 atomic_t log_level;
91043 #endif
91044- atomic_t bcast_seqno;
91045+ atomic_unchecked_t bcast_seqno;
91046 atomic_t bcast_queue_left;
91047 atomic_t batman_queue_left;
91048 char num_ifaces;
91049diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
91050index 0bb3b59..0e3052e 100644
91051--- a/net/batman-adv/unicast.c
91052+++ b/net/batman-adv/unicast.c
91053@@ -270,7 +270,7 @@ int batadv_frag_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv,
91054 frag1->flags = BATADV_UNI_FRAG_HEAD | large_tail;
91055 frag2->flags = large_tail;
91056
91057- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
91058+ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
91059 frag1->seqno = htons(seqno - 1);
91060 frag2->seqno = htons(seqno);
91061
91062@@ -326,7 +326,9 @@ static bool batadv_unicast_push_and_fill_skb(struct sk_buff *skb, int hdr_size,
91063 * @skb: the skb containing the payload to encapsulate
91064 * @orig_node: the destination node
91065 *
91066- * Returns false if the payload could not be encapsulated or true otherwise
91067+ * Returns false if the payload could not be encapsulated or true otherwise.
91068+ *
91069+ * This call might reallocate skb data.
91070 */
91071 static bool batadv_unicast_prepare_skb(struct sk_buff *skb,
91072 struct batadv_orig_node *orig_node)
91073@@ -343,7 +345,9 @@ static bool batadv_unicast_prepare_skb(struct sk_buff *skb,
91074 * @orig_node: the destination node
91075 * @packet_subtype: the batman 4addr packet subtype to use
91076 *
91077- * Returns false if the payload could not be encapsulated or true otherwise
91078+ * Returns false if the payload could not be encapsulated or true otherwise.
91079+ *
91080+ * This call might reallocate skb data.
91081 */
91082 bool batadv_unicast_4addr_prepare_skb(struct batadv_priv *bat_priv,
91083 struct sk_buff *skb,
91084@@ -401,7 +405,7 @@ int batadv_unicast_generic_send_skb(struct batadv_priv *bat_priv,
91085 struct batadv_neigh_node *neigh_node;
91086 int data_len = skb->len;
91087 int ret = NET_RX_DROP;
91088- unsigned int dev_mtu;
91089+ unsigned int dev_mtu, header_len;
91090
91091 /* get routing information */
91092 if (is_multicast_ether_addr(ethhdr->h_dest)) {
91093@@ -429,10 +433,12 @@ find_router:
91094 switch (packet_type) {
91095 case BATADV_UNICAST:
91096 batadv_unicast_prepare_skb(skb, orig_node);
91097+ header_len = sizeof(struct batadv_unicast_packet);
91098 break;
91099 case BATADV_UNICAST_4ADDR:
91100 batadv_unicast_4addr_prepare_skb(bat_priv, skb, orig_node,
91101 packet_subtype);
91102+ header_len = sizeof(struct batadv_unicast_4addr_packet);
91103 break;
91104 default:
91105 /* this function supports UNICAST and UNICAST_4ADDR only. It
91106@@ -441,6 +447,7 @@ find_router:
91107 goto out;
91108 }
91109
91110+ ethhdr = (struct ethhdr *)(skb->data + header_len);
91111 unicast_packet = (struct batadv_unicast_packet *)skb->data;
91112
91113 /* inform the destination node that we are still missing a correct route
91114diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
91115index ace5e55..a65a1c0 100644
91116--- a/net/bluetooth/hci_core.c
91117+++ b/net/bluetooth/hci_core.c
91118@@ -2211,16 +2211,16 @@ int hci_register_dev(struct hci_dev *hdev)
91119 list_add(&hdev->list, &hci_dev_list);
91120 write_unlock(&hci_dev_list_lock);
91121
91122- hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
91123- WQ_MEM_RECLAIM, 1);
91124+ hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
91125+ WQ_MEM_RECLAIM, 1, hdev->name);
91126 if (!hdev->workqueue) {
91127 error = -ENOMEM;
91128 goto err;
91129 }
91130
91131- hdev->req_workqueue = alloc_workqueue(hdev->name,
91132+ hdev->req_workqueue = alloc_workqueue("%s",
91133 WQ_HIGHPRI | WQ_UNBOUND |
91134- WQ_MEM_RECLAIM, 1);
91135+ WQ_MEM_RECLAIM, 1, hdev->name);
91136 if (!hdev->req_workqueue) {
91137 destroy_workqueue(hdev->workqueue);
91138 error = -ENOMEM;
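
[annotation] The hci_core.c hunk is a format-string fix: hdev->name can be influenced by the peer or by userspace, so passing it as the format argument would let any '%' inside the name be interpreted. Assuming the variadic alloc_workqueue(fmt, flags, max_active, ...) signature this hunk targets, the safe convention is to pass the name as data:

    #include <linux/workqueue.h>
    #include <net/bluetooth/hci_core.h>

    static int register_wq_sketch(struct hci_dev *hdev)
    {
        /* unsafe: conversion specifiers in hdev->name are interpreted
         *     alloc_workqueue(hdev->name, flags, 1);
         * safe: the name travels as an argument, never as the format */
        hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
                                          WQ_MEM_RECLAIM, 1, hdev->name);
        return hdev->workqueue ? 0 : -ENOMEM;
    }
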
91139diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
91140index 9bd7d95..6c4884f 100644
91141--- a/net/bluetooth/hci_sock.c
91142+++ b/net/bluetooth/hci_sock.c
91143@@ -934,7 +934,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
91144 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
91145 }
91146
91147- len = min_t(unsigned int, len, sizeof(uf));
91148+ len = min((size_t)len, sizeof(uf));
91149 if (copy_from_user(&uf, optval, len)) {
91150 err = -EFAULT;
91151 break;
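
[annotation] This hci_sock.c change, like the l2cap and rfcomm setsockopt hunks below, moves the user-supplied option length into size_t before clamping, so the value stays unsigned at pointer width all the way into copy_from_user(); in the l2cap and rfcomm variants the length previously sat in a signed int. A minimal sketch of the hardened pattern, with a stand-in option struct:

    #include <linux/kernel.h>
    #include <linux/types.h>
    #include <linux/uaccess.h>

    struct opt_sketch { u8 level; u8 key_size; }; /* stand-in for bt_security */

    static int copy_opt_in(void __user *optval, unsigned int optlen,
                           struct opt_sketch *sec)
    {
        size_t len = optlen;            /* widen once, stay unsigned */

        len = min(sizeof(*sec), len);   /* both operands are size_t */
        if (copy_from_user(sec, optval, len))
            return -EFAULT;
        return 0;
    }
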
91152diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
91153index 68843a2..30e9342 100644
91154--- a/net/bluetooth/l2cap_core.c
91155+++ b/net/bluetooth/l2cap_core.c
91156@@ -3507,8 +3507,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
91157 break;
91158
91159 case L2CAP_CONF_RFC:
91160- if (olen == sizeof(rfc))
91161- memcpy(&rfc, (void *)val, olen);
91162+ if (olen != sizeof(rfc))
91163+ break;
91164+
91165+ memcpy(&rfc, (void *)val, olen);
91166
91167 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
91168 rfc.mode != chan->mode)
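
[annotation] The l2cap_core.c hunk changes control flow around the RFC option: previously a wrong olen only skipped the memcpy(), and execution fell through to code that acts on an rfc that was never filled in; now a mismatched length leaves the case immediately. The defensive idiom for fixed-size options, sketched with a stand-in struct:

    #include <linux/errno.h>
    #include <linux/string.h>

    struct rfc_opt { unsigned char mode, txwin; }; /* stand-in for
                                                    * l2cap_conf_rfc */

    /* returns 0 only when the option had the expected size and *rfc is
     * fully initialized; callers must not inspect *rfc on failure */
    static int parse_rfc_option(const void *val, int olen, struct rfc_opt *rfc)
    {
        if (olen != sizeof(*rfc))
            return -EINVAL;     /* wrong size: reject, don't fall through */
        memcpy(rfc, val, olen);
        return 0;
    }
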
91169diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
91170index 36fed40..be2eeb2 100644
91171--- a/net/bluetooth/l2cap_sock.c
91172+++ b/net/bluetooth/l2cap_sock.c
91173@@ -485,7 +485,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
91174 struct sock *sk = sock->sk;
91175 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
91176 struct l2cap_options opts;
91177- int len, err = 0;
91178+ int err = 0;
91179+ size_t len = optlen;
91180 u32 opt;
91181
91182 BT_DBG("sk %p", sk);
91183@@ -507,7 +508,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
91184 opts.max_tx = chan->max_tx;
91185 opts.txwin_size = chan->tx_win;
91186
91187- len = min_t(unsigned int, sizeof(opts), optlen);
91188+ len = min(sizeof(opts), len);
91189 if (copy_from_user((char *) &opts, optval, len)) {
91190 err = -EFAULT;
91191 break;
91192@@ -587,7 +588,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
91193 struct bt_security sec;
91194 struct bt_power pwr;
91195 struct l2cap_conn *conn;
91196- int len, err = 0;
91197+ int err = 0;
91198+ size_t len = optlen;
91199 u32 opt;
91200
91201 BT_DBG("sk %p", sk);
91202@@ -610,7 +612,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
91203
91204 sec.level = BT_SECURITY_LOW;
91205
91206- len = min_t(unsigned int, sizeof(sec), optlen);
91207+ len = min(sizeof(sec), len);
91208 if (copy_from_user((char *) &sec, optval, len)) {
91209 err = -EFAULT;
91210 break;
91211@@ -707,7 +709,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
91212
91213 pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
91214
91215- len = min_t(unsigned int, sizeof(pwr), optlen);
91216+ len = min(sizeof(pwr), len);
91217 if (copy_from_user((char *) &pwr, optval, len)) {
91218 err = -EFAULT;
91219 break;
91220diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
91221index 30b3721..c1bd0a0 100644
91222--- a/net/bluetooth/rfcomm/sock.c
91223+++ b/net/bluetooth/rfcomm/sock.c
91224@@ -666,7 +666,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
91225 struct sock *sk = sock->sk;
91226 struct bt_security sec;
91227 int err = 0;
91228- size_t len;
91229+ size_t len = optlen;
91230 u32 opt;
91231
91232 BT_DBG("sk %p", sk);
91233@@ -688,7 +688,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
91234
91235 sec.level = BT_SECURITY_LOW;
91236
91237- len = min_t(unsigned int, sizeof(sec), optlen);
91238+ len = min(sizeof(sec), len);
91239 if (copy_from_user((char *) &sec, optval, len)) {
91240 err = -EFAULT;
91241 break;
91242diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
91243index b6e44ad..5b0d514 100644
91244--- a/net/bluetooth/rfcomm/tty.c
91245+++ b/net/bluetooth/rfcomm/tty.c
91246@@ -309,7 +309,7 @@ static void rfcomm_dev_del(struct rfcomm_dev *dev)
91247 BUG_ON(test_and_set_bit(RFCOMM_TTY_RELEASED, &dev->flags));
91248
91249 spin_lock_irqsave(&dev->port.lock, flags);
91250- if (dev->port.count > 0) {
91251+ if (atomic_read(&dev->port.count) > 0) {
91252 spin_unlock_irqrestore(&dev->port.lock, flags);
91253 return;
91254 }
91255@@ -659,10 +659,10 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
91256 return -ENODEV;
91257
91258 BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
91259- dev->channel, dev->port.count);
91260+ dev->channel, atomic_read(&dev->port.count));
91261
91262 spin_lock_irqsave(&dev->port.lock, flags);
91263- if (++dev->port.count > 1) {
91264+ if (atomic_inc_return(&dev->port.count) > 1) {
91265 spin_unlock_irqrestore(&dev->port.lock, flags);
91266 return 0;
91267 }
91268@@ -727,10 +727,10 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
91269 return;
91270
91271 BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
91272- dev->port.count);
91273+ atomic_read(&dev->port.count));
91274
91275 spin_lock_irqsave(&dev->port.lock, flags);
91276- if (!--dev->port.count) {
91277+ if (!atomic_dec_return(&dev->port.count)) {
91278 spin_unlock_irqrestore(&dev->port.lock, flags);
91279 if (dev->tty_dev->parent)
91280 device_move(dev->tty_dev, NULL, DPM_ORDER_DEV_LAST);
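
[annotation] The rfcomm/tty.c hunks pair with a change elsewhere in this patch that appears to turn the tty_port count into an atomic_t, so the ++/-- updates under the port spinlock become atomic operations. The _return forms are used so the post-update value can still drive the first-open and last-close decisions in a single step; in sketch form, with hypothetical activation helpers:

    #include <linux/atomic.h>

    static atomic_t port_count = ATOMIC_INIT(0);

    static void port_open_sketch(void)
    {
        /* only the first opener performs one-time activation */
        if (atomic_inc_return(&port_count) == 1)
            activate_port();    /* hypothetical */
    }

    static void port_close_sketch(void)
    {
        /* only the last closer tears the port down */
        if (atomic_dec_return(&port_count) == 0)
            shutdown_port();    /* hypothetical */
    }
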
91281diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
91282index 3d110c4..4e1b2eb 100644
91283--- a/net/bridge/netfilter/ebtables.c
91284+++ b/net/bridge/netfilter/ebtables.c
91285@@ -1525,7 +1525,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
91286 tmp.valid_hooks = t->table->valid_hooks;
91287 }
91288 mutex_unlock(&ebt_mutex);
91289- if (copy_to_user(user, &tmp, *len) != 0){
91290+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
91291 BUGPRINT("c2u Didn't work\n");
91292 ret = -EFAULT;
91293 break;
91294@@ -2331,7 +2331,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
91295 goto out;
91296 tmp.valid_hooks = t->valid_hooks;
91297
91298- if (copy_to_user(user, &tmp, *len) != 0) {
91299+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
91300 ret = -EFAULT;
91301 break;
91302 }
91303@@ -2342,7 +2342,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
91304 tmp.entries_size = t->table->entries_size;
91305 tmp.valid_hooks = t->table->valid_hooks;
91306
91307- if (copy_to_user(user, &tmp, *len) != 0) {
91308+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
91309 ret = -EFAULT;
91310 break;
91311 }
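
[annotation] All three ebtables hunks add the same guard: *len is a user-controlled byte count, and honouring it verbatim when copying out a fixed-size kernel struct would disclose adjacent memory. Bounding the copy by the object size first is the standard fix; reduced to a sketch:

    #include <linux/errno.h>
    #include <linux/uaccess.h>

    /* tmp is a fixed-size kernel object; *len arrives from userspace */
    static int copy_info_out(void __user *user, const int *len,
                             const void *tmp, size_t tmp_size)
    {
        /* a negative *len converts to a huge size_t and is rejected too */
        if (*len > tmp_size || copy_to_user(user, tmp, *len))
            return -EFAULT;
        return 0;
    }
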
91312diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
91313index 2bd4b58..0dc30a1 100644
91314--- a/net/caif/cfctrl.c
91315+++ b/net/caif/cfctrl.c
91316@@ -10,6 +10,7 @@
91317 #include <linux/spinlock.h>
91318 #include <linux/slab.h>
91319 #include <linux/pkt_sched.h>
91320+#include <linux/sched.h>
91321 #include <net/caif/caif_layer.h>
91322 #include <net/caif/cfpkt.h>
91323 #include <net/caif/cfctrl.h>
91324@@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void)
91325 memset(&dev_info, 0, sizeof(dev_info));
91326 dev_info.id = 0xff;
91327 cfsrvl_init(&this->serv, 0, &dev_info, false);
91328- atomic_set(&this->req_seq_no, 1);
91329- atomic_set(&this->rsp_seq_no, 1);
91330+ atomic_set_unchecked(&this->req_seq_no, 1);
91331+ atomic_set_unchecked(&this->rsp_seq_no, 1);
91332 this->serv.layer.receive = cfctrl_recv;
91333 sprintf(this->serv.layer.name, "ctrl");
91334 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
91335@@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
91336 struct cfctrl_request_info *req)
91337 {
91338 spin_lock_bh(&ctrl->info_list_lock);
91339- atomic_inc(&ctrl->req_seq_no);
91340- req->sequence_no = atomic_read(&ctrl->req_seq_no);
91341+ atomic_inc_unchecked(&ctrl->req_seq_no);
91342+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
91343 list_add_tail(&req->list, &ctrl->list);
91344 spin_unlock_bh(&ctrl->info_list_lock);
91345 }
91346@@ -149,7 +150,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
91347 if (p != first)
91348 pr_warn("Requests are not received in order\n");
91349
91350- atomic_set(&ctrl->rsp_seq_no,
91351+ atomic_set_unchecked(&ctrl->rsp_seq_no,
91352 p->sequence_no);
91353 list_del(&p->list);
91354 goto out;
91355diff --git a/net/can/af_can.c b/net/can/af_can.c
91356index c4e5085..aa9efdf 100644
91357--- a/net/can/af_can.c
91358+++ b/net/can/af_can.c
91359@@ -862,7 +862,7 @@ static const struct net_proto_family can_family_ops = {
91360 };
91361
91362 /* notifier block for netdevice event */
91363-static struct notifier_block can_netdev_notifier __read_mostly = {
91364+static struct notifier_block can_netdev_notifier = {
91365 .notifier_call = can_notifier,
91366 };
91367
91368diff --git a/net/can/gw.c b/net/can/gw.c
91369index 3ee690e..00d581b 100644
91370--- a/net/can/gw.c
91371+++ b/net/can/gw.c
91372@@ -80,7 +80,6 @@ MODULE_PARM_DESC(max_hops,
91373 "default: " __stringify(CGW_DEFAULT_HOPS) ")");
91374
91375 static HLIST_HEAD(cgw_list);
91376-static struct notifier_block notifier;
91377
91378 static struct kmem_cache *cgw_cache __read_mostly;
91379
91380@@ -927,6 +926,10 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh)
91381 return err;
91382 }
91383
91384+static struct notifier_block notifier = {
91385+ .notifier_call = cgw_notifier
91386+};
91387+
91388 static __init int cgw_module_init(void)
91389 {
91390 /* sanitize given module parameter */
91391@@ -942,7 +945,6 @@ static __init int cgw_module_init(void)
91392 return -ENOMEM;
91393
91394 /* set notifier */
91395- notifier.notifier_call = cgw_notifier;
91396 register_netdevice_notifier(&notifier);
91397
91398 if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
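
[annotation] The can/gw.c change replaces a runtime store to notifier.notifier_call with a static designated initializer; with no runtime writes left, the structure becomes a candidate for read-only placement by the patch's constification machinery (the af_can.c hunk above, which drops __read_mostly from a notifier, appears to serve the same goal, though that reading is an assumption). The preferred form:

    #include <linux/notifier.h>

    static int my_notifier_cb(struct notifier_block *nb,
                              unsigned long event, void *ptr)
    {
        return NOTIFY_DONE;
    }

    /* designated initializer: no writable setup step at runtime */
    static struct notifier_block my_notifier = {
        .notifier_call = my_notifier_cb,
    };
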
91399diff --git a/net/compat.c b/net/compat.c
91400index f0a1ba6..0541331 100644
91401--- a/net/compat.c
91402+++ b/net/compat.c
91403@@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
91404 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
91405 __get_user(kmsg->msg_flags, &umsg->msg_flags))
91406 return -EFAULT;
91407- kmsg->msg_name = compat_ptr(tmp1);
91408- kmsg->msg_iov = compat_ptr(tmp2);
91409- kmsg->msg_control = compat_ptr(tmp3);
91410+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
91411+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
91412+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
91413 return 0;
91414 }
91415
91416@@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
91417
91418 if (kern_msg->msg_namelen) {
91419 if (mode == VERIFY_READ) {
91420- int err = move_addr_to_kernel(kern_msg->msg_name,
91421+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
91422 kern_msg->msg_namelen,
91423 kern_address);
91424 if (err < 0)
91425@@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
91426 kern_msg->msg_name = NULL;
91427
91428 tot_len = iov_from_user_compat_to_kern(kern_iov,
91429- (struct compat_iovec __user *)kern_msg->msg_iov,
91430+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
91431 kern_msg->msg_iovlen);
91432 if (tot_len >= 0)
91433 kern_msg->msg_iov = kern_iov;
91434@@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
91435
91436 #define CMSG_COMPAT_FIRSTHDR(msg) \
91437 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
91438- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
91439+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
91440 (struct compat_cmsghdr __user *)NULL)
91441
91442 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
91443 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
91444 (ucmlen) <= (unsigned long) \
91445 ((mhdr)->msg_controllen - \
91446- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
91447+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
91448
91449 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
91450 struct compat_cmsghdr __user *cmsg, int cmsg_len)
91451 {
91452 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
91453- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
91454+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
91455 msg->msg_controllen)
91456 return NULL;
91457 return (struct compat_cmsghdr __user *)ptr;
91458@@ -219,7 +219,7 @@ Efault:
91459
91460 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
91461 {
91462- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
91463+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
91464 struct compat_cmsghdr cmhdr;
91465 struct compat_timeval ctv;
91466 struct compat_timespec cts[3];
91467@@ -275,7 +275,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
91468
91469 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
91470 {
91471- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
91472+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
91473 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
91474 int fdnum = scm->fp->count;
91475 struct file **fp = scm->fp->fp;
91476@@ -363,7 +363,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
91477 return -EFAULT;
91478 old_fs = get_fs();
91479 set_fs(KERNEL_DS);
91480- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
91481+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
91482 set_fs(old_fs);
91483
91484 return err;
91485@@ -424,7 +424,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
91486 len = sizeof(ktime);
91487 old_fs = get_fs();
91488 set_fs(KERNEL_DS);
91489- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
91490+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
91491 set_fs(old_fs);
91492
91493 if (!err) {
91494@@ -567,7 +567,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
91495 case MCAST_JOIN_GROUP:
91496 case MCAST_LEAVE_GROUP:
91497 {
91498- struct compat_group_req __user *gr32 = (void *)optval;
91499+ struct compat_group_req __user *gr32 = (void __user *)optval;
91500 struct group_req __user *kgr =
91501 compat_alloc_user_space(sizeof(struct group_req));
91502 u32 interface;
91503@@ -588,7 +588,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
91504 case MCAST_BLOCK_SOURCE:
91505 case MCAST_UNBLOCK_SOURCE:
91506 {
91507- struct compat_group_source_req __user *gsr32 = (void *)optval;
91508+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
91509 struct group_source_req __user *kgsr = compat_alloc_user_space(
91510 sizeof(struct group_source_req));
91511 u32 interface;
91512@@ -609,7 +609,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
91513 }
91514 case MCAST_MSFILTER:
91515 {
91516- struct compat_group_filter __user *gf32 = (void *)optval;
91517+ struct compat_group_filter __user *gf32 = (void __user *)optval;
91518 struct group_filter __user *kgf;
91519 u32 interface, fmode, numsrc;
91520
91521@@ -647,7 +647,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
91522 char __user *optval, int __user *optlen,
91523 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
91524 {
91525- struct compat_group_filter __user *gf32 = (void *)optval;
91526+ struct compat_group_filter __user *gf32 = (void __user *)optval;
91527 struct group_filter __user *kgf;
91528 int __user *koptlen;
91529 u32 interface, fmode, numsrc;
91530@@ -805,7 +805,7 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args)
91531
91532 if (call < SYS_SOCKET || call > SYS_SENDMMSG)
91533 return -EINVAL;
91534- if (copy_from_user(a, args, nas[call]))
91535+ if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
91536 return -EFAULT;
91537 a0 = a[0];
91538 a1 = a[1];
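
[annotation] The net/compat.c hunks are churn from the patch's stricter sparse separation of user and kernel pointers: get_compat_msghdr() deliberately stores user pointers into struct msghdr fields typed as kernel pointers, and every such crossing must now be spelled out. Assuming the compiler.h side of this patch, the shorthand combines the force-cast with the retag:

    /* sketch of the convention used throughout these hunks */
    #ifdef __CHECKER__
    # define __force_user   __force __user      /* retag as a user pointer */
    # define __force_kernel __force __kernel    /* retag as a kernel pointer */
    #else
    # define __force_user
    # define __force_kernel
    #endif

So a cast like (void __force_kernel *)compat_ptr(tmp1) reads as: take the user pointer and deliberately, visibly, store it in a kernel-typed field, rather than letting the address spaces mix silently.
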
91539diff --git a/net/core/datagram.c b/net/core/datagram.c
91540index b71423d..0360434 100644
91541--- a/net/core/datagram.c
91542+++ b/net/core/datagram.c
91543@@ -295,7 +295,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
91544 }
91545
91546 kfree_skb(skb);
91547- atomic_inc(&sk->sk_drops);
91548+ atomic_inc_unchecked(&sk->sk_drops);
91549 sk_mem_reclaim_partial(sk);
91550
91551 return err;
91552diff --git a/net/core/dev.c b/net/core/dev.c
91553index 7ddbb31..3902452 100644
91554--- a/net/core/dev.c
91555+++ b/net/core/dev.c
91556@@ -1649,7 +1649,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
91557 {
91558 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
91559 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
91560- atomic_long_inc(&dev->rx_dropped);
91561+ atomic_long_inc_unchecked(&dev->rx_dropped);
91562 kfree_skb(skb);
91563 return NET_RX_DROP;
91564 }
91565@@ -1658,7 +1658,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
91566 skb_orphan(skb);
91567
91568 if (unlikely(!is_skb_forwardable(dev, skb))) {
91569- atomic_long_inc(&dev->rx_dropped);
91570+ atomic_long_inc_unchecked(&dev->rx_dropped);
91571 kfree_skb(skb);
91572 return NET_RX_DROP;
91573 }
91574@@ -2404,7 +2404,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
91575
91576 struct dev_gso_cb {
91577 void (*destructor)(struct sk_buff *skb);
91578-};
91579+} __no_const;
91580
91581 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
91582
91583@@ -3139,7 +3139,7 @@ enqueue:
91584
91585 local_irq_restore(flags);
91586
91587- atomic_long_inc(&skb->dev->rx_dropped);
91588+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
91589 kfree_skb(skb);
91590 return NET_RX_DROP;
91591 }
91592@@ -3211,7 +3211,7 @@ int netif_rx_ni(struct sk_buff *skb)
91593 }
91594 EXPORT_SYMBOL(netif_rx_ni);
91595
91596-static void net_tx_action(struct softirq_action *h)
91597+static void net_tx_action(void)
91598 {
91599 struct softnet_data *sd = &__get_cpu_var(softnet_data);
91600
91601@@ -3545,7 +3545,7 @@ ncls:
91602 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
91603 } else {
91604 drop:
91605- atomic_long_inc(&skb->dev->rx_dropped);
91606+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
91607 kfree_skb(skb);
91608 /* Jamal, now you will not able to escape explaining
91609 * me how you were going to use this. :-)
91610@@ -4153,7 +4153,7 @@ void netif_napi_del(struct napi_struct *napi)
91611 }
91612 EXPORT_SYMBOL(netif_napi_del);
91613
91614-static void net_rx_action(struct softirq_action *h)
91615+static void net_rx_action(void)
91616 {
91617 struct softnet_data *sd = &__get_cpu_var(softnet_data);
91618 unsigned long time_limit = jiffies + 2;
91619@@ -5590,7 +5590,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
91620 } else {
91621 netdev_stats_to_stats64(storage, &dev->stats);
91622 }
91623- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
91624+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
91625 return storage;
91626 }
91627 EXPORT_SYMBOL(dev_get_stats);
91628diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
91629index 5b7d0e1..cb960fc 100644
91630--- a/net/core/dev_ioctl.c
91631+++ b/net/core/dev_ioctl.c
91632@@ -365,9 +365,13 @@ void dev_load(struct net *net, const char *name)
91633 if (no_module && capable(CAP_NET_ADMIN))
91634 no_module = request_module("netdev-%s", name);
91635 if (no_module && capable(CAP_SYS_MODULE)) {
91636+#ifdef CONFIG_GRKERNSEC_MODHARDEN
91637+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
91638+#else
91639 if (!request_module("%s", name))
91640 pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
91641 name);
91642+#endif
91643 }
91644 }
91645 EXPORT_SYMBOL(dev_load);
91646diff --git a/net/core/ethtool.c b/net/core/ethtool.c
91647index ce91766..3b71cdb 100644
91648--- a/net/core/ethtool.c
91649+++ b/net/core/ethtool.c
91650@@ -1319,10 +1319,19 @@ static int ethtool_get_dump_data(struct net_device *dev,
91651 if (ret)
91652 return ret;
91653
91654- len = (tmp.len > dump.len) ? dump.len : tmp.len;
91655+ len = min(tmp.len, dump.len);
91656 if (!len)
91657 return -EFAULT;
91658
91659+ /* Don't ever let the driver think there's more space available
91660+ * than it requested with .get_dump_flag().
91661+ */
91662+ dump.len = len;
91663+
91664+ /* Always allocate enough space to hold the whole thing so that the
91665+ * driver does not need to check the length and bother with partial
91666+ * dumping.
91667+ */
91668 data = vzalloc(tmp.len);
91669 if (!data)
91670 return -ENOMEM;
91671@@ -1330,6 +1339,16 @@ static int ethtool_get_dump_data(struct net_device *dev,
91672 if (ret)
91673 goto out;
91674
91675+ /* There are two sane possibilities:
91676+ * 1. The driver's .get_dump_data() does not touch dump.len.
91677+ * 2. Or it may set dump.len to how much it really writes, which
91678+ * should be tmp.len (or len if it can do a partial dump).
91679+ * In any case respond to userspace with the actual length of data
91680+ * it's receiving.
91681+ */
91682+ WARN_ON(dump.len != len && dump.len != tmp.len);
91683+ dump.len = len;
91684+
91685 if (copy_to_user(useraddr, &dump, sizeof(dump))) {
91686 ret = -EFAULT;
91687 goto out;
91688diff --git a/net/core/flow.c b/net/core/flow.c
91689index 7102f16..146b4bd 100644
91690--- a/net/core/flow.c
91691+++ b/net/core/flow.c
91692@@ -61,7 +61,7 @@ struct flow_cache {
91693 struct timer_list rnd_timer;
91694 };
91695
91696-atomic_t flow_cache_genid = ATOMIC_INIT(0);
91697+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
91698 EXPORT_SYMBOL(flow_cache_genid);
91699 static struct flow_cache flow_cache_global;
91700 static struct kmem_cache *flow_cachep __read_mostly;
91701@@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
91702
91703 static int flow_entry_valid(struct flow_cache_entry *fle)
91704 {
91705- if (atomic_read(&flow_cache_genid) != fle->genid)
91706+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
91707 return 0;
91708 if (fle->object && !fle->object->ops->check(fle->object))
91709 return 0;
91710@@ -258,7 +258,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
91711 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
91712 fcp->hash_count++;
91713 }
91714- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
91715+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
91716 flo = fle->object;
91717 if (!flo)
91718 goto ret_object;
91719@@ -279,7 +279,7 @@ nocache:
91720 }
91721 flo = resolver(net, key, family, dir, flo, ctx);
91722 if (fle) {
91723- fle->genid = atomic_read(&flow_cache_genid);
91724+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
91725 if (!IS_ERR(flo))
91726 fle->object = flo;
91727 else
91728diff --git a/net/core/iovec.c b/net/core/iovec.c
91729index de178e4..1dabd8b 100644
91730--- a/net/core/iovec.c
91731+++ b/net/core/iovec.c
91732@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
91733 if (m->msg_namelen) {
91734 if (mode == VERIFY_READ) {
91735 void __user *namep;
91736- namep = (void __user __force *) m->msg_name;
91737+ namep = (void __force_user *) m->msg_name;
91738 err = move_addr_to_kernel(namep, m->msg_namelen,
91739 address);
91740 if (err < 0)
91741@@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
91742 }
91743
91744 size = m->msg_iovlen * sizeof(struct iovec);
91745- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
91746+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
91747 return -EFAULT;
91748
91749 m->msg_iov = iov;
91750diff --git a/net/core/neighbour.c b/net/core/neighbour.c
91751index ce90b02..8752627 100644
91752--- a/net/core/neighbour.c
91753+++ b/net/core/neighbour.c
91754@@ -2771,7 +2771,7 @@ static int proc_unres_qlen(ctl_table *ctl, int write, void __user *buffer,
91755 size_t *lenp, loff_t *ppos)
91756 {
91757 int size, ret;
91758- ctl_table tmp = *ctl;
91759+ ctl_table_no_const tmp = *ctl;
91760
91761 tmp.extra1 = &zero;
91762 tmp.extra2 = &unres_qlen_max;
91763diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
91764index 569d355..79cf2d0 100644
91765--- a/net/core/net-procfs.c
91766+++ b/net/core/net-procfs.c
91767@@ -271,8 +271,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
91768 else
91769 seq_printf(seq, "%04x", ntohs(pt->type));
91770
91771+#ifdef CONFIG_GRKERNSEC_HIDESYM
91772+ seq_printf(seq, " %-8s %pf\n",
91773+ pt->dev ? pt->dev->name : "", NULL);
91774+#else
91775 seq_printf(seq, " %-8s %pf\n",
91776 pt->dev ? pt->dev->name : "", pt->func);
91777+#endif
91778 }
91779
91780 return 0;
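
[annotation] The net-procfs.c hunk belongs to GRKERNSEC_HIDESYM: %pf resolves a function pointer to a symbol name (or, failing that, a raw address), revealing kernel layout to anyone who can read /proc/net/ptype, so with the option enabled the pointer slot is printed as NULL. The sock_diag hunks below apply the same policy to the socket cookie, which is derived from a kernel address. A sketch of the gate:

    #include <linux/seq_file.h>

    static void show_handler(struct seq_file *seq, const char *name, void *fn)
    {
    #ifdef CONFIG_GRKERNSEC_HIDESYM
        fn = NULL;              /* never print kernel addresses */
    #endif
        seq_printf(seq, " %-8s %pf\n", name, fn);
    }
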
91781diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
91782index 981fed3..536af34 100644
91783--- a/net/core/net-sysfs.c
91784+++ b/net/core/net-sysfs.c
91785@@ -1311,7 +1311,7 @@ void netdev_class_remove_file(struct class_attribute *class_attr)
91786 }
91787 EXPORT_SYMBOL(netdev_class_remove_file);
91788
91789-int netdev_kobject_init(void)
91790+int __init netdev_kobject_init(void)
91791 {
91792 kobj_ns_type_register(&net_ns_type_operations);
91793 return class_register(&net_class);
91794diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
91795index f9765203..9feaef8 100644
91796--- a/net/core/net_namespace.c
91797+++ b/net/core/net_namespace.c
91798@@ -443,7 +443,7 @@ static int __register_pernet_operations(struct list_head *list,
91799 int error;
91800 LIST_HEAD(net_exit_list);
91801
91802- list_add_tail(&ops->list, list);
91803+ pax_list_add_tail((struct list_head *)&ops->list, list);
91804 if (ops->init || (ops->id && ops->size)) {
91805 for_each_net(net) {
91806 error = ops_init(ops, net);
91807@@ -456,7 +456,7 @@ static int __register_pernet_operations(struct list_head *list,
91808
91809 out_undo:
91810 /* If I have an error cleanup all namespaces I initialized */
91811- list_del(&ops->list);
91812+ pax_list_del((struct list_head *)&ops->list);
91813 ops_exit_list(ops, &net_exit_list);
91814 ops_free_list(ops, &net_exit_list);
91815 return error;
91816@@ -467,7 +467,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
91817 struct net *net;
91818 LIST_HEAD(net_exit_list);
91819
91820- list_del(&ops->list);
91821+ pax_list_del((struct list_head *)&ops->list);
91822 for_each_net(net)
91823 list_add_tail(&net->exit_list, &net_exit_list);
91824 ops_exit_list(ops, &net_exit_list);
91825@@ -601,7 +601,7 @@ int register_pernet_device(struct pernet_operations *ops)
91826 mutex_lock(&net_mutex);
91827 error = register_pernet_operations(&pernet_list, ops);
91828 if (!error && (first_device == &pernet_list))
91829- first_device = &ops->list;
91830+ first_device = (struct list_head *)&ops->list;
91831 mutex_unlock(&net_mutex);
91832 return error;
91833 }
91834diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
91835index a08bd2b..c59bd7c 100644
91836--- a/net/core/rtnetlink.c
91837+++ b/net/core/rtnetlink.c
91838@@ -58,7 +58,7 @@ struct rtnl_link {
91839 rtnl_doit_func doit;
91840 rtnl_dumpit_func dumpit;
91841 rtnl_calcit_func calcit;
91842-};
91843+} __no_const;
91844
91845 static DEFINE_MUTEX(rtnl_mutex);
91846
91847@@ -299,10 +299,13 @@ int __rtnl_link_register(struct rtnl_link_ops *ops)
91848 if (rtnl_link_ops_get(ops->kind))
91849 return -EEXIST;
91850
91851- if (!ops->dellink)
91852- ops->dellink = unregister_netdevice_queue;
91853+ if (!ops->dellink) {
91854+ pax_open_kernel();
91855+ *(void **)&ops->dellink = unregister_netdevice_queue;
91856+ pax_close_kernel();
91857+ }
91858
91859- list_add_tail(&ops->list, &link_ops);
91860+ pax_list_add_tail((struct list_head *)&ops->list, &link_ops);
91861 return 0;
91862 }
91863 EXPORT_SYMBOL_GPL(__rtnl_link_register);
91864@@ -349,7 +352,7 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
91865 for_each_net(net) {
91866 __rtnl_kill_links(net, ops);
91867 }
91868- list_del(&ops->list);
91869+ pax_list_del((struct list_head *)&ops->list);
91870 }
91871 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
91872
91873@@ -2374,7 +2377,7 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
91874 struct nlattr *extfilt;
91875 u32 filter_mask = 0;
91876
91877- extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct rtgenmsg),
91878+ extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg),
91879 IFLA_EXT_MASK);
91880 if (extfilt)
91881 filter_mask = nla_get_u32(extfilt);
91882diff --git a/net/core/scm.c b/net/core/scm.c
91883index 03795d0..98d6bdb 100644
91884--- a/net/core/scm.c
91885+++ b/net/core/scm.c
91886@@ -54,7 +54,7 @@ static __inline__ int scm_check_creds(struct ucred *creds)
91887 return -EINVAL;
91888
91889 if ((creds->pid == task_tgid_vnr(current) ||
91890- ns_capable(current->nsproxy->pid_ns->user_ns, CAP_SYS_ADMIN)) &&
91891+ ns_capable(task_active_pid_ns(current)->user_ns, CAP_SYS_ADMIN)) &&
91892 ((uid_eq(uid, cred->uid) || uid_eq(uid, cred->euid) ||
91893 uid_eq(uid, cred->suid)) || nsown_capable(CAP_SETUID)) &&
91894 ((gid_eq(gid, cred->gid) || gid_eq(gid, cred->egid) ||
91895@@ -210,7 +210,7 @@ EXPORT_SYMBOL(__scm_send);
91896 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
91897 {
91898 struct cmsghdr __user *cm
91899- = (__force struct cmsghdr __user *)msg->msg_control;
91900+ = (struct cmsghdr __force_user *)msg->msg_control;
91901 struct cmsghdr cmhdr;
91902 int cmlen = CMSG_LEN(len);
91903 int err;
91904@@ -233,7 +233,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
91905 err = -EFAULT;
91906 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
91907 goto out;
91908- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
91909+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
91910 goto out;
91911 cmlen = CMSG_SPACE(len);
91912 if (msg->msg_controllen < cmlen)
91913@@ -249,7 +249,7 @@ EXPORT_SYMBOL(put_cmsg);
91914 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
91915 {
91916 struct cmsghdr __user *cm
91917- = (__force struct cmsghdr __user*)msg->msg_control;
91918+ = (struct cmsghdr __force_user *)msg->msg_control;
91919
91920 int fdmax = 0;
91921 int fdnum = scm->fp->count;
91922@@ -269,7 +269,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
91923 if (fdnum < fdmax)
91924 fdmax = fdnum;
91925
91926- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
91927+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
91928 i++, cmfptr++)
91929 {
91930 struct socket *sock;
91931diff --git a/net/core/skbuff.c b/net/core/skbuff.c
91932index 1c1738c..4cab7f0 100644
91933--- a/net/core/skbuff.c
91934+++ b/net/core/skbuff.c
91935@@ -3087,13 +3087,15 @@ void __init skb_init(void)
91936 skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
91937 sizeof(struct sk_buff),
91938 0,
91939- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
91940+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
91941+ SLAB_NO_SANITIZE,
91942 NULL);
91943 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
91944 (2*sizeof(struct sk_buff)) +
91945 sizeof(atomic_t),
91946 0,
91947- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
91948+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
91949+ SLAB_NO_SANITIZE,
91950 NULL);
91951 }
91952
91953diff --git a/net/core/sock.c b/net/core/sock.c
91954index d6d024c..6ea7ab4 100644
91955--- a/net/core/sock.c
91956+++ b/net/core/sock.c
91957@@ -390,7 +390,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
91958 struct sk_buff_head *list = &sk->sk_receive_queue;
91959
91960 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
91961- atomic_inc(&sk->sk_drops);
91962+ atomic_inc_unchecked(&sk->sk_drops);
91963 trace_sock_rcvqueue_full(sk, skb);
91964 return -ENOMEM;
91965 }
91966@@ -400,7 +400,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
91967 return err;
91968
91969 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
91970- atomic_inc(&sk->sk_drops);
91971+ atomic_inc_unchecked(&sk->sk_drops);
91972 return -ENOBUFS;
91973 }
91974
91975@@ -420,7 +420,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
91976 skb_dst_force(skb);
91977
91978 spin_lock_irqsave(&list->lock, flags);
91979- skb->dropcount = atomic_read(&sk->sk_drops);
91980+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
91981 __skb_queue_tail(list, skb);
91982 spin_unlock_irqrestore(&list->lock, flags);
91983
91984@@ -440,7 +440,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
91985 skb->dev = NULL;
91986
91987 if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
91988- atomic_inc(&sk->sk_drops);
91989+ atomic_inc_unchecked(&sk->sk_drops);
91990 goto discard_and_relse;
91991 }
91992 if (nested)
91993@@ -458,7 +458,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
91994 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
91995 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
91996 bh_unlock_sock(sk);
91997- atomic_inc(&sk->sk_drops);
91998+ atomic_inc_unchecked(&sk->sk_drops);
91999 goto discard_and_relse;
92000 }
92001
92002@@ -933,12 +933,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
92003 struct timeval tm;
92004 } v;
92005
92006- int lv = sizeof(int);
92007- int len;
92008+ unsigned int lv = sizeof(int);
92009+ unsigned int len;
92010
92011 if (get_user(len, optlen))
92012 return -EFAULT;
92013- if (len < 0)
92014+ if (len > INT_MAX)
92015 return -EINVAL;
92016
92017 memset(&v, 0, sizeof(v));
92018@@ -1090,11 +1090,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
92019
92020 case SO_PEERNAME:
92021 {
92022- char address[128];
92023+ char address[_K_SS_MAXSIZE];
92024
92025 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
92026 return -ENOTCONN;
92027- if (lv < len)
92028+ if (lv < len || sizeof address < len)
92029 return -EINVAL;
92030 if (copy_to_user(optval, address, len))
92031 return -EFAULT;
92032@@ -1161,7 +1161,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
92033
92034 if (len > lv)
92035 len = lv;
92036- if (copy_to_user(optval, &v, len))
92037+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
92038 return -EFAULT;
92039 lenout:
92040 if (put_user(len, optlen))
92041@@ -2277,7 +2277,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
92042 */
92043 smp_wmb();
92044 atomic_set(&sk->sk_refcnt, 1);
92045- atomic_set(&sk->sk_drops, 0);
92046+ atomic_set_unchecked(&sk->sk_drops, 0);
92047 }
92048 EXPORT_SYMBOL(sock_init_data);
92049
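
[annotation] The sock_getsockopt() hunks combine three defenses: len and lv become unsigned, so the old len < 0 test turns into len > INT_MAX (the same values, seen unsigned); the SO_PEERNAME buffer grows to _K_SS_MAXSIZE with an explicit sizeof bound so a large lv can never drive copy_to_user() past the stack buffer; and the final out-copy is additionally capped by sizeof(v). The resulting shape, sketched with a stand-in value union:

    #include <linux/kernel.h>
    #include <linux/uaccess.h>

    union opt_union { int val; char buf[32]; }; /* stand-in for v */

    static int getsockopt_tail(char __user *optval, int __user *optlen,
                               const union opt_union *v, unsigned int lv)
    {
        unsigned int len;

        if (get_user(len, optlen))
            return -EFAULT;
        if (len > INT_MAX)          /* unsigned form of the old len < 0 */
            return -EINVAL;

        if (len > lv)
            len = lv;               /* report no more than was produced */
        if (len > sizeof(*v) || copy_to_user(optval, v, len))
            return -EFAULT;
        return put_user(len, optlen);
    }
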
92050diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
92051index a0e9cf6..ef7f9ed 100644
92052--- a/net/core/sock_diag.c
92053+++ b/net/core/sock_diag.c
92054@@ -9,26 +9,33 @@
92055 #include <linux/inet_diag.h>
92056 #include <linux/sock_diag.h>
92057
92058-static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
92059+static const struct sock_diag_handler *sock_diag_handlers[AF_MAX] __read_only;
92060 static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
92061 static DEFINE_MUTEX(sock_diag_table_mutex);
92062
92063 int sock_diag_check_cookie(void *sk, __u32 *cookie)
92064 {
92065+#ifndef CONFIG_GRKERNSEC_HIDESYM
92066 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
92067 cookie[1] != INET_DIAG_NOCOOKIE) &&
92068 ((u32)(unsigned long)sk != cookie[0] ||
92069 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
92070 return -ESTALE;
92071 else
92072+#endif
92073 return 0;
92074 }
92075 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
92076
92077 void sock_diag_save_cookie(void *sk, __u32 *cookie)
92078 {
92079+#ifdef CONFIG_GRKERNSEC_HIDESYM
92080+ cookie[0] = 0;
92081+ cookie[1] = 0;
92082+#else
92083 cookie[0] = (u32)(unsigned long)sk;
92084 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
92085+#endif
92086 }
92087 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
92088
92089@@ -113,8 +120,11 @@ int sock_diag_register(const struct sock_diag_handler *hndl)
92090 mutex_lock(&sock_diag_table_mutex);
92091 if (sock_diag_handlers[hndl->family])
92092 err = -EBUSY;
92093- else
92094+ else {
92095+ pax_open_kernel();
92096 sock_diag_handlers[hndl->family] = hndl;
92097+ pax_close_kernel();
92098+ }
92099 mutex_unlock(&sock_diag_table_mutex);
92100
92101 return err;
92102@@ -130,7 +140,9 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
92103
92104 mutex_lock(&sock_diag_table_mutex);
92105 BUG_ON(sock_diag_handlers[family] != hnld);
92106+ pax_open_kernel();
92107 sock_diag_handlers[family] = NULL;
92108+ pax_close_kernel();
92109 mutex_unlock(&sock_diag_table_mutex);
92110 }
92111 EXPORT_SYMBOL_GPL(sock_diag_unregister);
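
[annotation] Registration and unregistration are the only writers of sock_diag_handlers, so the table can be marked __read_only (write-protected once the kernel is up) and the two stores are bracketed with pax_open_kernel()/pax_close_kernel(). On x86 these helpers briefly clear and restore CR0.WP so the protected page can be written, but the exact mechanism is architecture-specific, so treat that detail as an assumption. The usage pattern:

    /* table lives in write-protected memory after init */
    static const struct sock_diag_handler *handlers[AF_MAX] __read_only;

    static void set_handler(int family, const struct sock_diag_handler *h)
    {
        pax_open_kernel();      /* temporarily permit the kernel write */
        handlers[family] = h;
        pax_close_kernel();     /* re-arm the write protection */
    }
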
92112diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
92113index cfdb46a..cef55e1 100644
92114--- a/net/core/sysctl_net_core.c
92115+++ b/net/core/sysctl_net_core.c
92116@@ -28,7 +28,7 @@ static int rps_sock_flow_sysctl(ctl_table *table, int write,
92117 {
92118 unsigned int orig_size, size;
92119 int ret, i;
92120- ctl_table tmp = {
92121+ ctl_table_no_const tmp = {
92122 .data = &size,
92123 .maxlen = sizeof(size),
92124 .mode = table->mode
92125@@ -211,13 +211,12 @@ static struct ctl_table netns_core_table[] = {
92126
92127 static __net_init int sysctl_core_net_init(struct net *net)
92128 {
92129- struct ctl_table *tbl;
92130+ ctl_table_no_const *tbl = NULL;
92131
92132 net->core.sysctl_somaxconn = SOMAXCONN;
92133
92134- tbl = netns_core_table;
92135 if (!net_eq(net, &init_net)) {
92136- tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
92137+ tbl = kmemdup(netns_core_table, sizeof(netns_core_table), GFP_KERNEL);
92138 if (tbl == NULL)
92139 goto err_dup;
92140
92141@@ -227,17 +226,16 @@ static __net_init int sysctl_core_net_init(struct net *net)
92142 if (net->user_ns != &init_user_ns) {
92143 tbl[0].procname = NULL;
92144 }
92145- }
92146-
92147- net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
92148+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
92149+ } else
92150+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", netns_core_table);
92151 if (net->core.sysctl_hdr == NULL)
92152 goto err_reg;
92153
92154 return 0;
92155
92156 err_reg:
92157- if (tbl != netns_core_table)
92158- kfree(tbl);
92159+ kfree(tbl);
92160 err_dup:
92161 return -ENOMEM;
92162 }
92163@@ -252,7 +250,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
92164 kfree(tbl);
92165 }
92166
92167-static __net_initdata struct pernet_operations sysctl_core_ops = {
92168+static __net_initconst struct pernet_operations sysctl_core_ops = {
92169 .init = sysctl_core_net_init,
92170 .exit = sysctl_core_net_exit,
92171 };
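
[annotation] Two recurring patterns meet in sysctl_core_net_init(). The constify plugin treats ctl_table as const, so only a table genuinely built at runtime (the non-init_net copy) uses the writable twin type, while init_net registers the static table directly; and because tbl now stays NULL in the init_net case, the error path can rely on kfree(NULL) being a no-op instead of comparing against the static table. Assuming the sysctl.h side of this patch, the type split is:

    /* the plugin constifies ctl_table; _no_const opts a typedef back out */
    typedef struct ctl_table __no_const ctl_table_no_const;
    typedef struct ctl_table ctl_table;

The same kmemdup-or-register-static rewrite reappears below in ip_fragment.c and devinet.c.
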
92172diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
92173index c21f200..bc4565b 100644
92174--- a/net/decnet/af_decnet.c
92175+++ b/net/decnet/af_decnet.c
92176@@ -465,6 +465,7 @@ static struct proto dn_proto = {
92177 .sysctl_rmem = sysctl_decnet_rmem,
92178 .max_header = DN_MAX_NSP_DATA_HEADER + 64,
92179 .obj_size = sizeof(struct dn_sock),
92180+ .slab_flags = SLAB_USERCOPY,
92181 };
92182
92183 static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
92184diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
92185index a55eecc..dd8428c 100644
92186--- a/net/decnet/sysctl_net_decnet.c
92187+++ b/net/decnet/sysctl_net_decnet.c
92188@@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
92189
92190 if (len > *lenp) len = *lenp;
92191
92192- if (copy_to_user(buffer, addr, len))
92193+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
92194 return -EFAULT;
92195
92196 *lenp = len;
92197@@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
92198
92199 if (len > *lenp) len = *lenp;
92200
92201- if (copy_to_user(buffer, devname, len))
92202+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
92203 return -EFAULT;
92204
92205 *lenp = len;
92206diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
92207index 55e1fd5..fd602b8 100644
92208--- a/net/ieee802154/6lowpan.c
92209+++ b/net/ieee802154/6lowpan.c
92210@@ -459,7 +459,7 @@ static int lowpan_header_create(struct sk_buff *skb,
92211 hc06_ptr += 3;
92212 } else {
92213 /* compress nothing */
92214- memcpy(hc06_ptr, &hdr, 4);
92215+ memcpy(hc06_ptr, hdr, 4);
92216 /* replace the top byte with new ECN | DSCP format */
92217 *hc06_ptr = tmp;
92218 hc06_ptr += 4;
92219diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
92220index d01be2a..8976537 100644
92221--- a/net/ipv4/af_inet.c
92222+++ b/net/ipv4/af_inet.c
92223@@ -1703,13 +1703,9 @@ static int __init inet_init(void)
92224
92225 BUILD_BUG_ON(sizeof(struct inet_skb_parm) > FIELD_SIZEOF(struct sk_buff, cb));
92226
92227- sysctl_local_reserved_ports = kzalloc(65536 / 8, GFP_KERNEL);
92228- if (!sysctl_local_reserved_ports)
92229- goto out;
92230-
92231 rc = proto_register(&tcp_prot, 1);
92232 if (rc)
92233- goto out_free_reserved_ports;
92234+ goto out;
92235
92236 rc = proto_register(&udp_prot, 1);
92237 if (rc)
92238@@ -1818,8 +1814,6 @@ out_unregister_udp_proto:
92239 proto_unregister(&udp_prot);
92240 out_unregister_tcp_proto:
92241 proto_unregister(&tcp_prot);
92242-out_free_reserved_ports:
92243- kfree(sysctl_local_reserved_ports);
92244 goto out;
92245 }
92246
92247diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
92248index 2e7f194..0fa4d6d 100644
92249--- a/net/ipv4/ah4.c
92250+++ b/net/ipv4/ah4.c
92251@@ -420,7 +420,7 @@ static void ah4_err(struct sk_buff *skb, u32 info)
92252 return;
92253
92254 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
92255- atomic_inc(&flow_cache_genid);
92256+ atomic_inc_unchecked(&flow_cache_genid);
92257 rt_genid_bump(net);
92258
92259 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_AH, 0);
92260diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
92261index dfc39d4..0d4fa52 100644
92262--- a/net/ipv4/devinet.c
92263+++ b/net/ipv4/devinet.c
92264@@ -771,7 +771,7 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
92265 ci = nla_data(tb[IFA_CACHEINFO]);
92266 if (!ci->ifa_valid || ci->ifa_prefered > ci->ifa_valid) {
92267 err = -EINVAL;
92268- goto errout;
92269+ goto errout_free;
92270 }
92271 *pvalid_lft = ci->ifa_valid;
92272 *pprefered_lft = ci->ifa_prefered;
92273@@ -779,6 +779,8 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
92274
92275 return ifa;
92276
92277+errout_free:
92278+ inet_free_ifa(ifa);
92279 errout:
92280 return ERR_PTR(err);
92281 }
92282@@ -1529,7 +1531,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
92283 idx = 0;
92284 head = &net->dev_index_head[h];
92285 rcu_read_lock();
92286- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
92287+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
92288 net->dev_base_seq;
92289 hlist_for_each_entry_rcu(dev, head, index_hlist) {
92290 if (idx < s_idx)
92291@@ -1840,7 +1842,7 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
92292 idx = 0;
92293 head = &net->dev_index_head[h];
92294 rcu_read_lock();
92295- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
92296+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
92297 net->dev_base_seq;
92298 hlist_for_each_entry_rcu(dev, head, index_hlist) {
92299 if (idx < s_idx)
92300@@ -2065,7 +2067,7 @@ static int ipv4_doint_and_flush(ctl_table *ctl, int write,
92301 #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
92302 DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
92303
92304-static struct devinet_sysctl_table {
92305+static const struct devinet_sysctl_table {
92306 struct ctl_table_header *sysctl_header;
92307 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
92308 } devinet_sysctl = {
92309@@ -2183,7 +2185,7 @@ static __net_init int devinet_init_net(struct net *net)
92310 int err;
92311 struct ipv4_devconf *all, *dflt;
92312 #ifdef CONFIG_SYSCTL
92313- struct ctl_table *tbl = ctl_forward_entry;
92314+ ctl_table_no_const *tbl = NULL;
92315 struct ctl_table_header *forw_hdr;
92316 #endif
92317
92318@@ -2201,7 +2203,7 @@ static __net_init int devinet_init_net(struct net *net)
92319 goto err_alloc_dflt;
92320
92321 #ifdef CONFIG_SYSCTL
92322- tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
92323+ tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
92324 if (tbl == NULL)
92325 goto err_alloc_ctl;
92326
92327@@ -2221,7 +2223,10 @@ static __net_init int devinet_init_net(struct net *net)
92328 goto err_reg_dflt;
92329
92330 err = -ENOMEM;
92331- forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
92332+ if (!net_eq(net, &init_net))
92333+ forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
92334+ else
92335+ forw_hdr = register_net_sysctl(net, "net/ipv4", ctl_forward_entry);
92336 if (forw_hdr == NULL)
92337 goto err_reg_ctl;
92338 net->ipv4.forw_hdr = forw_hdr;
92339@@ -2237,8 +2242,7 @@ err_reg_ctl:
92340 err_reg_dflt:
92341 __devinet_sysctl_unregister(all);
92342 err_reg_all:
92343- if (tbl != ctl_forward_entry)
92344- kfree(tbl);
92345+ kfree(tbl);
92346 err_alloc_ctl:
92347 #endif
92348 if (dflt != &ipv4_devconf_dflt)
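
[annotation] The rtm_to_ifaddr() hunk is a plain leak fix: failing cacheinfo validation jumped to errout and skipped freeing the just-allocated ifa. The usual kernel idiom keeps unwind labels in reverse order of acquisition so each failure site jumps to the first label that releases everything it already owns; a sketch with hypothetical names:

    #include <linux/err.h>

    static struct thing *build_thing(const struct attrs *a)
    {
        struct thing *t = alloc_thing();        /* hypothetical */
        int err = -EINVAL;

        if (!t)
            return ERR_PTR(-ENOBUFS);

        if (!attrs_valid(a))                    /* hypothetical */
            goto errout_free;   /* t exists and must be released */

        return t;

    errout_free:
        free_thing(t);                          /* hypothetical */
        return ERR_PTR(err);
    }
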
92349diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
92350index 4cfe34d..d2fac8a 100644
92351--- a/net/ipv4/esp4.c
92352+++ b/net/ipv4/esp4.c
92353@@ -477,7 +477,7 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
92354 }
92355
92356 return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
92357- net_adj) & ~(align - 1)) + (net_adj - 2);
92358+ net_adj) & ~(align - 1)) + net_adj - 2;
92359 }
92360
92361 static void esp4_err(struct sk_buff *skb, u32 info)
92362@@ -503,7 +503,7 @@ static void esp4_err(struct sk_buff *skb, u32 info)
92363 return;
92364
92365 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
92366- atomic_inc(&flow_cache_genid);
92367+ atomic_inc_unchecked(&flow_cache_genid);
92368 rt_genid_bump(net);
92369
92370 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ESP, 0);
92371diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
92372index c7629a2..b62d139 100644
92373--- a/net/ipv4/fib_frontend.c
92374+++ b/net/ipv4/fib_frontend.c
92375@@ -1017,12 +1017,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
92376 #ifdef CONFIG_IP_ROUTE_MULTIPATH
92377 fib_sync_up(dev);
92378 #endif
92379- atomic_inc(&net->ipv4.dev_addr_genid);
92380+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
92381 rt_cache_flush(dev_net(dev));
92382 break;
92383 case NETDEV_DOWN:
92384 fib_del_ifaddr(ifa, NULL);
92385- atomic_inc(&net->ipv4.dev_addr_genid);
92386+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
92387 if (ifa->ifa_dev->ifa_list == NULL) {
92388 /* Last address was deleted from this interface.
92389 * Disable IP.
92390@@ -1058,7 +1058,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
92391 #ifdef CONFIG_IP_ROUTE_MULTIPATH
92392 fib_sync_up(dev);
92393 #endif
92394- atomic_inc(&net->ipv4.dev_addr_genid);
92395+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
92396 rt_cache_flush(net);
92397 break;
92398 case NETDEV_DOWN:
92399diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
92400index 8f6cb7a..34507f9 100644
92401--- a/net/ipv4/fib_semantics.c
92402+++ b/net/ipv4/fib_semantics.c
92403@@ -765,7 +765,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
92404 nh->nh_saddr = inet_select_addr(nh->nh_dev,
92405 nh->nh_gw,
92406 nh->nh_parent->fib_scope);
92407- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
92408+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
92409
92410 return nh->nh_saddr;
92411 }
92412diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
92413index 49616fe..6e8a13d 100644
92414--- a/net/ipv4/fib_trie.c
92415+++ b/net/ipv4/fib_trie.c
92416@@ -71,7 +71,6 @@
92417 #include <linux/init.h>
92418 #include <linux/list.h>
92419 #include <linux/slab.h>
92420-#include <linux/prefetch.h>
92421 #include <linux/export.h>
92422 #include <net/net_namespace.h>
92423 #include <net/ip.h>
92424@@ -1761,10 +1760,8 @@ static struct leaf *leaf_walk_rcu(struct tnode *p, struct rt_trie_node *c)
92425 if (!c)
92426 continue;
92427
92428- if (IS_LEAF(c)) {
92429- prefetch(rcu_dereference_rtnl(p->child[idx]));
92430+ if (IS_LEAF(c))
92431 return (struct leaf *) c;
92432- }
92433
92434 /* Rescan start scanning in new node */
92435 p = (struct tnode *) c;
92436diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
92437index 6acb541..9ea617d 100644
92438--- a/net/ipv4/inet_connection_sock.c
92439+++ b/net/ipv4/inet_connection_sock.c
92440@@ -37,7 +37,7 @@ struct local_ports sysctl_local_ports __read_mostly = {
92441 .range = { 32768, 61000 },
92442 };
92443
92444-unsigned long *sysctl_local_reserved_ports;
92445+unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)];
92446 EXPORT_SYMBOL(sysctl_local_reserved_ports);
92447
92448 void inet_get_local_port_range(int *low, int *high)
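
[annotation] Together with the af_inet.c hunk above, which removes the boot-time kzalloc() and its failure path, this turns sysctl_local_reserved_ports into a static 65536-bit bitmap (8 KiB of BSS). One bit per port means the allocation can never fail during inet_init(), and a lookup remains a single bitmap test:

    #include <linux/bitops.h>

    /* one bit per port: 65536 bits = 8 KiB, statically allocated */
    unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)];

    static inline int inet_is_reserved_local_port(int port)
    {
        return test_bit(port, sysctl_local_reserved_ports);
    }
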
92449diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
92450index 6af375a..c493c74 100644
92451--- a/net/ipv4/inet_hashtables.c
92452+++ b/net/ipv4/inet_hashtables.c
92453@@ -18,12 +18,15 @@
92454 #include <linux/sched.h>
92455 #include <linux/slab.h>
92456 #include <linux/wait.h>
92457+#include <linux/security.h>
92458
92459 #include <net/inet_connection_sock.h>
92460 #include <net/inet_hashtables.h>
92461 #include <net/secure_seq.h>
92462 #include <net/ip.h>
92463
92464+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
92465+
92466 /*
92467 * Allocate and initialize a new local port bind bucket.
92468 * The bindhash mutex for snum's hash chain must be held here.
92469@@ -554,6 +557,8 @@ ok:
92470 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
92471 spin_unlock(&head->lock);
92472
92473+ gr_update_task_in_ip_table(current, inet_sk(sk));
92474+
92475 if (tw) {
92476 inet_twsk_deschedule(tw, death_row);
92477 while (twrefcnt) {
92478diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
92479index 000e3d2..5472da3 100644
92480--- a/net/ipv4/inetpeer.c
92481+++ b/net/ipv4/inetpeer.c
92482@@ -503,8 +503,8 @@ relookup:
92483 if (p) {
92484 p->daddr = *daddr;
92485 atomic_set(&p->refcnt, 1);
92486- atomic_set(&p->rid, 0);
92487- atomic_set(&p->ip_id_count,
92488+ atomic_set_unchecked(&p->rid, 0);
92489+ atomic_set_unchecked(&p->ip_id_count,
92490 (daddr->family == AF_INET) ?
92491 secure_ip_id(daddr->addr.a4) :
92492 secure_ipv6_id(daddr->addr.a6));
92493diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
92494index b66910a..cfe416e 100644
92495--- a/net/ipv4/ip_fragment.c
92496+++ b/net/ipv4/ip_fragment.c
92497@@ -282,7 +282,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
92498 return 0;
92499
92500 start = qp->rid;
92501- end = atomic_inc_return(&peer->rid);
92502+ end = atomic_inc_return_unchecked(&peer->rid);
92503 qp->rid = end;
92504
92505 rc = qp->q.fragments && (end - start) > max;
92506@@ -759,12 +759,11 @@ static struct ctl_table ip4_frags_ctl_table[] = {
92507
92508 static int __net_init ip4_frags_ns_ctl_register(struct net *net)
92509 {
92510- struct ctl_table *table;
92511+ ctl_table_no_const *table = NULL;
92512 struct ctl_table_header *hdr;
92513
92514- table = ip4_frags_ns_ctl_table;
92515 if (!net_eq(net, &init_net)) {
92516- table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
92517+ table = kmemdup(ip4_frags_ns_ctl_table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
92518 if (table == NULL)
92519 goto err_alloc;
92520
92521@@ -775,9 +774,10 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
92522 /* Don't export sysctls to unprivileged users */
92523 if (net->user_ns != &init_user_ns)
92524 table[0].procname = NULL;
92525- }
92526+ hdr = register_net_sysctl(net, "net/ipv4", table);
92527+ } else
92528+ hdr = register_net_sysctl(net, "net/ipv4", ip4_frags_ns_ctl_table);
92529
92530- hdr = register_net_sysctl(net, "net/ipv4", table);
92531 if (hdr == NULL)
92532 goto err_reg;
92533
92534@@ -785,8 +785,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
92535 return 0;
92536
92537 err_reg:
92538- if (!net_eq(net, &init_net))
92539- kfree(table);
92540+ kfree(table);
92541 err_alloc:
92542 return -ENOMEM;
92543 }
92544diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
92545index 855004f..9644112 100644
92546--- a/net/ipv4/ip_gre.c
92547+++ b/net/ipv4/ip_gre.c
92548@@ -115,7 +115,7 @@ static bool log_ecn_error = true;
92549 module_param(log_ecn_error, bool, 0644);
92550 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
92551
92552-static struct rtnl_link_ops ipgre_link_ops __read_mostly;
92553+static struct rtnl_link_ops ipgre_link_ops;
92554 static int ipgre_tunnel_init(struct net_device *dev);
92555
92556 static int ipgre_net_id __read_mostly;
92557@@ -572,7 +572,7 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
92558 if (daddr)
92559 memcpy(&iph->daddr, daddr, 4);
92560 if (iph->daddr)
92561- return t->hlen;
92562+ return t->hlen + sizeof(*iph);
92563
92564 return -(t->hlen + sizeof(*iph));
92565 }
92566@@ -919,7 +919,7 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
92567 [IFLA_GRE_PMTUDISC] = { .type = NLA_U8 },
92568 };
92569
92570-static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
92571+static struct rtnl_link_ops ipgre_link_ops = {
92572 .kind = "gre",
92573 .maxtype = IFLA_GRE_MAX,
92574 .policy = ipgre_policy,
92575@@ -933,7 +933,7 @@ static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
92576 .fill_info = ipgre_fill_info,
92577 };
92578
92579-static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
92580+static struct rtnl_link_ops ipgre_tap_ops = {
92581 .kind = "gretap",
92582 .maxtype = IFLA_GRE_MAX,
92583 .policy = ipgre_policy,
92584diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
92585index d9c4f11..02b82dbc 100644
92586--- a/net/ipv4/ip_sockglue.c
92587+++ b/net/ipv4/ip_sockglue.c
92588@@ -1152,7 +1152,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
92589 len = min_t(unsigned int, len, opt->optlen);
92590 if (put_user(len, optlen))
92591 return -EFAULT;
92592- if (copy_to_user(optval, opt->__data, len))
92593+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
92594+ copy_to_user(optval, opt->__data, len))
92595 return -EFAULT;
92596 return 0;
92597 }
92598@@ -1283,7 +1284,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
92599 if (sk->sk_type != SOCK_STREAM)
92600 return -ENOPROTOOPT;
92601
92602- msg.msg_control = optval;
92603+ msg.msg_control = (void __force_kernel *)optval;
92604 msg.msg_controllen = len;
92605 msg.msg_flags = flags;
92606
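The first ip_sockglue.c hunk above re-checks len against the real backing buffer (optbuf minus its struct ip_options header) at the point of the copy, instead of relying on the earlier min_t() clamp alone. A userspace sketch of that belt-and-braces discipline, with hypothetical names and memcpy standing in for copy_to_user:

#include <errno.h>
#include <string.h>

struct options { unsigned int optlen; unsigned char data[40]; };

static int copy_out(unsigned char *dst, size_t dstlen,
                    const struct options *opt, size_t len)
{
        if (len > opt->optlen)                  /* clamp to stored length */
                len = opt->optlen;
        if (len > sizeof(opt->data) || len > dstlen)
                return -EFAULT;                 /* never read past data[] */
        memcpy(dst, opt->data, len);
        return 0;
}

int main(void)
{
        struct options opt = { 4, "abcd" };
        unsigned char out[64];

        return copy_out(out, sizeof(out), &opt, 100);   /* clamps to 4 */
}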
92607diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
92608index 17cc0ff..63856c4 100644
92609--- a/net/ipv4/ip_vti.c
92610+++ b/net/ipv4/ip_vti.c
92611@@ -47,7 +47,7 @@
92612 #define HASH_SIZE 16
92613 #define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&(HASH_SIZE-1))
92614
92615-static struct rtnl_link_ops vti_link_ops __read_mostly;
92616+static struct rtnl_link_ops vti_link_ops;
92617
92618 static int vti_net_id __read_mostly;
92619 struct vti_net {
92620@@ -840,7 +840,7 @@ static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
92621 [IFLA_VTI_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
92622 };
92623
92624-static struct rtnl_link_ops vti_link_ops __read_mostly = {
92625+static struct rtnl_link_ops vti_link_ops = {
92626 .kind = "vti",
92627 .maxtype = IFLA_VTI_MAX,
92628 .policy = vti_policy,
92629diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
92630index 59cb8c7..a72160c 100644
92631--- a/net/ipv4/ipcomp.c
92632+++ b/net/ipv4/ipcomp.c
92633@@ -48,7 +48,7 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
92634 return;
92635
92636 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
92637- atomic_inc(&flow_cache_genid);
92638+ atomic_inc_unchecked(&flow_cache_genid);
92639 rt_genid_bump(net);
92640
92641 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_COMP, 0);
92642diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
92643index efa1138..20dbba0 100644
92644--- a/net/ipv4/ipconfig.c
92645+++ b/net/ipv4/ipconfig.c
92646@@ -334,7 +334,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
92647
92648 mm_segment_t oldfs = get_fs();
92649 set_fs(get_ds());
92650- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
92651+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
92652 set_fs(oldfs);
92653 return res;
92654 }
92655@@ -345,7 +345,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
92656
92657 mm_segment_t oldfs = get_fs();
92658 set_fs(get_ds());
92659- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
92660+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
92661 set_fs(oldfs);
92662 return res;
92663 }
92664@@ -356,7 +356,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
92665
92666 mm_segment_t oldfs = get_fs();
92667 set_fs(get_ds());
92668- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
92669+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
92670 set_fs(oldfs);
92671 return res;
92672 }
92673diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
92674index 7cfc456..e726868 100644
92675--- a/net/ipv4/ipip.c
92676+++ b/net/ipv4/ipip.c
92677@@ -124,7 +124,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
92678 static int ipip_net_id __read_mostly;
92679
92680 static int ipip_tunnel_init(struct net_device *dev);
92681-static struct rtnl_link_ops ipip_link_ops __read_mostly;
92682+static struct rtnl_link_ops ipip_link_ops;
92683
92684 static int ipip_err(struct sk_buff *skb, u32 info)
92685 {
92686@@ -406,7 +406,7 @@ static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
92687 [IFLA_IPTUN_PMTUDISC] = { .type = NLA_U8 },
92688 };
92689
92690-static struct rtnl_link_ops ipip_link_ops __read_mostly = {
92691+static struct rtnl_link_ops ipip_link_ops = {
92692 .kind = "ipip",
92693 .maxtype = IFLA_IPTUN_MAX,
92694 .policy = ipip_policy,
92695diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
92696index 85a4f21..1beb1f5 100644
92697--- a/net/ipv4/netfilter/arp_tables.c
92698+++ b/net/ipv4/netfilter/arp_tables.c
92699@@ -880,14 +880,14 @@ static int compat_table_info(const struct xt_table_info *info,
92700 #endif
92701
92702 static int get_info(struct net *net, void __user *user,
92703- const int *len, int compat)
92704+ int len, int compat)
92705 {
92706 char name[XT_TABLE_MAXNAMELEN];
92707 struct xt_table *t;
92708 int ret;
92709
92710- if (*len != sizeof(struct arpt_getinfo)) {
92711- duprintf("length %u != %Zu\n", *len,
92712+ if (len != sizeof(struct arpt_getinfo)) {
92713+ duprintf("length %u != %Zu\n", len,
92714 sizeof(struct arpt_getinfo));
92715 return -EINVAL;
92716 }
92717@@ -924,7 +924,7 @@ static int get_info(struct net *net, void __user *user,
92718 info.size = private->size;
92719 strcpy(info.name, name);
92720
92721- if (copy_to_user(user, &info, *len) != 0)
92722+ if (copy_to_user(user, &info, len) != 0)
92723 ret = -EFAULT;
92724 else
92725 ret = 0;
92726@@ -1683,7 +1683,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
92727
92728 switch (cmd) {
92729 case ARPT_SO_GET_INFO:
92730- ret = get_info(sock_net(sk), user, len, 1);
92731+ ret = get_info(sock_net(sk), user, *len, 1);
92732 break;
92733 case ARPT_SO_GET_ENTRIES:
92734 ret = compat_get_entries(sock_net(sk), user, len);
92735@@ -1728,7 +1728,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
92736
92737 switch (cmd) {
92738 case ARPT_SO_GET_INFO:
92739- ret = get_info(sock_net(sk), user, len, 0);
92740+ ret = get_info(sock_net(sk), user, *len, 0);
92741 break;
92742
92743 case ARPT_SO_GET_ENTRIES:
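Here and in the ip_tables.c/ip6_tables.c diffs below, get_info() is changed to take its length by value: the caller dereferences *len exactly once, so the value that passes the sizeof check is provably the same value later handed to copy_to_user(). A userspace sketch of that read-once discipline, assuming a length living in memory some other context could rewrite; all names are illustrative:

#include <stdio.h>
#include <string.h>

struct info { char name[32]; unsigned int size; };

/* stands in for a length another context may change between our
 * check and our use (hypothetical setup) */
static volatile int shared_len;

static int get_info_fixed(char *dst, size_t dstlen, const struct info *src)
{
        int len = shared_len;           /* single fetch */

        if (len != (int)sizeof(*src))
                return -1;              /* -EINVAL analogue */
        if ((size_t)len > dstlen)
                return -1;
        memcpy(dst, src, len);          /* same 'len' as validated */
        return 0;
}

int main(void)
{
        struct info i = { "filter", sizeof(struct info) };
        char buf[sizeof(struct info)];

        shared_len = sizeof(struct info);
        printf("copy %s\n",
               get_info_fixed(buf, sizeof(buf), &i) ? "rejected" : "ok");
        return 0;
}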
92744diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
92745index d23118d..6ad7277 100644
92746--- a/net/ipv4/netfilter/ip_tables.c
92747+++ b/net/ipv4/netfilter/ip_tables.c
92748@@ -1068,14 +1068,14 @@ static int compat_table_info(const struct xt_table_info *info,
92749 #endif
92750
92751 static int get_info(struct net *net, void __user *user,
92752- const int *len, int compat)
92753+ int len, int compat)
92754 {
92755 char name[XT_TABLE_MAXNAMELEN];
92756 struct xt_table *t;
92757 int ret;
92758
92759- if (*len != sizeof(struct ipt_getinfo)) {
92760- duprintf("length %u != %zu\n", *len,
92761+ if (len != sizeof(struct ipt_getinfo)) {
92762+ duprintf("length %u != %zu\n", len,
92763 sizeof(struct ipt_getinfo));
92764 return -EINVAL;
92765 }
92766@@ -1112,7 +1112,7 @@ static int get_info(struct net *net, void __user *user,
92767 info.size = private->size;
92768 strcpy(info.name, name);
92769
92770- if (copy_to_user(user, &info, *len) != 0)
92771+ if (copy_to_user(user, &info, len) != 0)
92772 ret = -EFAULT;
92773 else
92774 ret = 0;
92775@@ -1966,7 +1966,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
92776
92777 switch (cmd) {
92778 case IPT_SO_GET_INFO:
92779- ret = get_info(sock_net(sk), user, len, 1);
92780+ ret = get_info(sock_net(sk), user, *len, 1);
92781 break;
92782 case IPT_SO_GET_ENTRIES:
92783 ret = compat_get_entries(sock_net(sk), user, len);
92784@@ -2013,7 +2013,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
92785
92786 switch (cmd) {
92787 case IPT_SO_GET_INFO:
92788- ret = get_info(sock_net(sk), user, len, 0);
92789+ ret = get_info(sock_net(sk), user, *len, 0);
92790 break;
92791
92792 case IPT_SO_GET_ENTRIES:
92793diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
92794index 7d93d62..cbbf2a3 100644
92795--- a/net/ipv4/ping.c
92796+++ b/net/ipv4/ping.c
92797@@ -843,7 +843,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
92798 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
92799 0, sock_i_ino(sp),
92800 atomic_read(&sp->sk_refcnt), sp,
92801- atomic_read(&sp->sk_drops), len);
92802+ atomic_read_unchecked(&sp->sk_drops), len);
92803 }
92804
92805 static int ping_seq_show(struct seq_file *seq, void *v)
92806diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
92807index dd44e0a..06dcca4 100644
92808--- a/net/ipv4/raw.c
92809+++ b/net/ipv4/raw.c
92810@@ -309,7 +309,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
92811 int raw_rcv(struct sock *sk, struct sk_buff *skb)
92812 {
92813 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
92814- atomic_inc(&sk->sk_drops);
92815+ atomic_inc_unchecked(&sk->sk_drops);
92816 kfree_skb(skb);
92817 return NET_RX_DROP;
92818 }
92819@@ -745,16 +745,20 @@ static int raw_init(struct sock *sk)
92820
92821 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
92822 {
92823+ struct icmp_filter filter;
92824+
92825 if (optlen > sizeof(struct icmp_filter))
92826 optlen = sizeof(struct icmp_filter);
92827- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
92828+ if (copy_from_user(&filter, optval, optlen))
92829 return -EFAULT;
92830+ raw_sk(sk)->filter = filter;
92831 return 0;
92832 }
92833
92834 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
92835 {
92836 int len, ret = -EFAULT;
92837+ struct icmp_filter filter;
92838
92839 if (get_user(len, optlen))
92840 goto out;
92841@@ -764,8 +768,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
92842 if (len > sizeof(struct icmp_filter))
92843 len = sizeof(struct icmp_filter);
92844 ret = -EFAULT;
92845- if (put_user(len, optlen) ||
92846- copy_to_user(optval, &raw_sk(sk)->filter, len))
92847+ filter = raw_sk(sk)->filter;
92848+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
92849 goto out;
92850 ret = 0;
92851 out: return ret;
92852@@ -994,7 +998,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
92853 0, 0L, 0,
92854 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
92855 0, sock_i_ino(sp),
92856- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
92857+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
92858 }
92859
92860 static int raw_seq_show(struct seq_file *seq, void *v)
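The raw.c hunks above stop writing userspace data directly into the live raw_sk(sk)->filter: the set path stages the filter in a stack copy and commits it with one struct assignment, and the get path snapshots the filter the same way before copying out. A failed or oversized user copy therefore never leaves the socket with a half-written filter. A userspace analogue of the stage-then-commit shape (memcpy stands in for copy_from_user; type and field names are hypothetical):

#include <string.h>

struct icmp_filter_x { unsigned int data; };    /* stand-in type */
struct sock_x { struct icmp_filter_x filter; };

static int set_filter(struct sock_x *sk, const void *src, size_t optlen)
{
        struct icmp_filter_x tmp;

        if (optlen > sizeof(tmp))
                optlen = sizeof(tmp);
        memset(&tmp, 0, sizeof(tmp));
        memcpy(&tmp, src, optlen);      /* stage the untrusted bytes */
        sk->filter = tmp;               /* single whole-struct commit */
        return 0;
}

int main(void)
{
        struct sock_x sk = { { 0 } };
        unsigned int v = 0xffffffffu;

        return set_filter(&sk, &v, sizeof(v));
}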
92861diff --git a/net/ipv4/route.c b/net/ipv4/route.c
92862index d35bbf0..faa3ab8 100644
92863--- a/net/ipv4/route.c
92864+++ b/net/ipv4/route.c
92865@@ -2558,34 +2558,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
92866 .maxlen = sizeof(int),
92867 .mode = 0200,
92868 .proc_handler = ipv4_sysctl_rtcache_flush,
92869+ .extra1 = &init_net,
92870 },
92871 { },
92872 };
92873
92874 static __net_init int sysctl_route_net_init(struct net *net)
92875 {
92876- struct ctl_table *tbl;
92877+ ctl_table_no_const *tbl = NULL;
92878
92879- tbl = ipv4_route_flush_table;
92880 if (!net_eq(net, &init_net)) {
92881- tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
92882+ tbl = kmemdup(ipv4_route_flush_table, sizeof(ipv4_route_flush_table), GFP_KERNEL);
92883 if (tbl == NULL)
92884 goto err_dup;
92885
92886 /* Don't export sysctls to unprivileged users */
92887 if (net->user_ns != &init_user_ns)
92888 tbl[0].procname = NULL;
92889- }
92890- tbl[0].extra1 = net;
92891+ tbl[0].extra1 = net;
92892+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
92893+ } else
92894+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", ipv4_route_flush_table);
92895
92896- net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
92897 if (net->ipv4.route_hdr == NULL)
92898 goto err_reg;
92899 return 0;
92900
92901 err_reg:
92902- if (tbl != ipv4_route_flush_table)
92903- kfree(tbl);
92904+ kfree(tbl);
92905 err_dup:
92906 return -ENOMEM;
92907 }
92908@@ -2608,7 +2608,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
92909
92910 static __net_init int rt_genid_init(struct net *net)
92911 {
92912- atomic_set(&net->rt_genid, 0);
92913+ atomic_set_unchecked(&net->rt_genid, 0);
92914 get_random_bytes(&net->ipv4.dev_addr_genid,
92915 sizeof(net->ipv4.dev_addr_genid));
92916 return 0;
92917diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
92918index 3f25e75..3ae0f4d 100644
92919--- a/net/ipv4/sysctl_net_ipv4.c
92920+++ b/net/ipv4/sysctl_net_ipv4.c
92921@@ -57,7 +57,7 @@ static int ipv4_local_port_range(ctl_table *table, int write,
92922 {
92923 int ret;
92924 int range[2];
92925- ctl_table tmp = {
92926+ ctl_table_no_const tmp = {
92927 .data = &range,
92928 .maxlen = sizeof(range),
92929 .mode = table->mode,
92930@@ -110,7 +110,7 @@ static int ipv4_ping_group_range(ctl_table *table, int write,
92931 int ret;
92932 gid_t urange[2];
92933 kgid_t low, high;
92934- ctl_table tmp = {
92935+ ctl_table_no_const tmp = {
92936 .data = &urange,
92937 .maxlen = sizeof(urange),
92938 .mode = table->mode,
92939@@ -141,7 +141,7 @@ static int proc_tcp_congestion_control(ctl_table *ctl, int write,
92940 void __user *buffer, size_t *lenp, loff_t *ppos)
92941 {
92942 char val[TCP_CA_NAME_MAX];
92943- ctl_table tbl = {
92944+ ctl_table_no_const tbl = {
92945 .data = val,
92946 .maxlen = TCP_CA_NAME_MAX,
92947 };
92948@@ -160,7 +160,7 @@ static int proc_tcp_available_congestion_control(ctl_table *ctl,
92949 void __user *buffer, size_t *lenp,
92950 loff_t *ppos)
92951 {
92952- ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
92953+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX, };
92954 int ret;
92955
92956 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
92957@@ -177,7 +177,7 @@ static int proc_allowed_congestion_control(ctl_table *ctl,
92958 void __user *buffer, size_t *lenp,
92959 loff_t *ppos)
92960 {
92961- ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
92962+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX };
92963 int ret;
92964
92965 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
92966@@ -203,15 +203,17 @@ static int ipv4_tcp_mem(ctl_table *ctl, int write,
92967 struct mem_cgroup *memcg;
92968 #endif
92969
92970- ctl_table tmp = {
92971+ ctl_table_no_const tmp = {
92972 .data = &vec,
92973 .maxlen = sizeof(vec),
92974 .mode = ctl->mode,
92975 };
92976
92977 if (!write) {
92978- ctl->data = &net->ipv4.sysctl_tcp_mem;
92979- return proc_doulongvec_minmax(ctl, write, buffer, lenp, ppos);
92980+ ctl_table_no_const tcp_mem = *ctl;
92981+
92982+ tcp_mem.data = &net->ipv4.sysctl_tcp_mem;
92983+ return proc_doulongvec_minmax(&tcp_mem, write, buffer, lenp, ppos);
92984 }
92985
92986 ret = proc_doulongvec_minmax(&tmp, write, buffer, lenp, ppos);
92987@@ -238,7 +240,7 @@ static int ipv4_tcp_mem(ctl_table *ctl, int write,
92988 static int proc_tcp_fastopen_key(ctl_table *ctl, int write, void __user *buffer,
92989 size_t *lenp, loff_t *ppos)
92990 {
92991- ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
92992+ ctl_table_no_const tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
92993 struct tcp_fastopen_context *ctxt;
92994 int ret;
92995 u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
92996@@ -481,7 +483,7 @@ static struct ctl_table ipv4_table[] = {
92997 },
92998 {
92999 .procname = "ip_local_reserved_ports",
93000- .data = NULL, /* initialized in sysctl_ipv4_init */
93001+ .data = sysctl_local_reserved_ports,
93002 .maxlen = 65536,
93003 .mode = 0644,
93004 .proc_handler = proc_do_large_bitmap,
93005@@ -846,11 +848,10 @@ static struct ctl_table ipv4_net_table[] = {
93006
93007 static __net_init int ipv4_sysctl_init_net(struct net *net)
93008 {
93009- struct ctl_table *table;
93010+ ctl_table_no_const *table = NULL;
93011
93012- table = ipv4_net_table;
93013 if (!net_eq(net, &init_net)) {
93014- table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
93015+ table = kmemdup(ipv4_net_table, sizeof(ipv4_net_table), GFP_KERNEL);
93016 if (table == NULL)
93017 goto err_alloc;
93018
93019@@ -885,15 +886,17 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
93020
93021 tcp_init_mem(net);
93022
93023- net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
93024+ if (!net_eq(net, &init_net))
93025+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
93026+ else
93027+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", ipv4_net_table);
93028 if (net->ipv4.ipv4_hdr == NULL)
93029 goto err_reg;
93030
93031 return 0;
93032
93033 err_reg:
93034- if (!net_eq(net, &init_net))
93035- kfree(table);
93036+ kfree(table);
93037 err_alloc:
93038 return -ENOMEM;
93039 }
93040@@ -915,16 +918,6 @@ static __net_initdata struct pernet_operations ipv4_sysctl_ops = {
93041 static __init int sysctl_ipv4_init(void)
93042 {
93043 struct ctl_table_header *hdr;
93044- struct ctl_table *i;
93045-
93046- for (i = ipv4_table; i->procname; i++) {
93047- if (strcmp(i->procname, "ip_local_reserved_ports") == 0) {
93048- i->data = sysctl_local_reserved_ports;
93049- break;
93050- }
93051- }
93052- if (!i->procname)
93053- return -EINVAL;
93054
93055 hdr = register_net_sysctl(&init_net, "net/ipv4", ipv4_table);
93056 if (hdr == NULL)
93057diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
93058index 9c62257..651cc27 100644
93059--- a/net/ipv4/tcp_input.c
93060+++ b/net/ipv4/tcp_input.c
93061@@ -4436,7 +4436,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
93062 * simplifies code)
93063 */
93064 static void
93065-tcp_collapse(struct sock *sk, struct sk_buff_head *list,
93066+__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
93067 struct sk_buff *head, struct sk_buff *tail,
93068 u32 start, u32 end)
93069 {
93070@@ -5522,6 +5522,7 @@ discard:
93071 tcp_paws_reject(&tp->rx_opt, 0))
93072 goto discard_and_undo;
93073
93074+#ifndef CONFIG_GRKERNSEC_NO_SIMULT_CONNECT
93075 if (th->syn) {
93076 /* We see SYN without ACK. It is attempt of
93077 * simultaneous connect with crossed SYNs.
93078@@ -5572,6 +5573,7 @@ discard:
93079 goto discard;
93080 #endif
93081 }
93082+#endif
93083 /* "fifth, if neither of the SYN or RST bits is set then
93084 * drop the segment and return."
93085 */
93086@@ -5616,7 +5618,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
93087 goto discard;
93088
93089 if (th->syn) {
93090- if (th->fin)
93091+ if (th->fin || th->urg || th->psh)
93092 goto discard;
93093 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
93094 return 1;
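Beyond the CONFIG_GRKERNSEC_NO_SIMULT_CONNECT guard, the tcp_input.c hunk above widens the connection-request sanity check from th->fin alone to th->fin || th->urg || th->psh: a legitimate SYN carries none of those. A small sketch of that screening using the standard TCP flag bit values (constants spelled out here for illustration):

#include <stdio.h>

#define TH_FIN 0x01
#define TH_SYN 0x02
#define TH_PSH 0x08
#define TH_URG 0x20

static int acceptable_syn(unsigned char flags)
{
        if (!(flags & TH_SYN))
                return 0;
        if (flags & (TH_FIN | TH_PSH | TH_URG))
                return 0;       /* implausible for a real handshake */
        return 1;
}

int main(void)
{
        printf("SYN: %d, SYN|FIN: %d, SYN|PSH: %d\n",
               acceptable_syn(TH_SYN),
               acceptable_syn(TH_SYN | TH_FIN),
               acceptable_syn(TH_SYN | TH_PSH));
        return 0;
}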
93095diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
93096index 7999fc5..c812f42 100644
93097--- a/net/ipv4/tcp_ipv4.c
93098+++ b/net/ipv4/tcp_ipv4.c
93099@@ -90,6 +90,10 @@ int sysctl_tcp_low_latency __read_mostly;
93100 EXPORT_SYMBOL(sysctl_tcp_low_latency);
93101
93102
93103+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
93104+extern int grsec_enable_blackhole;
93105+#endif
93106+
93107 #ifdef CONFIG_TCP_MD5SIG
93108 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
93109 __be32 daddr, __be32 saddr, const struct tcphdr *th);
93110@@ -1855,6 +1859,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
93111 return 0;
93112
93113 reset:
93114+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
93115+ if (!grsec_enable_blackhole)
93116+#endif
93117 tcp_v4_send_reset(rsk, skb);
93118 discard:
93119 kfree_skb(skb);
93120@@ -2000,12 +2007,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
93121 TCP_SKB_CB(skb)->sacked = 0;
93122
93123 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
93124- if (!sk)
93125+ if (!sk) {
93126+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
93127+ ret = 1;
93128+#endif
93129 goto no_tcp_socket;
93130-
93131+ }
93132 process:
93133- if (sk->sk_state == TCP_TIME_WAIT)
93134+ if (sk->sk_state == TCP_TIME_WAIT) {
93135+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
93136+ ret = 2;
93137+#endif
93138 goto do_time_wait;
93139+ }
93140
93141 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
93142 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
93143@@ -2058,6 +2072,10 @@ csum_error:
93144 bad_packet:
93145 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
93146 } else {
93147+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
93148+ if (!grsec_enable_blackhole || (ret == 1 &&
93149+ (skb->dev->flags & IFF_LOOPBACK)))
93150+#endif
93151 tcp_v4_send_reset(NULL, skb);
93152 }
93153
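The tcp_ipv4.c hunks above show the recurring GRKERNSEC_BLACKHOLE shape: a compile-time option wraps a runtime toggle (grsec_enable_blackhole) around the replies, such as tcp_v4_send_reset(), that would otherwise confirm to a scanner that a port exists. A compressed userspace analogue, with -DGRSEC_BLACKHOLE standing in for the Kconfig option and every name illustrative:

#include <stdio.h>

#ifdef GRSEC_BLACKHOLE
static int blackhole_enabled = 1;       /* a sysctl in the real thing */
#endif

static void send_reset(void)
{
        puts("RST sent");
}

static void handle_bad_packet(void)
{
#ifdef GRSEC_BLACKHOLE
        if (!blackhole_enabled)
#endif
                send_reset();           /* silently dropped otherwise */
}

int main(void)
{
        handle_bad_packet();
        return 0;
}

The deliberately brace-less if matches the patch: with the option compiled out, the guarded statement compiles exactly as before.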
93154diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
93155index 0f01788..d52a859 100644
93156--- a/net/ipv4/tcp_minisocks.c
93157+++ b/net/ipv4/tcp_minisocks.c
93158@@ -27,6 +27,10 @@
93159 #include <net/inet_common.h>
93160 #include <net/xfrm.h>
93161
93162+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
93163+extern int grsec_enable_blackhole;
93164+#endif
93165+
93166 int sysctl_tcp_syncookies __read_mostly = 1;
93167 EXPORT_SYMBOL(sysctl_tcp_syncookies);
93168
93169@@ -717,7 +721,10 @@ embryonic_reset:
93170 * avoid becoming vulnerable to outside attack aiming at
93171 * resetting legit local connections.
93172 */
93173- req->rsk_ops->send_reset(sk, skb);
93174+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
93175+ if (!grsec_enable_blackhole)
93176+#endif
93177+ req->rsk_ops->send_reset(sk, skb);
93178 } else if (fastopen) { /* received a valid RST pkt */
93179 reqsk_fastopen_remove(sk, req, true);
93180 tcp_reset(sk);
93181diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
93182index d4943f6..e7a74a5 100644
93183--- a/net/ipv4/tcp_probe.c
93184+++ b/net/ipv4/tcp_probe.c
93185@@ -204,7 +204,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
93186 if (cnt + width >= len)
93187 break;
93188
93189- if (copy_to_user(buf + cnt, tbuf, width))
93190+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
93191 return -EFAULT;
93192 cnt += width;
93193 }
93194diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
93195index 4b85e6f..22f9ac9 100644
93196--- a/net/ipv4/tcp_timer.c
93197+++ b/net/ipv4/tcp_timer.c
93198@@ -22,6 +22,10 @@
93199 #include <linux/gfp.h>
93200 #include <net/tcp.h>
93201
93202+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
93203+extern int grsec_lastack_retries;
93204+#endif
93205+
93206 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
93207 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
93208 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
93209@@ -185,6 +189,13 @@ static int tcp_write_timeout(struct sock *sk)
93210 }
93211 }
93212
93213+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
93214+ if ((sk->sk_state == TCP_LAST_ACK) &&
93215+ (grsec_lastack_retries > 0) &&
93216+ (grsec_lastack_retries < retry_until))
93217+ retry_until = grsec_lastack_retries;
93218+#endif
93219+
93220 if (retransmits_timed_out(sk, retry_until,
93221 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
93222 /* Has it gone just too far? */
93223diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
93224index 93b731d..5a2dd92 100644
93225--- a/net/ipv4/udp.c
93226+++ b/net/ipv4/udp.c
93227@@ -87,6 +87,7 @@
93228 #include <linux/types.h>
93229 #include <linux/fcntl.h>
93230 #include <linux/module.h>
93231+#include <linux/security.h>
93232 #include <linux/socket.h>
93233 #include <linux/sockios.h>
93234 #include <linux/igmp.h>
93235@@ -111,6 +112,10 @@
93236 #include <trace/events/skb.h>
93237 #include "udp_impl.h"
93238
93239+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
93240+extern int grsec_enable_blackhole;
93241+#endif
93242+
93243 struct udp_table udp_table __read_mostly;
93244 EXPORT_SYMBOL(udp_table);
93245
93246@@ -594,6 +599,9 @@ found:
93247 return s;
93248 }
93249
93250+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
93251+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
93252+
93253 /*
93254 * This routine is called by the ICMP module when it gets some
93255 * sort of error condition. If err < 0 then the socket should
93256@@ -890,9 +898,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
93257 dport = usin->sin_port;
93258 if (dport == 0)
93259 return -EINVAL;
93260+
93261+ err = gr_search_udp_sendmsg(sk, usin);
93262+ if (err)
93263+ return err;
93264 } else {
93265 if (sk->sk_state != TCP_ESTABLISHED)
93266 return -EDESTADDRREQ;
93267+
93268+ err = gr_search_udp_sendmsg(sk, NULL);
93269+ if (err)
93270+ return err;
93271+
93272 daddr = inet->inet_daddr;
93273 dport = inet->inet_dport;
93274 /* Open fast path for connected socket.
93275@@ -1136,7 +1153,7 @@ static unsigned int first_packet_length(struct sock *sk)
93276 IS_UDPLITE(sk));
93277 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
93278 IS_UDPLITE(sk));
93279- atomic_inc(&sk->sk_drops);
93280+ atomic_inc_unchecked(&sk->sk_drops);
93281 __skb_unlink(skb, rcvq);
93282 __skb_queue_tail(&list_kill, skb);
93283 }
93284@@ -1222,6 +1239,10 @@ try_again:
93285 if (!skb)
93286 goto out;
93287
93288+ err = gr_search_udp_recvmsg(sk, skb);
93289+ if (err)
93290+ goto out_free;
93291+
93292 ulen = skb->len - sizeof(struct udphdr);
93293 copied = len;
93294 if (copied > ulen)
93295@@ -1255,7 +1276,7 @@ try_again:
93296 if (unlikely(err)) {
93297 trace_kfree_skb(skb, udp_recvmsg);
93298 if (!peeked) {
93299- atomic_inc(&sk->sk_drops);
93300+ atomic_inc_unchecked(&sk->sk_drops);
93301 UDP_INC_STATS_USER(sock_net(sk),
93302 UDP_MIB_INERRORS, is_udplite);
93303 }
93304@@ -1542,7 +1563,7 @@ csum_error:
93305 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
93306 drop:
93307 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
93308- atomic_inc(&sk->sk_drops);
93309+ atomic_inc_unchecked(&sk->sk_drops);
93310 kfree_skb(skb);
93311 return -1;
93312 }
93313@@ -1561,7 +1582,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
93314 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
93315
93316 if (!skb1) {
93317- atomic_inc(&sk->sk_drops);
93318+ atomic_inc_unchecked(&sk->sk_drops);
93319 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
93320 IS_UDPLITE(sk));
93321 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
93322@@ -1730,6 +1751,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
93323 goto csum_error;
93324
93325 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
93326+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
93327+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
93328+#endif
93329 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
93330
93331 /*
93332@@ -2160,7 +2184,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
93333 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
93334 0, sock_i_ino(sp),
93335 atomic_read(&sp->sk_refcnt), sp,
93336- atomic_read(&sp->sk_drops), len);
93337+ atomic_read_unchecked(&sp->sk_drops), len);
93338 }
93339
93340 int udp4_seq_show(struct seq_file *seq, void *v)
93341diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
93342index 9a459be..086b866 100644
93343--- a/net/ipv4/xfrm4_policy.c
93344+++ b/net/ipv4/xfrm4_policy.c
93345@@ -264,19 +264,18 @@ static struct ctl_table xfrm4_policy_table[] = {
93346
93347 static int __net_init xfrm4_net_init(struct net *net)
93348 {
93349- struct ctl_table *table;
93350+ ctl_table_no_const *table = NULL;
93351 struct ctl_table_header *hdr;
93352
93353- table = xfrm4_policy_table;
93354 if (!net_eq(net, &init_net)) {
93355- table = kmemdup(table, sizeof(xfrm4_policy_table), GFP_KERNEL);
93356+ table = kmemdup(xfrm4_policy_table, sizeof(xfrm4_policy_table), GFP_KERNEL);
93357 if (!table)
93358 goto err_alloc;
93359
93360 table[0].data = &net->xfrm.xfrm4_dst_ops.gc_thresh;
93361- }
93362-
93363- hdr = register_net_sysctl(net, "net/ipv4", table);
93364+ hdr = register_net_sysctl(net, "net/ipv4", table);
93365+ } else
93366+ hdr = register_net_sysctl(net, "net/ipv4", xfrm4_policy_table);
93367 if (!hdr)
93368 goto err_reg;
93369
93370@@ -284,8 +283,7 @@ static int __net_init xfrm4_net_init(struct net *net)
93371 return 0;
93372
93373 err_reg:
93374- if (!net_eq(net, &init_net))
93375- kfree(table);
93376+ kfree(table);
93377 err_alloc:
93378 return -ENOMEM;
93379 }
93380diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
93381index fb8c94c..80a31d8 100644
93382--- a/net/ipv6/addrconf.c
93383+++ b/net/ipv6/addrconf.c
93384@@ -621,7 +621,7 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
93385 idx = 0;
93386 head = &net->dev_index_head[h];
93387 rcu_read_lock();
93388- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
93389+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^
93390 net->dev_base_seq;
93391 hlist_for_each_entry_rcu(dev, head, index_hlist) {
93392 if (idx < s_idx)
93393@@ -1124,12 +1124,10 @@ retry:
93394 if (ifp->flags & IFA_F_OPTIMISTIC)
93395 addr_flags |= IFA_F_OPTIMISTIC;
93396
93397- ift = !max_addresses ||
93398- ipv6_count_addresses(idev) < max_addresses ?
93399- ipv6_add_addr(idev, &addr, tmp_plen,
93400- ipv6_addr_type(&addr)&IPV6_ADDR_SCOPE_MASK,
93401- addr_flags) : NULL;
93402- if (IS_ERR_OR_NULL(ift)) {
93403+ ift = ipv6_add_addr(idev, &addr, tmp_plen,
93404+ ipv6_addr_type(&addr)&IPV6_ADDR_SCOPE_MASK,
93405+ addr_flags);
93406+ if (IS_ERR(ift)) {
93407 in6_ifa_put(ifp);
93408 in6_dev_put(idev);
93409 pr_info("%s: retry temporary address regeneration\n", __func__);
93410@@ -2380,7 +2378,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
93411 p.iph.ihl = 5;
93412 p.iph.protocol = IPPROTO_IPV6;
93413 p.iph.ttl = 64;
93414- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
93415+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
93416
93417 if (ops->ndo_do_ioctl) {
93418 mm_segment_t oldfs = get_fs();
93419@@ -4002,7 +4000,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
93420 s_ip_idx = ip_idx = cb->args[2];
93421
93422 rcu_read_lock();
93423- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
93424+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
93425 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
93426 idx = 0;
93427 head = &net->dev_index_head[h];
93428@@ -4587,7 +4585,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
93429 dst_free(&ifp->rt->dst);
93430 break;
93431 }
93432- atomic_inc(&net->ipv6.dev_addr_genid);
93433+ atomic_inc_unchecked(&net->ipv6.dev_addr_genid);
93434 }
93435
93436 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
93437@@ -4607,7 +4605,7 @@ int addrconf_sysctl_forward(ctl_table *ctl, int write,
93438 int *valp = ctl->data;
93439 int val = *valp;
93440 loff_t pos = *ppos;
93441- ctl_table lctl;
93442+ ctl_table_no_const lctl;
93443 int ret;
93444
93445 /*
93446@@ -4689,7 +4687,7 @@ int addrconf_sysctl_disable(ctl_table *ctl, int write,
93447 int *valp = ctl->data;
93448 int val = *valp;
93449 loff_t pos = *ppos;
93450- ctl_table lctl;
93451+ ctl_table_no_const lctl;
93452 int ret;
93453
93454 /*
93455diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
93456index 40ffd72..aeac0dc 100644
93457--- a/net/ipv6/esp6.c
93458+++ b/net/ipv6/esp6.c
93459@@ -425,7 +425,7 @@ static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
93460 net_adj = 0;
93461
93462 return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
93463- net_adj) & ~(align - 1)) + (net_adj - 2);
93464+ net_adj) & ~(align - 1)) + net_adj - 2;
93465 }
93466
93467 static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
93468diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
93469index b4ff0a4..db9b764 100644
93470--- a/net/ipv6/icmp.c
93471+++ b/net/ipv6/icmp.c
93472@@ -980,7 +980,7 @@ ctl_table ipv6_icmp_table_template[] = {
93473
93474 struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
93475 {
93476- struct ctl_table *table;
93477+ ctl_table_no_const *table;
93478
93479 table = kmemdup(ipv6_icmp_table_template,
93480 sizeof(ipv6_icmp_table_template),
93481diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
93482index ecd6073..58162ae 100644
93483--- a/net/ipv6/ip6_gre.c
93484+++ b/net/ipv6/ip6_gre.c
93485@@ -74,7 +74,7 @@ struct ip6gre_net {
93486 struct net_device *fb_tunnel_dev;
93487 };
93488
93489-static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
93490+static struct rtnl_link_ops ip6gre_link_ops;
93491 static int ip6gre_tunnel_init(struct net_device *dev);
93492 static void ip6gre_tunnel_setup(struct net_device *dev);
93493 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
93494@@ -1283,7 +1283,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
93495 }
93496
93497
93498-static struct inet6_protocol ip6gre_protocol __read_mostly = {
93499+static struct inet6_protocol ip6gre_protocol = {
93500 .handler = ip6gre_rcv,
93501 .err_handler = ip6gre_err,
93502 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
93503@@ -1617,7 +1617,7 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
93504 [IFLA_GRE_FLAGS] = { .type = NLA_U32 },
93505 };
93506
93507-static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
93508+static struct rtnl_link_ops ip6gre_link_ops = {
93509 .kind = "ip6gre",
93510 .maxtype = IFLA_GRE_MAX,
93511 .policy = ip6gre_policy,
93512@@ -1630,7 +1630,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
93513 .fill_info = ip6gre_fill_info,
93514 };
93515
93516-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
93517+static struct rtnl_link_ops ip6gre_tap_ops = {
93518 .kind = "ip6gretap",
93519 .maxtype = IFLA_GRE_MAX,
93520 .policy = ip6gre_policy,
93521diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
93522index 1e55866..b398dab 100644
93523--- a/net/ipv6/ip6_tunnel.c
93524+++ b/net/ipv6/ip6_tunnel.c
93525@@ -88,7 +88,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
93526
93527 static int ip6_tnl_dev_init(struct net_device *dev);
93528 static void ip6_tnl_dev_setup(struct net_device *dev);
93529-static struct rtnl_link_ops ip6_link_ops __read_mostly;
93530+static struct rtnl_link_ops ip6_link_ops;
93531
93532 static int ip6_tnl_net_id __read_mostly;
93533 struct ip6_tnl_net {
93534@@ -1672,7 +1672,7 @@ static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
93535 [IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
93536 };
93537
93538-static struct rtnl_link_ops ip6_link_ops __read_mostly = {
93539+static struct rtnl_link_ops ip6_link_ops = {
93540 .kind = "ip6tnl",
93541 .maxtype = IFLA_IPTUN_MAX,
93542 .policy = ip6_tnl_policy,
93543diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
93544index d1e2e8e..51c19ae 100644
93545--- a/net/ipv6/ipv6_sockglue.c
93546+++ b/net/ipv6/ipv6_sockglue.c
93547@@ -991,7 +991,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
93548 if (sk->sk_type != SOCK_STREAM)
93549 return -ENOPROTOOPT;
93550
93551- msg.msg_control = optval;
93552+ msg.msg_control = (void __force_kernel *)optval;
93553 msg.msg_controllen = len;
93554 msg.msg_flags = flags;
93555
93556diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
93557index 44400c2..8e11f52 100644
93558--- a/net/ipv6/netfilter/ip6_tables.c
93559+++ b/net/ipv6/netfilter/ip6_tables.c
93560@@ -1078,14 +1078,14 @@ static int compat_table_info(const struct xt_table_info *info,
93561 #endif
93562
93563 static int get_info(struct net *net, void __user *user,
93564- const int *len, int compat)
93565+ int len, int compat)
93566 {
93567 char name[XT_TABLE_MAXNAMELEN];
93568 struct xt_table *t;
93569 int ret;
93570
93571- if (*len != sizeof(struct ip6t_getinfo)) {
93572- duprintf("length %u != %zu\n", *len,
93573+ if (len != sizeof(struct ip6t_getinfo)) {
93574+ duprintf("length %u != %zu\n", len,
93575 sizeof(struct ip6t_getinfo));
93576 return -EINVAL;
93577 }
93578@@ -1122,7 +1122,7 @@ static int get_info(struct net *net, void __user *user,
93579 info.size = private->size;
93580 strcpy(info.name, name);
93581
93582- if (copy_to_user(user, &info, *len) != 0)
93583+ if (copy_to_user(user, &info, len) != 0)
93584 ret = -EFAULT;
93585 else
93586 ret = 0;
93587@@ -1976,7 +1976,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
93588
93589 switch (cmd) {
93590 case IP6T_SO_GET_INFO:
93591- ret = get_info(sock_net(sk), user, len, 1);
93592+ ret = get_info(sock_net(sk), user, *len, 1);
93593 break;
93594 case IP6T_SO_GET_ENTRIES:
93595 ret = compat_get_entries(sock_net(sk), user, len);
93596@@ -2023,7 +2023,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
93597
93598 switch (cmd) {
93599 case IP6T_SO_GET_INFO:
93600- ret = get_info(sock_net(sk), user, len, 0);
93601+ ret = get_info(sock_net(sk), user, *len, 0);
93602 break;
93603
93604 case IP6T_SO_GET_ENTRIES:
93605diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
93606index dffdc1a..ccc6678 100644
93607--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
93608+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
93609@@ -90,12 +90,11 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
93610
93611 static int nf_ct_frag6_sysctl_register(struct net *net)
93612 {
93613- struct ctl_table *table;
93614+ ctl_table_no_const *table = NULL;
93615 struct ctl_table_header *hdr;
93616
93617- table = nf_ct_frag6_sysctl_table;
93618 if (!net_eq(net, &init_net)) {
93619- table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
93620+ table = kmemdup(nf_ct_frag6_sysctl_table, sizeof(nf_ct_frag6_sysctl_table),
93621 GFP_KERNEL);
93622 if (table == NULL)
93623 goto err_alloc;
93624@@ -103,9 +102,9 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
93625 table[0].data = &net->nf_frag.frags.timeout;
93626 table[1].data = &net->nf_frag.frags.low_thresh;
93627 table[2].data = &net->nf_frag.frags.high_thresh;
93628- }
93629-
93630- hdr = register_net_sysctl(net, "net/netfilter", table);
93631+ hdr = register_net_sysctl(net, "net/netfilter", table);
93632+ } else
93633+ hdr = register_net_sysctl(net, "net/netfilter", nf_ct_frag6_sysctl_table);
93634 if (hdr == NULL)
93635 goto err_reg;
93636
93637@@ -113,8 +112,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
93638 return 0;
93639
93640 err_reg:
93641- if (!net_eq(net, &init_net))
93642- kfree(table);
93643+ kfree(table);
93644 err_alloc:
93645 return -ENOMEM;
93646 }
93647diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
93648index eedff8c..7d7e24a 100644
93649--- a/net/ipv6/raw.c
93650+++ b/net/ipv6/raw.c
93651@@ -108,7 +108,7 @@ found:
93652 */
93653 static int icmpv6_filter(const struct sock *sk, const struct sk_buff *skb)
93654 {
93655- struct icmp6hdr *_hdr;
93656+ struct icmp6hdr _hdr;
93657 const struct icmp6hdr *hdr;
93658
93659 hdr = skb_header_pointer(skb, skb_transport_offset(skb),
93660@@ -378,7 +378,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
93661 {
93662 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
93663 skb_checksum_complete(skb)) {
93664- atomic_inc(&sk->sk_drops);
93665+ atomic_inc_unchecked(&sk->sk_drops);
93666 kfree_skb(skb);
93667 return NET_RX_DROP;
93668 }
93669@@ -406,7 +406,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
93670 struct raw6_sock *rp = raw6_sk(sk);
93671
93672 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
93673- atomic_inc(&sk->sk_drops);
93674+ atomic_inc_unchecked(&sk->sk_drops);
93675 kfree_skb(skb);
93676 return NET_RX_DROP;
93677 }
93678@@ -430,7 +430,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
93679
93680 if (inet->hdrincl) {
93681 if (skb_checksum_complete(skb)) {
93682- atomic_inc(&sk->sk_drops);
93683+ atomic_inc_unchecked(&sk->sk_drops);
93684 kfree_skb(skb);
93685 return NET_RX_DROP;
93686 }
93687@@ -602,7 +602,7 @@ out:
93688 return err;
93689 }
93690
93691-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
93692+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
93693 struct flowi6 *fl6, struct dst_entry **dstp,
93694 unsigned int flags)
93695 {
93696@@ -914,12 +914,15 @@ do_confirm:
93697 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
93698 char __user *optval, int optlen)
93699 {
93700+ struct icmp6_filter filter;
93701+
93702 switch (optname) {
93703 case ICMPV6_FILTER:
93704 if (optlen > sizeof(struct icmp6_filter))
93705 optlen = sizeof(struct icmp6_filter);
93706- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
93707+ if (copy_from_user(&filter, optval, optlen))
93708 return -EFAULT;
93709+ raw6_sk(sk)->filter = filter;
93710 return 0;
93711 default:
93712 return -ENOPROTOOPT;
93713@@ -932,6 +935,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
93714 char __user *optval, int __user *optlen)
93715 {
93716 int len;
93717+ struct icmp6_filter filter;
93718
93719 switch (optname) {
93720 case ICMPV6_FILTER:
93721@@ -943,7 +947,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
93722 len = sizeof(struct icmp6_filter);
93723 if (put_user(len, optlen))
93724 return -EFAULT;
93725- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
93726+ filter = raw6_sk(sk)->filter;
93727+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
93728 return -EFAULT;
93729 return 0;
93730 default:
93731@@ -1251,7 +1256,7 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
93732 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
93733 0,
93734 sock_i_ino(sp),
93735- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
93736+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
93737 }
93738
93739 static int raw6_seq_show(struct seq_file *seq, void *v)
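The icmpv6_filter() change at the top of this raw.c diff fixes a subtle skb_header_pointer() misuse: with _hdr declared as a pointer, sizeof(_hdr) is the pointer size and &_hdr is pointer-sized stack storage, not room for a header; declaring a real struct corrects both at once. The pitfall reduced to a few lines of userspace C (struct name illustrative):

#include <stdio.h>

struct icmp6hdr_x { unsigned char type, code; unsigned short cksum; };

int main(void)
{
        struct icmp6hdr_x *hp;  /* buggy shape: pointer-sized object */
        struct icmp6hdr_x  h;   /* fixed shape: header-sized storage */

        printf("sizeof(hp)=%zu sizeof(h)=%zu sizeof(*hp)=%zu\n",
               sizeof(hp), sizeof(h), sizeof(*hp));  /* 8, 4, 4 on LP64 */
        return 0;
}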
93740diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
93741index 790d9f4..68ae078 100644
93742--- a/net/ipv6/reassembly.c
93743+++ b/net/ipv6/reassembly.c
93744@@ -621,12 +621,11 @@ static struct ctl_table ip6_frags_ctl_table[] = {
93745
93746 static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
93747 {
93748- struct ctl_table *table;
93749+ ctl_table_no_const *table = NULL;
93750 struct ctl_table_header *hdr;
93751
93752- table = ip6_frags_ns_ctl_table;
93753 if (!net_eq(net, &init_net)) {
93754- table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
93755+ table = kmemdup(ip6_frags_ns_ctl_table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
93756 if (table == NULL)
93757 goto err_alloc;
93758
93759@@ -637,9 +636,10 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
93760 /* Don't export sysctls to unprivileged users */
93761 if (net->user_ns != &init_user_ns)
93762 table[0].procname = NULL;
93763- }
93764+ hdr = register_net_sysctl(net, "net/ipv6", table);
93765+ } else
93766+ hdr = register_net_sysctl(net, "net/ipv6", ip6_frags_ns_ctl_table);
93767
93768- hdr = register_net_sysctl(net, "net/ipv6", table);
93769 if (hdr == NULL)
93770 goto err_reg;
93771
93772@@ -647,8 +647,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
93773 return 0;
93774
93775 err_reg:
93776- if (!net_eq(net, &init_net))
93777- kfree(table);
93778+ kfree(table);
93779 err_alloc:
93780 return -ENOMEM;
93781 }
93782diff --git a/net/ipv6/route.c b/net/ipv6/route.c
93783index bacce6c..9d1741a 100644
93784--- a/net/ipv6/route.c
93785+++ b/net/ipv6/route.c
93786@@ -2903,7 +2903,7 @@ ctl_table ipv6_route_table_template[] = {
93787
93788 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
93789 {
93790- struct ctl_table *table;
93791+ ctl_table_no_const *table;
93792
93793 table = kmemdup(ipv6_route_table_template,
93794 sizeof(ipv6_route_table_template),
93795diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
93796index 60df36d..f3ab7c8 100644
93797--- a/net/ipv6/sit.c
93798+++ b/net/ipv6/sit.c
93799@@ -74,7 +74,7 @@ static void ipip6_tunnel_setup(struct net_device *dev);
93800 static void ipip6_dev_free(struct net_device *dev);
93801 static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst,
93802 __be32 *v4dst);
93803-static struct rtnl_link_ops sit_link_ops __read_mostly;
93804+static struct rtnl_link_ops sit_link_ops;
93805
93806 static int sit_net_id __read_mostly;
93807 struct sit_net {
93808@@ -1453,7 +1453,7 @@ static const struct nla_policy ipip6_policy[IFLA_IPTUN_MAX + 1] = {
93809 #endif
93810 };
93811
93812-static struct rtnl_link_ops sit_link_ops __read_mostly = {
93813+static struct rtnl_link_ops sit_link_ops = {
93814 .kind = "sit",
93815 .maxtype = IFLA_IPTUN_MAX,
93816 .policy = ipip6_policy,
93817diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
93818index e85c48b..b8268d3 100644
93819--- a/net/ipv6/sysctl_net_ipv6.c
93820+++ b/net/ipv6/sysctl_net_ipv6.c
93821@@ -40,7 +40,7 @@ static ctl_table ipv6_rotable[] = {
93822
93823 static int __net_init ipv6_sysctl_net_init(struct net *net)
93824 {
93825- struct ctl_table *ipv6_table;
93826+ ctl_table_no_const *ipv6_table;
93827 struct ctl_table *ipv6_route_table;
93828 struct ctl_table *ipv6_icmp_table;
93829 int err;
93830diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
93831index 0a17ed9..2526cc3 100644
93832--- a/net/ipv6/tcp_ipv6.c
93833+++ b/net/ipv6/tcp_ipv6.c
93834@@ -103,6 +103,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
93835 inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
93836 }
93837
93838+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
93839+extern int grsec_enable_blackhole;
93840+#endif
93841+
93842 static void tcp_v6_hash(struct sock *sk)
93843 {
93844 if (sk->sk_state != TCP_CLOSE) {
93845@@ -1398,6 +1402,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
93846 return 0;
93847
93848 reset:
93849+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
93850+ if (!grsec_enable_blackhole)
93851+#endif
93852 tcp_v6_send_reset(sk, skb);
93853 discard:
93854 if (opt_skb)
93855@@ -1480,12 +1487,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
93856 TCP_SKB_CB(skb)->sacked = 0;
93857
93858 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
93859- if (!sk)
93860+ if (!sk) {
93861+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
93862+ ret = 1;
93863+#endif
93864 goto no_tcp_socket;
93865+ }
93866
93867 process:
93868- if (sk->sk_state == TCP_TIME_WAIT)
93869+ if (sk->sk_state == TCP_TIME_WAIT) {
93870+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
93871+ ret = 2;
93872+#endif
93873 goto do_time_wait;
93874+ }
93875
93876 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
93877 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
93878@@ -1536,6 +1551,10 @@ csum_error:
93879 bad_packet:
93880 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
93881 } else {
93882+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
93883+ if (!grsec_enable_blackhole || (ret == 1 &&
93884+ (skb->dev->flags & IFF_LOOPBACK)))
93885+#endif
93886 tcp_v6_send_reset(NULL, skb);
93887 }
93888
93889diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
93890index e7b28f9..d09c290 100644
93891--- a/net/ipv6/udp.c
93892+++ b/net/ipv6/udp.c
93893@@ -52,6 +52,10 @@
93894 #include <trace/events/skb.h>
93895 #include "udp_impl.h"
93896
93897+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
93898+extern int grsec_enable_blackhole;
93899+#endif
93900+
93901 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
93902 {
93903 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
93904@@ -419,7 +423,7 @@ try_again:
93905 if (unlikely(err)) {
93906 trace_kfree_skb(skb, udpv6_recvmsg);
93907 if (!peeked) {
93908- atomic_inc(&sk->sk_drops);
93909+ atomic_inc_unchecked(&sk->sk_drops);
93910 if (is_udp4)
93911 UDP_INC_STATS_USER(sock_net(sk),
93912 UDP_MIB_INERRORS,
93913@@ -665,7 +669,7 @@ csum_error:
93914 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
93915 drop:
93916 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
93917- atomic_inc(&sk->sk_drops);
93918+ atomic_inc_unchecked(&sk->sk_drops);
93919 kfree_skb(skb);
93920 return -1;
93921 }
93922@@ -723,7 +727,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
93923 if (likely(skb1 == NULL))
93924 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
93925 if (!skb1) {
93926- atomic_inc(&sk->sk_drops);
93927+ atomic_inc_unchecked(&sk->sk_drops);
93928 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
93929 IS_UDPLITE(sk));
93930 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
93931@@ -860,6 +864,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
93932 goto csum_error;
93933
93934 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
93935+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
93936+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
93937+#endif
93938 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
93939
93940 kfree_skb(skb);
93941@@ -1392,7 +1399,7 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
93942 0,
93943 sock_i_ino(sp),
93944 atomic_read(&sp->sk_refcnt), sp,
93945- atomic_read(&sp->sk_drops));
93946+ atomic_read_unchecked(&sp->sk_drops));
93947 }
93948
93949 int udp6_seq_show(struct seq_file *seq, void *v)
93950diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
93951index 23ed03d..465a71d 100644
93952--- a/net/ipv6/xfrm6_policy.c
93953+++ b/net/ipv6/xfrm6_policy.c
93954@@ -324,19 +324,19 @@ static struct ctl_table xfrm6_policy_table[] = {
93955
93956 static int __net_init xfrm6_net_init(struct net *net)
93957 {
93958- struct ctl_table *table;
93959+ ctl_table_no_const *table = NULL;
93960 struct ctl_table_header *hdr;
93961
93962- table = xfrm6_policy_table;
93963 if (!net_eq(net, &init_net)) {
93964- table = kmemdup(table, sizeof(xfrm6_policy_table), GFP_KERNEL);
93965+ table = kmemdup(xfrm6_policy_table, sizeof(xfrm6_policy_table), GFP_KERNEL);
93966 if (!table)
93967 goto err_alloc;
93968
93969 table[0].data = &net->xfrm.xfrm6_dst_ops.gc_thresh;
93970- }
93971+ hdr = register_net_sysctl(net, "net/ipv6", table);
93972+ } else
93973+ hdr = register_net_sysctl(net, "net/ipv6", xfrm6_policy_table);
93974
93975- hdr = register_net_sysctl(net, "net/ipv6", table);
93976 if (!hdr)
93977 goto err_reg;
93978
93979@@ -344,8 +344,7 @@ static int __net_init xfrm6_net_init(struct net *net)
93980 return 0;
93981
93982 err_reg:
93983- if (!net_eq(net, &init_net))
93984- kfree(table);
93985+ kfree(table);
93986 err_alloc:
93987 return -ENOMEM;
93988 }
93989diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
93990index 41ac7938..75e3bb1 100644
93991--- a/net/irda/ircomm/ircomm_tty.c
93992+++ b/net/irda/ircomm/ircomm_tty.c
93993@@ -319,11 +319,11 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
93994 add_wait_queue(&port->open_wait, &wait);
93995
93996 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
93997- __FILE__, __LINE__, tty->driver->name, port->count);
93998+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
93999
94000 spin_lock_irqsave(&port->lock, flags);
94001 if (!tty_hung_up_p(filp))
94002- port->count--;
94003+ atomic_dec(&port->count);
94004 port->blocked_open++;
94005 spin_unlock_irqrestore(&port->lock, flags);
94006
94007@@ -358,7 +358,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
94008 }
94009
94010 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
94011- __FILE__, __LINE__, tty->driver->name, port->count);
94012+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
94013
94014 schedule();
94015 }
94016@@ -368,12 +368,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
94017
94018 spin_lock_irqsave(&port->lock, flags);
94019 if (!tty_hung_up_p(filp))
94020- port->count++;
94021+ atomic_inc(&port->count);
94022 port->blocked_open--;
94023 spin_unlock_irqrestore(&port->lock, flags);
94024
94025 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
94026- __FILE__, __LINE__, tty->driver->name, port->count);
94027+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
94028
94029 if (!retval)
94030 port->flags |= ASYNC_NORMAL_ACTIVE;
94031@@ -447,12 +447,12 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
94032
94033 /* ++ is not atomic, so this should be protected - Jean II */
94034 spin_lock_irqsave(&self->port.lock, flags);
94035- self->port.count++;
94036+ atomic_inc(&self->port.count);
94037 spin_unlock_irqrestore(&self->port.lock, flags);
94038 tty_port_tty_set(&self->port, tty);
94039
94040 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
94041- self->line, self->port.count);
94042+ self->line, atomic_read(&self->port.count));
94043
94044 /* Not really used by us, but lets do it anyway */
94045 self->port.low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
94046@@ -989,7 +989,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
94047 tty_kref_put(port->tty);
94048 }
94049 port->tty = NULL;
94050- port->count = 0;
94051+ atomic_set(&port->count, 0);
94052 spin_unlock_irqrestore(&port->lock, flags);
94053
94054 wake_up_interruptible(&port->open_wait);
94055@@ -1346,7 +1346,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
94056 seq_putc(m, '\n');
94057
94058 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
94059- seq_printf(m, "Open count: %d\n", self->port.count);
94060+ seq_printf(m, "Open count: %d\n", atomic_read(&self->port.count));
94061 seq_printf(m, "Max data size: %d\n", self->max_data_size);
94062 seq_printf(m, "Max header size: %d\n", self->max_header_size);
94063
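Throughout ircomm_tty.c the patch turns the plain int port->count into an atomic counter; the driver's own comment ("++ is not atomic, so this should be protected - Jean II") already conceded the problem the spinlock only partially covered. The same shape with C11 atomics in userspace; type and field names are illustrative:

#include <stdatomic.h>
#include <stdio.h>

struct tty_port_x { atomic_int count; };

int main(void)
{
        struct tty_port_x port = { .count = 0 };

        atomic_fetch_add(&port.count, 1);   /* atomic_inc(&port->count) */
        atomic_fetch_sub(&port.count, 1);   /* atomic_dec(&port->count) */
        printf("open count: %d\n", atomic_load(&port.count));
        return 0;
}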
94064diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
94065index ae69165..c8b82d8 100644
94066--- a/net/iucv/af_iucv.c
94067+++ b/net/iucv/af_iucv.c
94068@@ -773,10 +773,10 @@ static int iucv_sock_autobind(struct sock *sk)
94069
94070 write_lock_bh(&iucv_sk_list.lock);
94071
94072- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
94073+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
94074 while (__iucv_get_sock_by_name(name)) {
94075 sprintf(name, "%08x",
94076- atomic_inc_return(&iucv_sk_list.autobind_name));
94077+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
94078 }
94079
94080 write_unlock_bh(&iucv_sk_list.lock);
94081diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
94082index 4fe76ff..426a904 100644
94083--- a/net/iucv/iucv.c
94084+++ b/net/iucv/iucv.c
94085@@ -690,7 +690,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
94086 return NOTIFY_OK;
94087 }
94088
94089-static struct notifier_block __refdata iucv_cpu_notifier = {
94090+static struct notifier_block iucv_cpu_notifier = {
94091 .notifier_call = iucv_cpu_notify,
94092 };
94093
94094diff --git a/net/key/af_key.c b/net/key/af_key.c
94095index ab8bd2c..cd2d641 100644
94096--- a/net/key/af_key.c
94097+++ b/net/key/af_key.c
94098@@ -3048,10 +3048,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
94099 static u32 get_acqseq(void)
94100 {
94101 u32 res;
94102- static atomic_t acqseq;
94103+ static atomic_unchecked_t acqseq;
94104
94105 do {
94106- res = atomic_inc_return(&acqseq);
94107+ res = atomic_inc_return_unchecked(&acqseq);
94108 } while (!res);
94109 return res;
94110 }
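The af_iucv and af_key hunks above move intentionally wrapping generators (autobind names, acquire sequence numbers) to atomic_unchecked_t, which PaX's REFCOUNT overflow detection deliberately ignores; only true reference counts keep the checked type. A hedged userspace analogue of get_acqseq(), with unsigned wraparound modelling the unchecked counter:

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint acqseq;   /* wrapping is expected and harmless here */

static unsigned int get_acqseq(void)
{
	unsigned int res;

	do {
		res = atomic_fetch_add(&acqseq, 1) + 1;  /* atomic_inc_return() */
	} while (!res);                                  /* 0 is reserved, skip it */
	return res;
}

int main(void)
{
	for (int i = 0; i < 3; i++)
		printf("seq=%u\n", get_acqseq());
	return 0;
}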
94111diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
94112index ae36f8e..09d42ac 100644
94113--- a/net/mac80211/cfg.c
94114+++ b/net/mac80211/cfg.c
94115@@ -806,7 +806,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
94116 ret = ieee80211_vif_use_channel(sdata, chandef,
94117 IEEE80211_CHANCTX_EXCLUSIVE);
94118 }
94119- } else if (local->open_count == local->monitors) {
94120+ } else if (local_read(&local->open_count) == local->monitors) {
94121 local->_oper_chandef = *chandef;
94122 ieee80211_hw_config(local, 0);
94123 }
94124@@ -2922,7 +2922,7 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
94125 else
94126 local->probe_req_reg--;
94127
94128- if (!local->open_count)
94129+ if (!local_read(&local->open_count))
94130 break;
94131
94132 ieee80211_queue_work(&local->hw, &local->reconfig_filter);
94133@@ -3385,8 +3385,8 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
94134 if (chanctx_conf) {
94135 *chandef = chanctx_conf->def;
94136 ret = 0;
94137- } else if (local->open_count > 0 &&
94138- local->open_count == local->monitors &&
94139+ } else if (local_read(&local->open_count) > 0 &&
94140+ local_read(&local->open_count) == local->monitors &&
94141 sdata->vif.type == NL80211_IFTYPE_MONITOR) {
94142 if (local->use_chanctx)
94143 *chandef = local->monitor_chandef;
94144diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
94145index 9ca8e32..48e4a9b 100644
94146--- a/net/mac80211/ieee80211_i.h
94147+++ b/net/mac80211/ieee80211_i.h
94148@@ -28,6 +28,7 @@
94149 #include <net/ieee80211_radiotap.h>
94150 #include <net/cfg80211.h>
94151 #include <net/mac80211.h>
94152+#include <asm/local.h>
94153 #include "key.h"
94154 #include "sta_info.h"
94155 #include "debug.h"
94156@@ -891,7 +892,7 @@ struct ieee80211_local {
94157 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
94158 spinlock_t queue_stop_reason_lock;
94159
94160- int open_count;
94161+ local_t open_count;
94162 int monitors, cooked_mntrs;
94163 /* number of interfaces with corresponding FIF_ flags */
94164 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
94165diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
94166index 514e90f..56f22bf 100644
94167--- a/net/mac80211/iface.c
94168+++ b/net/mac80211/iface.c
94169@@ -502,7 +502,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
94170 break;
94171 }
94172
94173- if (local->open_count == 0) {
94174+ if (local_read(&local->open_count) == 0) {
94175 res = drv_start(local);
94176 if (res)
94177 goto err_del_bss;
94178@@ -545,7 +545,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
94179 break;
94180 }
94181
94182- if (local->monitors == 0 && local->open_count == 0) {
94183+ if (local->monitors == 0 && local_read(&local->open_count) == 0) {
94184 res = ieee80211_add_virtual_monitor(local);
94185 if (res)
94186 goto err_stop;
94187@@ -653,7 +653,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
94188 atomic_inc(&local->iff_promiscs);
94189
94190 if (coming_up)
94191- local->open_count++;
94192+ local_inc(&local->open_count);
94193
94194 if (hw_reconf_flags)
94195 ieee80211_hw_config(local, hw_reconf_flags);
94196@@ -691,7 +691,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
94197 err_del_interface:
94198 drv_remove_interface(local, sdata);
94199 err_stop:
94200- if (!local->open_count)
94201+ if (!local_read(&local->open_count))
94202 drv_stop(local);
94203 err_del_bss:
94204 sdata->bss = NULL;
94205@@ -828,7 +828,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
94206 }
94207
94208 if (going_down)
94209- local->open_count--;
94210+ local_dec(&local->open_count);
94211
94212 switch (sdata->vif.type) {
94213 case NL80211_IFTYPE_AP_VLAN:
94214@@ -895,7 +895,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
94215 }
94216 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
94217
94218- if (local->open_count == 0)
94219+ if (local_read(&local->open_count) == 0)
94220 ieee80211_clear_tx_pending(local);
94221
94222 /*
94223@@ -931,7 +931,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
94224
94225 ieee80211_recalc_ps(local, -1);
94226
94227- if (local->open_count == 0) {
94228+ if (local_read(&local->open_count) == 0) {
94229 ieee80211_stop_device(local);
94230
94231 /* no reconfiguring after stop! */
94232@@ -942,7 +942,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
94233 ieee80211_configure_filter(local);
94234 ieee80211_hw_config(local, hw_reconf_flags);
94235
94236- if (local->monitors == local->open_count)
94237+ if (local->monitors == local_read(&local->open_count))
94238 ieee80211_add_virtual_monitor(local);
94239 }
94240
94241diff --git a/net/mac80211/main.c b/net/mac80211/main.c
94242index 8a7bfc4..4407cd0 100644
94243--- a/net/mac80211/main.c
94244+++ b/net/mac80211/main.c
94245@@ -181,7 +181,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
94246 changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
94247 IEEE80211_CONF_CHANGE_POWER);
94248
94249- if (changed && local->open_count) {
94250+ if (changed && local_read(&local->open_count)) {
94251 ret = drv_config(local, changed);
94252 /*
94253 * Goal:
94254diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
94255index 3401262..d5cd68d 100644
94256--- a/net/mac80211/pm.c
94257+++ b/net/mac80211/pm.c
94258@@ -12,7 +12,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
94259 struct ieee80211_sub_if_data *sdata;
94260 struct sta_info *sta;
94261
94262- if (!local->open_count)
94263+ if (!local_read(&local->open_count))
94264 goto suspend;
94265
94266 ieee80211_scan_cancel(local);
94267@@ -59,7 +59,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
94268 cancel_work_sync(&local->dynamic_ps_enable_work);
94269 del_timer_sync(&local->dynamic_ps_timer);
94270
94271- local->wowlan = wowlan && local->open_count;
94272+ local->wowlan = wowlan && local_read(&local->open_count);
94273 if (local->wowlan) {
94274 int err = drv_suspend(local, wowlan);
94275 if (err < 0) {
94276@@ -116,7 +116,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
94277 WARN_ON(!list_empty(&local->chanctx_list));
94278
94279 /* stop hardware - this must stop RX */
94280- if (local->open_count)
94281+ if (local_read(&local->open_count))
94282 ieee80211_stop_device(local);
94283
94284 suspend:
94285diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
94286index a02bef3..f2f38dd 100644
94287--- a/net/mac80211/rate.c
94288+++ b/net/mac80211/rate.c
94289@@ -712,7 +712,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
94290
94291 ASSERT_RTNL();
94292
94293- if (local->open_count)
94294+ if (local_read(&local->open_count))
94295 return -EBUSY;
94296
94297 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
94298diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
94299index c97a065..ff61928 100644
94300--- a/net/mac80211/rc80211_pid_debugfs.c
94301+++ b/net/mac80211/rc80211_pid_debugfs.c
94302@@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
94303
94304 spin_unlock_irqrestore(&events->lock, status);
94305
94306- if (copy_to_user(buf, pb, p))
94307+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
94308 return -EFAULT;
94309
94310 return p;
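The rc80211_pid_debugfs hunk above adds the "p > sizeof(pb)" guard so a length computed elsewhere can never drive copy_to_user() past the source buffer. A hedged sketch of the same guard in plain C (memcpy stands in for copy_to_user; all names are illustrative):

#include <stdio.h>
#include <string.h>

static int read_events(char *dst, size_t dst_len,
			const char *pb, size_t pb_size, size_t p)
{
	/* the added bound: never copy past the source or the destination */
	if (p > pb_size || p > dst_len)
		return -1;                      /* -EFAULT analogue */
	memcpy(dst, pb, p);
	return (int)p;
}

int main(void)
{
	char pb[32] = "snapshot";
	char out[32];

	printf("ok=%d\n", read_events(out, sizeof(out), pb, sizeof(pb), 8));
	printf("rejected=%d\n", read_events(out, sizeof(out), pb, sizeof(pb), 99));
	return 0;
}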
94311diff --git a/net/mac80211/util.c b/net/mac80211/util.c
94312index 72e6292..e6319eb 100644
94313--- a/net/mac80211/util.c
94314+++ b/net/mac80211/util.c
94315@@ -1472,7 +1472,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
94316 }
94317 #endif
94318 /* everything else happens only if HW was up & running */
94319- if (!local->open_count)
94320+ if (!local_read(&local->open_count))
94321 goto wake_up;
94322
94323 /*
94324@@ -1696,7 +1696,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
94325 local->in_reconfig = false;
94326 barrier();
94327
94328- if (local->monitors == local->open_count && local->monitors > 0)
94329+ if (local->monitors == local_read(&local->open_count) && local->monitors > 0)
94330 ieee80211_add_virtual_monitor(local);
94331
94332 /*
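Across cfg.c, iface.c, main.c, pm.c, rate.c and util.c above, open_count becomes a local_t and every access goes through local_read()/local_inc()/local_dec(), so no call site can keep a racy bare increment. A hedged userspace sketch of that accessor pattern (this local_t is a stand-in built on C11 atomics, not the kernel's asm/local.h type):

#include <stdatomic.h>
#include <stdio.h>

typedef struct { atomic_long v; } local_t;      /* stand-in for asm/local.h */

static inline long local_read(local_t *l) { return atomic_load(&l->v); }
static inline void local_inc(local_t *l)  { atomic_fetch_add(&l->v, 1); }
static inline void local_dec(local_t *l)  { atomic_fetch_sub(&l->v, 1); }

struct local_stub { local_t open_count; };      /* ieee80211_local analogue */

int main(void)
{
	struct local_stub local = { .open_count = { 0 } };

	local_inc(&local.open_count);               /* was: local->open_count++ */
	printf("open_count=%ld\n", local_read(&local.open_count));
	local_dec(&local.open_count);
	return 0;
}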
94333diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
94334index 56d22ca..87c778f 100644
94335--- a/net/netfilter/Kconfig
94336+++ b/net/netfilter/Kconfig
94337@@ -958,6 +958,16 @@ config NETFILTER_XT_MATCH_ESP
94338
94339 To compile it as a module, choose M here. If unsure, say N.
94340
94341+config NETFILTER_XT_MATCH_GRADM
94342+ tristate '"gradm" match support'
94343+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
94344+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
94345+ ---help---
94346+	  The gradm match allows matching on whether grsecurity RBAC is
94347+	  enabled. It is useful when iptables rules are applied early in
94348+	  boot to block connections to the machine (except from a trusted
94349+	  host) while the RBAC system is still disabled.
94350+
94351 config NETFILTER_XT_MATCH_HASHLIMIT
94352 tristate '"hashlimit" match support'
94353 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
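Usage sketch for the new match above (hedged: the --disabled option comes from the gradm userspace iptables extension and the address is invented; neither appears in this patch): an early-boot rule such as `iptables -I INPUT ! -s 192.168.1.100 -m gradm --disabled -j DROP` would block every host but one trusted machine until the RBAC policy is enabled.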
94354diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
94355index a1abf87..dbcb7ee 100644
94356--- a/net/netfilter/Makefile
94357+++ b/net/netfilter/Makefile
94358@@ -112,6 +112,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
94359 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
94360 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
94361 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
94362+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
94363 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
94364 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
94365 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
94366diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
94367index f771390..145b765 100644
94368--- a/net/netfilter/ipset/ip_set_core.c
94369+++ b/net/netfilter/ipset/ip_set_core.c
94370@@ -1820,7 +1820,7 @@ done:
94371 return ret;
94372 }
94373
94374-static struct nf_sockopt_ops so_set __read_mostly = {
94375+static struct nf_sockopt_ops so_set = {
94376 .pf = PF_INET,
94377 .get_optmin = SO_IP_SET,
94378 .get_optmax = SO_IP_SET + 1,
94379diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
94380index a083bda..da661c3 100644
94381--- a/net/netfilter/ipvs/ip_vs_conn.c
94382+++ b/net/netfilter/ipvs/ip_vs_conn.c
94383@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
94384 /* Increase the refcnt counter of the dest */
94385 ip_vs_dest_hold(dest);
94386
94387- conn_flags = atomic_read(&dest->conn_flags);
94388+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
94389 if (cp->protocol != IPPROTO_UDP)
94390 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
94391 flags = cp->flags;
94392@@ -900,7 +900,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
94393
94394 cp->control = NULL;
94395 atomic_set(&cp->n_control, 0);
94396- atomic_set(&cp->in_pkts, 0);
94397+ atomic_set_unchecked(&cp->in_pkts, 0);
94398
94399 cp->packet_xmit = NULL;
94400 cp->app = NULL;
94401@@ -1190,7 +1190,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
94402
94403 /* Don't drop the entry if its number of incoming packets is not
94404 located in [0, 8] */
94405- i = atomic_read(&cp->in_pkts);
94406+ i = atomic_read_unchecked(&cp->in_pkts);
94407 if (i > 8 || i < 0) return 0;
94408
94409 if (!todrop_rate[i]) return 0;
94410diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
94411index 23b8eb5..48a8959 100644
94412--- a/net/netfilter/ipvs/ip_vs_core.c
94413+++ b/net/netfilter/ipvs/ip_vs_core.c
94414@@ -559,7 +559,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
94415 ret = cp->packet_xmit(skb, cp, pd->pp, iph);
94416 /* do not touch skb anymore */
94417
94418- atomic_inc(&cp->in_pkts);
94419+ atomic_inc_unchecked(&cp->in_pkts);
94420 ip_vs_conn_put(cp);
94421 return ret;
94422 }
94423@@ -1711,7 +1711,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
94424 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
94425 pkts = sysctl_sync_threshold(ipvs);
94426 else
94427- pkts = atomic_add_return(1, &cp->in_pkts);
94428+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
94429
94430 if (ipvs->sync_state & IP_VS_STATE_MASTER)
94431 ip_vs_sync_conn(net, cp, pkts);
94432diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
94433index 9e6c2a0..28552e2 100644
94434--- a/net/netfilter/ipvs/ip_vs_ctl.c
94435+++ b/net/netfilter/ipvs/ip_vs_ctl.c
94436@@ -789,7 +789,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
94437 */
94438 ip_vs_rs_hash(ipvs, dest);
94439 }
94440- atomic_set(&dest->conn_flags, conn_flags);
94441+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
94442
94443 /* bind the service */
94444 if (!dest->svc) {
94445@@ -1657,7 +1657,7 @@ proc_do_sync_ports(ctl_table *table, int write,
94446 * align with netns init in ip_vs_control_net_init()
94447 */
94448
94449-static struct ctl_table vs_vars[] = {
94450+static ctl_table_no_const vs_vars[] __read_only = {
94451 {
94452 .procname = "amemthresh",
94453 .maxlen = sizeof(int),
94454@@ -2060,7 +2060,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
94455 " %-7s %-6d %-10d %-10d\n",
94456 &dest->addr.in6,
94457 ntohs(dest->port),
94458- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
94459+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
94460 atomic_read(&dest->weight),
94461 atomic_read(&dest->activeconns),
94462 atomic_read(&dest->inactconns));
94463@@ -2071,7 +2071,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
94464 "%-7s %-6d %-10d %-10d\n",
94465 ntohl(dest->addr.ip),
94466 ntohs(dest->port),
94467- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
94468+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
94469 atomic_read(&dest->weight),
94470 atomic_read(&dest->activeconns),
94471 atomic_read(&dest->inactconns));
94472@@ -2549,7 +2549,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
94473
94474 entry.addr = dest->addr.ip;
94475 entry.port = dest->port;
94476- entry.conn_flags = atomic_read(&dest->conn_flags);
94477+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
94478 entry.weight = atomic_read(&dest->weight);
94479 entry.u_threshold = dest->u_threshold;
94480 entry.l_threshold = dest->l_threshold;
94481@@ -3092,7 +3092,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
94482 if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
94483 nla_put_be16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
94484 nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
94485- (atomic_read(&dest->conn_flags) &
94486+ (atomic_read_unchecked(&dest->conn_flags) &
94487 IP_VS_CONN_F_FWD_MASK)) ||
94488 nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
94489 atomic_read(&dest->weight)) ||
94490@@ -3682,7 +3682,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
94491 {
94492 int idx;
94493 struct netns_ipvs *ipvs = net_ipvs(net);
94494- struct ctl_table *tbl;
94495+ ctl_table_no_const *tbl;
94496
94497 atomic_set(&ipvs->dropentry, 0);
94498 spin_lock_init(&ipvs->dropentry_lock);
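The ctl_table_no_const conversions above follow one rule: under PaX constification the canonical sysctl table type is read-only, and a separate non-const alias exists only for tables built at runtime (for example, kmemdup'ed per-namespace copies). A hedged sketch of the type split (struct and field names are stand-ins, not the kernel's ctl_table):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct ctl_entry { const char *procname; int maxlen; };

typedef const struct ctl_entry ctl_table;       /* default: read-only */
typedef struct ctl_entry ctl_table_no_const;    /* opt-in: writable copy */

static ctl_table vs_vars_tmpl[] = { { "amemthresh", sizeof(int) }, { NULL, 0 } };

int main(void)
{
	/* the per-instance copy may be patched; the template never is */
	ctl_table_no_const *tbl = malloc(sizeof(vs_vars_tmpl));

	memcpy(tbl, vs_vars_tmpl, sizeof(vs_vars_tmpl));
	tbl[0].maxlen = 2 * sizeof(int);    /* legal only on the non-const copy */
	printf("%s maxlen=%d\n", tbl[0].procname, tbl[0].maxlen);
	free(tbl);
	return 0;
}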
94499diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
94500index 5ea26bd..c9bc65f 100644
94501--- a/net/netfilter/ipvs/ip_vs_lblc.c
94502+++ b/net/netfilter/ipvs/ip_vs_lblc.c
94503@@ -118,7 +118,7 @@ struct ip_vs_lblc_table {
94504 * IPVS LBLC sysctl table
94505 */
94506 #ifdef CONFIG_SYSCTL
94507-static ctl_table vs_vars_table[] = {
94508+static ctl_table_no_const vs_vars_table[] __read_only = {
94509 {
94510 .procname = "lblc_expiration",
94511 .data = NULL,
94512diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
94513index 50123c2..067c773 100644
94514--- a/net/netfilter/ipvs/ip_vs_lblcr.c
94515+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
94516@@ -299,7 +299,7 @@ struct ip_vs_lblcr_table {
94517 * IPVS LBLCR sysctl table
94518 */
94519
94520-static ctl_table vs_vars_table[] = {
94521+static ctl_table_no_const vs_vars_table[] __read_only = {
94522 {
94523 .procname = "lblcr_expiration",
94524 .data = NULL,
94525diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
94526index f6046d9..4f10cfd 100644
94527--- a/net/netfilter/ipvs/ip_vs_sync.c
94528+++ b/net/netfilter/ipvs/ip_vs_sync.c
94529@@ -596,7 +596,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
94530 cp = cp->control;
94531 if (cp) {
94532 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
94533- pkts = atomic_add_return(1, &cp->in_pkts);
94534+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
94535 else
94536 pkts = sysctl_sync_threshold(ipvs);
94537 ip_vs_sync_conn(net, cp->control, pkts);
94538@@ -758,7 +758,7 @@ control:
94539 if (!cp)
94540 return;
94541 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
94542- pkts = atomic_add_return(1, &cp->in_pkts);
94543+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
94544 else
94545 pkts = sysctl_sync_threshold(ipvs);
94546 goto sloop;
94547@@ -882,7 +882,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
94548
94549 if (opt)
94550 memcpy(&cp->in_seq, opt, sizeof(*opt));
94551- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
94552+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
94553 cp->state = state;
94554 cp->old_state = cp->state;
94555 /*
94556diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
94557index b75ff64..0c51bbe 100644
94558--- a/net/netfilter/ipvs/ip_vs_xmit.c
94559+++ b/net/netfilter/ipvs/ip_vs_xmit.c
94560@@ -1102,7 +1102,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
94561 else
94562 rc = NF_ACCEPT;
94563 /* do not touch skb anymore */
94564- atomic_inc(&cp->in_pkts);
94565+ atomic_inc_unchecked(&cp->in_pkts);
94566 goto out;
94567 }
94568
94569@@ -1194,7 +1194,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
94570 else
94571 rc = NF_ACCEPT;
94572 /* do not touch skb anymore */
94573- atomic_inc(&cp->in_pkts);
94574+ atomic_inc_unchecked(&cp->in_pkts);
94575 goto out;
94576 }
94577
94578diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
94579index 2d3030a..7ba1c0a 100644
94580--- a/net/netfilter/nf_conntrack_acct.c
94581+++ b/net/netfilter/nf_conntrack_acct.c
94582@@ -60,7 +60,7 @@ static struct nf_ct_ext_type acct_extend __read_mostly = {
94583 #ifdef CONFIG_SYSCTL
94584 static int nf_conntrack_acct_init_sysctl(struct net *net)
94585 {
94586- struct ctl_table *table;
94587+ ctl_table_no_const *table;
94588
94589 table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table),
94590 GFP_KERNEL);
94591diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
94592index 0283bae..5febcb0 100644
94593--- a/net/netfilter/nf_conntrack_core.c
94594+++ b/net/netfilter/nf_conntrack_core.c
94595@@ -1614,6 +1614,10 @@ void nf_conntrack_init_end(void)
94596 #define DYING_NULLS_VAL ((1<<30)+1)
94597 #define TEMPLATE_NULLS_VAL ((1<<30)+2)
94598
94599+#ifdef CONFIG_GRKERNSEC_HIDESYM
94600+static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
94601+#endif
94602+
94603 int nf_conntrack_init_net(struct net *net)
94604 {
94605 int ret;
94606@@ -1628,7 +1632,11 @@ int nf_conntrack_init_net(struct net *net)
94607 goto err_stat;
94608 }
94609
94610+#ifdef CONFIG_GRKERNSEC_HIDESYM
94611+ net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08lx", atomic_inc_return_unchecked(&conntrack_cache_id));
94612+#else
94613 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
94614+#endif
94615 if (!net->ct.slabname) {
94616 ret = -ENOMEM;
94617 goto err_slabname;
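The HIDESYM hunk above renames the per-netns conntrack cache from a "%p"-derived name, which leaks a kernel address into /proc/slabinfo, to one derived from a counter. A hedged userspace sketch of the same idea (asprintf() stands in for kasprintf(); the counter models the atomic_unchecked_t cache id):

#define _GNU_SOURCE
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static atomic_ulong cache_id;

static char *make_name_leaky(const void *net)
{
	char *s;
	/* old behaviour: embeds a real address in the visible name */
	return asprintf(&s, "nf_conntrack_%p", net) < 0 ? NULL : s;
}

static char *make_name_hidden(void)
{
	char *s;
	unsigned long id = atomic_fetch_add(&cache_id, 1) + 1;
	/* new behaviour: a per-instance counter, no address disclosed */
	return asprintf(&s, "nf_conntrack_%08lx", id) < 0 ? NULL : s;
}

int main(void)
{
	int x;
	char *a = make_name_leaky(&x), *b = make_name_hidden();

	printf("%s\n%s\n", a, b);
	free(a);
	free(b);
	return 0;
}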
94618diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
94619index 1df1761..ce8b88a 100644
94620--- a/net/netfilter/nf_conntrack_ecache.c
94621+++ b/net/netfilter/nf_conntrack_ecache.c
94622@@ -188,7 +188,7 @@ static struct nf_ct_ext_type event_extend __read_mostly = {
94623 #ifdef CONFIG_SYSCTL
94624 static int nf_conntrack_event_init_sysctl(struct net *net)
94625 {
94626- struct ctl_table *table;
94627+ ctl_table_no_const *table;
94628
94629 table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
94630 GFP_KERNEL);
94631diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
94632index 974a2a4..52cc6ff 100644
94633--- a/net/netfilter/nf_conntrack_helper.c
94634+++ b/net/netfilter/nf_conntrack_helper.c
94635@@ -57,7 +57,7 @@ static struct ctl_table helper_sysctl_table[] = {
94636
94637 static int nf_conntrack_helper_init_sysctl(struct net *net)
94638 {
94639- struct ctl_table *table;
94640+ ctl_table_no_const *table;
94641
94642 table = kmemdup(helper_sysctl_table, sizeof(helper_sysctl_table),
94643 GFP_KERNEL);
94644diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
94645index 0ab9636..cea3c6a 100644
94646--- a/net/netfilter/nf_conntrack_proto.c
94647+++ b/net/netfilter/nf_conntrack_proto.c
94648@@ -52,7 +52,7 @@ nf_ct_register_sysctl(struct net *net,
94649
94650 static void
94651 nf_ct_unregister_sysctl(struct ctl_table_header **header,
94652- struct ctl_table **table,
94653+ ctl_table_no_const **table,
94654 unsigned int users)
94655 {
94656 if (users > 0)
94657diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
94658index a99b6c3..cb372f9 100644
94659--- a/net/netfilter/nf_conntrack_proto_dccp.c
94660+++ b/net/netfilter/nf_conntrack_proto_dccp.c
94661@@ -428,7 +428,7 @@ static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
94662 const char *msg;
94663 u_int8_t state;
94664
94665- dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
94666+ dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
94667 BUG_ON(dh == NULL);
94668
94669 state = dccp_state_table[CT_DCCP_ROLE_CLIENT][dh->dccph_type][CT_DCCP_NONE];
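The "&dh" to "&_dh" changes in this file fix a classic skb_header_pointer() misuse: the scratch buffer's address must be passed, not the address of the destination pointer itself, or the helper can scribble over the pointer before it is dereferenced. A hedged sketch of the correct pattern (header_pointer() is a simplified stand-in for the kernel helper):

#include <stdio.h>
#include <string.h>

/* copies len bytes at offset into buf and returns it, or NULL if short */
static const void *header_pointer(const unsigned char *pkt, size_t pktlen,
				  size_t offset, size_t len, void *buf)
{
	if (offset + len > pktlen)
		return NULL;
	memcpy(buf, pkt + offset, len);   /* linear-skb fast path elided */
	return buf;
}

struct dccp_hdr_stub { unsigned char type; };

int main(void)
{
	unsigned char pkt[8] = { 0, 0, 0, 0, 2, 0, 0, 0 };
	struct dccp_hdr_stub _dh;
	const struct dccp_hdr_stub *dh;

	dh = header_pointer(pkt, sizeof(pkt), 4, sizeof(_dh), &_dh); /* right */
	/* dh = header_pointer(..., &dh);  wrong: clobbers the pointer itself */
	printf("type=%u\n", dh ? dh->type : 0);
	return 0;
}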
94670@@ -457,7 +457,7 @@ static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
94671 out_invalid:
94672 if (LOG_INVALID(net, IPPROTO_DCCP))
94673 nf_log_packet(net, nf_ct_l3num(ct), 0, skb, NULL, NULL,
94674- NULL, msg);
94675+ NULL, "%s", msg);
94676 return false;
94677 }
94678
94679@@ -486,7 +486,7 @@ static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
94680 u_int8_t type, old_state, new_state;
94681 enum ct_dccp_roles role;
94682
94683- dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
94684+ dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
94685 BUG_ON(dh == NULL);
94686 type = dh->dccph_type;
94687
94688@@ -577,7 +577,7 @@ static int dccp_error(struct net *net, struct nf_conn *tmpl,
94689 unsigned int cscov;
94690 const char *msg;
94691
94692- dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
94693+ dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
94694 if (dh == NULL) {
94695 msg = "nf_ct_dccp: short packet ";
94696 goto out_invalid;
94697@@ -614,7 +614,7 @@ static int dccp_error(struct net *net, struct nf_conn *tmpl,
94698
94699 out_invalid:
94700 if (LOG_INVALID(net, IPPROTO_DCCP))
94701- nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, msg);
94702+ nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, "%s", msg);
94703 return -NF_ACCEPT;
94704 }
94705
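The two '"%s", msg' changes above close a format-string hole: a message that may contain attacker-influenced text must be passed as data, never as the format argument, or '%' sequences inside it drive the formatter. A hedged userspace sketch (printf models nf_log_packet's format handling; the msg value is invented):

#include <stdio.h>

int main(void)
{
	const char *msg = "bad packet from %s%n";   /* attacker-influenced text */

	printf("%s\n", msg);    /* safe: msg is data, like the patched calls */
	/* printf(msg);            unsafe: msg becomes the format string */
	return 0;
}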
94706diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
94707index 4d4d8f1..e0f9a32 100644
94708--- a/net/netfilter/nf_conntrack_proto_tcp.c
94709+++ b/net/netfilter/nf_conntrack_proto_tcp.c
94710@@ -526,7 +526,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
94711 const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
94712 __u32 seq, ack, sack, end, win, swin;
94713 s16 receiver_offset;
94714- bool res;
94715+ bool res, in_recv_win;
94716
94717 /*
94718 * Get the required data from the packet.
94719@@ -649,14 +649,18 @@ static bool tcp_in_window(const struct nf_conn *ct,
94720 receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
94721 receiver->td_scale);
94722
94723+ /* Is the ending sequence in the receive window (if available)? */
94724+ in_recv_win = !receiver->td_maxwin ||
94725+ after(end, sender->td_end - receiver->td_maxwin - 1);
94726+
94727 pr_debug("tcp_in_window: I=%i II=%i III=%i IV=%i\n",
94728 before(seq, sender->td_maxend + 1),
94729- after(end, sender->td_end - receiver->td_maxwin - 1),
94730+ (in_recv_win ? 1 : 0),
94731 before(sack, receiver->td_end + 1),
94732 after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1));
94733
94734 if (before(seq, sender->td_maxend + 1) &&
94735- after(end, sender->td_end - receiver->td_maxwin - 1) &&
94736+ in_recv_win &&
94737 before(sack, receiver->td_end + 1) &&
94738 after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1)) {
94739 /*
94740@@ -725,7 +729,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
94741 nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
94742 "nf_ct_tcp: %s ",
94743 before(seq, sender->td_maxend + 1) ?
94744- after(end, sender->td_end - receiver->td_maxwin - 1) ?
94745+ in_recv_win ?
94746 before(sack, receiver->td_end + 1) ?
94747 after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1) ? "BUG"
94748 : "ACK is under the lower bound (possible overly delayed ACK)"
94749diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
94750index bd700b4..4a3dc61 100644
94751--- a/net/netfilter/nf_conntrack_standalone.c
94752+++ b/net/netfilter/nf_conntrack_standalone.c
94753@@ -471,7 +471,7 @@ static ctl_table nf_ct_netfilter_table[] = {
94754
94755 static int nf_conntrack_standalone_init_sysctl(struct net *net)
94756 {
94757- struct ctl_table *table;
94758+ ctl_table_no_const *table;
94759
94760 table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table),
94761 GFP_KERNEL);
94762diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
94763index 902fb0a..87f7fdb 100644
94764--- a/net/netfilter/nf_conntrack_timestamp.c
94765+++ b/net/netfilter/nf_conntrack_timestamp.c
94766@@ -42,7 +42,7 @@ static struct nf_ct_ext_type tstamp_extend __read_mostly = {
94767 #ifdef CONFIG_SYSCTL
94768 static int nf_conntrack_tstamp_init_sysctl(struct net *net)
94769 {
94770- struct ctl_table *table;
94771+ ctl_table_no_const *table;
94772
94773 table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
94774 GFP_KERNEL);
94775diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
94776index 3b18dd1..f79e0ca 100644
94777--- a/net/netfilter/nf_log.c
94778+++ b/net/netfilter/nf_log.c
94779@@ -243,7 +243,7 @@ static const struct file_operations nflog_file_ops = {
94780
94781 #ifdef CONFIG_SYSCTL
94782 static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
94783-static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
94784+static ctl_table_no_const nf_log_sysctl_table[NFPROTO_NUMPROTO+1] __read_only;
94785
94786 static int nf_log_proc_dostring(ctl_table *table, int write,
94787 void __user *buffer, size_t *lenp, loff_t *ppos)
94788@@ -274,14 +274,16 @@ static int nf_log_proc_dostring(ctl_table *table, int write,
94789 rcu_assign_pointer(net->nf.nf_loggers[tindex], logger);
94790 mutex_unlock(&nf_log_mutex);
94791 } else {
94792+ ctl_table_no_const nf_log_table = *table;
94793+
94794 mutex_lock(&nf_log_mutex);
94795 logger = rcu_dereference_protected(net->nf.nf_loggers[tindex],
94796 lockdep_is_held(&nf_log_mutex));
94797 if (!logger)
94798- table->data = "NONE";
94799+ nf_log_table.data = "NONE";
94800 else
94801- table->data = logger->name;
94802- r = proc_dostring(table, write, buffer, lenp, ppos);
94803+ nf_log_table.data = logger->name;
94804+ r = proc_dostring(&nf_log_table, write, buffer, lenp, ppos);
94805 mutex_unlock(&nf_log_mutex);
94806 }
94807
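The nf_log_proc_dostring hunk above stops temporarily rewriting a shared, now read-only, table entry; the handler works on a stack copy instead. A hedged sketch of the copy-then-mutate pattern (struct names are stand-ins, not the kernel's ctl_table):

#include <stdio.h>

struct table { const char *name; const char *data; };

static void show(const struct table *t)
{
	printf("%s = %s\n", t->name, t->data);
}

static void report(const struct table *shared, const char *logger_name)
{
	struct table local = *shared;       /* private, mutable copy */

	local.data = logger_name ? logger_name : "NONE";
	show(&local);                       /* shared table never modified */
}

int main(void)
{
	static const struct table nf_log = { "nf_log.2", NULL };

	report(&nf_log, "nfnetlink_log");
	report(&nf_log, NULL);
	return 0;
}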
94808diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
94809index f042ae5..30ea486 100644
94810--- a/net/netfilter/nf_sockopt.c
94811+++ b/net/netfilter/nf_sockopt.c
94812@@ -45,7 +45,7 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg)
94813 }
94814 }
94815
94816- list_add(&reg->list, &nf_sockopts);
94817+ pax_list_add((struct list_head *)&reg->list, &nf_sockopts);
94818 out:
94819 mutex_unlock(&nf_sockopt_mutex);
94820 return ret;
94821@@ -55,7 +55,7 @@ EXPORT_SYMBOL(nf_register_sockopt);
94822 void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
94823 {
94824 mutex_lock(&nf_sockopt_mutex);
94825- list_del(&reg->list);
94826+ pax_list_del((struct list_head *)&reg->list);
94827 mutex_unlock(&nf_sockopt_mutex);
94828 }
94829 EXPORT_SYMBOL(nf_unregister_sockopt);
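The pax_list_add()/pax_list_del() calls above exist because the registration list node lives in data that PaX maps read-only; writes must go through a helper that briefly opens a write window. A hedged userspace analogue (mprotect() models pax_open_kernel()/pax_close_kernel(); error handling is omitted for brevity):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static void write_protected(char *page, size_t len, const char *msg)
{
	mprotect(page, len, PROT_READ | PROT_WRITE);  /* pax_open_kernel() */
	strcpy(page, msg);
	mprotect(page, len, PROT_READ);               /* pax_close_kernel() */
}

int main(void)
{
	size_t len = (size_t)sysconf(_SC_PAGESIZE);
	char *page = mmap(NULL, len, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	strcpy(page, "registered");
	mprotect(page, len, PROT_READ);     /* the list now lives in RO memory */
	write_protected(page, len, "re-registered");
	printf("%s\n", page);
	munmap(page, len);
	return 0;
}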
94830diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
94831index 962e979..e46f350 100644
94832--- a/net/netfilter/nfnetlink_log.c
94833+++ b/net/netfilter/nfnetlink_log.c
94834@@ -82,7 +82,7 @@ static int nfnl_log_net_id __read_mostly;
94835 struct nfnl_log_net {
94836 spinlock_t instances_lock;
94837 struct hlist_head instance_table[INSTANCE_BUCKETS];
94838- atomic_t global_seq;
94839+ atomic_unchecked_t global_seq;
94840 };
94841
94842 static struct nfnl_log_net *nfnl_log_pernet(struct net *net)
94843@@ -419,6 +419,7 @@ __build_packet_message(struct nfnl_log_net *log,
94844 nfmsg->version = NFNETLINK_V0;
94845 nfmsg->res_id = htons(inst->group_num);
94846
94847+ memset(&pmsg, 0, sizeof(pmsg));
94848 pmsg.hw_protocol = skb->protocol;
94849 pmsg.hook = hooknum;
94850
94851@@ -498,7 +499,10 @@ __build_packet_message(struct nfnl_log_net *log,
94852 if (indev && skb->dev &&
94853 skb->mac_header != skb->network_header) {
94854 struct nfulnl_msg_packet_hw phw;
94855- int len = dev_parse_header(skb, phw.hw_addr);
94856+ int len;
94857+
94858+ memset(&phw, 0, sizeof(phw));
94859+ len = dev_parse_header(skb, phw.hw_addr);
94860 if (len > 0) {
94861 phw.hw_addrlen = htons(len);
94862 if (nla_put(inst->skb, NFULA_HWADDR, sizeof(phw), &phw))
94863@@ -559,7 +563,7 @@ __build_packet_message(struct nfnl_log_net *log,
94864 /* global sequence number */
94865 if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
94866 nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
94867- htonl(atomic_inc_return(&log->global_seq))))
94868+ htonl(atomic_inc_return_unchecked(&log->global_seq))))
94869 goto nla_put_failure;
94870
94871 if (data_len) {
94872diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
94873index 5352b2d..e0083ce 100644
94874--- a/net/netfilter/nfnetlink_queue_core.c
94875+++ b/net/netfilter/nfnetlink_queue_core.c
94876@@ -444,7 +444,10 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
94877 if (indev && entskb->dev &&
94878 entskb->mac_header != entskb->network_header) {
94879 struct nfqnl_msg_packet_hw phw;
94880- int len = dev_parse_header(entskb, phw.hw_addr);
94881+ int len;
94882+
94883+ memset(&phw, 0, sizeof(phw));
94884+ len = dev_parse_header(entskb, phw.hw_addr);
94885 if (len) {
94886 phw.hw_addrlen = htons(len);
94887 if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw))
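The memset() additions in nfnetlink_log and nfnetlink_queue above zero a stack struct before it is partially filled and copied out over netlink, so unwritten padding and tail bytes cannot leak stack contents. A hedged userspace sketch (field sizes mirror nfulnl_msg_packet_hw only loosely):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct packet_hw {
	uint16_t hw_addrlen;
	uint16_t _pad;
	uint8_t  hw_addr[8];
};

int main(void)
{
	struct packet_hw phw;

	memset(&phw, 0, sizeof(phw));       /* the added hardening */
	phw.hw_addrlen = 6;
	memcpy(phw.hw_addr, "\x52\x54\x00\x12\x34\x56", 6);

	/* hw_addr[6..7] and _pad are provably zero, not leftover stack bytes */
	printf("tail=%02x%02x pad=%04x\n",
	       phw.hw_addr[6], phw.hw_addr[7], phw._pad);
	return 0;
}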
94888diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
94889index 7011c71..6113cc7 100644
94890--- a/net/netfilter/xt_TCPMSS.c
94891+++ b/net/netfilter/xt_TCPMSS.c
94892@@ -52,7 +52,8 @@ tcpmss_mangle_packet(struct sk_buff *skb,
94893 {
94894 const struct xt_tcpmss_info *info = par->targinfo;
94895 struct tcphdr *tcph;
94896- unsigned int tcplen, i;
94897+ int len, tcp_hdrlen;
94898+ unsigned int i;
94899 __be16 oldval;
94900 u16 newmss;
94901 u8 *opt;
94902@@ -64,11 +65,14 @@ tcpmss_mangle_packet(struct sk_buff *skb,
94903 if (!skb_make_writable(skb, skb->len))
94904 return -1;
94905
94906- tcplen = skb->len - tcphoff;
94907+ len = skb->len - tcphoff;
94908+ if (len < (int)sizeof(struct tcphdr))
94909+ return -1;
94910+
94911 tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
94912+ tcp_hdrlen = tcph->doff * 4;
94913
94914- /* Header cannot be larger than the packet */
94915- if (tcplen < tcph->doff*4)
94916+ if (len < tcp_hdrlen)
94917 return -1;
94918
94919 if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
94920@@ -87,9 +91,8 @@ tcpmss_mangle_packet(struct sk_buff *skb,
94921 newmss = info->mss;
94922
94923 opt = (u_int8_t *)tcph;
94924- for (i = sizeof(struct tcphdr); i < tcph->doff*4; i += optlen(opt, i)) {
94925- if (opt[i] == TCPOPT_MSS && tcph->doff*4 - i >= TCPOLEN_MSS &&
94926- opt[i+1] == TCPOLEN_MSS) {
94927+ for (i = sizeof(struct tcphdr); i <= tcp_hdrlen - TCPOLEN_MSS; i += optlen(opt, i)) {
94928+ if (opt[i] == TCPOPT_MSS && opt[i+1] == TCPOLEN_MSS) {
94929 u_int16_t oldmss;
94930
94931 oldmss = (opt[i+2] << 8) | opt[i+3];
94932@@ -112,9 +115,10 @@ tcpmss_mangle_packet(struct sk_buff *skb,
94933 }
94934
94935 /* There is data after the header so the option can't be added
94936- without moving it, and doing so may make the SYN packet
94937- itself too large. Accept the packet unmodified instead. */
94938- if (tcplen > tcph->doff*4)
94939+ * without moving it, and doing so may make the SYN packet
94940+ * itself too large. Accept the packet unmodified instead.
94941+ */
94942+ if (len > tcp_hdrlen)
94943 return 0;
94944
94945 /*
94946@@ -143,10 +147,10 @@ tcpmss_mangle_packet(struct sk_buff *skb,
94947 newmss = min(newmss, (u16)1220);
94948
94949 opt = (u_int8_t *)tcph + sizeof(struct tcphdr);
94950- memmove(opt + TCPOLEN_MSS, opt, tcplen - sizeof(struct tcphdr));
94951+ memmove(opt + TCPOLEN_MSS, opt, len - sizeof(struct tcphdr));
94952
94953 inet_proto_csum_replace2(&tcph->check, skb,
94954- htons(tcplen), htons(tcplen + TCPOLEN_MSS), 1);
94955+ htons(len), htons(len + TCPOLEN_MSS), 1);
94956 opt[0] = TCPOPT_MSS;
94957 opt[1] = TCPOLEN_MSS;
94958 opt[2] = (newmss & 0xff00) >> 8;
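The xt_TCPMSS hunks above make three related fixes: lengths go signed so the subtraction cannot wrap, the claimed header length is validated before use, and the MSS scan stops TCPOLEN_MSS bytes before the end so the 4-byte option read never overruns. A hedged sketch of the hardened option walk (packet bytes are made up; optlen() is approximated):

#include <stdio.h>
#include <string.h>

#define TCPOPT_MSS  2
#define TCPOLEN_MSS 4

static int find_mss(const unsigned char *tcph, int len)
{
	int tcp_hdrlen, i;

	if (len < 20)                       /* sizeof(struct tcphdr) */
		return -1;
	tcp_hdrlen = (tcph[12] >> 4) * 4;   /* doff field */
	if (len < tcp_hdrlen)
		return -1;

	for (i = 20; i <= tcp_hdrlen - TCPOLEN_MSS; ) {
		if (tcph[i] == TCPOPT_MSS && tcph[i + 1] == TCPOLEN_MSS)
			return (tcph[i + 2] << 8) | tcph[i + 3];
		if (tcph[i] < 2)                /* EOL/NOP: single byte */
			i += 1;
		else if (tcph[i + 1] < 2)       /* malformed length: stop */
			break;
		else
			i += tcph[i + 1];       /* optlen() analogue */
	}
	return 0;
}

int main(void)
{
	unsigned char pkt[24] = { 0 };

	pkt[12] = 6 << 4;                           /* doff = 6: 24-byte header */
	memcpy(&pkt[20], "\x02\x04\x05\xb4", 4);    /* MSS option = 1460 */
	printf("mss=%d\n", find_mss(pkt, sizeof(pkt)));
	return 0;
}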
94959diff --git a/net/netfilter/xt_TCPOPTSTRIP.c b/net/netfilter/xt_TCPOPTSTRIP.c
94960index b68fa19..625fa1d 100644
94961--- a/net/netfilter/xt_TCPOPTSTRIP.c
94962+++ b/net/netfilter/xt_TCPOPTSTRIP.c
94963@@ -38,7 +38,7 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
94964 struct tcphdr *tcph;
94965 u_int16_t n, o;
94966 u_int8_t *opt;
94967- int len;
94968+ int len, tcp_hdrlen;
94969
94970 /* This is a fragment, no TCP header is available */
94971 if (par->fragoff != 0)
94972@@ -52,7 +52,9 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
94973 return NF_DROP;
94974
94975 tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
94976- if (tcph->doff * 4 > len)
94977+ tcp_hdrlen = tcph->doff * 4;
94978+
94979+ if (len < tcp_hdrlen)
94980 return NF_DROP;
94981
94982 opt = (u_int8_t *)tcph;
94983@@ -61,10 +63,10 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
94984 * Walk through all TCP options - if we find some option to remove,
94985 * set all octets to %TCPOPT_NOP and adjust checksum.
94986 */
94987- for (i = sizeof(struct tcphdr); i < tcp_hdrlen(skb); i += optl) {
94988+ for (i = sizeof(struct tcphdr); i < tcp_hdrlen - 1; i += optl) {
94989 optl = optlen(opt, i);
94990
94991- if (i + optl > tcp_hdrlen(skb))
94992+ if (i + optl > tcp_hdrlen)
94993 break;
94994
94995 if (!tcpoptstrip_test_bit(info->strip_bmap, opt[i]))
94996diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
94997new file mode 100644
94998index 0000000..c566332
94999--- /dev/null
95000+++ b/net/netfilter/xt_gradm.c
95001@@ -0,0 +1,51 @@
95002+/*
95003+ * gradm match for netfilter
95004