diff --git a/Documentation/dontdiff b/Documentation/dontdiff
index b89a739..79768fb 100644
--- a/Documentation/dontdiff
+++ b/Documentation/dontdiff
@@ -2,9 +2,11 @@
 *.aux
 *.bin
 *.bz2
+*.c.[012]*.*
 *.cis
 *.cpio
 *.csp
+*.dbg
 *.dsp
 *.dvi
 *.elf
@@ -14,6 +16,7 @@
 *.gcov
 *.gen.S
 *.gif
+*.gmo
 *.grep
 *.grp
 *.gz
@@ -48,14 +51,17 @@
 *.tab.h
 *.tex
 *.ver
+*.vim
 *.xml
 *.xz
 *_MODULES
+*_reg_safe.h
 *_vga16.c
 *~
 \#*#
 *.9
-.*
+.[^g]*
+.gen*
 .*.d
 .mm
 53c700_d.h
@@ -69,9 +75,11 @@ Image
 Module.markers
 Module.symvers
 PENDING
+PERF*
 SCCS
 System.map*
 TAGS
+TRACEEVENT-CFLAGS
 aconf
 af_names.h
 aic7*reg.h*
@@ -80,6 +88,7 @@ aic7*seq.h*
 aicasm
 aicdb.h*
 altivec*.c
+ashldi3.S
 asm-offsets.h
 asm_offsets.h
 autoconf.h*
@@ -92,19 +101,24 @@ bounds.h
 bsetup
 btfixupprep
 build
+builtin-policy.h
 bvmlinux
 bzImage*
 capability_names.h
 capflags.c
 classlist.h*
+clut_vga16.c
+common-cmds.h
 comp*.log
 compile.h*
 conf
 config
 config-*
 config_data.h*
+config.c
 config.mak
 config.mak.autogen
+config.tmp
 conmakehash
 consolemap_deftbl.c*
 cpustr.h
@@ -115,9 +129,11 @@ devlist.h*
 dnotify_test
 docproc
 dslm
+dtc-lexer.lex.c
 elf2ecoff
 elfconfig.h*
 evergreen_reg_safe.h
+exception_policy.conf
 fixdep
 flask.h
 fore200e_mkfirm
@@ -125,12 +141,15 @@ fore200e_pca_fw.c*
 gconf
 gconf.glade.h
 gen-devlist
+gen-kdb_cmds.c
 gen_crc32table
 gen_init_cpio
 generated
 genheaders
 genksyms
 *_gray256.c
+hash
+hid-example
 hpet_example
 hugepage-mmap
 hugepage-shm
@@ -145,14 +164,14 @@ int32.c
 int4.c
 int8.c
 kallsyms
-kconfig
+kern_constants.h
 keywords.c
 ksym.c*
 ksym.h*
 kxgettext
 lex.c
 lex.*.c
-linux
+lib1funcs.S
 logo_*.c
 logo_*_clut224.c
 logo_*_mono.c
@@ -162,14 +181,15 @@ mach-types.h
 machtypes.h
 map
 map_hugetlb
-media
 mconf
+mdp
 miboot*
 mk_elfconfig
 mkboot
 mkbugboot
 mkcpustr
 mkdep
+mkpiggy
 mkprep
 mkregtable
 mktables
@@ -185,6 +205,8 @@ oui.c*
 page-types
 parse.c
 parse.h
+parse-events*
+pasyms.h
 patches*
 pca200e.bin
 pca200e_ecd.bin2
@@ -194,6 +216,7 @@ perf-archive
 piggyback
 piggy.gzip
 piggy.S
+pmu-*
 pnmtologo
 ppc_defs.h*
 pss_boot.h
@@ -203,7 +226,10 @@ r200_reg_safe.h
 r300_reg_safe.h
 r420_reg_safe.h
 r600_reg_safe.h
+realmode.lds
+realmode.relocs
 recordmcount
+regdb.c
 relocs
 rlim_names.h
 rn50_reg_safe.h
@@ -213,8 +239,12 @@ series
 setup
 setup.bin
 setup.elf
+signing_key*
+size_overflow_hash.h
 sImage
+slabinfo
 sm_tbl*
+sortextable
 split-include
 syscalltab.h
 tables.c
@@ -224,6 +254,7 @@ tftpboot.img
 timeconst.h
 times.h*
 trix_boot.h
+user_constants.h
 utsrelease.h*
 vdso-syms.lds
 vdso.lds
@@ -235,13 +266,17 @@ vdso32.lds
 vdso32.so.dbg
 vdso64.lds
 vdso64.so.dbg
+vdsox32.lds
+vdsox32-syms.lds
 version.h*
 vmImage
 vmlinux
 vmlinux-*
 vmlinux.aout
 vmlinux.bin.all
+vmlinux.bin.bz2
 vmlinux.lds
+vmlinux.relocs
 vmlinuz
 voffset.h
 vsyscall.lds
@@ -249,9 +284,12 @@ vsyscall_32.lds
 wanxlfw.inc
 uImage
 unifdef
+utsrelease.h
 wakeup.bin
 wakeup.elf
 wakeup.lds
+x509*
 zImage*
 zconf.hash.c
+zconf.lex.c
 zoffset.h
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 2fe6e76..889ee23 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -976,6 +976,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
 Default: 1024

+ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
+ ignore grsecurity's /proc restrictions
+
+
 hashdist= [KNL,NUMA] Large hashes allocated during boot
 are distributed across NUMA nodes. Defaults on
 for 64-bit NUMA, off otherwise.
@@ -1928,6 +1932,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 noexec=on: enable non-executable mappings (default)
 noexec=off: disable non-executable mappings

+ nopcid [X86-64]
+ Disable PCID (Process-Context IDentifier) even if it
+ is supported by the processor.
+
 nosmap [X86]
 Disable SMAP (Supervisor Mode Access Prevention)
 even if it is supported by processor.
@@ -2195,6 +2203,25 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 the specified number of seconds. This is to be used if
 your oopses keep scrolling off the screen.

+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
+ virtualization environments that don't cope well with the
+ expand down segment used by UDEREF on X86-32 or the frequent
+ page table updates on X86-64.
+
+ pax_sanitize_slab=
+ 0/1 to disable/enable slab object sanitization (enabled by
+ default).
+
+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
+
+ pax_extra_latent_entropy
+ Enable a very simple form of latent entropy extraction
+ from the first 4GB of memory as the bootmem allocator
+ passes the memory pages to the buddy allocator.
+
+ pax_weakuderef [X86-64] enables the weaker but faster form of UDEREF
+ when the processor supports PCID.
+
 pcbit= [HW,ISDN]

 pcd. [PARIDE]
diff --git a/Makefile b/Makefile
index 33e36ab..31f1dc8 100644
--- a/Makefile
+++ b/Makefile
@@ -241,8 +241,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \

 HOSTCC = gcc
 HOSTCXX = g++
-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
-HOSTCXXFLAGS = -O2
+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
+HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks

 # Decide whether to build built-in, modular, or both.
 # Normally, just do built-in.
@@ -414,8 +415,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
 # Rules shared between *config targets and build targets

 # Basic helpers built in scripts/
-PHONY += scripts_basic
-scripts_basic:
+PHONY += scripts_basic gcc-plugins
+scripts_basic: gcc-plugins
 $(Q)$(MAKE) $(build)=scripts/basic
 $(Q)rm -f .tmp_quiet_recordmcount

@@ -576,6 +577,65 @@ else
 KBUILD_CFLAGS += -O2
 endif

+ifndef DISABLE_PAX_PLUGINS
+ifeq ($(call cc-ifversion, -ge, 0408, y), y)
+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
+else
+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
+endif
+ifneq ($(PLUGINCC),)
+ifdef CONFIG_PAX_CONSTIFY_PLUGIN
+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
+endif
+ifdef CONFIG_PAX_MEMORY_STACKLEAK
+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
+endif
+ifdef CONFIG_KALLOCSTAT_PLUGIN
+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
+endif
+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
+endif
+ifdef CONFIG_CHECKER_PLUGIN
+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
+endif
+endif
+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
+ifdef CONFIG_PAX_SIZE_OVERFLOW
+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
+endif
+ifdef CONFIG_PAX_LATENT_ENTROPY
+LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
+endif
+ifdef CONFIG_PAX_MEMORY_STRUCTLEAK
+STRUCTLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/structleak_plugin.so -DSTRUCTLEAK_PLUGIN
+endif
+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
+GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS) $(STRUCTLEAK_PLUGIN_CFLAGS)
+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
+export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN
+ifeq ($(KBUILD_EXTMOD),)
+gcc-plugins:
+ $(Q)$(MAKE) $(build)=tools/gcc
+else
+gcc-plugins: ;
+endif
+else
+gcc-plugins:
+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
+else
+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
+endif
+ $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure. PAX_SIZE_OVERFLOW will not be active."
+endif
+endif
+
 include $(srctree)/arch/$(SRCARCH)/Makefile

 ifdef CONFIG_READABLE_ASM
@@ -733,7 +793,7 @@ export mod_sign_cmd


 ifeq ($(KBUILD_EXTMOD),)
-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/

 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
@@ -782,6 +842,8 @@ endif

 # The actual objects are generated when descending,
 # make sure no implicit rule kicks in
+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;

 # Handle descending into subdirectories listed in $(vmlinux-dirs)
@@ -791,7 +853,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
 # Error messages still appears in the original language

 PHONY += $(vmlinux-dirs)
-$(vmlinux-dirs): prepare scripts
+$(vmlinux-dirs): gcc-plugins prepare scripts
 $(Q)$(MAKE) $(build)=$@

 # Store (new) KERNELRELASE string in include/config/kernel.release
@@ -835,6 +897,7 @@ prepare0: archprepare FORCE
 $(Q)$(MAKE) $(build)=.

 # All the preparing..
+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
 prepare: prepare0

 # Generate some files
@@ -942,6 +1005,8 @@ all: modules
 # using awk while concatenating to the final file.

 PHONY += modules
+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
 @$(kecho) ' Building modules, stage 2.';
@@ -957,7 +1022,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)

 # Target to prepare building external modules
 PHONY += modules_prepare
-modules_prepare: prepare scripts
+modules_prepare: gcc-plugins prepare scripts

 # Target to install modules
 PHONY += modules_install
@@ -1023,7 +1088,7 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
 signing_key.priv signing_key.x509 x509.genkey \
 extra_certificates signing_key.x509.keyid \
- signing_key.x509.signer
+ signing_key.x509.signer tools/gcc/size_overflow_hash.h

 # clean - Delete most, but leave enough to build external modules
 #
@@ -1063,6 +1128,7 @@ distclean: mrproper
 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
 -o -name '.*.rej' \
+ -o -name '.*.rej' -o -name '*.so' \
 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
 -type f -print | xargs rm -f

@@ -1223,6 +1289,8 @@ PHONY += $(module-dirs) modules
 $(module-dirs): crmodverdir $(objtree)/Module.symvers
 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)

+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
 modules: $(module-dirs)
 @$(kecho) ' Building modules, stage 2.';
 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
@@ -1359,17 +1427,21 @@ else
 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
 endif

-%.s: %.c prepare scripts FORCE
+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+%.s: %.c gcc-plugins prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
 %.i: %.c prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-%.o: %.c prepare scripts FORCE
+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+%.o: %.c gcc-plugins prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
 %.lst: %.c prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-%.s: %.S prepare scripts FORCE
+%.s: %.S gcc-plugins prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-%.o: %.S prepare scripts FORCE
+%.o: %.S gcc-plugins prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
 %.symtypes: %.c prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
@@ -1379,11 +1451,15 @@ endif
 $(cmd_crmodverdir)
 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
 $(build)=$(build-dir)
-%/: prepare scripts FORCE
+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+%/: gcc-plugins prepare scripts FORCE
 $(cmd_crmodverdir)
 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
 $(build)=$(build-dir)
-%.ko: prepare scripts FORCE
+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+%.ko: gcc-plugins prepare scripts FORCE
 $(cmd_crmodverdir)
 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
 $(build)=$(build-dir) $(@:.ko=.o)
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index c2cbe4f..f7264b4 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -250,6 +250,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
 #define atomic_dec(v) atomic_sub(1,(v))
 #define atomic64_dec(v) atomic64_sub(1,(v))

+#define atomic64_read_unchecked(v) atomic64_read(v)
+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
+#define atomic64_inc_unchecked(v) atomic64_inc(v)
+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
+#define atomic64_dec_unchecked(v) atomic64_dec(v)
+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
+
 #define smp_mb__before_atomic_dec() smp_mb()
 #define smp_mb__after_atomic_dec() smp_mb()
 #define smp_mb__before_atomic_inc() smp_mb()
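
The hunk above is the pattern this patch repeats across architectures: under CONFIG_PAX_REFCOUNT the regular atomic ops trap on signed overflow, while a parallel *_unchecked family opts out for counters that may wrap harmlessly. On alpha no overflow instrumentation is added, so the 64-bit unchecked ops are plain aliases. A minimal sketch of the intended division of labour (hypothetical counters, not from the patch; atomic_unchecked_t comes from the PaX changes to the generic atomic headers):

/* Sketch: 'refs' must never wrap, so it uses the checked ops;
 * 'events' may wrap harmlessly, so it opts out via _unchecked. */
#include <linux/atomic.h>

static atomic_t refs = ATOMIC_INIT(1);             /* overflow == bug */
static atomic_unchecked_t events = ATOMIC_INIT(0); /* wrap is benign  */

static void note_event_and_get(void)
{
        atomic_inc(&refs);             /* traps on overflow with REFCOUNT */
        atomic_inc_unchecked(&events); /* never instrumented */
}
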
diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
index ad368a9..fbe0f25 100644
--- a/arch/alpha/include/asm/cache.h
+++ b/arch/alpha/include/asm/cache.h
@@ -4,19 +4,19 @@
 #ifndef __ARCH_ALPHA_CACHE_H
 #define __ARCH_ALPHA_CACHE_H

+#include <linux/const.h>

 /* Bytes per L1 (data) cache line. */
 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
-# define L1_CACHE_BYTES 64
 # define L1_CACHE_SHIFT 6
 #else
 /* Both EV4 and EV5 are write-through, read-allocate,
 direct-mapped, physical.
 */
-# define L1_CACHE_BYTES 32
 # define L1_CACHE_SHIFT 5
 #endif

+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
 #define SMP_CACHE_BYTES L1_CACHE_BYTES

 #endif
diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
index 968d999..d36b2df 100644
--- a/arch/alpha/include/asm/elf.h
+++ b/arch/alpha/include/asm/elf.h
@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];

 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)

+#ifdef CONFIG_PAX_ASLR
+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
+
+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
+#endif
+
 /* $0 is set by ld.so to a pointer to a function which might be
 registered using atexit. This provides a mean for the dynamic
 linker to call DT_FINI functions for shared libraries that have
diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
index bc2a0da..8ad11ee 100644
--- a/arch/alpha/include/asm/pgalloc.h
+++ b/arch/alpha/include/asm/pgalloc.h
@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
 pgd_set(pgd, pmd);
 }

+static inline void
+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
+{
+ pgd_populate(mm, pgd, pmd);
+}
+
 extern pgd_t *pgd_alloc(struct mm_struct *mm);

 static inline void
diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
index 81a4342..348b927 100644
--- a/arch/alpha/include/asm/pgtable.h
+++ b/arch/alpha/include/asm/pgtable.h
@@ -102,6 +102,17 @@ struct vm_area_struct;
 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
+
+#ifdef CONFIG_PAX_PAGEEXEC
+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
+#else
+# define PAGE_SHARED_NOEXEC PAGE_SHARED
+# define PAGE_COPY_NOEXEC PAGE_COPY
+# define PAGE_READONLY_NOEXEC PAGE_READONLY
+#endif
+
 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)

 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
index 2fd00b7..cfd5069 100644
--- a/arch/alpha/kernel/module.c
+++ b/arch/alpha/kernel/module.c
@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,

 /* The small sections were sorted to the end of the segment.
 The following should definitely cover them. */
- gp = (u64)me->module_core + me->core_size - 0x8000;
+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
 got = sechdrs[me->arch.gotsecindex].sh_addr;

 for (i = 0; i < n; i++) {
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index b9e37ad..44c24e7 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -1297,10 +1297,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
 generic version except that we know how to honor ADDR_LIMIT_32BIT. */

 static unsigned long
-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
- unsigned long limit)
+arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
+ unsigned long limit, unsigned long flags)
 {
 struct vm_unmapped_area_info info;
+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);

 info.flags = 0;
 info.length = len;
@@ -1308,6 +1309,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
 info.high_limit = limit;
 info.align_mask = 0;
 info.align_offset = 0;
+ info.threadstack_offset = offset;
 return vm_unmapped_area(&info);
 }

@@ -1340,20 +1342,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 merely specific addresses, but regions of memory -- perhaps
 this feature should be incorporated into all ports? */

+#ifdef CONFIG_PAX_RANDMMAP
+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
 if (addr) {
- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
 if (addr != (unsigned long) -ENOMEM)
 return addr;
 }

 /* Next, try allocating at TASK_UNMAPPED_BASE. */
- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
- len, limit);
+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
+
 if (addr != (unsigned long) -ENOMEM)
 return addr;

 /* Finally, try allocating in low memory. */
- addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
+ addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);

 return addr;
 }
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index 0c4132d..88f0d53 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
 __reload_thread(pcb);
 }

+#ifdef CONFIG_PAX_PAGEEXEC
+/*
+ * PaX: decide what to do with offenders (regs->pc = fault address)
+ *
+ * returns 1 when task should be killed
+ * 2 when patched PLT trampoline was detected
+ * 3 when unpatched PLT trampoline was detected
+ */
+static int pax_handle_fetch_fault(struct pt_regs *regs)
+{
+
+#ifdef CONFIG_PAX_EMUPLT
+ int err;
+
+ do { /* PaX: patched PLT emulation #1 */
+ unsigned int ldah, ldq, jmp;
+
+ err = get_user(ldah, (unsigned int *)regs->pc);
+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
+
+ if (err)
+ break;
+
+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
+ jmp == 0x6BFB0000U)
+ {
+ unsigned long r27, addr;
+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
+
+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
+ err = get_user(r27, (unsigned long *)addr);
+ if (err)
+ break;
+
+ regs->r27 = r27;
+ regs->pc = r27;
+ return 2;
+ }
+ } while (0);
+
+ do { /* PaX: patched PLT emulation #2 */
+ unsigned int ldah, lda, br;
+
+ err = get_user(ldah, (unsigned int *)regs->pc);
+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
+ err |= get_user(br, (unsigned int *)(regs->pc+8));
+
+ if (err)
+ break;
+
+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
+ (br & 0xFFE00000U) == 0xC3E00000U)
+ {
+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
+
+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
+ return 2;
+ }
+ } while (0);
+
+ do { /* PaX: unpatched PLT emulation */
+ unsigned int br;
+
+ err = get_user(br, (unsigned int *)regs->pc);
+
+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
+ unsigned int br2, ldq, nop, jmp;
+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
+
+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
+ err = get_user(br2, (unsigned int *)addr);
+ err |= get_user(ldq, (unsigned int *)(addr+4));
+ err |= get_user(nop, (unsigned int *)(addr+8));
+ err |= get_user(jmp, (unsigned int *)(addr+12));
+ err |= get_user(resolver, (unsigned long *)(addr+16));
+
+ if (err)
+ break;
+
+ if (br2 == 0xC3600000U &&
+ ldq == 0xA77B000CU &&
+ nop == 0x47FF041FU &&
+ jmp == 0x6B7B0000U)
+ {
+ regs->r28 = regs->pc+4;
+ regs->r27 = addr+16;
+ regs->pc = resolver;
+ return 3;
+ }
+ }
+ } while (0);
+#endif
+
+ return 1;
+}
+
+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
+{
+ unsigned long i;
+
+ printk(KERN_ERR "PAX: bytes at PC: ");
+ for (i = 0; i < 5; i++) {
+ unsigned int c;
+ if (get_user(c, (unsigned int *)pc+i))
+ printk(KERN_CONT "???????? ");
+ else
+ printk(KERN_CONT "%08x ", c);
+ }
+ printk("\n");
+}
+#endif

 /*
 * This routine handles page faults. It determines the address,
@@ -133,8 +251,29 @@ retry:
 good_area:
 si_code = SEGV_ACCERR;
 if (cause < 0) {
- if (!(vma->vm_flags & VM_EXEC))
+ if (!(vma->vm_flags & VM_EXEC)) {
+
+#ifdef CONFIG_PAX_PAGEEXEC
+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
+ goto bad_area;
+
+ up_read(&mm->mmap_sem);
+ switch (pax_handle_fetch_fault(regs)) {
+
+#ifdef CONFIG_PAX_EMUPLT
+ case 2:
+ case 3:
+ return;
+#endif
+
+ }
+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
+ do_group_exit(SIGKILL);
+#else
 goto bad_area;
+#endif
+
+ }
 } else if (!cause) {
 /* Allow reads even for write-only mappings */
 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
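
The decoder above leans on one idiom twice: pre-fill the upper bits of a displacement with ones, then sign-extend with an xor-and-add. A standalone sketch of the 16-bit case, mirroring the addrl computation (hypothetical helper, not in the patch):

/* (x ^ 0x8000) + 0x8000 either restores bit 15 (negative displacement)
 * or carries out and clears the pre-filled ones (positive displacement),
 * so the result equals (unsigned long)(long)(short)(insn & 0xFFFF). */
static unsigned long sext16_disp(unsigned int insn)
{
        unsigned long x = insn | 0xFFFFFFFFFFFF0000UL;
        return (x ^ 0x8000UL) + 0x8000UL;
}
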
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 18a9f5e..ca910b7 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1766,7 +1766,7 @@ config ALIGNMENT_TRAP

 config UACCESS_WITH_MEMCPY
 bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
- depends on MMU
+ depends on MMU && !PAX_MEMORY_UDEREF
 default y if CPU_FEROCEON
 help
 Implement faster copy_to_user and clear_user methods for CPU
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index da1c77d..2ee6056 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -17,17 +17,35 @@
 #include <asm/barrier.h>
 #include <asm/cmpxchg.h>

+#ifdef CONFIG_GENERIC_ATOMIC64
+#include <asm-generic/atomic64.h>
+#endif
+
 #define ATOMIC_INIT(i) { (i) }

 #ifdef __KERNEL__

+#define _ASM_EXTABLE(from, to) \
+" .pushsection __ex_table,\"a\"\n"\
+" .align 3\n" \
+" .long " #from ", " #to"\n" \
+" .popsection"
+
 /*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
 #define atomic_read(v) (*(volatile int *)&(v)->counter)
+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
+{
+ return v->counter;
+}
 #define atomic_set(v,i) (((v)->counter) = (i))
+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
+{
+ v->counter = i;
+}

 #if __LINUX_ARM_ARCH__ >= 6

@@ -42,6 +60,35 @@ static inline void atomic_add(int i, atomic_t *v)
 int result;

 __asm__ __volatile__("@ atomic_add\n"
+"1: ldrex %1, [%3]\n"
+" adds %0, %1, %4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
+" strex %1, %0, [%3]\n"
+" teq %1, #0\n"
+" bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "Ir" (i)
+ : "cc");
+}
+
+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
+{
+ unsigned long tmp;
+ int result;
+
+ __asm__ __volatile__("@ atomic_add_unchecked\n"
 "1: ldrex %0, [%3]\n"
 " add %0, %0, %4\n"
 " strex %1, %0, [%3]\n"
@@ -60,6 +107,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
 smp_mb();

 __asm__ __volatile__("@ atomic_add_return\n"
+"1: ldrex %1, [%3]\n"
+" adds %0, %1, %4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+" mov %0, %1\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
+" strex %1, %0, [%3]\n"
+" teq %1, #0\n"
+" bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "Ir" (i)
+ : "cc");
+
+ smp_mb();
+
+ return result;
+}
+
+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
+{
+ unsigned long tmp;
+ int result;
+
+ smp_mb();
+
+ __asm__ __volatile__("@ atomic_add_return_unchecked\n"
 "1: ldrex %0, [%3]\n"
 " add %0, %0, %4\n"
 " strex %1, %0, [%3]\n"
@@ -80,6 +163,35 @@ static inline void atomic_sub(int i, atomic_t *v)
 int result;

 __asm__ __volatile__("@ atomic_sub\n"
+"1: ldrex %1, [%3]\n"
+" subs %0, %1, %4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
+" strex %1, %0, [%3]\n"
+" teq %1, #0\n"
+" bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "Ir" (i)
+ : "cc");
+}
+
+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
+{
+ unsigned long tmp;
+ int result;
+
+ __asm__ __volatile__("@ atomic_sub_unchecked\n"
 "1: ldrex %0, [%3]\n"
 " sub %0, %0, %4\n"
 " strex %1, %0, [%3]\n"
@@ -98,11 +210,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 smp_mb();

 __asm__ __volatile__("@ atomic_sub_return\n"
-"1: ldrex %0, [%3]\n"
-" sub %0, %0, %4\n"
+"1: ldrex %1, [%3]\n"
+" subs %0, %1, %4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+" mov %0, %1\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
 " strex %1, %0, [%3]\n"
 " teq %1, #0\n"
 " bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 : "r" (&v->counter), "Ir" (i)
 : "cc");
@@ -134,6 +260,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 return oldval;
 }

+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
+{
+ unsigned long oldval, res;
+
+ smp_mb();
+
+ do {
+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
+ "ldrex %1, [%3]\n"
+ "mov %0, #0\n"
+ "teq %1, %4\n"
+ "strexeq %0, %5, [%3]\n"
+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
+ : "cc");
+ } while (res);
+
+ smp_mb();
+
+ return oldval;
+}
+
 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 {
 unsigned long tmp, tmp2;
@@ -167,7 +315,17 @@ static inline int atomic_add_return(int i, atomic_t *v)

 return val;
 }
+
+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
+{
+ return atomic_add_return(i, v);
+}
+
 #define atomic_add(i, v) (void) atomic_add_return(i, v)
+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
+{
+ (void) atomic_add_return(i, v);
+}

 static inline int atomic_sub_return(int i, atomic_t *v)
 {
@@ -182,6 +340,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 return val;
 }
 #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
+{
+ (void) atomic_sub_return(i, v);
+}

 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
@@ -197,6 +359,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 return ret;
 }

+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
+{
+ return atomic_cmpxchg(v, old, new);
+}
+
 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 {
 unsigned long flags;
@@ -209,6 +376,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 #endif /* __LINUX_ARM_ARCH__ */

 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
+{
+ return xchg(&v->counter, new);
+}

 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
@@ -221,11 +392,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 }

 #define atomic_inc(v) atomic_add(1, v)
+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
+{
+ atomic_add_unchecked(1, v);
+}
 #define atomic_dec(v) atomic_sub(1, v)
+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
+{
+ atomic_sub_unchecked(1, v);
+}

 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
+{
+ return atomic_add_return_unchecked(1, v) == 0;
+}
 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
 #define atomic_inc_return(v) (atomic_add_return(1, v))
+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
+{
+ return atomic_add_return_unchecked(1, v);
+}
 #define atomic_dec_return(v) (atomic_sub_return(1, v))
 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

@@ -241,6 +428,14 @@ typedef struct {
 u64 __aligned(8) counter;
 } atomic64_t;

+#ifdef CONFIG_PAX_REFCOUNT
+typedef struct {
+ u64 __aligned(8) counter;
+} atomic64_unchecked_t;
+#else
+typedef atomic64_t atomic64_unchecked_t;
+#endif
+
 #define ATOMIC64_INIT(i) { (i) }

 #ifdef CONFIG_ARM_LPAE
@@ -257,6 +452,19 @@ static inline u64 atomic64_read(const atomic64_t *v)
 return result;
 }

+static inline u64 atomic64_read_unchecked(const atomic64_unchecked_t *v)
+{
+ u64 result;
+
+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
+" ldrd %0, %H0, [%1]"
+ : "=&r" (result)
+ : "r" (&v->counter), "Qo" (v->counter)
+ );
+
+ return result;
+}
+
 static inline void atomic64_set(atomic64_t *v, u64 i)
 {
 __asm__ __volatile__("@ atomic64_set\n"
@@ -265,6 +473,15 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
 : "r" (&v->counter), "r" (i)
 );
 }
+
+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
+{
+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
+" strd %2, %H2, [%1]"
+ : "=Qo" (v->counter)
+ : "r" (&v->counter), "r" (i)
+ );
+}
 #else
 static inline u64 atomic64_read(const atomic64_t *v)
 {
@@ -279,6 +496,19 @@ static inline u64 atomic64_read(const atomic64_t *v)
 return result;
 }

+static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *v)
+{
+ u64 result;
+
+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
+" ldrexd %0, %H0, [%1]"
+ : "=&r" (result)
+ : "r" (&v->counter), "Qo" (v->counter)
+ );
+
+ return result;
+}
+
 static inline void atomic64_set(atomic64_t *v, u64 i)
 {
 u64 tmp;
@@ -292,6 +522,21 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
 : "r" (&v->counter), "r" (i)
 : "cc");
 }
+
+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
+{
+ u64 tmp;
+
+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
+"1: ldrexd %0, %H0, [%2]\n"
+" strexd %0, %3, %H3, [%2]\n"
+" teq %0, #0\n"
+" bne 1b"
+ : "=&r" (tmp), "=Qo" (v->counter)
+ : "r" (&v->counter), "r" (i)
+ : "cc");
+}
+
 #endif

 static inline void atomic64_add(u64 i, atomic64_t *v)
@@ -302,6 +547,36 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
 __asm__ __volatile__("@ atomic64_add\n"
 "1: ldrexd %0, %H0, [%3]\n"
 " adds %0, %0, %4\n"
+" adcs %H0, %H0, %H4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
+" strexd %1, %0, %H0, [%3]\n"
+" teq %1, #0\n"
+" bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "r" (i)
+ : "cc");
+}
+
+static inline void atomic64_add_unchecked(u64 i, atomic64_unchecked_t *v)
+{
+ u64 result;
+ unsigned long tmp;
+
+ __asm__ __volatile__("@ atomic64_add_unchecked\n"
+"1: ldrexd %0, %H0, [%3]\n"
+" adds %0, %0, %4\n"
 " adc %H0, %H0, %H4\n"
 " strexd %1, %0, %H0, [%3]\n"
 " teq %1, #0\n"
@@ -313,12 +588,49 @@ static inline void atomic64_add(u64 i, atomic64_t *v)

 static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
 {
- u64 result;
- unsigned long tmp;
+ u64 result, tmp;

 smp_mb();

 __asm__ __volatile__("@ atomic64_add_return\n"
+"1: ldrexd %1, %H1, [%3]\n"
+" adds %0, %1, %4\n"
+" adcs %H0, %H1, %H4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+" mov %0, %1\n"
+" mov %H0, %H1\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
+" strexd %1, %0, %H0, [%3]\n"
+" teq %1, #0\n"
+" bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "r" (i)
+ : "cc");
+
+ smp_mb();
+
+ return result;
+}
+
+static inline u64 atomic64_add_return_unchecked(u64 i, atomic64_unchecked_t *v)
+{
+ u64 result;
+ unsigned long tmp;
+
+ smp_mb();
+
+ __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
 "1: ldrexd %0, %H0, [%3]\n"
 " adds %0, %0, %4\n"
 " adc %H0, %H0, %H4\n"
@@ -342,6 +654,36 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
 __asm__ __volatile__("@ atomic64_sub\n"
 "1: ldrexd %0, %H0, [%3]\n"
 " subs %0, %0, %4\n"
+" sbcs %H0, %H0, %H4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
+" strexd %1, %0, %H0, [%3]\n"
+" teq %1, #0\n"
+" bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "r" (i)
+ : "cc");
+}
+
+static inline void atomic64_sub_unchecked(u64 i, atomic64_unchecked_t *v)
+{
+ u64 result;
+ unsigned long tmp;
+
+ __asm__ __volatile__("@ atomic64_sub_unchecked\n"
+"1: ldrexd %0, %H0, [%3]\n"
+" subs %0, %0, %4\n"
 " sbc %H0, %H0, %H4\n"
 " strexd %1, %0, %H0, [%3]\n"
 " teq %1, #0\n"
@@ -353,18 +695,32 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)

 static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
 {
- u64 result;
- unsigned long tmp;
+ u64 result, tmp;

 smp_mb();

 __asm__ __volatile__("@ atomic64_sub_return\n"
-"1: ldrexd %0, %H0, [%3]\n"
-" subs %0, %0, %4\n"
-" sbc %H0, %H0, %H4\n"
+"1: ldrexd %1, %H1, [%3]\n"
+" subs %0, %1, %4\n"
+" sbcs %H0, %H1, %H4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+" mov %0, %1\n"
+" mov %H0, %H1\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
 " strexd %1, %0, %H0, [%3]\n"
 " teq %1, #0\n"
 " bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 : "r" (&v->counter), "r" (i)
 : "cc");
@@ -398,6 +754,30 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
 return oldval;
 }

+static inline u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old, u64 new)
+{
+ u64 oldval;
+ unsigned long res;
+
+ smp_mb();
+
+ do {
+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
+ "ldrexd %1, %H1, [%3]\n"
+ "mov %0, #0\n"
+ "teq %1, %4\n"
+ "teqeq %H1, %H4\n"
+ "strexdeq %0, %5, %H5, [%3]"
+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
+ : "r" (&ptr->counter), "r" (old), "r" (new)
+ : "cc");
+ } while (res);
+
+ smp_mb();
+
+ return oldval;
+}
+
 static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
 {
 u64 result;
@@ -421,21 +801,34 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)

 static inline u64 atomic64_dec_if_positive(atomic64_t *v)
 {
- u64 result;
- unsigned long tmp;
+ u64 result, tmp;

 smp_mb();

 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
-"1: ldrexd %0, %H0, [%3]\n"
-" subs %0, %0, #1\n"
-" sbc %H0, %H0, #0\n"
+"1: ldrexd %1, %H1, [%3]\n"
+" subs %0, %1, #1\n"
+" sbcs %H0, %H1, #0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+" mov %0, %1\n"
+" mov %H0, %H1\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
 " teq %H0, #0\n"
-" bmi 2f\n"
+" bmi 4f\n"
 " strexd %1, %0, %H0, [%3]\n"
 " teq %1, #0\n"
 " bne 1b\n"
-"2:"
+"4:\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 : "r" (&v->counter)
 : "cc");
@@ -458,13 +851,25 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
 " teq %0, %5\n"
 " teqeq %H0, %H5\n"
 " moveq %1, #0\n"
-" beq 2f\n"
+" beq 4f\n"
 " adds %0, %0, %6\n"
-" adc %H0, %H0, %H6\n"
+" adcs %H0, %H0, %H6\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
 " strexd %2, %0, %H0, [%4]\n"
 " teq %2, #0\n"
 " bne 1b\n"
-"2:"
+"4:\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
 : "r" (&v->counter), "r" (u), "r" (a)
 : "cc");
@@ -477,10 +882,13 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)

 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
 #define atomic64_inc(v) atomic64_add(1LL, (v))
+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
 #define atomic64_dec(v) atomic64_sub(1LL, (v))
+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
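
Every ARM hunk above follows the same template: switch to the flag-setting adds/adcs/subs/sbcs forms, branch past a bkpt 0xf103 while the overflow flag is clear (bvc), and list the bkpt in the exception table so the trap resumes after the strex loop with the counter left unmodified. Roughly, each checked op behaves like this C sketch (illustrative only; it ignores the ldrex/strex atomicity the real code provides):

/* Sketch of the semantics of the instrumented atomic_add_return():
 * detect signed overflow before publishing the result and trap instead
 * of letting a reference count wrap.  Not atomic as written. */
#include <limits.h>

static int checked_add_return(int i, int *counter)
{
        int old = *counter;

        if ((i > 0 && old > INT_MAX - i) || (i < 0 && old < INT_MIN - i))
                __builtin_trap();       /* the asm raises bkpt 0xf103 here */

        *counter = old + i;
        return old + i;
}
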
diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
index 75fe66b..ba3dee4 100644
--- a/arch/arm/include/asm/cache.h
+++ b/arch/arm/include/asm/cache.h
@@ -4,8 +4,10 @@
 #ifndef __ASMARM_CACHE_H
 #define __ASMARM_CACHE_H

+#include <linux/const.h>
+
 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)

 /*
 * Memory returned by kmalloc() may be used for DMA, so we must make
@@ -24,5 +26,6 @@
 #endif

 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
+#define __read_only __attribute__ ((__section__(".data..read_only")))

 #endif
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 17d0ae8..014e350 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -116,7 +116,7 @@ struct cpu_cache_fns {
 void (*dma_unmap_area)(const void *, size_t, int);

 void (*dma_flush_range)(const void *, const void *);
-};
+} __no_const;

 /*
 * Select the calling method
diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
index 6dcc164..b14d917 100644
--- a/arch/arm/include/asm/checksum.h
+++ b/arch/arm/include/asm/checksum.h
@@ -37,7 +37,19 @@ __wsum
 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);

 __wsum
-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
+__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
+
+static inline __wsum
+csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
+{
+ __wsum ret;
+ pax_open_userland();
+ ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
+ pax_close_userland();
+ return ret;
+}
+
+

 /*
 * Fold a partial checksum without adding pseudo headers
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index 4f009c1..466c59b 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -102,6 +102,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size

 #define xchg(ptr,x) \
 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+#define xchg_unchecked(ptr,x) \
+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

 #include <asm-generic/cmpxchg-local.h>

diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
index 6ddbe44..b5e38b1 100644
--- a/arch/arm/include/asm/domain.h
+++ b/arch/arm/include/asm/domain.h
@@ -48,18 +48,37 @@
 * Domain types
 */
 #define DOMAIN_NOACCESS 0
-#define DOMAIN_CLIENT 1
 #ifdef CONFIG_CPU_USE_DOMAINS
+#define DOMAIN_USERCLIENT 1
+#define DOMAIN_KERNELCLIENT 1
 #define DOMAIN_MANAGER 3
+#define DOMAIN_VECTORS DOMAIN_USER
 #else
+
+#ifdef CONFIG_PAX_KERNEXEC
 #define DOMAIN_MANAGER 1
+#define DOMAIN_KERNEXEC 3
+#else
+#define DOMAIN_MANAGER 1
+#endif
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+#define DOMAIN_USERCLIENT 0
+#define DOMAIN_UDEREF 1
+#define DOMAIN_VECTORS DOMAIN_KERNEL
+#else
+#define DOMAIN_USERCLIENT 1
+#define DOMAIN_VECTORS DOMAIN_USER
+#endif
+#define DOMAIN_KERNELCLIENT 1
+
 #endif

 #define domain_val(dom,type) ((type) << (2*(dom)))

 #ifndef __ASSEMBLY__

-#ifdef CONFIG_CPU_USE_DOMAINS
+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
 static inline void set_domain(unsigned val)
 {
 asm volatile(
@@ -68,15 +87,7 @@ static inline void set_domain(unsigned val)
 isb();
 }

-#define modify_domain(dom,type) \
- do { \
- struct thread_info *thread = current_thread_info(); \
- unsigned int domain = thread->cpu_domain; \
- domain &= ~domain_val(dom, DOMAIN_MANAGER); \
- thread->cpu_domain = domain | domain_val(dom, type); \
- set_domain(thread->cpu_domain); \
- } while (0)
-
+extern void modify_domain(unsigned int dom, unsigned int type);
 #else
 static inline void set_domain(unsigned val) { }
 static inline void modify_domain(unsigned dom, unsigned type) { }
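
The constants above are 2-bit access types packed into the ARM Domain Access Control Register, two bits per domain, which is exactly what domain_val() expresses: 0 = no access, 1 = client (permissions checked), 3 = manager (permissions ignored). UDEREF keeps the user domain at no-access while the kernel runs and opens it only around user accessors. A self-contained sketch of the packing (the domain indices below are assumed for illustration; the real ones are defined earlier in this header):

#define domain_val(dom, type)   ((type) << (2 * (dom)))

enum { DOM_USER = 0, DOM_KERNEL = 1 };  /* assumed indices */

static unsigned long make_dacr(void)
{
        return domain_val(DOM_KERNEL, 1)    /* kernel: client */
             | domain_val(DOM_USER, 0);     /* user: no access */
}
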
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index 56211f2..17e8a25 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
 the loader. We need to make sure that it is out of the way of the program
 that it will "exec", and that there is sufficient room for the brk. */

-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+
+#ifdef CONFIG_PAX_ASLR
+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
+
+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
+#endif

 /* When the program starts, a1 contains a pointer to a function to be
 registered with atexit, as per the SVR4 ABI. A value of 0 means we
@@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
 extern void elf_set_personality(const struct elf32_hdr *);
 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))

-struct mm_struct;
-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
-#define arch_randomize_brk arch_randomize_brk
-
 #ifdef CONFIG_MMU
 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
 struct linux_binprm;
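
For scale: PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN count the random bits applied at page granularity, so the 16-bit case above spreads the mmap base over a 256 MiB window on a 4 KiB-page kernel. A one-line check of that arithmetic (page shift assumed):

/* Sketch: span covered by 16 bits of page-granular randomisation,
 * assuming 4 KiB pages (PAGE_SHIFT == 12). */
static const unsigned long mmap_rand_span = 1UL << (16 + 12); /* 256 MiB */
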
diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
index de53547..52b9a28 100644
--- a/arch/arm/include/asm/fncpy.h
+++ b/arch/arm/include/asm/fncpy.h
@@ -81,7 +81,9 @@
 BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
 (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
 \
+ pax_open_kernel(); \
 memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
+ pax_close_kernel(); \
 flush_icache_range((unsigned long)(dest_buf), \
 (unsigned long)(dest_buf) + (size)); \
 \
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index e42cf59..7b94b8f 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -50,6 +50,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 return -EFAULT;

+ pax_open_userland();
+
 smp_mb();
 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
 "1: ldrex %1, [%4]\n"
@@ -65,6 +67,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 : "cc", "memory");
 smp_mb();

+ pax_close_userland();
+
 *uval = val;
 return ret;
 }
@@ -95,6 +99,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 return -EFAULT;

+ pax_open_userland();
+
 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
 "1: " TUSER(ldr) " %1, [%4]\n"
 " teq %1, %2\n"
@@ -105,6 +111,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
 : "cc", "memory");

+ pax_close_userland();
+
 *uval = val;
 return ret;
 }
@@ -127,6 +135,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 return -EFAULT;

 pagefault_disable(); /* implies preempt_disable() */
+ pax_open_userland();

 switch (op) {
 case FUTEX_OP_SET:
@@ -148,6 +157,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 ret = -ENOSYS;
 }

+ pax_close_userland();
 pagefault_enable(); /* subsumes preempt_enable() */

 if (!ret) {
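
The futex hunks show UDEREF's bracketing discipline, the same one applied to checksum.h earlier: userspace is inaccessible by default, and every legitimate user access must sit between pax_open_userland() and pax_close_userland(). A generic sketch of the pattern (the open/close helpers are supplied elsewhere by the PaX patch; get_user() is the stock accessor):

/* Sketch: stray kernel dereferences of user pointers fault; only
 * accesses inside the open/close window succeed. */
static int read_user_u32(u32 __user *uaddr, u32 *val)
{
        int ret;

        pax_open_userland();
        ret = get_user(*val, uaddr);    /* user access is legal only here */
        pax_close_userland();

        return ret;
}
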
diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
index 83eb2f7..ed77159 100644
--- a/arch/arm/include/asm/kmap_types.h
+++ b/arch/arm/include/asm/kmap_types.h
@@ -4,6 +4,6 @@
 /*
 * This is the "bare minimum". AIO seems to require this.
 */
-#define KM_TYPE_NR 16
+#define KM_TYPE_NR 17

 #endif
diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
index 9e614a1..3302cca 100644
--- a/arch/arm/include/asm/mach/dma.h
+++ b/arch/arm/include/asm/mach/dma.h
@@ -22,7 +22,7 @@ struct dma_ops {
 int (*residue)(unsigned int, dma_t *); /* optional */
 int (*setspeed)(unsigned int, dma_t *, int); /* optional */
 const char *type;
-};
+} __do_const;

 struct dma_struct {
 void *addr; /* single DMA address */
diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
index 2fe141f..192dc01 100644
--- a/arch/arm/include/asm/mach/map.h
+++ b/arch/arm/include/asm/mach/map.h
@@ -27,13 +27,16 @@ struct map_desc {
 #define MT_MINICLEAN 6
 #define MT_LOW_VECTORS 7
 #define MT_HIGH_VECTORS 8
-#define MT_MEMORY 9
+#define MT_MEMORY_RWX 9
 #define MT_ROM 10
-#define MT_MEMORY_NONCACHED 11
+#define MT_MEMORY_NONCACHED_RX 11
 #define MT_MEMORY_DTCM 12
 #define MT_MEMORY_ITCM 13
 #define MT_MEMORY_SO 14
 #define MT_MEMORY_DMA_READY 15
+#define MT_MEMORY_RW 16
+#define MT_MEMORY_RX 17
+#define MT_MEMORY_NONCACHED_RW 18

 #ifdef CONFIG_MMU
 extern void iotable_init(struct map_desc *, int);
diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
index 12f71a1..04e063c 100644
--- a/arch/arm/include/asm/outercache.h
+++ b/arch/arm/include/asm/outercache.h
@@ -35,7 +35,7 @@ struct outer_cache_fns {
 #endif
 void (*set_debug)(unsigned long);
 void (*resume)(void);
-};
+} __no_const;

 #ifdef CONFIG_OUTER_CACHE

diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index cbdc7a2..32f44fe 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -114,7 +114,7 @@ struct cpu_user_fns {
 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
 unsigned long vaddr, struct vm_area_struct *vma);
-};
+} __no_const;

 #ifdef MULTI_USER
 extern struct cpu_user_fns cpu_user;
diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
index 943504f..c37a730 100644
--- a/arch/arm/include/asm/pgalloc.h
+++ b/arch/arm/include/asm/pgalloc.h
@@ -17,6 +17,7 @@
 #include <asm/processor.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
+#include <asm/system_info.h>

 #define check_pgt_cache() do { } while (0)

@@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
 }

+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+{
+ pud_populate(mm, pud, pmd);
+}
+
 #else /* !CONFIG_ARM_LPAE */

 /*
@@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
 #define pmd_free(mm, pmd) do { } while (0)
 #define pud_populate(mm,pmd,pte) BUG()
+#define pud_populate_kernel(mm,pmd,pte) BUG()

 #endif /* CONFIG_ARM_LPAE */

@@ -126,6 +133,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
 __free_page(pte);
 }

+static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
+{
+#ifdef CONFIG_ARM_LPAE
+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
+#else
+ if (addr & SECTION_SIZE)
+ pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
+ else
+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
+#endif
+ flush_pmd_entry(pmdp);
+}
+
 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
 pmdval_t prot)
 {
@@ -155,7 +175,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
 static inline void
 pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
 {
- __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE);
+ __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE | __supported_pmd_mask);
 }
 #define pmd_pgtable(pmd) pmd_page(pmd)

diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
index 5cfba15..f415e1a 100644
--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
@@ -20,12 +20,15 @@
 #define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
 #define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0)
 #define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0)
+#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */
 #define PMD_BIT4 (_AT(pmdval_t, 1) << 4)
 #define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5)
 #define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */
+
 /*
 * - section
 */
+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
@@ -37,6 +40,7 @@
 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
 #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
 #define PMD_SECT_AF (_AT(pmdval_t, 0))
+#define PMD_SECT_RDONLY (_AT(pmdval_t, 0))

 #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
 #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
@@ -66,6 +70,7 @@
 * - extended small page/tiny page
 */
 #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */
 #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
 #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
 #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index f97ee02..cc9fe9e 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -126,6 +126,9 @@
 #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
 #define L_PTE_NONE (_AT(pteval_t, 1) << 11)

+/* Two-level page tables only have PXN in the PGD, not in the PTE. */
+#define L_PTE_PXN (_AT(pteval_t, 0))
+
 /*
 * These are the memory types, defined to be compatible with
 * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
1931index 18f5cef..25b8f43 100644
1932--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
1933+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
1934@@ -41,6 +41,7 @@
1935 */
1936 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1937 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1938+#define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7)
1939 #define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
1940 #define PMD_SECT_AF (_AT(pmdval_t, 1) << 10)
1941 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 11)
1942@@ -71,6 +72,7 @@
1943 #define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1944 #define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
1945 #define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */
1946+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1947 #define PTE_EXT_XN (_AT(pteval_t, 1) << 54) /* XN */
1948
1949 /*
1950diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
1951index 86b8fe3..e25f975 100644
1952--- a/arch/arm/include/asm/pgtable-3level.h
1953+++ b/arch/arm/include/asm/pgtable-3level.h
1954@@ -74,6 +74,7 @@
1955 #define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */
1956 #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1957 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
1958+#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1959 #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
1960 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55) /* unused */
1961 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */
1962@@ -82,6 +83,7 @@
1963 /*
1964 * To be used in assembly code with the upper page attributes.
1965 */
1966+#define L_PTE_PXN_HIGH (1 << (53 - 32))
1967 #define L_PTE_XN_HIGH (1 << (54 - 32))
1968 #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
1969
1970diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
1971index 9bcd262..fba731c 100644
1972--- a/arch/arm/include/asm/pgtable.h
1973+++ b/arch/arm/include/asm/pgtable.h
1974@@ -30,6 +30,9 @@
1975 #include <asm/pgtable-2level.h>
1976 #endif
1977
1978+#define ktla_ktva(addr) (addr)
1979+#define ktva_ktla(addr) (addr)
1980+
1981 /*
1982 * Just any arbitrary offset to the start of the vmalloc VM area: the
1983 * current 8MB value just means that there will be a 8MB "hole" after the
1984@@ -45,6 +48,9 @@
1985 #define LIBRARY_TEXT_START 0x0c000000
1986
1987 #ifndef __ASSEMBLY__
1988+extern pteval_t __supported_pte_mask;
1989+extern pmdval_t __supported_pmd_mask;
1990+
1991 extern void __pte_error(const char *file, int line, pte_t);
1992 extern void __pmd_error(const char *file, int line, pmd_t);
1993 extern void __pgd_error(const char *file, int line, pgd_t);
1994@@ -53,6 +59,50 @@ extern void __pgd_error(const char *file, int line, pgd_t);
1995 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
1996 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
1997
1998+#define __HAVE_ARCH_PAX_OPEN_KERNEL
1999+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
2000+
2001+#ifdef CONFIG_PAX_KERNEXEC
2002+#include <asm/domain.h>
2003+#include <linux/thread_info.h>
2004+#include <linux/preempt.h>
2005+#endif
2006+
2007+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2008+static inline int test_domain(int domain, int domaintype)
2009+{
2010+ return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
2011+}
2012+#endif
2013+
2014+#ifdef CONFIG_PAX_KERNEXEC
2015+static inline unsigned long pax_open_kernel(void) {
2016+#ifdef CONFIG_ARM_LPAE
2017+ /* TODO */
2018+#else
2019+ preempt_disable();
2020+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
2021+ modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
2022+#endif
2023+ return 0;
2024+}
2025+
2026+static inline unsigned long pax_close_kernel(void) {
2027+#ifdef CONFIG_ARM_LPAE
2028+ /* TODO */
2029+#else
2030+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
2031+ /* DOMAIN_MANAGER = "client" under KERNEXEC */
2032+ modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
2033+ preempt_enable_no_resched();
2034+#endif
2035+ return 0;
2036+}
2037+#else
2038+static inline unsigned long pax_open_kernel(void) { return 0; }
2039+static inline unsigned long pax_close_kernel(void) { return 0; }
2040+#endif
2041+
2042 /*
2043 * This is the lowest virtual address we can permit any user space
2044 * mapping to be mapped at. This is particularly important for
2045@@ -72,8 +122,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2046 /*
2047 * The pgprot_* and protection_map entries will be fixed up in runtime
2048 * to include the cachable and bufferable bits based on memory policy,
2049- * as well as any architecture dependent bits like global/ASID and SMP
2050- * shared mapping bits.
2051+ * as well as any architecture dependent bits like global/ASID, PXN,
2052+ * and SMP shared mapping bits.
2053 */
2054 #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
2055
2056@@ -257,7 +307,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
2057 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
2058 {
2059 const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
2060- L_PTE_NONE | L_PTE_VALID;
2061+ L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
2062 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
2063 return pte;
2064 }
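pax_open_kernel()/pax_close_kernel() are the KERNEXEC write window on the non-LPAE path: rather than editing page tables, they retype DOMAIN_KERNEL in the Domain Access Control Register so that, between the two calls, kernel accesses are treated as manager accesses and permission bits are not checked, which lets a write to read-only kernel text go through. Preemption is disabled across the window because the retyped DACR is per-thread state. A minimal usage sketch, modeled on the arch/arm/kernel/patch.c hunk later in this patch (the function and its arguments are hypothetical):

/* Hypothetical caller; only pax_open_kernel/pax_close_kernel are real. */
static void poke_kernel_text(unsigned int *addr, unsigned int insn)
{
	pax_open_kernel();	/* DOMAIN_KERNEL retyped, preemption off */
	*addr = insn;		/* domain check bypasses the RX permission */
	pax_close_kernel();	/* client type restored, preemption back on */
}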
2065diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
2066index f3628fb..a0672dd 100644
2067--- a/arch/arm/include/asm/proc-fns.h
2068+++ b/arch/arm/include/asm/proc-fns.h
2069@@ -75,7 +75,7 @@ extern struct processor {
2070 unsigned int suspend_size;
2071 void (*do_suspend)(void *);
2072 void (*do_resume)(void *);
2073-} processor;
2074+} __do_const processor;
2075
2076 #ifndef MULTI_CPU
2077 extern void cpu_proc_init(void);
2078diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
2079index ce0dbe7..c085b6f 100644
2080--- a/arch/arm/include/asm/psci.h
2081+++ b/arch/arm/include/asm/psci.h
2082@@ -29,7 +29,7 @@ struct psci_operations {
2083 int (*cpu_off)(struct psci_power_state state);
2084 int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
2085 int (*migrate)(unsigned long cpuid);
2086-};
2087+} __no_const;
2088
2089 extern struct psci_operations psci_ops;
2090
2091diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
2092index d3a22be..3a69ad5 100644
2093--- a/arch/arm/include/asm/smp.h
2094+++ b/arch/arm/include/asm/smp.h
2095@@ -107,7 +107,7 @@ struct smp_operations {
2096 int (*cpu_disable)(unsigned int cpu);
2097 #endif
2098 #endif
2099-};
2100+} __no_const;
2101
2102 /*
2103 * set platform specific SMP operations
2104diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
2105index f00b569..aa5bb41 100644
2106--- a/arch/arm/include/asm/thread_info.h
2107+++ b/arch/arm/include/asm/thread_info.h
2108@@ -77,9 +77,9 @@ struct thread_info {
2109 .flags = 0, \
2110 .preempt_count = INIT_PREEMPT_COUNT, \
2111 .addr_limit = KERNEL_DS, \
2112- .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2113- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2114- domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
2115+ .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
2116+ domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
2117+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
2118 .restart_block = { \
2119 .fn = do_no_restart_syscall, \
2120 }, \
2121@@ -152,7 +152,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2122 #define TIF_SYSCALL_AUDIT 9
2123 #define TIF_SYSCALL_TRACEPOINT 10
2124 #define TIF_SECCOMP 11 /* seccomp syscall filtering active */
2125-#define TIF_NOHZ 12 /* in adaptive nohz mode */
2126+/* within 8 bits of TIF_SYSCALL_TRACE
2127+ * to meet flexible second operand requirements
2128+ */
2129+#define TIF_GRSEC_SETXID 12
2130+#define TIF_NOHZ 13 /* in adaptive nohz mode */
2131 #define TIF_USING_IWMMXT 17
2132 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
2133 #define TIF_RESTORE_SIGMASK 20
2134@@ -165,10 +169,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2135 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
2136 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
2137 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
2138+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
2139
2140 /* Checks for any syscall work in entry-common.S */
2141 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
2142- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
2143+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
2144
2145 /*
2146 * Change these and you break ASM code in entry-common.S
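The "flexible second operand" comment refers to ARM immediate encoding: a data-processing immediate is an 8-bit value rotated right by an even amount, so _TIF_SYSCALL_WORK can only be tested with a single tst instruction if all of its bits fit one 8-bit window. Slotting TIF_GRSEC_SETXID at bit 12, within 8 bits of TIF_SYSCALL_TRACE (bit 8), keeps the mask encodable; TIF_NOHZ is bumped to 13 to make room. A standalone checker for that encodability rule (the sample masks are illustrative):

#include <stdio.h>

static unsigned int rol32(unsigned int v, int n)
{
	n &= 31;
	return n ? (v << n) | (v >> (32 - n)) : v;
}

/* An ARM data-processing immediate is imm8 rotated right by an even
 * amount; testing every even rotation of the value covers them all. */
static int encodable_arm_imm(unsigned int v)
{
	int rot;

	for (rot = 0; rot < 32; rot += 2)
		if ((rol32(v, rot) & ~0xffu) == 0)
			return 1;
	return 0;
}

int main(void)
{
	printf("%d\n", encodable_arm_imm(0x1f00));	/* bits 8..12: 1 */
	printf("%d\n", encodable_arm_imm((1u << 0) | (1u << 20))); /* 0: too far apart */
	return 0;
}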
2147diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
2148index bdf2b84..aa9b4ac 100644
2149--- a/arch/arm/include/asm/tlb.h
2150+++ b/arch/arm/include/asm/tlb.h
2151@@ -43,6 +43,7 @@ struct mmu_gather {
2152 struct mm_struct *mm;
2153 unsigned int fullmm;
2154 struct vm_area_struct *vma;
2155+ unsigned long start, end;
2156 unsigned long range_start;
2157 unsigned long range_end;
2158 unsigned int nr;
2159@@ -107,10 +108,12 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
2160 }
2161
2162 static inline void
2163-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm)
2164+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
2165 {
2166 tlb->mm = mm;
2167- tlb->fullmm = fullmm;
2168+ tlb->fullmm = !(start | (end+1));
2169+ tlb->start = start;
2170+ tlb->end = end;
2171 tlb->vma = NULL;
2172 tlb->max = ARRAY_SIZE(tlb->local);
2173 tlb->pages = tlb->local;
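The new tlb_gather_mmu() signature encodes "tear down the whole mm" in the range itself: a full flush is requested as start = 0, end = -1, so end + 1 wraps to 0 and the OR of the two terms vanishes, while any bounded munmap range leaves at least one term non-zero. This matches the cross-architecture mmu_gather change that tracks the unmapped range instead of a fullmm flag. A standalone illustration:

#include <assert.h>

static int is_fullmm(unsigned long start, unsigned long end)
{
	return !(start | (end + 1));	/* same test as the hunk above */
}

int main(void)
{
	assert(is_fullmm(0, ~0UL));		/* exit_mmap-style full flush */
	assert(!is_fullmm(0x8000, 0xffff));	/* bounded unmap range */
	return 0;
}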
2174diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
2175index 7e1f760..de33b13 100644
2176--- a/arch/arm/include/asm/uaccess.h
2177+++ b/arch/arm/include/asm/uaccess.h
2178@@ -18,6 +18,7 @@
2179 #include <asm/domain.h>
2180 #include <asm/unified.h>
2181 #include <asm/compiler.h>
2182+#include <asm/pgtable.h>
2183
2184 #define VERIFY_READ 0
2185 #define VERIFY_WRITE 1
2186@@ -63,11 +64,38 @@ extern int __put_user_bad(void);
2187 static inline void set_fs(mm_segment_t fs)
2188 {
2189 current_thread_info()->addr_limit = fs;
2190- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
2191+ modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
2192 }
2193
2194 #define segment_eq(a,b) ((a) == (b))
2195
2196+#define __HAVE_ARCH_PAX_OPEN_USERLAND
2197+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
2198+
2199+static inline void pax_open_userland(void)
2200+{
2201+
2202+#ifdef CONFIG_PAX_MEMORY_UDEREF
2203+ if (segment_eq(get_fs(), USER_DS)) {
2204+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
2205+ modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
2206+ }
2207+#endif
2208+
2209+}
2210+
2211+static inline void pax_close_userland(void)
2212+{
2213+
2214+#ifdef CONFIG_PAX_MEMORY_UDEREF
2215+ if (segment_eq(get_fs(), USER_DS)) {
2216+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
2217+ modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
2218+ }
2219+#endif
2220+
2221+}
2222+
2223 #define __addr_ok(addr) ({ \
2224 unsigned long flag; \
2225 __asm__("cmp %2, %0; movlo %0, #0" \
2226@@ -143,8 +171,12 @@ extern int __get_user_4(void *);
2227
2228 #define get_user(x,p) \
2229 ({ \
2230+ int __e; \
2231 might_fault(); \
2232- __get_user_check(x,p); \
2233+ pax_open_userland(); \
2234+ __e = __get_user_check(x,p); \
2235+ pax_close_userland(); \
2236+ __e; \
2237 })
2238
2239 extern int __put_user_1(void *, unsigned int);
2240@@ -188,8 +220,12 @@ extern int __put_user_8(void *, unsigned long long);
2241
2242 #define put_user(x,p) \
2243 ({ \
2244+ int __e; \
2245 might_fault(); \
2246- __put_user_check(x,p); \
2247+ pax_open_userland(); \
2248+ __e = __put_user_check(x,p); \
2249+ pax_close_userland(); \
2250+ __e; \
2251 })
2252
2253 #else /* CONFIG_MMU */
2254@@ -230,13 +266,17 @@ static inline void set_fs(mm_segment_t fs)
2255 #define __get_user(x,ptr) \
2256 ({ \
2257 long __gu_err = 0; \
2258+ pax_open_userland(); \
2259 __get_user_err((x),(ptr),__gu_err); \
2260+ pax_close_userland(); \
2261 __gu_err; \
2262 })
2263
2264 #define __get_user_error(x,ptr,err) \
2265 ({ \
2266+ pax_open_userland(); \
2267 __get_user_err((x),(ptr),err); \
2268+ pax_close_userland(); \
2269 (void) 0; \
2270 })
2271
2272@@ -312,13 +352,17 @@ do { \
2273 #define __put_user(x,ptr) \
2274 ({ \
2275 long __pu_err = 0; \
2276+ pax_open_userland(); \
2277 __put_user_err((x),(ptr),__pu_err); \
2278+ pax_close_userland(); \
2279 __pu_err; \
2280 })
2281
2282 #define __put_user_error(x,ptr,err) \
2283 ({ \
2284+ pax_open_userland(); \
2285 __put_user_err((x),(ptr),err); \
2286+ pax_close_userland(); \
2287 (void) 0; \
2288 })
2289
2290@@ -418,11 +462,44 @@ do { \
2291
2292
2293 #ifdef CONFIG_MMU
2294-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
2295-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
2296+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
2297+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
2298+
2299+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
2300+{
2301+ unsigned long ret;
2302+
2303+ check_object_size(to, n, false);
2304+ pax_open_userland();
2305+ ret = ___copy_from_user(to, from, n);
2306+ pax_close_userland();
2307+ return ret;
2308+}
2309+
2310+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
2311+{
2312+ unsigned long ret;
2313+
2314+ check_object_size(from, n, true);
2315+ pax_open_userland();
2316+ ret = ___copy_to_user(to, from, n);
2317+ pax_close_userland();
2318+ return ret;
2319+}
2320+
2321 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
2322-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
2323+extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n);
2324 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
2325+
2326+static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
2327+{
2328+ unsigned long ret;
2329+ pax_open_userland();
2330+ ret = ___clear_user(addr, n);
2331+ pax_close_userland();
2332+ return ret;
2333+}
2334+
2335 #else
2336 #define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0)
2337 #define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0)
2338@@ -431,6 +508,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
2339
2340 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2341 {
2342+ if ((long)n < 0)
2343+ return n;
2344+
2345 if (access_ok(VERIFY_READ, from, n))
2346 n = __copy_from_user(to, from, n);
2347 else /* security hole - plug it */
2348@@ -440,6 +520,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
2349
2350 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2351 {
2352+ if ((long)n < 0)
2353+ return n;
2354+
2355 if (access_ok(VERIFY_WRITE, to, n))
2356 n = __copy_to_user(to, from, n);
2357 return n;
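Two separate hardenings are layered on here. First, every accessor is bracketed by pax_open_userland()/pax_close_userland(): under UDEREF the user domain normally sits at no-access while the kernel runs, and is retyped to accessible only for the duration of a legitimate user access, so a stray kernel dereference of a user pointer faults. Second, copy_from_user()/copy_to_user() reject a length whose sign bit is set before even consulting access_ok(), since a "negative" size_t is almost always an underflowed length computation under attacker influence. A standalone model of the sign check:

#include <stdio.h>

static unsigned long checked_copy(unsigned long n)
{
	if ((long)n < 0)	/* huge unsigned value == underflowed size */
		return n;	/* report every byte as uncopied */
	/* ... access_ok() and the real copy would run here ... */
	return 0;
}

int main(void)
{
	printf("%lu\n", checked_copy(128));		/* 0: copy proceeds */
	printf("%lu\n", checked_copy(16UL - 32UL));	/* huge: rejected */
	return 0;
}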
2358diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
2359index 96ee092..37f1844 100644
2360--- a/arch/arm/include/uapi/asm/ptrace.h
2361+++ b/arch/arm/include/uapi/asm/ptrace.h
2362@@ -73,7 +73,7 @@
2363 * ARMv7 groups of PSR bits
2364 */
2365 #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
2366-#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
2367+#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
2368 #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
2369 #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
2370
2371diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
2372index 60d3b73..e5a0f22 100644
2373--- a/arch/arm/kernel/armksyms.c
2374+++ b/arch/arm/kernel/armksyms.c
2375@@ -53,7 +53,7 @@ EXPORT_SYMBOL(arm_delay_ops);
2376
2377 /* networking */
2378 EXPORT_SYMBOL(csum_partial);
2379-EXPORT_SYMBOL(csum_partial_copy_from_user);
2380+EXPORT_SYMBOL(__csum_partial_copy_from_user);
2381 EXPORT_SYMBOL(csum_partial_copy_nocheck);
2382 EXPORT_SYMBOL(__csum_ipv6_magic);
2383
2384@@ -89,9 +89,9 @@ EXPORT_SYMBOL(__memzero);
2385 #ifdef CONFIG_MMU
2386 EXPORT_SYMBOL(copy_page);
2387
2388-EXPORT_SYMBOL(__copy_from_user);
2389-EXPORT_SYMBOL(__copy_to_user);
2390-EXPORT_SYMBOL(__clear_user);
2391+EXPORT_SYMBOL(___copy_from_user);
2392+EXPORT_SYMBOL(___copy_to_user);
2393+EXPORT_SYMBOL(___clear_user);
2394
2395 EXPORT_SYMBOL(__get_user_1);
2396 EXPORT_SYMBOL(__get_user_2);
2397diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
2398index d43c7e5..257c050 100644
2399--- a/arch/arm/kernel/entry-armv.S
2400+++ b/arch/arm/kernel/entry-armv.S
2401@@ -47,6 +47,87 @@
2402 9997:
2403 .endm
2404
2405+ .macro pax_enter_kernel
2406+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2407+ @ make aligned space for saved DACR
2408+ sub sp, sp, #8
2409+ @ save regs
2410+ stmdb sp!, {r1, r2}
2411+ @ read DACR from cpu_domain into r1
2412+ mov r2, sp
2413+ @ assume 8K pages, since we have to split the immediate in two
2414+ bic r2, r2, #(0x1fc0)
2415+ bic r2, r2, #(0x3f)
2416+ ldr r1, [r2, #TI_CPU_DOMAIN]
2417+ @ store old DACR on stack
2418+ str r1, [sp, #8]
2419+#ifdef CONFIG_PAX_KERNEXEC
2420+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2421+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2422+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2423+#endif
2424+#ifdef CONFIG_PAX_MEMORY_UDEREF
2425+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2426+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2427+#endif
2428+ @ write r1 to current_thread_info()->cpu_domain
2429+ str r1, [r2, #TI_CPU_DOMAIN]
2430+ @ write r1 to DACR
2431+ mcr p15, 0, r1, c3, c0, 0
2432+ @ instruction sync
2433+ instr_sync
2434+ @ restore regs
2435+ ldmia sp!, {r1, r2}
2436+#endif
2437+ .endm
2438+
2439+ .macro pax_open_userland
2440+#ifdef CONFIG_PAX_MEMORY_UDEREF
2441+ @ save regs
2442+ stmdb sp!, {r0, r1}
2443+ @ read DACR from cpu_domain into r1
2444+ mov r0, sp
2445+ @ assume 8K pages, since we have to split the immediate in two
2446+ bic r0, r0, #(0x1fc0)
2447+ bic r0, r0, #(0x3f)
2448+ ldr r1, [r0, #TI_CPU_DOMAIN]
2449+ @ set current DOMAIN_USER to DOMAIN_CLIENT
2450+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2451+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2452+ @ write r1 to current_thread_info()->cpu_domain
2453+ str r1, [r0, #TI_CPU_DOMAIN]
2454+ @ write r1 to DACR
2455+ mcr p15, 0, r1, c3, c0, 0
2456+ @ instruction sync
2457+ instr_sync
2458+ @ restore regs
2459+ ldmia sp!, {r0, r1}
2460+#endif
2461+ .endm
2462+
2463+ .macro pax_close_userland
2464+#ifdef CONFIG_PAX_MEMORY_UDEREF
2465+ @ save regs
2466+ stmdb sp!, {r0, r1}
2467+ @ read DACR from cpu_domain into r1
2468+ mov r0, sp
2469+ @ assume 8K pages, since we have to split the immediate in two
2470+ bic r0, r0, #(0x1fc0)
2471+ bic r0, r0, #(0x3f)
2472+ ldr r1, [r0, #TI_CPU_DOMAIN]
2473+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2474+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2475+ @ write r1 to current_thread_info()->cpu_domain
2476+ str r1, [r0, #TI_CPU_DOMAIN]
2477+ @ write r1 to DACR
2478+ mcr p15, 0, r1, c3, c0, 0
2479+ @ instruction sync
2480+ instr_sync
2481+ @ restore regs
2482+ ldmia sp!, {r0, r1}
2483+#endif
2484+ .endm
2485+
2486 .macro pabt_helper
2487 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
2488 #ifdef MULTI_PABORT
2489@@ -89,11 +170,15 @@
2490 * Invalid mode handlers
2491 */
2492 .macro inv_entry, reason
2493+
2494+ pax_enter_kernel
2495+
2496 sub sp, sp, #S_FRAME_SIZE
2497 ARM( stmib sp, {r1 - lr} )
2498 THUMB( stmia sp, {r0 - r12} )
2499 THUMB( str sp, [sp, #S_SP] )
2500 THUMB( str lr, [sp, #S_LR] )
2501+
2502 mov r1, #\reason
2503 .endm
2504
2505@@ -149,7 +234,11 @@ ENDPROC(__und_invalid)
2506 .macro svc_entry, stack_hole=0
2507 UNWIND(.fnstart )
2508 UNWIND(.save {r0 - pc} )
2509+
2510+ pax_enter_kernel
2511+
2512 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2513+
2514 #ifdef CONFIG_THUMB2_KERNEL
2515 SPFIX( str r0, [sp] ) @ temporarily saved
2516 SPFIX( mov r0, sp )
2517@@ -164,7 +253,12 @@ ENDPROC(__und_invalid)
2518 ldmia r0, {r3 - r5}
2519 add r7, sp, #S_SP - 4 @ here for interlock avoidance
2520 mov r6, #-1 @ "" "" "" ""
2521+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2522+ @ offset sp by 8 as done in pax_enter_kernel
2523+ add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
2524+#else
2525 add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2526+#endif
2527 SPFIX( addeq r2, r2, #4 )
2528 str r3, [sp, #-4]! @ save the "real" r0 copied
2529 @ from the exception stack
2530@@ -316,6 +410,9 @@ ENDPROC(__pabt_svc)
2531 .macro usr_entry
2532 UNWIND(.fnstart )
2533 UNWIND(.cantunwind ) @ don't unwind the user space
2534+
2535+ pax_enter_kernel_user
2536+
2537 sub sp, sp, #S_FRAME_SIZE
2538 ARM( stmib sp, {r1 - r12} )
2539 THUMB( stmia sp, {r0 - r12} )
2540@@ -357,7 +454,8 @@ ENDPROC(__pabt_svc)
2541 .endm
2542
2543 .macro kuser_cmpxchg_check
2544-#if !defined(CONFIG_CPU_32v6K) && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
2545+#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS) && \
2546+ !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
2547 #ifndef CONFIG_MMU
2548 #warning "NPTL on non MMU needs fixing"
2549 #else
2550@@ -414,7 +512,9 @@ __und_usr:
2551 tst r3, #PSR_T_BIT @ Thumb mode?
2552 bne __und_usr_thumb
2553 sub r4, r2, #4 @ ARM instr at LR - 4
2554+ pax_open_userland
2555 1: ldrt r0, [r4]
2556+ pax_close_userland
2557 #ifdef CONFIG_CPU_ENDIAN_BE8
2558 rev r0, r0 @ little endian instruction
2559 #endif
2560@@ -449,10 +549,14 @@ __und_usr_thumb:
2561 */
2562 .arch armv6t2
2563 #endif
2564+ pax_open_userland
2565 2: ldrht r5, [r4]
2566+ pax_close_userland
2567 cmp r5, #0xe800 @ 32bit instruction if xx != 0
2568 blo __und_usr_fault_16 @ 16bit undefined instruction
2569+ pax_open_userland
2570 3: ldrht r0, [r2]
2571+ pax_close_userland
2572 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
2573 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
2574 orr r0, r0, r5, lsl #16
2575@@ -481,7 +585,8 @@ ENDPROC(__und_usr)
2576 */
2577 .pushsection .fixup, "ax"
2578 .align 2
2579-4: mov pc, r9
2580+4: pax_close_userland
2581+ mov pc, r9
2582 .popsection
2583 .pushsection __ex_table,"a"
2584 .long 1b, 4b
2585@@ -690,7 +795,7 @@ ENTRY(__switch_to)
2586 THUMB( stmia ip!, {r4 - sl, fp} ) @ Store most regs on stack
2587 THUMB( str sp, [ip], #4 )
2588 THUMB( str lr, [ip], #4 )
2589-#ifdef CONFIG_CPU_USE_DOMAINS
2590+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2591 ldr r6, [r2, #TI_CPU_DOMAIN]
2592 #endif
2593 set_tls r3, r4, r5
2594@@ -699,7 +804,7 @@ ENTRY(__switch_to)
2595 ldr r8, =__stack_chk_guard
2596 ldr r7, [r7, #TSK_STACK_CANARY]
2597 #endif
2598-#ifdef CONFIG_CPU_USE_DOMAINS
2599+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2600 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
2601 #endif
2602 mov r5, r0
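All of these macros share one mechanism: locate thread_info by rounding sp down to the 8 KB stack boundary, read the cached DACR out of cpu_domain, rewrite one 2-bit domain field, and write the result back to both the cache and CP15 c3. The paired bic instructions exist because 0x1fff is not a valid single ARM immediate (13 contiguous bits cannot fit the 8-bit rotated form), so clearing bits 0-12 takes two encodable masks, 0x1fc0 and 0x3f. A hedged C rendering of the arithmetic (the domain numbers follow the usual mainline convention and are illustrative, since this patch renames several domain types):

#include <stdio.h>

#define domain_val(dom, type)	((type) << (2 * (dom)))	/* as in asm/domain.h */

enum { DOMAIN_KERNEL = 0, DOMAIN_USER = 1 };		/* assumed numbering */
enum { TYPE_NOACCESS = 0, TYPE_CLIENT = 1, TYPE_MANAGER = 3 };

/* What "bic #0x1fc0; bic #0x3f" computes: sp & ~0x1fff. */
static unsigned long thread_info_of(unsigned long sp)
{
	return sp & ~0x1fffUL;
}

static unsigned int set_domain_type(unsigned int dacr, int dom, int type)
{
	dacr &= ~domain_val(dom, 3);		/* clear the 2-bit field */
	return dacr | domain_val(dom, type);
}

int main(void)
{
	unsigned int dacr = domain_val(DOMAIN_KERNEL, TYPE_CLIENT) |
			    domain_val(DOMAIN_USER, TYPE_CLIENT);

	dacr = set_domain_type(dacr, DOMAIN_USER, TYPE_NOACCESS); /* close userland */
	printf("DACR = %#x\n", dacr);
	printf("thread_info(0xc0a13e40) = %#lx\n", thread_info_of(0xc0a13e40UL));
	return 0;
}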
2603diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
2604index bc5bc0a..d0998ca 100644
2605--- a/arch/arm/kernel/entry-common.S
2606+++ b/arch/arm/kernel/entry-common.S
2607@@ -10,18 +10,46 @@
2608
2609 #include <asm/unistd.h>
2610 #include <asm/ftrace.h>
2611+#include <asm/domain.h>
2612 #include <asm/unwind.h>
2613
2614+#include "entry-header.S"
2615+
2616 #ifdef CONFIG_NEED_RET_TO_USER
2617 #include <mach/entry-macro.S>
2618 #else
2619 .macro arch_ret_to_user, tmp1, tmp2
2620+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2621+ @ save regs
2622+ stmdb sp!, {r1, r2}
2623+ @ read DACR from cpu_domain into r1
2624+ mov r2, sp
2625+ @ assume 8K pages, since we have to split the immediate in two
2626+ bic r2, r2, #(0x1fc0)
2627+ bic r2, r2, #(0x3f)
2628+ ldr r1, [r2, #TI_CPU_DOMAIN]
2629+#ifdef CONFIG_PAX_KERNEXEC
2630+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2631+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2632+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2633+#endif
2634+#ifdef CONFIG_PAX_MEMORY_UDEREF
2635+ @ set current DOMAIN_USER to DOMAIN_UDEREF
2636+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2637+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2638+#endif
2639+ @ write r1 to current_thread_info()->cpu_domain
2640+ str r1, [r2, #TI_CPU_DOMAIN]
2641+ @ write r1 to DACR
2642+ mcr p15, 0, r1, c3, c0, 0
2643+ @ instruction sync
2644+ instr_sync
2645+ @ restore regs
2646+ ldmia sp!, {r1, r2}
2647+#endif
2648 .endm
2649 #endif
2650
2651-#include "entry-header.S"
2652-
2653-
2654 .align 5
2655 /*
2656 * This is the fast syscall return path. We do as little as
2657@@ -350,6 +378,7 @@ ENDPROC(ftrace_stub)
2658
2659 .align 5
2660 ENTRY(vector_swi)
2661+
2662 sub sp, sp, #S_FRAME_SIZE
2663 stmia sp, {r0 - r12} @ Calling r0 - r12
2664 ARM( add r8, sp, #S_PC )
2665@@ -399,6 +428,12 @@ ENTRY(vector_swi)
2666 ldr scno, [lr, #-4] @ get SWI instruction
2667 #endif
2668
2669+ /*
2670+ * do this here to avoid a performance hit of wrapping the code above
2671+ * that directly dereferences userland to parse the SWI instruction
2672+ */
2673+ pax_enter_kernel_user
2674+
2675 #ifdef CONFIG_ALIGNMENT_TRAP
2676 ldr ip, __cr_alignment
2677 ldr ip, [ip]
2678diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
2679index 160f337..db67ee4 100644
2680--- a/arch/arm/kernel/entry-header.S
2681+++ b/arch/arm/kernel/entry-header.S
2682@@ -73,6 +73,60 @@
2683 msr cpsr_c, \rtemp @ switch back to the SVC mode
2684 .endm
2685
2686+ .macro pax_enter_kernel_user
2687+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2688+ @ save regs
2689+ stmdb sp!, {r0, r1}
2690+ @ read DACR from cpu_domain into r1
2691+ mov r0, sp
2692+ @ assume 8K pages, since we have to split the immediate in two
2693+ bic r0, r0, #(0x1fc0)
2694+ bic r0, r0, #(0x3f)
2695+ ldr r1, [r0, #TI_CPU_DOMAIN]
2696+#ifdef CONFIG_PAX_MEMORY_UDEREF
2697+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2698+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2699+#endif
2700+#ifdef CONFIG_PAX_KERNEXEC
2701+ @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2702+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2703+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2704+#endif
2705+ @ write r1 to current_thread_info()->cpu_domain
2706+ str r1, [r0, #TI_CPU_DOMAIN]
2707+ @ write r1 to DACR
2708+ mcr p15, 0, r1, c3, c0, 0
2709+ @ instruction sync
2710+ instr_sync
2711+ @ restore regs
2712+ ldmia sp!, {r0, r1}
2713+#endif
2714+ .endm
2715+
2716+ .macro pax_exit_kernel
2717+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2718+ @ save regs
2719+ stmdb sp!, {r0, r1}
2720+ @ read old DACR from stack into r1
2721+ ldr r1, [sp, #(8 + S_SP)]
2722+ sub r1, r1, #8
2723+ ldr r1, [r1]
2724+
2725+ @ write r1 to current_thread_info()->cpu_domain
2726+ mov r0, sp
2727+ @ assume 8K pages, since we have to split the immediate in two
2728+ bic r0, r0, #(0x1fc0)
2729+ bic r0, r0, #(0x3f)
2730+ str r1, [r0, #TI_CPU_DOMAIN]
2731+ @ write r1 to DACR
2732+ mcr p15, 0, r1, c3, c0, 0
2733+ @ instruction sync
2734+ instr_sync
2735+ @ restore regs
2736+ ldmia sp!, {r0, r1}
2737+#endif
2738+ .endm
2739+
2740 #ifndef CONFIG_THUMB2_KERNEL
2741 .macro svc_exit, rpsr, irq = 0
2742 .if \irq != 0
2743@@ -92,6 +146,9 @@
2744 blne trace_hardirqs_off
2745 #endif
2746 .endif
2747+
2748+ pax_exit_kernel
2749+
2750 msr spsr_cxsf, \rpsr
2751 #if defined(CONFIG_CPU_V6)
2752 ldr r0, [sp]
2753@@ -155,6 +212,9 @@
2754 blne trace_hardirqs_off
2755 #endif
2756 .endif
2757+
2758+ pax_exit_kernel
2759+
2760 ldr lr, [sp, #S_SP] @ top of the stack
2761 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
2762 clrex @ clear the exclusive monitor
2763diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
2764index 25442f4..d4948fc 100644
2765--- a/arch/arm/kernel/fiq.c
2766+++ b/arch/arm/kernel/fiq.c
2767@@ -84,17 +84,16 @@ int show_fiq_list(struct seq_file *p, int prec)
2768
2769 void set_fiq_handler(void *start, unsigned int length)
2770 {
2771-#if defined(CONFIG_CPU_USE_DOMAINS)
2772- void *base = (void *)0xffff0000;
2773-#else
2774 void *base = vectors_page;
2775-#endif
2776 unsigned offset = FIQ_OFFSET;
2777
2778+ pax_open_kernel();
2779 memcpy(base + offset, start, length);
2780+ pax_close_kernel();
2781+
2782+ if (!cache_is_vipt_nonaliasing())
2783+ flush_icache_range(base + offset, offset + length);
2784 flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length);
2785- if (!vectors_high())
2786- flush_icache_range(offset, offset + length);
2787 }
2788
2789 int claim_fiq(struct fiq_handler *f)
2790diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
2791index 8bac553..caee108 100644
2792--- a/arch/arm/kernel/head.S
2793+++ b/arch/arm/kernel/head.S
2794@@ -52,7 +52,9 @@
2795 .equ swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE
2796
2797 .macro pgtbl, rd, phys
2798- add \rd, \phys, #TEXT_OFFSET - PG_DIR_SIZE
2799+ mov \rd, #TEXT_OFFSET
2800+ sub \rd, #PG_DIR_SIZE
2801+ add \rd, \rd, \phys
2802 .endm
2803
2804 /*
2805@@ -434,7 +436,7 @@ __enable_mmu:
2806 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2807 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2808 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
2809- domain_val(DOMAIN_IO, DOMAIN_CLIENT))
2810+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
2811 mcr p15, 0, r5, c3, c0, 0 @ load domain access register
2812 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
2813 #endif
2814diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
2815index 1fd749e..47adb08 100644
2816--- a/arch/arm/kernel/hw_breakpoint.c
2817+++ b/arch/arm/kernel/hw_breakpoint.c
2818@@ -1029,7 +1029,7 @@ static int __cpuinit dbg_reset_notify(struct notifier_block *self,
2819 return NOTIFY_OK;
2820 }
2821
2822-static struct notifier_block __cpuinitdata dbg_reset_nb = {
2823+static struct notifier_block dbg_reset_nb = {
2824 .notifier_call = dbg_reset_notify,
2825 };
2826
2827diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
2828index 1e9be5d..03edbc2 100644
2829--- a/arch/arm/kernel/module.c
2830+++ b/arch/arm/kernel/module.c
2831@@ -37,12 +37,37 @@
2832 #endif
2833
2834 #ifdef CONFIG_MMU
2835-void *module_alloc(unsigned long size)
2836+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
2837 {
2838+ if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
2839+ return NULL;
2840 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
2841- GFP_KERNEL, PAGE_KERNEL_EXEC, -1,
2842+ GFP_KERNEL, prot, -1,
2843 __builtin_return_address(0));
2844 }
2845+
2846+void *module_alloc(unsigned long size)
2847+{
2848+
2849+#ifdef CONFIG_PAX_KERNEXEC
2850+ return __module_alloc(size, PAGE_KERNEL);
2851+#else
2852+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2853+#endif
2854+
2855+}
2856+
2857+#ifdef CONFIG_PAX_KERNEXEC
2858+void module_free_exec(struct module *mod, void *module_region)
2859+{
2860+ module_free(mod, module_region);
2861+}
2862+
2863+void *module_alloc_exec(unsigned long size)
2864+{
2865+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2866+}
2867+#endif
2868 #endif
2869
2870 int
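Under KERNEXEC, module_alloc() now returns non-executable memory and the new module_alloc_exec() supplies the executable mapping, so module text and data can be kept apart under W^X (the loader side that calls module_alloc_exec is hooked elsewhere in this patch). The added size guard also fails fast: zero, or any request whose page-aligned size exceeds the MODULES_VADDR..MODULES_END window, is rejected before __vmalloc_node_range() has to search for space. A standalone model of that guard (the window size is made up):

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

static int module_size_ok(unsigned long size, unsigned long window)
{
	return size && PAGE_ALIGN(size) <= window;
}

int main(void)
{
	unsigned long window = 16UL << 20;	/* pretend module area: 16 MiB */

	printf("%d\n", module_size_ok(12345, window));		/* 1 */
	printf("%d\n", module_size_ok(0, window));		/* 0 */
	printf("%d\n", module_size_ok(1UL << 30, window));	/* 0: too big */
	return 0;
}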
2871diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
2872index 07314af..c46655c 100644
2873--- a/arch/arm/kernel/patch.c
2874+++ b/arch/arm/kernel/patch.c
2875@@ -18,6 +18,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
2876 bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL);
2877 int size;
2878
2879+ pax_open_kernel();
2880 if (thumb2 && __opcode_is_thumb16(insn)) {
2881 *(u16 *)addr = __opcode_to_mem_thumb16(insn);
2882 size = sizeof(u16);
2883@@ -39,6 +40,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
2884 *(u32 *)addr = insn;
2885 size = sizeof(u32);
2886 }
2887+ pax_close_kernel();
2888
2889 flush_icache_range((uintptr_t)(addr),
2890 (uintptr_t)(addr) + size);
2891diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
2892index d9f5cd4..e186ee1 100644
2893--- a/arch/arm/kernel/perf_event.c
2894+++ b/arch/arm/kernel/perf_event.c
2895@@ -53,7 +53,12 @@ armpmu_map_cache_event(const unsigned (*cache_map)
2896 static int
2897 armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
2898 {
2899- int mapping = (*event_map)[config];
2900+ int mapping;
2901+
2902+ if (config >= PERF_COUNT_HW_MAX)
2903+ return -EINVAL;
2904+
2905+ mapping = (*event_map)[config];
2906 return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
2907 }
2908
2909@@ -253,6 +258,9 @@ validate_event(struct pmu_hw_events *hw_events,
2910 struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
2911 struct pmu *leader_pmu = event->group_leader->pmu;
2912
2913+ if (is_software_event(event))
2914+ return 1;
2915+
2916 if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
2917 return 1;
2918
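armpmu_map_hw_event() receives config straight from the perf_event_open() ABI as a u64; without the added bound it indexes a PERF_COUNT_HW_MAX-sized table with an attacker-chosen value. The validate_event() hunk is a separate fix: software events in a group led by a hardware PMU consume no ARM PMU counter, so they should always validate as schedulable. A standalone model of the bounds check (the table contents are illustrative):

#include <stdio.h>
#include <stdint.h>

#define PERF_COUNT_HW_MAX	10
#define HW_OP_UNSUPPORTED	0xffff
#define EINVAL	22
#define ENOENT	2

static const unsigned event_map[PERF_COUNT_HW_MAX] = {
	[0] = 0x11, [1] = HW_OP_UNSUPPORTED,	/* the rest default to 0 */
};

static int map_hw_event(uint64_t config)
{
	unsigned mapping;

	if (config >= PERF_COUNT_HW_MAX)	/* the added guard */
		return -EINVAL;
	mapping = event_map[config];
	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}

int main(void)
{
	printf("%d\n", map_hw_event(0));	/* 17 */
	printf("%d\n", map_hw_event(~0ULL));	/* -22: no out-of-bounds read */
	return 0;
}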
2919diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
2920index 1f2740e..b36e225 100644
2921--- a/arch/arm/kernel/perf_event_cpu.c
2922+++ b/arch/arm/kernel/perf_event_cpu.c
2923@@ -171,7 +171,7 @@ static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
2924 return NOTIFY_OK;
2925 }
2926
2927-static struct notifier_block __cpuinitdata cpu_pmu_hotplug_notifier = {
2928+static struct notifier_block cpu_pmu_hotplug_notifier = {
2929 .notifier_call = cpu_pmu_notify,
2930 };
2931
2932diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
2933index 5bc2615..4f1a0c2 100644
2934--- a/arch/arm/kernel/process.c
2935+++ b/arch/arm/kernel/process.c
2936@@ -28,10 +28,10 @@
2937 #include <linux/tick.h>
2938 #include <linux/utsname.h>
2939 #include <linux/uaccess.h>
2940-#include <linux/random.h>
2941 #include <linux/hw_breakpoint.h>
2942 #include <linux/cpuidle.h>
2943 #include <linux/leds.h>
2944+#include <linux/random.h>
2945
2946 #include <asm/cacheflush.h>
2947 #include <asm/idmap.h>
2948@@ -223,6 +223,7 @@ void machine_power_off(void)
2949
2950 if (pm_power_off)
2951 pm_power_off();
2952+ BUG();
2953 }
2954
2955 /*
2956@@ -236,7 +237,7 @@ void machine_power_off(void)
2957 * executing pre-reset code, and using RAM that the primary CPU's code wishes
2958 * to use. Implementing such co-ordination would be essentially impossible.
2959 */
2960-void machine_restart(char *cmd)
2961+__noreturn void machine_restart(char *cmd)
2962 {
2963 smp_send_stop();
2964
2965@@ -258,8 +259,8 @@ void __show_regs(struct pt_regs *regs)
2966
2967 show_regs_print_info(KERN_DEFAULT);
2968
2969- print_symbol("PC is at %s\n", instruction_pointer(regs));
2970- print_symbol("LR is at %s\n", regs->ARM_lr);
2971+ printk("PC is at %pA\n", (void *)instruction_pointer(regs));
2972+ printk("LR is at %pA\n", (void *)regs->ARM_lr);
2973 printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
2974 "sp : %08lx ip : %08lx fp : %08lx\n",
2975 regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
2976@@ -426,12 +427,6 @@ unsigned long get_wchan(struct task_struct *p)
2977 return 0;
2978 }
2979
2980-unsigned long arch_randomize_brk(struct mm_struct *mm)
2981-{
2982- unsigned long range_end = mm->brk + 0x02000000;
2983- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
2984-}
2985-
2986 #ifdef CONFIG_MMU
2987 #ifdef CONFIG_KUSER_HELPERS
2988 /*
2989@@ -447,7 +442,7 @@ static struct vm_area_struct gate_vma = {
2990
2991 static int __init gate_vma_init(void)
2992 {
2993- gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
2994+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
2995 return 0;
2996 }
2997 arch_initcall(gate_vma_init);
2998@@ -466,48 +461,23 @@ int in_gate_area_no_mm(unsigned long addr)
2999 {
3000 return in_gate_area(NULL, addr);
3001 }
3002-#define is_gate_vma(vma) ((vma) = &gate_vma)
3003+#define is_gate_vma(vma) ((vma) == &gate_vma)
3004 #else
3005 #define is_gate_vma(vma) 0
3006 #endif
3007
3008 const char *arch_vma_name(struct vm_area_struct *vma)
3009 {
3010- return is_gate_vma(vma) ? "[vectors]" :
3011- (vma->vm_mm && vma->vm_start == vma->vm_mm->context.sigpage) ?
3012- "[sigpage]" : NULL;
3013+ return is_gate_vma(vma) ? "[vectors]" : NULL;
3014 }
3015
3016-static struct page *signal_page;
3017-extern struct page *get_signal_page(void);
3018-
3019 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
3020 {
3021 struct mm_struct *mm = current->mm;
3022- unsigned long addr;
3023- int ret;
3024-
3025- if (!signal_page)
3026- signal_page = get_signal_page();
3027- if (!signal_page)
3028- return -ENOMEM;
3029
3030 down_write(&mm->mmap_sem);
3031- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
3032- if (IS_ERR_VALUE(addr)) {
3033- ret = addr;
3034- goto up_fail;
3035- }
3036-
3037- ret = install_special_mapping(mm, addr, PAGE_SIZE,
3038- VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
3039- &signal_page);
3040-
3041- if (ret == 0)
3042- mm->context.sigpage = addr;
3043-
3044- up_fail:
3045+ mm->context.sigpage = (PAGE_OFFSET + (get_random_int() % 0x3FFEFFE0)) & 0xFFFFFFFC;
3046 up_write(&mm->mmap_sem);
3047- return ret;
3048+ return 0;
3049 }
3050 #endif
3051diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
3052index 3653164..d83e55d 100644
3053--- a/arch/arm/kernel/psci.c
3054+++ b/arch/arm/kernel/psci.c
3055@@ -24,7 +24,7 @@
3056 #include <asm/opcodes-virt.h>
3057 #include <asm/psci.h>
3058
3059-struct psci_operations psci_ops;
3060+struct psci_operations psci_ops __read_only;
3061
3062 static int (*invoke_psci_fn)(u32, u32, u32, u32);
3063
3064diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
3065index 03deeff..741ce88 100644
3066--- a/arch/arm/kernel/ptrace.c
3067+++ b/arch/arm/kernel/ptrace.c
3068@@ -937,10 +937,19 @@ static int tracehook_report_syscall(struct pt_regs *regs,
3069 return current_thread_info()->syscall;
3070 }
3071
3072+#ifdef CONFIG_GRKERNSEC_SETXID
3073+extern void gr_delayed_cred_worker(void);
3074+#endif
3075+
3076 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
3077 {
3078 current_thread_info()->syscall = scno;
3079
3080+#ifdef CONFIG_GRKERNSEC_SETXID
3081+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
3082+ gr_delayed_cred_worker();
3083+#endif
3084+
3085 /* Do the secure computing check first; failures should be fast. */
3086 if (secure_computing(scno) == -1)
3087 return -1;
3088diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
3089index b4b1d39..efdc9be 100644
3090--- a/arch/arm/kernel/setup.c
3091+++ b/arch/arm/kernel/setup.c
3092@@ -97,21 +97,23 @@ EXPORT_SYMBOL(system_serial_high);
3093 unsigned int elf_hwcap __read_mostly;
3094 EXPORT_SYMBOL(elf_hwcap);
3095
3096+pteval_t __supported_pte_mask __read_only;
3097+pmdval_t __supported_pmd_mask __read_only;
3098
3099 #ifdef MULTI_CPU
3100-struct processor processor __read_mostly;
3101+struct processor processor;
3102 #endif
3103 #ifdef MULTI_TLB
3104-struct cpu_tlb_fns cpu_tlb __read_mostly;
3105+struct cpu_tlb_fns cpu_tlb __read_only;
3106 #endif
3107 #ifdef MULTI_USER
3108-struct cpu_user_fns cpu_user __read_mostly;
3109+struct cpu_user_fns cpu_user __read_only;
3110 #endif
3111 #ifdef MULTI_CACHE
3112-struct cpu_cache_fns cpu_cache __read_mostly;
3113+struct cpu_cache_fns cpu_cache __read_only;
3114 #endif
3115 #ifdef CONFIG_OUTER_CACHE
3116-struct outer_cache_fns outer_cache __read_mostly;
3117+struct outer_cache_fns outer_cache __read_only;
3118 EXPORT_SYMBOL(outer_cache);
3119 #endif
3120
3121@@ -236,9 +238,13 @@ static int __get_cpu_architecture(void)
3122 asm("mrc p15, 0, %0, c0, c1, 4"
3123 : "=r" (mmfr0));
3124 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
3125- (mmfr0 & 0x000000f0) >= 0x00000030)
3126+ (mmfr0 & 0x000000f0) >= 0x00000030) {
3127 cpu_arch = CPU_ARCH_ARMv7;
3128- else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3129+ if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
3130+ __supported_pte_mask |= L_PTE_PXN;
3131+ __supported_pmd_mask |= PMD_PXNTABLE;
3132+ }
3133+ } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3134 (mmfr0 & 0x000000f0) == 0x00000020)
3135 cpu_arch = CPU_ARCH_ARMv6;
3136 else
3137@@ -479,7 +485,7 @@ static void __init setup_processor(void)
3138 __cpu_architecture = __get_cpu_architecture();
3139
3140 #ifdef MULTI_CPU
3141- processor = *list->proc;
3142+ memcpy((void *)&processor, list->proc, sizeof processor);
3143 #endif
3144 #ifdef MULTI_TLB
3145 cpu_tlb = *list->tlb;
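__supported_pte_mask and __supported_pmd_mask start out empty and only gain the PXN bits when the CPU advertises support: ID_MMFR0[3:0] is the VMSA support field, and values 4 and 5 indicate VMSAv7 with the PXN bit (5 additionally implying the long-descriptor format). The masks are then folded into user page-table entries by the pgalloc.h and pgtable.h hunks earlier in this patch, so user pages become privileged-execute-never only on hardware that has the bit. Condensed from the hunk above (this runs only on the CPU_ARCH_ARMv7 path):

/* Sketch extracted from __get_cpu_architecture() above. */
static void probe_pxn(void)
{
	unsigned int mmfr0;

	asm("mrc p15, 0, %0, c0, c1, 4" : "=r" (mmfr0));	/* ID_MMFR0 */
	if ((mmfr0 & 0xf) == 0x4 || (mmfr0 & 0xf) == 0x5) {
		__supported_pte_mask |= L_PTE_PXN;
		__supported_pmd_mask |= PMD_PXNTABLE;
	}
}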
3146diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
3147index 5a42c12..a2bb7c6 100644
3148--- a/arch/arm/kernel/signal.c
3149+++ b/arch/arm/kernel/signal.c
3150@@ -45,8 +45,6 @@ static const unsigned long sigreturn_codes[7] = {
3151 MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
3152 };
3153
3154-static unsigned long signal_return_offset;
3155-
3156 #ifdef CONFIG_CRUNCH
3157 static int preserve_crunch_context(struct crunch_sigframe __user *frame)
3158 {
3159@@ -406,8 +404,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
3160 * except when the MPU has protected the vectors
3161 * page from PL0
3162 */
3163- retcode = mm->context.sigpage + signal_return_offset +
3164- (idx << 2) + thumb;
3165+ retcode = mm->context.sigpage + (idx << 2) + thumb;
3166 } else
3167 #endif
3168 {
3169@@ -611,33 +608,3 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
3170 } while (thread_flags & _TIF_WORK_MASK);
3171 return 0;
3172 }
3173-
3174-struct page *get_signal_page(void)
3175-{
3176- unsigned long ptr;
3177- unsigned offset;
3178- struct page *page;
3179- void *addr;
3180-
3181- page = alloc_pages(GFP_KERNEL, 0);
3182-
3183- if (!page)
3184- return NULL;
3185-
3186- addr = page_address(page);
3187-
3188- /* Give the signal return code some randomness */
3189- offset = 0x200 + (get_random_int() & 0x7fc);
3190- signal_return_offset = offset;
3191-
3192- /*
3193- * Copy signal return handlers into the vector page, and
3194- * set sigreturn to be a pointer to these.
3195- */
3196- memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
3197-
3198- ptr = (unsigned long)addr + offset;
3199- flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
3200-
3201- return page;
3202-}
3203diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
3204index 5919eb4..b5d6dfe 100644
3205--- a/arch/arm/kernel/smp.c
3206+++ b/arch/arm/kernel/smp.c
3207@@ -70,7 +70,7 @@ enum ipi_msg_type {
3208
3209 static DECLARE_COMPLETION(cpu_running);
3210
3211-static struct smp_operations smp_ops;
3212+static struct smp_operations smp_ops __read_only;
3213
3214 void __init smp_set_ops(struct smp_operations *ops)
3215 {
3216diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
3217index 6b9567e..b8af2d6 100644
3218--- a/arch/arm/kernel/traps.c
3219+++ b/arch/arm/kernel/traps.c
3220@@ -55,7 +55,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
3221 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
3222 {
3223 #ifdef CONFIG_KALLSYMS
3224- printk("[<%08lx>] (%pS) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
3225+ printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
3226 #else
3227 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
3228 #endif
3229@@ -257,6 +257,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
3230 static int die_owner = -1;
3231 static unsigned int die_nest_count;
3232
3233+extern void gr_handle_kernel_exploit(void);
3234+
3235 static unsigned long oops_begin(void)
3236 {
3237 int cpu;
3238@@ -299,6 +301,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
3239 panic("Fatal exception in interrupt");
3240 if (panic_on_oops)
3241 panic("Fatal exception");
3242+
3243+ gr_handle_kernel_exploit();
3244+
3245 if (signr)
3246 do_exit(signr);
3247 }
3248@@ -592,7 +597,9 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
3249 * The user helper at 0xffff0fe0 must be used instead.
3250 * (see entry-armv.S for details)
3251 */
3252+ pax_open_kernel();
3253 *((unsigned int *)0xffff0ff0) = regs->ARM_r0;
3254+ pax_close_kernel();
3255 }
3256 return 0;
3257
3258@@ -848,5 +855,9 @@ void __init early_trap_init(void *vectors_base)
3259 kuser_init(vectors_base);
3260
3261 flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
3262- modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
3263+
3264+#ifndef CONFIG_PAX_MEMORY_UDEREF
3265+ modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
3266+#endif
3267+
3268 }
3269diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
3270index 33f2ea3..0b91824 100644
3271--- a/arch/arm/kernel/vmlinux.lds.S
3272+++ b/arch/arm/kernel/vmlinux.lds.S
3273@@ -8,7 +8,11 @@
3274 #include <asm/thread_info.h>
3275 #include <asm/memory.h>
3276 #include <asm/page.h>
3277-
3278+
3279+#ifdef CONFIG_PAX_KERNEXEC
3280+#include <asm/pgtable.h>
3281+#endif
3282+
3283 #define PROC_INFO \
3284 . = ALIGN(4); \
3285 VMLINUX_SYMBOL(__proc_info_begin) = .; \
3286@@ -94,6 +98,11 @@ SECTIONS
3287 _text = .;
3288 HEAD_TEXT
3289 }
3290+
3291+#ifdef CONFIG_PAX_KERNEXEC
3292+ . = ALIGN(1<<SECTION_SHIFT);
3293+#endif
3294+
3295 .text : { /* Real text segment */
3296 _stext = .; /* Text and read-only data */
3297 __exception_text_start = .;
3298@@ -116,6 +125,8 @@ SECTIONS
3299 ARM_CPU_KEEP(PROC_INFO)
3300 }
3301
3302+ _etext = .; /* End of text section */
3303+
3304 RO_DATA(PAGE_SIZE)
3305
3306 . = ALIGN(4);
3307@@ -146,7 +157,9 @@ SECTIONS
3308
3309 NOTES
3310
3311- _etext = .; /* End of text and rodata section */
3312+#ifdef CONFIG_PAX_KERNEXEC
3313+ . = ALIGN(1<<SECTION_SHIFT);
3314+#endif
3315
3316 #ifndef CONFIG_XIP_KERNEL
3317 . = ALIGN(PAGE_SIZE);
3318@@ -224,6 +237,11 @@ SECTIONS
3319 . = PAGE_OFFSET + TEXT_OFFSET;
3320 #else
3321 __init_end = .;
3322+
3323+#ifdef CONFIG_PAX_KERNEXEC
3324+ . = ALIGN(1<<SECTION_SHIFT);
3325+#endif
3326+
3327 . = ALIGN(THREAD_SIZE);
3328 __data_loc = .;
3329 #endif
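The ALIGN(1<<SECTION_SHIFT) insertions pad every permission boundary (after the head text, after the rodata/NOTES block, after __init_end) out to an ARM section, and _etext moves up so it marks the end of text proper rather than text plus rodata. Together this lets the RX/RW memory types from the mach/map.h hunk be applied with section-granularity page tables. A quick check of the rounding, assuming the non-LPAE 1 MiB section size:

#include <stdio.h>

#define SECTION_SHIFT 20	/* assumption: 1 MiB sections, non-LPAE */

int main(void)
{
	unsigned long end_of_text = 0xc05f2340UL;	/* made-up link address */
	unsigned long mask = (1UL << SECTION_SHIFT) - 1;

	printf("%#lx -> %#lx\n", end_of_text, (end_of_text + mask) & ~mask);
	return 0;	/* prints 0xc05f2340 -> 0xc0600000 */
}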
3330diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
3331index 14a0d98..7771a7d 100644
3332--- a/arch/arm/lib/clear_user.S
3333+++ b/arch/arm/lib/clear_user.S
3334@@ -12,14 +12,14 @@
3335
3336 .text
3337
3338-/* Prototype: int __clear_user(void *addr, size_t sz)
3339+/* Prototype: int ___clear_user(void *addr, size_t sz)
3340 * Purpose : clear some user memory
3341 * Params : addr - user memory address to clear
3342 * : sz - number of bytes to clear
3343 * Returns : number of bytes NOT cleared
3344 */
3345 ENTRY(__clear_user_std)
3346-WEAK(__clear_user)
3347+WEAK(___clear_user)
3348 stmfd sp!, {r1, lr}
3349 mov r2, #0
3350 cmp r1, #4
3351@@ -44,7 +44,7 @@ WEAK(__clear_user)
3352 USER( strnebt r2, [r0])
3353 mov r0, #0
3354 ldmfd sp!, {r1, pc}
3355-ENDPROC(__clear_user)
3356+ENDPROC(___clear_user)
3357 ENDPROC(__clear_user_std)
3358
3359 .pushsection .fixup,"ax"
3360diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
3361index 66a477a..bee61d3 100644
3362--- a/arch/arm/lib/copy_from_user.S
3363+++ b/arch/arm/lib/copy_from_user.S
3364@@ -16,7 +16,7 @@
3365 /*
3366 * Prototype:
3367 *
3368- * size_t __copy_from_user(void *to, const void *from, size_t n)
3369+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
3370 *
3371 * Purpose:
3372 *
3373@@ -84,11 +84,11 @@
3374
3375 .text
3376
3377-ENTRY(__copy_from_user)
3378+ENTRY(___copy_from_user)
3379
3380 #include "copy_template.S"
3381
3382-ENDPROC(__copy_from_user)
3383+ENDPROC(___copy_from_user)
3384
3385 .pushsection .fixup,"ax"
3386 .align 0
3387diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
3388index 6ee2f67..d1cce76 100644
3389--- a/arch/arm/lib/copy_page.S
3390+++ b/arch/arm/lib/copy_page.S
3391@@ -10,6 +10,7 @@
3392 * ASM optimised string functions
3393 */
3394 #include <linux/linkage.h>
3395+#include <linux/const.h>
3396 #include <asm/assembler.h>
3397 #include <asm/asm-offsets.h>
3398 #include <asm/cache.h>
3399diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
3400index d066df6..df28194 100644
3401--- a/arch/arm/lib/copy_to_user.S
3402+++ b/arch/arm/lib/copy_to_user.S
3403@@ -16,7 +16,7 @@
3404 /*
3405 * Prototype:
3406 *
3407- * size_t __copy_to_user(void *to, const void *from, size_t n)
3408+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
3409 *
3410 * Purpose:
3411 *
3412@@ -88,11 +88,11 @@
3413 .text
3414
3415 ENTRY(__copy_to_user_std)
3416-WEAK(__copy_to_user)
3417+WEAK(___copy_to_user)
3418
3419 #include "copy_template.S"
3420
3421-ENDPROC(__copy_to_user)
3422+ENDPROC(___copy_to_user)
3423 ENDPROC(__copy_to_user_std)
3424
3425 .pushsection .fixup,"ax"
3426diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
3427index 7d08b43..f7ca7ea 100644
3428--- a/arch/arm/lib/csumpartialcopyuser.S
3429+++ b/arch/arm/lib/csumpartialcopyuser.S
3430@@ -57,8 +57,8 @@
3431 * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
3432 */
3433
3434-#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
3435-#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
3436+#define FN_ENTRY ENTRY(__csum_partial_copy_from_user)
3437+#define FN_EXIT ENDPROC(__csum_partial_copy_from_user)
3438
3439 #include "csumpartialcopygeneric.S"
3440
3441diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
3442index 64dbfa5..84a3fd9 100644
3443--- a/arch/arm/lib/delay.c
3444+++ b/arch/arm/lib/delay.c
3445@@ -28,7 +28,7 @@
3446 /*
3447 * Default to the loop-based delay implementation.
3448 */
3449-struct arm_delay_ops arm_delay_ops = {
3450+struct arm_delay_ops arm_delay_ops __read_only = {
3451 .delay = __loop_delay,
3452 .const_udelay = __loop_const_udelay,
3453 .udelay = __loop_udelay,
3454diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
3455index 025f742..8432b08 100644
3456--- a/arch/arm/lib/uaccess_with_memcpy.c
3457+++ b/arch/arm/lib/uaccess_with_memcpy.c
3458@@ -104,7 +104,7 @@ out:
3459 }
3460
3461 unsigned long
3462-__copy_to_user(void __user *to, const void *from, unsigned long n)
3463+___copy_to_user(void __user *to, const void *from, unsigned long n)
3464 {
3465 /*
3466 * This test is stubbed out of the main function above to keep
3467diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
3468index f389228..592ef66 100644
3469--- a/arch/arm/mach-kirkwood/common.c
3470+++ b/arch/arm/mach-kirkwood/common.c
3471@@ -149,7 +149,16 @@ static void clk_gate_fn_disable(struct clk_hw *hw)
3472 clk_gate_ops.disable(hw);
3473 }
3474
3475-static struct clk_ops clk_gate_fn_ops;
3476+static int clk_gate_fn_is_enabled(struct clk_hw *hw)
3477+{
3478+ return clk_gate_ops.is_enabled(hw);
3479+}
3480+
3481+static struct clk_ops clk_gate_fn_ops = {
3482+ .enable = clk_gate_fn_enable,
3483+ .disable = clk_gate_fn_disable,
3484+ .is_enabled = clk_gate_fn_is_enabled,
3485+};
3486
3487 static struct clk __init *clk_register_gate_fn(struct device *dev,
3488 const char *name,
3489@@ -183,14 +192,6 @@ static struct clk __init *clk_register_gate_fn(struct device *dev,
3490 gate_fn->fn_en = fn_en;
3491 gate_fn->fn_dis = fn_dis;
3492
3493- /* ops is the gate ops, but with our enable/disable functions */
3494- if (clk_gate_fn_ops.enable != clk_gate_fn_enable ||
3495- clk_gate_fn_ops.disable != clk_gate_fn_disable) {
3496- clk_gate_fn_ops = clk_gate_ops;
3497- clk_gate_fn_ops.enable = clk_gate_fn_enable;
3498- clk_gate_fn_ops.disable = clk_gate_fn_disable;
3499- }
3500-
3501 clk = clk_register(dev, &gate_fn->gate.hw);
3502
3503 if (IS_ERR(clk))
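This rewrite serves the ops-constification seen throughout the patch: the old code copied clk_gate_ops into a writable static on first use and patched two members, which faults once structures of this kind are made write-protected. Initializing clk_gate_fn_ops statically, with an explicit is_enabled forwarder, means the object never needs a runtime write; the gpmc_irq_chip hunk below applies the same pattern. The shape of the pattern in isolation (the types and names here are illustrative):

struct ops {
	int (*enable)(void);
	int (*disable)(void);
};

static int my_enable(void)  { return 1; }
static int my_disable(void) { return 0; }

/* before: a writable struct patched at runtime, i.e.
 *	static struct ops o;
 *	o.enable = my_enable; o.disable = my_disable;
 * after: fully initialized at compile time, eligible for .rodata: */
static const struct ops o = {
	.enable	 = my_enable,
	.disable = my_disable,
};

int main(void) { return o.enable() - 1; }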
3504diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
3505index f6eeb87..cc90868 100644
3506--- a/arch/arm/mach-omap2/board-n8x0.c
3507+++ b/arch/arm/mach-omap2/board-n8x0.c
3508@@ -631,7 +631,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
3509 }
3510 #endif
3511
3512-static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
3513+static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
3514 .late_init = n8x0_menelaus_late_init,
3515 };
3516
3517diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
3518index 6c4da12..d9ca72d 100644
3519--- a/arch/arm/mach-omap2/gpmc.c
3520+++ b/arch/arm/mach-omap2/gpmc.c
3521@@ -147,7 +147,6 @@ struct omap3_gpmc_regs {
3522 };
3523
3524 static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
3525-static struct irq_chip gpmc_irq_chip;
3526 static unsigned gpmc_irq_start;
3527
3528 static struct resource gpmc_mem_root;
3529@@ -711,6 +710,18 @@ static void gpmc_irq_noop(struct irq_data *data) { }
3530
3531 static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
3532
3533+static struct irq_chip gpmc_irq_chip = {
3534+ .name = "gpmc",
3535+ .irq_startup = gpmc_irq_noop_ret,
3536+ .irq_enable = gpmc_irq_enable,
3537+ .irq_disable = gpmc_irq_disable,
3538+ .irq_shutdown = gpmc_irq_noop,
3539+ .irq_ack = gpmc_irq_noop,
3540+ .irq_mask = gpmc_irq_noop,
3541+ .irq_unmask = gpmc_irq_noop,
3542+
3543+};
3544+
3545 static int gpmc_setup_irq(void)
3546 {
3547 int i;
3548@@ -725,15 +736,6 @@ static int gpmc_setup_irq(void)
3549 return gpmc_irq_start;
3550 }
3551
3552- gpmc_irq_chip.name = "gpmc";
3553- gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
3554- gpmc_irq_chip.irq_enable = gpmc_irq_enable;
3555- gpmc_irq_chip.irq_disable = gpmc_irq_disable;
3556- gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
3557- gpmc_irq_chip.irq_ack = gpmc_irq_noop;
3558- gpmc_irq_chip.irq_mask = gpmc_irq_noop;
3559- gpmc_irq_chip.irq_unmask = gpmc_irq_noop;
3560-
3561 gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
3562 gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;
3563
3564diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
3565index f8bb3b9..831e7b8 100644
3566--- a/arch/arm/mach-omap2/omap-wakeupgen.c
3567+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
3568@@ -339,7 +339,7 @@ static int __cpuinit irq_cpu_hotplug_notify(struct notifier_block *self,
3569 return NOTIFY_OK;
3570 }
3571
3572-static struct notifier_block __refdata irq_hotplug_notifier = {
3573+static struct notifier_block irq_hotplug_notifier = {
3574 .notifier_call = irq_cpu_hotplug_notify,
3575 };
3576
3577diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
3578index e6d2307..d057195 100644
3579--- a/arch/arm/mach-omap2/omap_device.c
3580+++ b/arch/arm/mach-omap2/omap_device.c
3581@@ -499,7 +499,7 @@ void omap_device_delete(struct omap_device *od)
3582 struct platform_device __init *omap_device_build(const char *pdev_name,
3583 int pdev_id,
3584 struct omap_hwmod *oh,
3585- void *pdata, int pdata_len)
3586+ const void *pdata, int pdata_len)
3587 {
3588 struct omap_hwmod *ohs[] = { oh };
3589
3590@@ -527,7 +527,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name,
3591 struct platform_device __init *omap_device_build_ss(const char *pdev_name,
3592 int pdev_id,
3593 struct omap_hwmod **ohs,
3594- int oh_cnt, void *pdata,
3595+ int oh_cnt, const void *pdata,
3596 int pdata_len)
3597 {
3598 int ret = -ENOMEM;
3599diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h
3600index 044c31d..2ee0861 100644
3601--- a/arch/arm/mach-omap2/omap_device.h
3602+++ b/arch/arm/mach-omap2/omap_device.h
3603@@ -72,12 +72,12 @@ int omap_device_idle(struct platform_device *pdev);
3604 /* Core code interface */
3605
3606 struct platform_device *omap_device_build(const char *pdev_name, int pdev_id,
3607- struct omap_hwmod *oh, void *pdata,
3608+ struct omap_hwmod *oh, const void *pdata,
3609 int pdata_len);
3610
3611 struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id,
3612 struct omap_hwmod **oh, int oh_cnt,
3613- void *pdata, int pdata_len);
3614+ const void *pdata, int pdata_len);
3615
3616 struct omap_device *omap_device_alloc(struct platform_device *pdev,
3617 struct omap_hwmod **ohs, int oh_cnt);
3618diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
3619index 7341eff..fd75e34 100644
3620--- a/arch/arm/mach-omap2/omap_hwmod.c
3621+++ b/arch/arm/mach-omap2/omap_hwmod.c
3622@@ -194,10 +194,10 @@ struct omap_hwmod_soc_ops {
3623 int (*init_clkdm)(struct omap_hwmod *oh);
3624 void (*update_context_lost)(struct omap_hwmod *oh);
3625 int (*get_context_lost)(struct omap_hwmod *oh);
3626-};
3627+} __no_const;
3628
3629 /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
3630-static struct omap_hwmod_soc_ops soc_ops;
3631+static struct omap_hwmod_soc_ops soc_ops __read_only;
3632
3633 /* omap_hwmod_list contains all registered struct omap_hwmods */
3634 static LIST_HEAD(omap_hwmod_list);
3635diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
3636index d15c7bb..b2d1f0c 100644
3637--- a/arch/arm/mach-omap2/wd_timer.c
3638+++ b/arch/arm/mach-omap2/wd_timer.c
3639@@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
3640 struct omap_hwmod *oh;
3641 char *oh_name = "wd_timer2";
3642 char *dev_name = "omap_wdt";
3643- struct omap_wd_timer_platform_data pdata;
3644+ static struct omap_wd_timer_platform_data pdata = {
3645+ .read_reset_sources = prm_read_reset_sources
3646+ };
3647
3648 if (!cpu_class_is_omap2() || of_have_populated_dt())
3649 return 0;
3650@@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
3651 return -EINVAL;
3652 }
3653
3654- pdata.read_reset_sources = prm_read_reset_sources;
3655-
3656 pdev = omap_device_build(dev_name, id, oh, &pdata,
3657 sizeof(struct omap_wd_timer_platform_data));
3658 WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",
3659diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
3660index 0cdba8d..297993e 100644
3661--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
3662+++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
3663@@ -181,7 +181,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
3664 bool entered_lp2 = false;
3665
3666 if (tegra_pending_sgi())
3667- ACCESS_ONCE(abort_flag) = true;
3668+ ACCESS_ONCE_RW(abort_flag) = true;
3669
3670 cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
3671
3672diff --git a/arch/arm/mach-ux500/setup.h b/arch/arm/mach-ux500/setup.h
3673index cad3ca86..1d79e0f 100644
3674--- a/arch/arm/mach-ux500/setup.h
3675+++ b/arch/arm/mach-ux500/setup.h
3676@@ -37,13 +37,6 @@ extern void ux500_timer_init(void);
3677 .type = MT_DEVICE, \
3678 }
3679
3680-#define __MEM_DEV_DESC(x, sz) { \
3681- .virtual = IO_ADDRESS(x), \
3682- .pfn = __phys_to_pfn(x), \
3683- .length = sz, \
3684- .type = MT_MEMORY, \
3685-}
3686-
3687 extern struct smp_operations ux500_smp_ops;
3688 extern void ux500_cpu_die(unsigned int cpu);
3689
3690diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
3691index 2950082..d0f0782 100644
3692--- a/arch/arm/mm/Kconfig
3693+++ b/arch/arm/mm/Kconfig
3694@@ -436,7 +436,7 @@ config CPU_32v5
3695
3696 config CPU_32v6
3697 bool
3698- select CPU_USE_DOMAINS if CPU_V6 && MMU
3699+ select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3700 select TLS_REG_EMUL if !CPU_32v6K && !MMU
3701
3702 config CPU_32v6K
3703@@ -585,6 +585,7 @@ config CPU_CP15_MPU
3704
3705 config CPU_USE_DOMAINS
3706 bool
3707+ depends on !ARM_LPAE && !PAX_KERNEXEC
3708 help
3709 This option enables or disables the use of domain switching
3710 via the set_fs() function.
3711@@ -780,6 +781,7 @@ config NEED_KUSER_HELPERS
3712 config KUSER_HELPERS
3713 bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
3714 default y
3715+ depends on !(CPU_V6 || CPU_V6K || CPU_V7)
3716 help
3717 Warning: disabling this option may break user programs.
3718
3719@@ -790,7 +792,7 @@ config KUSER_HELPERS
3720 run on ARMv4 through to ARMv7 without modification.
3721
3722 However, the fixed address nature of these helpers can be used
3723- by ROP (return orientated programming) authors when creating
3724+ by ROP (Return Oriented Programming) authors when creating
3725 exploits.
3726
3727 If all of the binaries and libraries which run on your platform
3728diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
3729index 6f4585b..7b6f52b 100644
3730--- a/arch/arm/mm/alignment.c
3731+++ b/arch/arm/mm/alignment.c
3732@@ -211,10 +211,12 @@ union offset_union {
3733 #define __get16_unaligned_check(ins,val,addr) \
3734 do { \
3735 unsigned int err = 0, v, a = addr; \
3736+ pax_open_userland(); \
3737 __get8_unaligned_check(ins,v,a,err); \
3738 val = v << ((BE) ? 8 : 0); \
3739 __get8_unaligned_check(ins,v,a,err); \
3740 val |= v << ((BE) ? 0 : 8); \
3741+ pax_close_userland(); \
3742 if (err) \
3743 goto fault; \
3744 } while (0)
3745@@ -228,6 +230,7 @@ union offset_union {
3746 #define __get32_unaligned_check(ins,val,addr) \
3747 do { \
3748 unsigned int err = 0, v, a = addr; \
3749+ pax_open_userland(); \
3750 __get8_unaligned_check(ins,v,a,err); \
3751 val = v << ((BE) ? 24 : 0); \
3752 __get8_unaligned_check(ins,v,a,err); \
3753@@ -236,6 +239,7 @@ union offset_union {
3754 val |= v << ((BE) ? 8 : 16); \
3755 __get8_unaligned_check(ins,v,a,err); \
3756 val |= v << ((BE) ? 0 : 24); \
3757+ pax_close_userland(); \
3758 if (err) \
3759 goto fault; \
3760 } while (0)
3761@@ -249,6 +253,7 @@ union offset_union {
3762 #define __put16_unaligned_check(ins,val,addr) \
3763 do { \
3764 unsigned int err = 0, v = val, a = addr; \
3765+ pax_open_userland(); \
3766 __asm__( FIRST_BYTE_16 \
3767 ARM( "1: "ins" %1, [%2], #1\n" ) \
3768 THUMB( "1: "ins" %1, [%2]\n" ) \
3769@@ -268,6 +273,7 @@ union offset_union {
3770 " .popsection\n" \
3771 : "=r" (err), "=&r" (v), "=&r" (a) \
3772 : "0" (err), "1" (v), "2" (a)); \
3773+ pax_close_userland(); \
3774 if (err) \
3775 goto fault; \
3776 } while (0)
3777@@ -281,6 +287,7 @@ union offset_union {
3778 #define __put32_unaligned_check(ins,val,addr) \
3779 do { \
3780 unsigned int err = 0, v = val, a = addr; \
3781+ pax_open_userland(); \
3782 __asm__( FIRST_BYTE_32 \
3783 ARM( "1: "ins" %1, [%2], #1\n" ) \
3784 THUMB( "1: "ins" %1, [%2]\n" ) \
3785@@ -310,6 +317,7 @@ union offset_union {
3786 " .popsection\n" \
3787 : "=r" (err), "=&r" (v), "=&r" (a) \
3788 : "0" (err), "1" (v), "2" (a)); \
3789+ pax_close_userland(); \
3790 if (err) \
3791 goto fault; \
3792 } while (0)
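Under PAX_MEMORY_UDEREF the kernel cannot normally dereference userland addresses, yet the alignment fixup handler has to read or write the faulting user word; each unaligned accessor macro is therefore bracketed by pax_open_userland()/pax_close_userland(), which open a temporary access window around exactly the accesses that need it, much like a scoped lock. A sketch of that bracketing discipline, with stub bodies, since the real primitives flip the ARM domain register:

/* Sketch: scoped "open/close" bracketing around a privileged access.
 * open_userland()/close_userland() are stand-ins for the PaX
 * primitives, not real kernel API. */
static void open_userland(void)  { /* grant userland access */ }
static void close_userland(void) { /* revoke it again */ }

static int read_user_byte(const unsigned char *uaddr, unsigned char *out)
{
	open_userland();
	*out = *uaddr;		/* the one access that needs the window */
	close_userland();	/* window closed before returning */
	return 0;
}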
3793diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
3794index 5dbf13f..ee1ec24 100644
3795--- a/arch/arm/mm/fault.c
3796+++ b/arch/arm/mm/fault.c
3797@@ -25,6 +25,7 @@
3798 #include <asm/system_misc.h>
3799 #include <asm/system_info.h>
3800 #include <asm/tlbflush.h>
3801+#include <asm/sections.h>
3802
3803 #include "fault.h"
3804
3805@@ -138,6 +139,20 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
3806 if (fixup_exception(regs))
3807 return;
3808
3809+#ifdef CONFIG_PAX_KERNEXEC
3810+ if ((fsr & FSR_WRITE) &&
3811+ (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
3812+ (MODULES_VADDR <= addr && addr < MODULES_END)))
3813+ {
3814+ if (current->signal->curr_ip)
3815+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3816+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3817+ else
3818+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
3819+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3820+ }
3821+#endif
3822+
3823 /*
3824 * No handler, we'll have to terminate things with extreme prejudice.
3825 */
3826@@ -174,6 +189,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
3827 }
3828 #endif
3829
3830+#ifdef CONFIG_PAX_PAGEEXEC
3831+ if (fsr & FSR_LNX_PF) {
3832+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
3833+ do_group_exit(SIGKILL);
3834+ }
3835+#endif
3836+
3837 tsk->thread.address = addr;
3838 tsk->thread.error_code = fsr;
3839 tsk->thread.trap_no = 14;
3840@@ -398,6 +420,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3841 }
3842 #endif /* CONFIG_MMU */
3843
3844+#ifdef CONFIG_PAX_PAGEEXEC
3845+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3846+{
3847+ long i;
3848+
3849+ printk(KERN_ERR "PAX: bytes at PC: ");
3850+ for (i = 0; i < 20; i++) {
3851+ unsigned char c;
3852+ if (get_user(c, (__force unsigned char __user *)pc+i))
3853+ printk(KERN_CONT "?? ");
3854+ else
3855+ printk(KERN_CONT "%02x ", c);
3856+ }
3857+ printk("\n");
3858+
3859+ printk(KERN_ERR "PAX: bytes at SP-4: ");
3860+ for (i = -1; i < 20; i++) {
3861+ unsigned long c;
3862+ if (get_user(c, (__force unsigned long __user *)sp+i))
3863+ printk(KERN_CONT "???????? ");
3864+ else
3865+ printk(KERN_CONT "%08lx ", c);
3866+ }
3867+ printk("\n");
3868+}
3869+#endif
3870+
3871 /*
3872 * First Level Translation Fault Handler
3873 *
3874@@ -543,9 +592,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3875 const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
3876 struct siginfo info;
3877
3878+#ifdef CONFIG_PAX_MEMORY_UDEREF
3879+ if (addr < TASK_SIZE && is_domain_fault(fsr)) {
3880+ if (current->signal->curr_ip)
3881+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3882+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3883+ else
3884+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
3885+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3886+ goto die;
3887+ }
3888+#endif
3889+
3890 if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
3891 return;
3892
3893+die:
3894 printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
3895 inf->name, fsr, addr);
3896
3897@@ -569,15 +631,67 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *
3898 ifsr_info[nr].name = name;
3899 }
3900
3901+asmlinkage int sys_sigreturn(struct pt_regs *regs);
3902+asmlinkage int sys_rt_sigreturn(struct pt_regs *regs);
3903+
3904 asmlinkage void __exception
3905 do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
3906 {
3907 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
3908 struct siginfo info;
3909
3910+ if (user_mode(regs)) {
3911+ unsigned long sigpage = current->mm->context.sigpage;
3912+
3913+ if (sigpage <= addr && addr < sigpage + 7*4) {
3914+ if (addr < sigpage + 3*4)
3915+ sys_sigreturn(regs);
3916+ else
3917+ sys_rt_sigreturn(regs);
3918+ return;
3919+ }
3920+ if (addr == 0xffff0fe0UL) {
3921+ /*
3922+ * PaX: __kuser_get_tls emulation
3923+ */
3924+ regs->ARM_r0 = current_thread_info()->tp_value;
3925+ regs->ARM_pc = regs->ARM_lr;
3926+ return;
3927+ }
3928+ }
3929+
3930+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
3931+ else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) {
3932+ if (current->signal->curr_ip)
3933+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3934+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
3935+ addr >= TASK_SIZE ? "non-executable kernel" : "userland", addr);
3936+ else
3937+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
3938+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
3939+ addr >= TASK_SIZE ? "non-executable kernel" : "userland", addr);
3940+ goto die;
3941+ }
3942+#endif
3943+
3944+#ifdef CONFIG_PAX_REFCOUNT
3945+ if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
3946+ unsigned int bkpt;
3947+
3948+ if (!probe_kernel_address((unsigned int *)addr, bkpt) && bkpt == 0xe12f1073) {
3949+ current->thread.error_code = ifsr;
3950+ current->thread.trap_no = 0;
3951+ pax_report_refcount_overflow(regs);
3952+ fixup_exception(regs);
3953+ return;
3954+ }
3955+ }
3956+#endif
3957+
3958 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
3959 return;
3960
3961+die:
3962 printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
3963 inf->name, ifsr, addr);
3964
3965diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
3966index cf08bdf..772656c 100644
3967--- a/arch/arm/mm/fault.h
3968+++ b/arch/arm/mm/fault.h
3969@@ -3,6 +3,7 @@
3970
3971 /*
3972 * Fault status register encodings. We steal bit 31 for our own purposes.
3973+ * Set when the FSR value is from an instruction fault.
3974 */
3975 #define FSR_LNX_PF (1 << 31)
3976 #define FSR_WRITE (1 << 11)
3977@@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr)
3978 }
3979 #endif
3980
3981+/* valid for LPAE and !LPAE */
3982+static inline int is_xn_fault(unsigned int fsr)
3983+{
3984+ return ((fsr_fs(fsr) & 0x3c) == 0xc);
3985+}
3986+
3987+static inline int is_domain_fault(unsigned int fsr)
3988+{
3989+ return ((fsr_fs(fsr) & 0xD) == 0x9);
3990+}
3991+
3992 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
3993 unsigned long search_exception_table(unsigned long addr);
3994
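The two helpers added to fault.h classify a fault from the FSR status bits alone; restricting attention to the classic 4-bit FS field, the mask arithmetic (fs & 0xD) == 0x9 in is_domain_fault() accepts exactly fs = 0b1001 (section domain fault) and fs = 0b1011 (page domain fault). A quick self-check of that claim, ignoring the bit-4 extension of the field:

/* Worked check of the is_domain_fault() mask over the 4-bit FS
 * values: only 0b1001 and 0b1011 satisfy (fs & 0xD) == 0x9. */
#include <assert.h>

static int is_domain_fault_fs(unsigned int fs)
{
	return (fs & 0xD) == 0x9;
}

int main(void)
{
	for (unsigned int fs = 0; fs < 16; fs++)
		assert(is_domain_fault_fs(fs) == (fs == 0x9 || fs == 0xB));
	return 0;
}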
3995diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
3996index 0ecc43f..190b956 100644
3997--- a/arch/arm/mm/init.c
3998+++ b/arch/arm/mm/init.c
3999@@ -30,6 +30,8 @@
4000 #include <asm/setup.h>
4001 #include <asm/tlb.h>
4002 #include <asm/fixmap.h>
4003+#include <asm/system_info.h>
4004+#include <asm/cp15.h>
4005
4006 #include <asm/mach/arch.h>
4007 #include <asm/mach/map.h>
4008@@ -726,7 +728,46 @@ void free_initmem(void)
4009 {
4010 #ifdef CONFIG_HAVE_TCM
4011 extern char __tcm_start, __tcm_end;
4012+#endif
4013
4014+#ifdef CONFIG_PAX_KERNEXEC
4015+ unsigned long addr;
4016+ pgd_t *pgd;
4017+ pud_t *pud;
4018+ pmd_t *pmd;
4019+ int cpu_arch = cpu_architecture();
4020+ unsigned int cr = get_cr();
4021+
4022+ if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
4023+ /* make pages tables, etc before .text NX */
4024+ for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) {
4025+ pgd = pgd_offset_k(addr);
4026+ pud = pud_offset(pgd, addr);
4027+ pmd = pmd_offset(pud, addr);
4028+ __section_update(pmd, addr, PMD_SECT_XN);
4029+ }
4030+ /* make init NX */
4031+ for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
4032+ pgd = pgd_offset_k(addr);
4033+ pud = pud_offset(pgd, addr);
4034+ pmd = pmd_offset(pud, addr);
4035+ __section_update(pmd, addr, PMD_SECT_XN);
4036+ }
4037+ /* make kernel code/rodata RX */
4038+ for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
4039+ pgd = pgd_offset_k(addr);
4040+ pud = pud_offset(pgd, addr);
4041+ pmd = pmd_offset(pud, addr);
4042+#ifdef CONFIG_ARM_LPAE
4043+ __section_update(pmd, addr, PMD_SECT_RDONLY);
4044+#else
4045+ __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
4046+#endif
4047+ }
4048+ }
4049+#endif
4050+
4051+#ifdef CONFIG_HAVE_TCM
4052 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
4053 free_reserved_area(&__tcm_start, &__tcm_end, 0, "TCM link");
4054 #endif
4055diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
4056index 04d9006..c547d85 100644
4057--- a/arch/arm/mm/ioremap.c
4058+++ b/arch/arm/mm/ioremap.c
4059@@ -392,9 +392,9 @@ __arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
4060 unsigned int mtype;
4061
4062 if (cached)
4063- mtype = MT_MEMORY;
4064+ mtype = MT_MEMORY_RX;
4065 else
4066- mtype = MT_MEMORY_NONCACHED;
4067+ mtype = MT_MEMORY_NONCACHED_RX;
4068
4069 return __arm_ioremap_caller(phys_addr, size, mtype,
4070 __builtin_return_address(0));
4071diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
4072index 10062ce..8695745 100644
4073--- a/arch/arm/mm/mmap.c
4074+++ b/arch/arm/mm/mmap.c
4075@@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4076 struct vm_area_struct *vma;
4077 int do_align = 0;
4078 int aliasing = cache_is_vipt_aliasing();
4079+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4080 struct vm_unmapped_area_info info;
4081
4082 /*
4083@@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4084 if (len > TASK_SIZE)
4085 return -ENOMEM;
4086
4087+#ifdef CONFIG_PAX_RANDMMAP
4088+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4089+#endif
4090+
4091 if (addr) {
4092 if (do_align)
4093 addr = COLOUR_ALIGN(addr, pgoff);
4094@@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4095 addr = PAGE_ALIGN(addr);
4096
4097 vma = find_vma(mm, addr);
4098- if (TASK_SIZE - len >= addr &&
4099- (!vma || addr + len <= vma->vm_start))
4100+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4101 return addr;
4102 }
4103
4104@@ -99,6 +103,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4105 info.high_limit = TASK_SIZE;
4106 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4107 info.align_offset = pgoff << PAGE_SHIFT;
4108+ info.threadstack_offset = offset;
4109 return vm_unmapped_area(&info);
4110 }
4111
4112@@ -112,6 +117,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4113 unsigned long addr = addr0;
4114 int do_align = 0;
4115 int aliasing = cache_is_vipt_aliasing();
4116+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4117 struct vm_unmapped_area_info info;
4118
4119 /*
4120@@ -132,6 +138,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4121 return addr;
4122 }
4123
4124+#ifdef CONFIG_PAX_RANDMMAP
4125+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4126+#endif
4127+
4128 /* requesting a specific address */
4129 if (addr) {
4130 if (do_align)
4131@@ -139,8 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4132 else
4133 addr = PAGE_ALIGN(addr);
4134 vma = find_vma(mm, addr);
4135- if (TASK_SIZE - len >= addr &&
4136- (!vma || addr + len <= vma->vm_start))
4137+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4138 return addr;
4139 }
4140
4141@@ -150,6 +159,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4142 info.high_limit = mm->mmap_base;
4143 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4144 info.align_offset = pgoff << PAGE_SHIFT;
4145+ info.threadstack_offset = offset;
4146 addr = vm_unmapped_area(&info);
4147
4148 /*
4149@@ -173,6 +183,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4150 {
4151 unsigned long random_factor = 0UL;
4152
4153+#ifdef CONFIG_PAX_RANDMMAP
4154+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4155+#endif
4156+
4157 /* 8 bits of randomness in 20 address space bits */
4158 if ((current->flags & PF_RANDOMIZE) &&
4159 !(current->personality & ADDR_NO_RANDOMIZE))
4160@@ -180,10 +194,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4161
4162 if (mmap_is_legacy()) {
4163 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4164+
4165+#ifdef CONFIG_PAX_RANDMMAP
4166+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4167+ mm->mmap_base += mm->delta_mmap;
4168+#endif
4169+
4170 mm->get_unmapped_area = arch_get_unmapped_area;
4171 mm->unmap_area = arch_unmap_area;
4172 } else {
4173 mm->mmap_base = mmap_base(random_factor);
4174+
4175+#ifdef CONFIG_PAX_RANDMMAP
4176+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4177+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4178+#endif
4179+
4180 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4181 mm->unmap_area = arch_unmap_area_topdown;
4182 }
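With PAX_RANDMMAP enabled, arch_pick_mmap_layout() layers a per-process delta_mmap on top of the existing 8-bit random_factor, pushing the legacy bottom-up mmap base higher, and pulling the top-down base below the randomized stack gap by delta_mmap + delta_stack. A toy computation showing the direction of the shift; all constants here are illustrative, not the kernel's:

/* Toy model of the base adjustment; the base and deltas are
 * made-up numbers chosen only to show the arithmetic. */
#include <stdio.h>

int main(void)
{
	unsigned long task_unmapped_base = 0x40000000UL;
	unsigned long random_factor = 0x00350000UL;	/* PF_RANDOMIZE part */
	unsigned long delta_mmap    = 0x01200000UL;	/* PaX per-exec delta */

	unsigned long legacy_base = task_unmapped_base + random_factor
				  + delta_mmap;		/* bottom-up layout */
	printf("legacy mmap base: %#lx\n", legacy_base);
	return 0;
}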
4183diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
4184index daf336f..4e6392c 100644
4185--- a/arch/arm/mm/mmu.c
4186+++ b/arch/arm/mm/mmu.c
4187@@ -36,6 +36,22 @@
4188 #include "mm.h"
4189 #include "tcm.h"
4190
4191+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4192+void modify_domain(unsigned int dom, unsigned int type)
4193+{
4194+ struct thread_info *thread = current_thread_info();
4195+ unsigned int domain = thread->cpu_domain;
4196+ /*
4197+ * DOMAIN_MANAGER might be defined to some other value,
4198+ * use the arch-defined constant
4199+ */
4200+ domain &= ~domain_val(dom, 3);
4201+ thread->cpu_domain = domain | domain_val(dom, type);
4202+ set_domain(thread->cpu_domain);
4203+}
4204+EXPORT_SYMBOL(modify_domain);
4205+#endif
4206+
4207 /*
4208 * empty_zero_page is a special page that is used for
4209 * zero-initialized data and COW.
4210@@ -228,10 +244,18 @@ __setup("noalign", noalign_setup);
4211
4212 #endif /* ifdef CONFIG_CPU_CP15 / else */
4213
4214-#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
4215+#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY
4216 #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
4217
4218-static struct mem_type mem_types[] = {
4219+#ifdef CONFIG_PAX_KERNEXEC
4220+#define L_PTE_KERNEXEC L_PTE_RDONLY
4221+#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY
4222+#else
4223+#define L_PTE_KERNEXEC L_PTE_DIRTY
4224+#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE
4225+#endif
4226+
4227+static struct mem_type mem_types[] __read_only = {
4228 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
4229 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
4230 L_PTE_SHARED,
4231@@ -260,16 +284,16 @@ static struct mem_type mem_types[] = {
4232 [MT_UNCACHED] = {
4233 .prot_pte = PROT_PTE_DEVICE,
4234 .prot_l1 = PMD_TYPE_TABLE,
4235- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4236+ .prot_sect = PROT_SECT_DEVICE,
4237 .domain = DOMAIN_IO,
4238 },
4239 [MT_CACHECLEAN] = {
4240- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4241+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4242 .domain = DOMAIN_KERNEL,
4243 },
4244 #ifndef CONFIG_ARM_LPAE
4245 [MT_MINICLEAN] = {
4246- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
4247+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_RDONLY,
4248 .domain = DOMAIN_KERNEL,
4249 },
4250 #endif
4251@@ -277,36 +301,54 @@ static struct mem_type mem_types[] = {
4252 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4253 L_PTE_RDONLY,
4254 .prot_l1 = PMD_TYPE_TABLE,
4255- .domain = DOMAIN_USER,
4256+ .domain = DOMAIN_VECTORS,
4257 },
4258 [MT_HIGH_VECTORS] = {
4259 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4260 L_PTE_USER | L_PTE_RDONLY,
4261 .prot_l1 = PMD_TYPE_TABLE,
4262- .domain = DOMAIN_USER,
4263+ .domain = DOMAIN_VECTORS,
4264 },
4265- [MT_MEMORY] = {
4266+ [MT_MEMORY_RWX] = {
4267 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4268 .prot_l1 = PMD_TYPE_TABLE,
4269 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4270 .domain = DOMAIN_KERNEL,
4271 },
4272+ [MT_MEMORY_RW] = {
4273+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4274+ .prot_l1 = PMD_TYPE_TABLE,
4275+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4276+ .domain = DOMAIN_KERNEL,
4277+ },
4278+ [MT_MEMORY_RX] = {
4279+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4280+ .prot_l1 = PMD_TYPE_TABLE,
4281+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4282+ .domain = DOMAIN_KERNEL,
4283+ },
4284 [MT_ROM] = {
4285- .prot_sect = PMD_TYPE_SECT,
4286+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4287 .domain = DOMAIN_KERNEL,
4288 },
4289- [MT_MEMORY_NONCACHED] = {
4290+ [MT_MEMORY_NONCACHED_RW] = {
4291 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4292 L_PTE_MT_BUFFERABLE,
4293 .prot_l1 = PMD_TYPE_TABLE,
4294 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4295 .domain = DOMAIN_KERNEL,
4296 },
4297+ [MT_MEMORY_NONCACHED_RX] = {
4298+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
4299+ L_PTE_MT_BUFFERABLE,
4300+ .prot_l1 = PMD_TYPE_TABLE,
4301+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4302+ .domain = DOMAIN_KERNEL,
4303+ },
4304 [MT_MEMORY_DTCM] = {
4305- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4306- L_PTE_XN,
4307+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4308 .prot_l1 = PMD_TYPE_TABLE,
4309- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4310+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4311 .domain = DOMAIN_KERNEL,
4312 },
4313 [MT_MEMORY_ITCM] = {
4314@@ -316,10 +358,10 @@ static struct mem_type mem_types[] = {
4315 },
4316 [MT_MEMORY_SO] = {
4317 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4318- L_PTE_MT_UNCACHED | L_PTE_XN,
4319+ L_PTE_MT_UNCACHED,
4320 .prot_l1 = PMD_TYPE_TABLE,
4321 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
4322- PMD_SECT_UNCACHED | PMD_SECT_XN,
4323+ PMD_SECT_UNCACHED,
4324 .domain = DOMAIN_KERNEL,
4325 },
4326 [MT_MEMORY_DMA_READY] = {
4327@@ -405,9 +447,35 @@ static void __init build_mem_type_table(void)
4328 * to prevent speculative instruction fetches.
4329 */
4330 mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
4331+ mem_types[MT_DEVICE].prot_pte |= L_PTE_XN;
4332 mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
4333+ mem_types[MT_DEVICE_NONSHARED].prot_pte |= L_PTE_XN;
4334 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
4335+ mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_XN;
4336 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
4337+ mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_XN;
4338+
4339+ /* Mark other regions on ARMv6+ as execute-never */
4340+
4341+#ifdef CONFIG_PAX_KERNEXEC
4342+ mem_types[MT_UNCACHED].prot_sect |= PMD_SECT_XN;
4343+ mem_types[MT_UNCACHED].prot_pte |= L_PTE_XN;
4344+ mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_XN;
4345+ mem_types[MT_CACHECLEAN].prot_pte |= L_PTE_XN;
4346+#ifndef CONFIG_ARM_LPAE
4347+ mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_XN;
4348+ mem_types[MT_MINICLEAN].prot_pte |= L_PTE_XN;
4349+#endif
4350+ mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_XN;
4351+ mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_XN;
4352+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_XN;
4353+ mem_types[MT_MEMORY_NONCACHED_RW].prot_pte |= PMD_SECT_XN;
4354+ mem_types[MT_MEMORY_DTCM].prot_sect |= PMD_SECT_XN;
4355+ mem_types[MT_MEMORY_DTCM].prot_pte |= L_PTE_XN;
4356+#endif
4357+
4358+ mem_types[MT_MEMORY_SO].prot_sect |= PMD_SECT_XN;
4359+ mem_types[MT_MEMORY_SO].prot_pte |= L_PTE_XN;
4360 }
4361 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4362 /*
4363@@ -468,6 +536,9 @@ static void __init build_mem_type_table(void)
4364 * from SVC mode and no access from userspace.
4365 */
4366 mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4367+#ifdef CONFIG_PAX_KERNEXEC
4368+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4369+#endif
4370 mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4371 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4372 #endif
4373@@ -485,11 +556,17 @@ static void __init build_mem_type_table(void)
4374 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
4375 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
4376 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
4377- mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
4378- mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
4379+ mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4380+ mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4381+ mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
4382+ mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
4383+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
4384+ mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
4385 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
4386- mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
4387- mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
4388+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_S;
4389+ mem_types[MT_MEMORY_NONCACHED_RW].prot_pte |= L_PTE_SHARED;
4390+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= PMD_SECT_S;
4391+ mem_types[MT_MEMORY_NONCACHED_RX].prot_pte |= L_PTE_SHARED;
4392 }
4393 }
4394
4395@@ -500,15 +577,20 @@ static void __init build_mem_type_table(void)
4396 if (cpu_arch >= CPU_ARCH_ARMv6) {
4397 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4398 /* Non-cacheable Normal is XCB = 001 */
4399- mem_types[MT_MEMORY_NONCACHED].prot_sect |=
4400+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |=
4401+ PMD_SECT_BUFFERED;
4402+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |=
4403 PMD_SECT_BUFFERED;
4404 } else {
4405 /* For both ARMv6 and non-TEX-remapping ARMv7 */
4406- mem_types[MT_MEMORY_NONCACHED].prot_sect |=
4407+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |=
4408+ PMD_SECT_TEX(1);
4409+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |=
4410 PMD_SECT_TEX(1);
4411 }
4412 } else {
4413- mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4414+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_BUFFERABLE;
4415+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= PMD_SECT_BUFFERABLE;
4416 }
4417
4418 #ifdef CONFIG_ARM_LPAE
4419@@ -524,6 +606,8 @@ static void __init build_mem_type_table(void)
4420 vecs_pgprot |= PTE_EXT_AF;
4421 #endif
4422
4423+ user_pgprot |= __supported_pte_mask;
4424+
4425 for (i = 0; i < 16; i++) {
4426 pteval_t v = pgprot_val(protection_map[i]);
4427 protection_map[i] = __pgprot(v | user_pgprot);
4428@@ -541,10 +625,15 @@ static void __init build_mem_type_table(void)
4429
4430 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
4431 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
4432- mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
4433- mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
4434+ mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4435+ mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4436+ mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
4437+ mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
4438+ mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
4439+ mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
4440 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
4441- mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
4442+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= ecc_mask;
4443+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= ecc_mask;
4444 mem_types[MT_ROM].prot_sect |= cp->pmd;
4445
4446 switch (cp->pmd) {
4447@@ -1166,18 +1255,15 @@ void __init arm_mm_memblock_reserve(void)
4448 * called function. This means you can't use any function or debugging
4449 * method which may touch any device, otherwise the kernel _will_ crash.
4450 */
4451+
4452+static char vectors[PAGE_SIZE * 2] __read_only __aligned(PAGE_SIZE);
4453+
4454 static void __init devicemaps_init(struct machine_desc *mdesc)
4455 {
4456 struct map_desc map;
4457 unsigned long addr;
4458- void *vectors;
4459
4460- /*
4461- * Allocate the vector page early.
4462- */
4463- vectors = early_alloc(PAGE_SIZE * 2);
4464-
4465- early_trap_init(vectors);
4466+ early_trap_init(&vectors);
4467
4468 for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
4469 pmd_clear(pmd_off_k(addr));
4470@@ -1217,7 +1303,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
4471 * location (0xffff0000). If we aren't using high-vectors, also
4472 * create a mapping at the low-vectors virtual address.
4473 */
4474- map.pfn = __phys_to_pfn(virt_to_phys(vectors));
4475+ map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
4476 map.virtual = 0xffff0000;
4477 map.length = PAGE_SIZE;
4478 #ifdef CONFIG_KUSER_HELPERS
4479@@ -1287,8 +1373,39 @@ static void __init map_lowmem(void)
4480 map.pfn = __phys_to_pfn(start);
4481 map.virtual = __phys_to_virt(start);
4482 map.length = end - start;
4483- map.type = MT_MEMORY;
4484
4485+#ifdef CONFIG_PAX_KERNEXEC
4486+ if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
4487+ struct map_desc kernel;
4488+ struct map_desc initmap;
4489+
4490+ /* when freeing initmem we will make this RW */
4491+ initmap.pfn = __phys_to_pfn(__pa(__init_begin));
4492+ initmap.virtual = (unsigned long)__init_begin;
4493+ initmap.length = _sdata - __init_begin;
4494+ initmap.type = MT_MEMORY_RWX;
4495+ create_mapping(&initmap);
4496+
4497+ /* when freeing initmem we will make this RX */
4498+ kernel.pfn = __phys_to_pfn(__pa(_stext));
4499+ kernel.virtual = (unsigned long)_stext;
4500+ kernel.length = __init_begin - _stext;
4501+ kernel.type = MT_MEMORY_RWX;
4502+ create_mapping(&kernel);
4503+
4504+ if (map.virtual < (unsigned long)_stext) {
4505+ map.length = (unsigned long)_stext - map.virtual;
4506+ map.type = MT_MEMORY_RWX;
4507+ create_mapping(&map);
4508+ }
4509+
4510+ map.pfn = __phys_to_pfn(__pa(_sdata));
4511+ map.virtual = (unsigned long)_sdata;
4512+ map.length = end - __pa(_sdata);
4513+ }
4514+#endif
4515+
4516+ map.type = MT_MEMORY_RW;
4517 create_mapping(&map);
4518 }
4519 }
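The mmu.c changes split the old catch-all MT_MEMORY type into RWX, RW and RX variants and remap the kernel image accordingly: text and rodata become RX, data becomes RW, and only the init region stays RWX until free_initmem() tightens it, giving W^X at section granularity. A sketch of the resulting policy, with region names chosen for illustration:

/* Sketch: a W^X section policy table in the spirit of the
 * MT_MEMORY_{RWX,RW,RX} split. Names are illustrative. */
#include <stdbool.h>

struct region_policy {
	const char *region;
	bool writable;
	bool executable;	/* never both true except for init */
};

static const struct region_policy policy[] = {
	{ "kernel text+rodata", false, true  },	/* MT_MEMORY_RX  */
	{ "kernel data",        true,  false },	/* MT_MEMORY_RW  */
	{ "init (until freed)", true,  true  },	/* MT_MEMORY_RWX */
};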
4520diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
4521index a5bc92d..0bb4730 100644
4522--- a/arch/arm/plat-omap/sram.c
4523+++ b/arch/arm/plat-omap/sram.c
4524@@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
4525 * Looks like we need to preserve some bootloader code at the
4526 * beginning of SRAM for jumping to flash for reboot to work...
4527 */
4528+ pax_open_kernel();
4529 memset_io(omap_sram_base + omap_sram_skip, 0,
4530 omap_sram_size - omap_sram_skip);
4531+ pax_close_kernel();
4532 }
4533diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
4534index ce6d763..cfea917 100644
4535--- a/arch/arm/plat-samsung/include/plat/dma-ops.h
4536+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
4537@@ -47,7 +47,7 @@ struct samsung_dma_ops {
4538 int (*started)(unsigned ch);
4539 int (*flush)(unsigned ch);
4540 int (*stop)(unsigned ch);
4541-};
4542+} __no_const;
4543
4544 extern void *samsung_dmadev_get_ops(void);
4545 extern void *s3c_dma_get_ops(void);
4546diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
4547index 654f096..5546653 100644
4548--- a/arch/arm64/include/asm/tlb.h
4549+++ b/arch/arm64/include/asm/tlb.h
4550@@ -35,6 +35,7 @@ struct mmu_gather {
4551 struct mm_struct *mm;
4552 unsigned int fullmm;
4553 struct vm_area_struct *vma;
4554+ unsigned long start, end;
4555 unsigned long range_start;
4556 unsigned long range_end;
4557 unsigned int nr;
4558@@ -97,10 +98,12 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
4559 }
4560
4561 static inline void
4562-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm)
4563+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
4564 {
4565 tlb->mm = mm;
4566- tlb->fullmm = fullmm;
4567+ tlb->fullmm = !(start | (end+1));
4568+ tlb->start = start;
4569+ tlb->end = end;
4570 tlb->vma = NULL;
4571 tlb->max = ARRAY_SIZE(tlb->local);
4572 tlb->pages = tlb->local;
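The new tlb_gather_mmu() signature derives the full-mm flag from the range itself: a whole-address-space teardown is passed as start = 0, end = ~0UL, and start | (end + 1) is zero exactly for that sentinel pair. A quick check of the encoding:

/* The fullmm trick: !(start | (end + 1)) is true only for the
 * sentinel full-address-space range (0, ~0UL). */
#include <assert.h>

int main(void)
{
	unsigned long start = 0, end = ~0UL;
	assert(!(start | (end + 1)));	/* full mm flush */

	start = 0x1000; end = 0x2000;
	assert(start | (end + 1));	/* partial range */
	return 0;
}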
4573diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
4574index f4726dc..39ed646 100644
4575--- a/arch/arm64/kernel/debug-monitors.c
4576+++ b/arch/arm64/kernel/debug-monitors.c
4577@@ -149,7 +149,7 @@ static int __cpuinit os_lock_notify(struct notifier_block *self,
4578 return NOTIFY_OK;
4579 }
4580
4581-static struct notifier_block __cpuinitdata os_lock_nb = {
4582+static struct notifier_block os_lock_nb = {
4583 .notifier_call = os_lock_notify,
4584 };
4585
4586diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
4587index 5ab825c..96aaec8 100644
4588--- a/arch/arm64/kernel/hw_breakpoint.c
4589+++ b/arch/arm64/kernel/hw_breakpoint.c
4590@@ -831,7 +831,7 @@ static int __cpuinit hw_breakpoint_reset_notify(struct notifier_block *self,
4591 return NOTIFY_OK;
4592 }
4593
4594-static struct notifier_block __cpuinitdata hw_breakpoint_reset_nb = {
4595+static struct notifier_block hw_breakpoint_reset_nb = {
4596 .notifier_call = hw_breakpoint_reset_notify,
4597 };
4598
4599diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
4600index c3a58a1..78fbf54 100644
4601--- a/arch/avr32/include/asm/cache.h
4602+++ b/arch/avr32/include/asm/cache.h
4603@@ -1,8 +1,10 @@
4604 #ifndef __ASM_AVR32_CACHE_H
4605 #define __ASM_AVR32_CACHE_H
4606
4607+#include <linux/const.h>
4608+
4609 #define L1_CACHE_SHIFT 5
4610-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4611+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4612
4613 /*
4614 * Memory returned by kmalloc() may be used for DMA, so we must make
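This and the following cache.h hunks (blackfin, cris, frv, hexagon, ia64) all swap a plain (1 << L1_CACHE_SHIFT) for (_AC(1,UL) << L1_CACHE_SHIFT): the _AC macro from include/uapi/linux/const.h pastes the UL suffix in C, making the constant unsigned long where promotion and sign-extension rules matter, but drops it when the header is pulled into assembly, where a suffix would be a syntax error. Abridged from the mainline header:

/* _AC as defined in include/uapi/linux/const.h (abridged). */
#ifdef __ASSEMBLY__
#define _AC(X, Y)	X
#else
#define __AC(X, Y)	(X##Y)
#define _AC(X, Y)	__AC(X, Y)
#endif

#define L1_CACHE_SHIFT	5
#define L1_CACHE_BYTES	(_AC(1, UL) << L1_CACHE_SHIFT)	/* 32UL in C */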
4615diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
4616index d232888..87c8df1 100644
4617--- a/arch/avr32/include/asm/elf.h
4618+++ b/arch/avr32/include/asm/elf.h
4619@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
4620 the loader. We need to make sure that it is out of the way of the program
4621 that it will "exec", and that there is sufficient room for the brk. */
4622
4623-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
4624+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
4625
4626+#ifdef CONFIG_PAX_ASLR
4627+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
4628+
4629+#define PAX_DELTA_MMAP_LEN 15
4630+#define PAX_DELTA_STACK_LEN 15
4631+#endif
4632
4633 /* This yields a mask that user programs can use to figure out what
4634 instruction set this CPU supports. This could be done in user space,
4635diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
4636index 479330b..53717a8 100644
4637--- a/arch/avr32/include/asm/kmap_types.h
4638+++ b/arch/avr32/include/asm/kmap_types.h
4639@@ -2,9 +2,9 @@
4640 #define __ASM_AVR32_KMAP_TYPES_H
4641
4642 #ifdef CONFIG_DEBUG_HIGHMEM
4643-# define KM_TYPE_NR 29
4644+# define KM_TYPE_NR 30
4645 #else
4646-# define KM_TYPE_NR 14
4647+# define KM_TYPE_NR 15
4648 #endif
4649
4650 #endif /* __ASM_AVR32_KMAP_TYPES_H */
4651diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
4652index b2f2d2d..d1c85cb 100644
4653--- a/arch/avr32/mm/fault.c
4654+++ b/arch/avr32/mm/fault.c
4655@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
4656
4657 int exception_trace = 1;
4658
4659+#ifdef CONFIG_PAX_PAGEEXEC
4660+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4661+{
4662+ unsigned long i;
4663+
4664+ printk(KERN_ERR "PAX: bytes at PC: ");
4665+ for (i = 0; i < 20; i++) {
4666+ unsigned char c;
4667+ if (get_user(c, (unsigned char *)pc+i))
4668+ printk(KERN_CONT "???????? ");
4669+ else
4670+ printk(KERN_CONT "%02x ", c);
4671+ }
4672+ printk("\n");
4673+}
4674+#endif
4675+
4676 /*
4677 * This routine handles page faults. It determines the address and the
4678 * problem, and then passes it off to one of the appropriate routines.
4679@@ -174,6 +191,16 @@ bad_area:
4680 up_read(&mm->mmap_sem);
4681
4682 if (user_mode(regs)) {
4683+
4684+#ifdef CONFIG_PAX_PAGEEXEC
4685+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4686+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
4687+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
4688+ do_group_exit(SIGKILL);
4689+ }
4690+ }
4691+#endif
4692+
4693 if (exception_trace && printk_ratelimit())
4694 printk("%s%s[%d]: segfault at %08lx pc %08lx "
4695 "sp %08lx ecr %lu\n",
4696diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
4697index 568885a..f8008df 100644
4698--- a/arch/blackfin/include/asm/cache.h
4699+++ b/arch/blackfin/include/asm/cache.h
4700@@ -7,6 +7,7 @@
4701 #ifndef __ARCH_BLACKFIN_CACHE_H
4702 #define __ARCH_BLACKFIN_CACHE_H
4703
4704+#include <linux/const.h>
4705 #include <linux/linkage.h> /* for asmlinkage */
4706
4707 /*
4708@@ -14,7 +15,7 @@
4709 * Blackfin loads 32 bytes for cache
4710 */
4711 #define L1_CACHE_SHIFT 5
4712-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4713+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4714 #define SMP_CACHE_BYTES L1_CACHE_BYTES
4715
4716 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
4717diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
4718index aea2718..3639a60 100644
4719--- a/arch/cris/include/arch-v10/arch/cache.h
4720+++ b/arch/cris/include/arch-v10/arch/cache.h
4721@@ -1,8 +1,9 @@
4722 #ifndef _ASM_ARCH_CACHE_H
4723 #define _ASM_ARCH_CACHE_H
4724
4725+#include <linux/const.h>
4726 /* Etrax 100LX have 32-byte cache-lines. */
4727-#define L1_CACHE_BYTES 32
4728 #define L1_CACHE_SHIFT 5
4729+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4730
4731 #endif /* _ASM_ARCH_CACHE_H */
4732diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
4733index 7caf25d..ee65ac5 100644
4734--- a/arch/cris/include/arch-v32/arch/cache.h
4735+++ b/arch/cris/include/arch-v32/arch/cache.h
4736@@ -1,11 +1,12 @@
4737 #ifndef _ASM_CRIS_ARCH_CACHE_H
4738 #define _ASM_CRIS_ARCH_CACHE_H
4739
4740+#include <linux/const.h>
4741 #include <arch/hwregs/dma.h>
4742
4743 /* A cache-line is 32 bytes. */
4744-#define L1_CACHE_BYTES 32
4745 #define L1_CACHE_SHIFT 5
4746+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4747
4748 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4749
4750diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
4751index b86329d..6709906 100644
4752--- a/arch/frv/include/asm/atomic.h
4753+++ b/arch/frv/include/asm/atomic.h
4754@@ -186,6 +186,16 @@ static inline void atomic64_dec(atomic64_t *v)
4755 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
4756 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
4757
4758+#define atomic64_read_unchecked(v) atomic64_read(v)
4759+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4760+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4761+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4762+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4763+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4764+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4765+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4766+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4767+
4768 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
4769 {
4770 int c, old;
4771diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
4772index 2797163..c2a401d 100644
4773--- a/arch/frv/include/asm/cache.h
4774+++ b/arch/frv/include/asm/cache.h
4775@@ -12,10 +12,11 @@
4776 #ifndef __ASM_CACHE_H
4777 #define __ASM_CACHE_H
4778
4779+#include <linux/const.h>
4780
4781 /* bytes per L1 cache line */
4782 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
4783-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4784+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4785
4786 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
4787 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
4788diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
4789index 43901f2..0d8b865 100644
4790--- a/arch/frv/include/asm/kmap_types.h
4791+++ b/arch/frv/include/asm/kmap_types.h
4792@@ -2,6 +2,6 @@
4793 #ifndef _ASM_KMAP_TYPES_H
4794 #define _ASM_KMAP_TYPES_H
4795
4796-#define KM_TYPE_NR 17
4797+#define KM_TYPE_NR 18
4798
4799 #endif
4800diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
4801index 836f147..4cf23f5 100644
4802--- a/arch/frv/mm/elf-fdpic.c
4803+++ b/arch/frv/mm/elf-fdpic.c
4804@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4805 {
4806 struct vm_area_struct *vma;
4807 struct vm_unmapped_area_info info;
4808+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
4809
4810 if (len > TASK_SIZE)
4811 return -ENOMEM;
4812@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4813 if (addr) {
4814 addr = PAGE_ALIGN(addr);
4815 vma = find_vma(current->mm, addr);
4816- if (TASK_SIZE - len >= addr &&
4817- (!vma || addr + len <= vma->vm_start))
4818+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4819 goto success;
4820 }
4821
4822@@ -85,6 +85,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4823 info.high_limit = (current->mm->start_stack - 0x00200000);
4824 info.align_mask = 0;
4825 info.align_offset = 0;
4826+ info.threadstack_offset = offset;
4827 addr = vm_unmapped_area(&info);
4828 if (!(addr & ~PAGE_MASK))
4829 goto success;
4830diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
4831index f4ca594..adc72fd6 100644
4832--- a/arch/hexagon/include/asm/cache.h
4833+++ b/arch/hexagon/include/asm/cache.h
4834@@ -21,9 +21,11 @@
4835 #ifndef __ASM_CACHE_H
4836 #define __ASM_CACHE_H
4837
4838+#include <linux/const.h>
4839+
4840 /* Bytes per L1 cache line */
4841-#define L1_CACHE_SHIFT (5)
4842-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4843+#define L1_CACHE_SHIFT 5
4844+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4845
4846 #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
4847 #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
4848diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
4849index 6e6fe18..a6ae668 100644
4850--- a/arch/ia64/include/asm/atomic.h
4851+++ b/arch/ia64/include/asm/atomic.h
4852@@ -208,6 +208,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
4853 #define atomic64_inc(v) atomic64_add(1, (v))
4854 #define atomic64_dec(v) atomic64_sub(1, (v))
4855
4856+#define atomic64_read_unchecked(v) atomic64_read(v)
4857+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4858+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4859+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4860+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4861+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4862+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4863+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4864+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4865+
4866 /* Atomic operations are already serializing */
4867 #define smp_mb__before_atomic_dec() barrier()
4868 #define smp_mb__after_atomic_dec() barrier()
4869diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
4870index 988254a..e1ee885 100644
4871--- a/arch/ia64/include/asm/cache.h
4872+++ b/arch/ia64/include/asm/cache.h
4873@@ -1,6 +1,7 @@
4874 #ifndef _ASM_IA64_CACHE_H
4875 #define _ASM_IA64_CACHE_H
4876
4877+#include <linux/const.h>
4878
4879 /*
4880 * Copyright (C) 1998-2000 Hewlett-Packard Co
4881@@ -9,7 +10,7 @@
4882
4883 /* Bytes per L1 (data) cache line. */
4884 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
4885-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4886+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4887
4888 #ifdef CONFIG_SMP
4889 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
4890diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
4891index 5a83c5c..4d7f553 100644
4892--- a/arch/ia64/include/asm/elf.h
4893+++ b/arch/ia64/include/asm/elf.h
4894@@ -42,6 +42,13 @@
4895 */
4896 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
4897
4898+#ifdef CONFIG_PAX_ASLR
4899+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
4900+
4901+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
4902+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
4903+#endif
4904+
4905 #define PT_IA_64_UNWIND 0x70000001
4906
4907 /* IA-64 relocations: */
4908diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
4909index 96a8d92..617a1cf 100644
4910--- a/arch/ia64/include/asm/pgalloc.h
4911+++ b/arch/ia64/include/asm/pgalloc.h
4912@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
4913 pgd_val(*pgd_entry) = __pa(pud);
4914 }
4915
4916+static inline void
4917+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
4918+{
4919+ pgd_populate(mm, pgd_entry, pud);
4920+}
4921+
4922 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
4923 {
4924 return quicklist_alloc(0, GFP_KERNEL, NULL);
4925@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
4926 pud_val(*pud_entry) = __pa(pmd);
4927 }
4928
4929+static inline void
4930+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
4931+{
4932+ pud_populate(mm, pud_entry, pmd);
4933+}
4934+
4935 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
4936 {
4937 return quicklist_alloc(0, GFP_KERNEL, NULL);
4938diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
4939index 815810c..d60bd4c 100644
4940--- a/arch/ia64/include/asm/pgtable.h
4941+++ b/arch/ia64/include/asm/pgtable.h
4942@@ -12,7 +12,7 @@
4943 * David Mosberger-Tang <davidm@hpl.hp.com>
4944 */
4945
4946-
4947+#include <linux/const.h>
4948 #include <asm/mman.h>
4949 #include <asm/page.h>
4950 #include <asm/processor.h>
4951@@ -142,6 +142,17 @@
4952 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4953 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4954 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
4955+
4956+#ifdef CONFIG_PAX_PAGEEXEC
4957+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
4958+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4959+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4960+#else
4961+# define PAGE_SHARED_NOEXEC PAGE_SHARED
4962+# define PAGE_READONLY_NOEXEC PAGE_READONLY
4963+# define PAGE_COPY_NOEXEC PAGE_COPY
4964+#endif
4965+
4966 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
4967 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
4968 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
4969diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
4970index 54ff557..70c88b7 100644
4971--- a/arch/ia64/include/asm/spinlock.h
4972+++ b/arch/ia64/include/asm/spinlock.h
4973@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
4974 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
4975
4976 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
4977- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
4978+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
4979 }
4980
4981 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
4982diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
4983index ef3a9de..bc5efc7 100644
4984--- a/arch/ia64/include/asm/tlb.h
4985+++ b/arch/ia64/include/asm/tlb.h
4986@@ -22,7 +22,7 @@
4987 * unmapping a portion of the virtual address space, these hooks are called according to
4988 * the following template:
4989 *
4990- * tlb <- tlb_gather_mmu(mm, full_mm_flush); // start unmap for address space MM
4991+ * tlb <- tlb_gather_mmu(mm, start, end); // start unmap for address space MM
4992 * {
4993 * for each vma that needs a shootdown do {
4994 * tlb_start_vma(tlb, vma);
4995@@ -58,6 +58,7 @@ struct mmu_gather {
4996 unsigned int max;
4997 unsigned char fullmm; /* non-zero means full mm flush */
4998 unsigned char need_flush; /* really unmapped some PTEs? */
4999+ unsigned long start, end;
5000 unsigned long start_addr;
5001 unsigned long end_addr;
5002 struct page **pages;
5003@@ -155,13 +156,15 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)
5004
5005
5006 static inline void
5007-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
5008+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
5009 {
5010 tlb->mm = mm;
5011 tlb->max = ARRAY_SIZE(tlb->local);
5012 tlb->pages = tlb->local;
5013 tlb->nr = 0;
5014- tlb->fullmm = full_mm_flush;
5015+ tlb->fullmm = !(start | (end+1));
5016+ tlb->start = start;
5017+ tlb->end = end;
5018 tlb->start_addr = ~0UL;
5019 }
5020
5021diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
5022index 449c8c0..18965fb 100644
5023--- a/arch/ia64/include/asm/uaccess.h
5024+++ b/arch/ia64/include/asm/uaccess.h
5025@@ -240,12 +240,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
5026 static inline unsigned long
5027 __copy_to_user (void __user *to, const void *from, unsigned long count)
5028 {
5029+ if (count > INT_MAX)
5030+ return count;
5031+
5032+ if (!__builtin_constant_p(count))
5033+ check_object_size(from, count, true);
5034+
5035 return __copy_user(to, (__force void __user *) from, count);
5036 }
5037
5038 static inline unsigned long
5039 __copy_from_user (void *to, const void __user *from, unsigned long count)
5040 {
5041+ if (count > INT_MAX)
5042+ return count;
5043+
5044+ if (!__builtin_constant_p(count))
5045+ check_object_size(to, count, false);
5046+
5047 return __copy_user((__force void __user *) to, from, count);
5048 }
5049
5050@@ -255,10 +267,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5051 ({ \
5052 void __user *__cu_to = (to); \
5053 const void *__cu_from = (from); \
5054- long __cu_len = (n); \
5055+ unsigned long __cu_len = (n); \
5056 \
5057- if (__access_ok(__cu_to, __cu_len, get_fs())) \
5058+ if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
5059+ if (!__builtin_constant_p(n)) \
5060+ check_object_size(__cu_from, __cu_len, true); \
5061 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
5062+ } \
5063 __cu_len; \
5064 })
5065
5066@@ -266,11 +281,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5067 ({ \
5068 void *__cu_to = (to); \
5069 const void __user *__cu_from = (from); \
5070- long __cu_len = (n); \
5071+ unsigned long __cu_len = (n); \
5072 \
5073 __chk_user_ptr(__cu_from); \
5074- if (__access_ok(__cu_from, __cu_len, get_fs())) \
5075+ if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \
5076+ if (!__builtin_constant_p(n)) \
5077+ check_object_size(__cu_to, __cu_len, false); \
5078 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
5079+ } \
5080 __cu_len; \
5081 })
5082
5083diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c
5084index 2d67317..07d8bfa 100644
5085--- a/arch/ia64/kernel/err_inject.c
5086+++ b/arch/ia64/kernel/err_inject.c
5087@@ -256,7 +256,7 @@ static int __cpuinit err_inject_cpu_callback(struct notifier_block *nfb,
5088 return NOTIFY_OK;
5089 }
5090
5091-static struct notifier_block __cpuinitdata err_inject_cpu_notifier =
5092+static struct notifier_block err_inject_cpu_notifier =
5093 {
5094 .notifier_call = err_inject_cpu_callback,
5095 };
5096diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
5097index d7396db..b33e873 100644
5098--- a/arch/ia64/kernel/mca.c
5099+++ b/arch/ia64/kernel/mca.c
5100@@ -1922,7 +1922,7 @@ static int __cpuinit mca_cpu_callback(struct notifier_block *nfb,
5101 return NOTIFY_OK;
5102 }
5103
5104-static struct notifier_block mca_cpu_notifier __cpuinitdata = {
5105+static struct notifier_block mca_cpu_notifier = {
5106 .notifier_call = mca_cpu_callback
5107 };
5108
5109diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
5110index 24603be..948052d 100644
5111--- a/arch/ia64/kernel/module.c
5112+++ b/arch/ia64/kernel/module.c
5113@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
5114 void
5115 module_free (struct module *mod, void *module_region)
5116 {
5117- if (mod && mod->arch.init_unw_table &&
5118- module_region == mod->module_init) {
5119+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
5120 unw_remove_unwind_table(mod->arch.init_unw_table);
5121 mod->arch.init_unw_table = NULL;
5122 }
5123@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
5124 }
5125
5126 static inline int
5127+in_init_rx (const struct module *mod, uint64_t addr)
5128+{
5129+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
5130+}
5131+
5132+static inline int
5133+in_init_rw (const struct module *mod, uint64_t addr)
5134+{
5135+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
5136+}
5137+
5138+static inline int
5139 in_init (const struct module *mod, uint64_t addr)
5140 {
5141- return addr - (uint64_t) mod->module_init < mod->init_size;
5142+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
5143+}
5144+
5145+static inline int
5146+in_core_rx (const struct module *mod, uint64_t addr)
5147+{
5148+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
5149+}
5150+
5151+static inline int
5152+in_core_rw (const struct module *mod, uint64_t addr)
5153+{
5154+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
5155 }
5156
5157 static inline int
5158 in_core (const struct module *mod, uint64_t addr)
5159 {
5160- return addr - (uint64_t) mod->module_core < mod->core_size;
5161+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
5162 }
5163
5164 static inline int
5165@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
5166 break;
5167
5168 case RV_BDREL:
5169- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
5170+ if (in_init_rx(mod, val))
5171+ val -= (uint64_t) mod->module_init_rx;
5172+ else if (in_init_rw(mod, val))
5173+ val -= (uint64_t) mod->module_init_rw;
5174+ else if (in_core_rx(mod, val))
5175+ val -= (uint64_t) mod->module_core_rx;
5176+ else if (in_core_rw(mod, val))
5177+ val -= (uint64_t) mod->module_core_rw;
5178 break;
5179
5180 case RV_LTV:
5181@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
5182 * addresses have been selected...
5183 */
5184 uint64_t gp;
5185- if (mod->core_size > MAX_LTOFF)
5186+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
5187 /*
5188 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
5189 * at the end of the module.
5190 */
5191- gp = mod->core_size - MAX_LTOFF / 2;
5192+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
5193 else
5194- gp = mod->core_size / 2;
5195- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
5196+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
5197+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
5198 mod->arch.gp = gp;
5199 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
5200 }
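
The RX/RW split above turns each single-region membership test into two range checks of the form addr - base < size, the usual branch-free idiom: when addr lies below base the unsigned subtraction wraps to a huge value and the comparison fails. A small demonstration with hypothetical addresses:

#include <assert.h>
#include <stdint.h>

/* Branch-free range check used by the in_init_rx()/in_core_rw() helpers:
 * true iff base <= addr < base + size, relying on unsigned wraparound
 * when addr < base. */
static int in_range(uint64_t addr, uint64_t base, uint64_t size)
{
	return addr - base < size;
}

int main(void)
{
	const uint64_t base = 0xa000000100000000ULL, size = 0x4000;

	assert(in_range(base, base, size));             /* first byte */
	assert(in_range(base + size - 1, base, size));  /* last byte  */
	assert(!in_range(base + size, base, size));     /* one past   */
	assert(!in_range(base - 1, base, size));        /* wraps, fails */
	return 0;
}
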
5201diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
5202index 2b3c2d7..a318d84 100644
5203--- a/arch/ia64/kernel/palinfo.c
5204+++ b/arch/ia64/kernel/palinfo.c
5205@@ -980,7 +980,7 @@ static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb,
5206 return NOTIFY_OK;
5207 }
5208
5209-static struct notifier_block __refdata palinfo_cpu_notifier =
5210+static struct notifier_block palinfo_cpu_notifier =
5211 {
5212 .notifier_call = palinfo_cpu_callback,
5213 .priority = 0,
5214diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
5215index 4bc580a..7767f24 100644
5216--- a/arch/ia64/kernel/salinfo.c
5217+++ b/arch/ia64/kernel/salinfo.c
5218@@ -609,7 +609,7 @@ salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu
5219 return NOTIFY_OK;
5220 }
5221
5222-static struct notifier_block salinfo_cpu_notifier __cpuinitdata =
5223+static struct notifier_block salinfo_cpu_notifier =
5224 {
5225 .notifier_call = salinfo_cpu_callback,
5226 .priority = 0,
5227diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
5228index 41e33f8..65180b2 100644
5229--- a/arch/ia64/kernel/sys_ia64.c
5230+++ b/arch/ia64/kernel/sys_ia64.c
5231@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5232 unsigned long align_mask = 0;
5233 struct mm_struct *mm = current->mm;
5234 struct vm_unmapped_area_info info;
5235+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5236
5237 if (len > RGN_MAP_LIMIT)
5238 return -ENOMEM;
5239@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5240 if (REGION_NUMBER(addr) == RGN_HPAGE)
5241 addr = 0;
5242 #endif
5243+
5244+#ifdef CONFIG_PAX_RANDMMAP
5245+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5246+ addr = mm->free_area_cache;
5247+ else
5248+#endif
5249+
5250 if (!addr)
5251 addr = TASK_UNMAPPED_BASE;
5252
5253@@ -61,6 +69,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5254 info.high_limit = TASK_SIZE;
5255 info.align_mask = align_mask;
5256 info.align_offset = 0;
5257+ info.threadstack_offset = offset;
5258 return vm_unmapped_area(&info);
5259 }
5260
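
gr_rand_threadstack_offset() is a grsecurity helper (not shown in this hunk) that produces a randomized, page-aligned gap which vm_unmapped_area() applies next to new thread-stack mappings, so per-thread stacks don't sit at predictable distances from one another. A toy stand-in, range and names hypothetical:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT 12

/* Hypothetical stand-in for gr_rand_threadstack_offset(): a page-aligned
 * random gap, only applied when the mapping is a thread stack. */
static unsigned long rand_threadstack_offset(void)
{
	return ((unsigned long)rand() & 0xffUL) << PAGE_SHIFT;
}

int main(void)
{
	unsigned long base = 0x2000000000UL;   /* TASK_UNMAPPED_BASE-ish */
	unsigned long addr = base + rand_threadstack_offset();
	printf("thread stack candidate: %#lx\n", addr);
	return 0;
}
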
5261diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
5262index dc00b2c..cce53c2 100644
5263--- a/arch/ia64/kernel/topology.c
5264+++ b/arch/ia64/kernel/topology.c
5265@@ -445,7 +445,7 @@ static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
5266 return NOTIFY_OK;
5267 }
5268
5269-static struct notifier_block __cpuinitdata cache_cpu_notifier =
5270+static struct notifier_block cache_cpu_notifier =
5271 {
5272 .notifier_call = cache_cpu_callback
5273 };
5274diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
5275index 0ccb28f..8992469 100644
5276--- a/arch/ia64/kernel/vmlinux.lds.S
5277+++ b/arch/ia64/kernel/vmlinux.lds.S
5278@@ -198,7 +198,7 @@ SECTIONS {
5279 /* Per-cpu data: */
5280 . = ALIGN(PERCPU_PAGE_SIZE);
5281 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
5282- __phys_per_cpu_start = __per_cpu_load;
5283+ __phys_per_cpu_start = per_cpu_load;
5284 /*
5285 * ensure percpu data fits
5286 * into percpu page size
5287diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
5288index 6cf0341..d352594 100644
5289--- a/arch/ia64/mm/fault.c
5290+++ b/arch/ia64/mm/fault.c
5291@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
5292 return pte_present(pte);
5293 }
5294
5295+#ifdef CONFIG_PAX_PAGEEXEC
5296+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5297+{
5298+ unsigned long i;
5299+
5300+ printk(KERN_ERR "PAX: bytes at PC: ");
5301+ for (i = 0; i < 8; i++) {
5302+ unsigned int c;
5303+ if (get_user(c, (unsigned int *)pc+i))
5304+ printk(KERN_CONT "???????? ");
5305+ else
5306+ printk(KERN_CONT "%08x ", c);
5307+ }
5308+ printk("\n");
5309+}
5310+#endif
5311+
5312 # define VM_READ_BIT 0
5313 # define VM_WRITE_BIT 1
5314 # define VM_EXEC_BIT 2
5315@@ -149,8 +166,21 @@ retry:
5316 if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
5317 goto bad_area;
5318
5319- if ((vma->vm_flags & mask) != mask)
5320+ if ((vma->vm_flags & mask) != mask) {
5321+
5322+#ifdef CONFIG_PAX_PAGEEXEC
5323+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
5324+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
5325+ goto bad_area;
5326+
5327+ up_read(&mm->mmap_sem);
5328+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
5329+ do_group_exit(SIGKILL);
5330+ }
5331+#endif
5332+
5333 goto bad_area;
5334+ }
5335
5336 /*
5337 * If for any reason at all we couldn't handle the fault, make
5338diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
5339index 76069c1..c2aa816 100644
5340--- a/arch/ia64/mm/hugetlbpage.c
5341+++ b/arch/ia64/mm/hugetlbpage.c
5342@@ -149,6 +149,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5343 unsigned long pgoff, unsigned long flags)
5344 {
5345 struct vm_unmapped_area_info info;
5346+ unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
5347
5348 if (len > RGN_MAP_LIMIT)
5349 return -ENOMEM;
5350@@ -172,6 +173,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5351 info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
5352 info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
5353 info.align_offset = 0;
5354+ info.threadstack_offset = offset;
5355 return vm_unmapped_area(&info);
5356 }
5357
5358diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
5359index d1fe4b4..2628f37 100644
5360--- a/arch/ia64/mm/init.c
5361+++ b/arch/ia64/mm/init.c
5362@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
5363 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
5364 vma->vm_end = vma->vm_start + PAGE_SIZE;
5365 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
5366+
5367+#ifdef CONFIG_PAX_PAGEEXEC
5368+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
5369+ vma->vm_flags &= ~VM_EXEC;
5370+
5371+#ifdef CONFIG_PAX_MPROTECT
5372+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
5373+ vma->vm_flags &= ~VM_MAYEXEC;
5374+#endif
5375+
5376+ }
5377+#endif
5378+
5379 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5380 down_write(&current->mm->mmap_sem);
5381 if (insert_vm_struct(current->mm, vma)) {
5382diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
5383index 40b3ee9..8c2c112 100644
5384--- a/arch/m32r/include/asm/cache.h
5385+++ b/arch/m32r/include/asm/cache.h
5386@@ -1,8 +1,10 @@
5387 #ifndef _ASM_M32R_CACHE_H
5388 #define _ASM_M32R_CACHE_H
5389
5390+#include <linux/const.h>
5391+
5392 /* L1 cache line size */
5393 #define L1_CACHE_SHIFT 4
5394-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5395+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5396
5397 #endif /* _ASM_M32R_CACHE_H */
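
This is the first of many identical cache.h edits below (m68k, microblaze, MIPS, mn10300, openrisc, parisc, powerpc): deriving L1_CACHE_BYTES via _AC(1,UL) makes the constant an unsigned long in C while remaining usable from assembly, where a UL suffix would not parse. A simplified copy of the <uapi/linux/const.h> machinery shows why:

/* Simplified copy of the <uapi/linux/const.h> machinery. With
 * __ASSEMBLY__ defined, _AC(1,UL) stays a bare 1; in C it pastes to 1UL,
 * so (_AC(1,UL) << L1_CACHE_SHIFT) is an unsigned long expression and
 * cannot sign-extend or overflow int in shift-heavy mask arithmetic. */
#ifdef __ASSEMBLY__
#define _AC(X, Y)	X
#else
#define __AC(X, Y)	(X##Y)
#define _AC(X, Y)	__AC(X, Y)
#endif

#define L1_CACHE_SHIFT	4
#define L1_CACHE_BYTES	(_AC(1,UL) << L1_CACHE_SHIFT)

#include <stdio.h>
int main(void)
{
	printf("L1_CACHE_BYTES = %lu\n", L1_CACHE_BYTES); /* 16 */
	return 0;
}
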
5398diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
5399index 82abd15..d95ae5d 100644
5400--- a/arch/m32r/lib/usercopy.c
5401+++ b/arch/m32r/lib/usercopy.c
5402@@ -14,6 +14,9 @@
5403 unsigned long
5404 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5405 {
5406+ if ((long)n < 0)
5407+ return n;
5408+
5409 prefetch(from);
5410 if (access_ok(VERIFY_WRITE, to, n))
5411 __copy_user(to,from,n);
5412@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5413 unsigned long
5414 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
5415 {
5416+ if ((long)n < 0)
5417+ return n;
5418+
5419 prefetchw(to);
5420 if (access_ok(VERIFY_READ, from, n))
5421 __copy_user_zeroing(to,from,n);
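
The m32r helpers get the simpler guard: any length whose top bit is set, i.e. negative when viewed as long, is rejected before access_ok(), closing the classic hole where a negative size becomes an enormous unsigned copy. Illustration:

#include <stdio.h>

/* A size that went negative through an arithmetic error looks enormous
 * as unsigned long; the (long)n < 0 test catches it before any range
 * check that could be fooled by the wraparound. */
static unsigned long guarded_len(unsigned long n)
{
	if ((long)n < 0)
		return n;	/* refuse: report all bytes uncopied */
	return 0;		/* would proceed with the copy */
}

int main(void)
{
	unsigned long bad = (unsigned long)-4;	/* e.g. a length underflow */
	printf("refused=%lu ok=%lu\n", guarded_len(bad), guarded_len(64));
	return 0;
}
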
5422diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
5423index 0395c51..5f26031 100644
5424--- a/arch/m68k/include/asm/cache.h
5425+++ b/arch/m68k/include/asm/cache.h
5426@@ -4,9 +4,11 @@
5427 #ifndef __ARCH_M68K_CACHE_H
5428 #define __ARCH_M68K_CACHE_H
5429
5430+#include <linux/const.h>
5431+
5432 /* bytes per L1 cache line */
5433 #define L1_CACHE_SHIFT 4
5434-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
5435+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5436
5437 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5438
5439diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
5440index 3c52fa6..11b2ad8 100644
5441--- a/arch/metag/mm/hugetlbpage.c
5442+++ b/arch/metag/mm/hugetlbpage.c
5443@@ -200,6 +200,7 @@ hugetlb_get_unmapped_area_new_pmd(unsigned long len)
5444 info.high_limit = TASK_SIZE;
5445 info.align_mask = PAGE_MASK & HUGEPT_MASK;
5446 info.align_offset = 0;
5447+ info.threadstack_offset = 0;
5448 return vm_unmapped_area(&info);
5449 }
5450
5451diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
5452index 4efe96a..60e8699 100644
5453--- a/arch/microblaze/include/asm/cache.h
5454+++ b/arch/microblaze/include/asm/cache.h
5455@@ -13,11 +13,12 @@
5456 #ifndef _ASM_MICROBLAZE_CACHE_H
5457 #define _ASM_MICROBLAZE_CACHE_H
5458
5459+#include <linux/const.h>
5460 #include <asm/registers.h>
5461
5462 #define L1_CACHE_SHIFT 5
5463 /* word-granular cache in microblaze */
5464-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5465+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5466
5467 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5468
5469diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
5470index 08b6079..eb272cf 100644
5471--- a/arch/mips/include/asm/atomic.h
5472+++ b/arch/mips/include/asm/atomic.h
5473@@ -21,6 +21,10 @@
5474 #include <asm/cmpxchg.h>
5475 #include <asm/war.h>
5476
5477+#ifdef CONFIG_GENERIC_ATOMIC64
5478+#include <asm-generic/atomic64.h>
5479+#endif
5480+
5481 #define ATOMIC_INIT(i) { (i) }
5482
5483 /*
5484@@ -759,6 +763,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
5485 */
5486 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
5487
5488+#define atomic64_read_unchecked(v) atomic64_read(v)
5489+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5490+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5491+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5492+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5493+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5494+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5495+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5496+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5497+
5498 #endif /* CONFIG_64BIT */
5499
5500 /*
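
grsecurity's PAX_REFCOUNT instruments the regular atomic ops to trap on signed overflow; counters that may legitimately wrap use the *_unchecked variants instead. On architectures without the instrumentation (MIPS here, parisc and powerpc further down) the unchecked names simply alias the plain ops, keeping common code portable. A hedged userspace sketch of the two flavours (not the kernel implementation):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { long counter; } atomic64_t;
typedef struct { long counter; } atomic64_unchecked_t;

/* Stand-in for the PAX_REFCOUNT-checked increment: the real version
 * detects the overflow in inline asm and raises an exception; abort()
 * merely marks the spot here. */
static void atomic64_inc(atomic64_t *v)
{
	if (v->counter == LONG_MAX)
		abort();	/* refcount would overflow */
	v->counter++;
}

/* Unchecked flavour: wrapping is intended (statistics, sequence
 * numbers, ...). The cast dance avoids signed-overflow UB in plain C. */
static void atomic64_inc_unchecked(atomic64_unchecked_t *v)
{
	v->counter = (long)((unsigned long)v->counter + 1);
}

int main(void)
{
	atomic64_unchecked_t stats = { LONG_MAX };
	atomic64_inc_unchecked(&stats);		/* wraps, by design */
	printf("stats counter wrapped to %ld\n", stats.counter);
	return 0;
}
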
5501diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
5502index b4db69f..8f3b093 100644
5503--- a/arch/mips/include/asm/cache.h
5504+++ b/arch/mips/include/asm/cache.h
5505@@ -9,10 +9,11 @@
5506 #ifndef _ASM_CACHE_H
5507 #define _ASM_CACHE_H
5508
5509+#include <linux/const.h>
5510 #include <kmalloc.h>
5511
5512 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
5513-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5514+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5515
5516 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
5517 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5518diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
5519index cf3ae24..238d22f 100644
5520--- a/arch/mips/include/asm/elf.h
5521+++ b/arch/mips/include/asm/elf.h
5522@@ -372,13 +372,16 @@ extern const char *__elf_platform;
5523 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
5524 #endif
5525
5526+#ifdef CONFIG_PAX_ASLR
5527+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
5528+
5529+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5530+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5531+#endif
5532+
5533 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
5534 struct linux_binprm;
5535 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
5536 int uses_interp);
5537
5538-struct mm_struct;
5539-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
5540-#define arch_randomize_brk arch_randomize_brk
5541-
5542 #endif /* _ASM_ELF_H */
5543diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
5544index c1f6afa..38cc6e9 100644
5545--- a/arch/mips/include/asm/exec.h
5546+++ b/arch/mips/include/asm/exec.h
5547@@ -12,6 +12,6 @@
5548 #ifndef _ASM_EXEC_H
5549 #define _ASM_EXEC_H
5550
5551-extern unsigned long arch_align_stack(unsigned long sp);
5552+#define arch_align_stack(x) ((x) & ~0xfUL)
5553
5554 #endif /* _ASM_EXEC_H */
5555diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
5556index d44622c..64990d2 100644
5557--- a/arch/mips/include/asm/local.h
5558+++ b/arch/mips/include/asm/local.h
5559@@ -12,15 +12,25 @@ typedef struct
5560 atomic_long_t a;
5561 } local_t;
5562
5563+typedef struct {
5564+ atomic_long_unchecked_t a;
5565+} local_unchecked_t;
5566+
5567 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
5568
5569 #define local_read(l) atomic_long_read(&(l)->a)
5570+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
5571 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
5572+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
5573
5574 #define local_add(i, l) atomic_long_add((i), (&(l)->a))
5575+#define local_add_unchecked(i, l) atomic_long_add_unchecked((i), (&(l)->a))
5576 #define local_sub(i, l) atomic_long_sub((i), (&(l)->a))
5577+#define local_sub_unchecked(i, l) atomic_long_sub_unchecked((i), (&(l)->a))
5578 #define local_inc(l) atomic_long_inc(&(l)->a)
5579+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
5580 #define local_dec(l) atomic_long_dec(&(l)->a)
5581+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
5582
5583 /*
5584 * Same as above, but return the result value
5585@@ -70,6 +80,51 @@ static __inline__ long local_add_return(long i, local_t * l)
5586 return result;
5587 }
5588
5589+static __inline__ long local_add_return_unchecked(long i, local_unchecked_t * l)
5590+{
5591+ unsigned long result;
5592+
5593+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
5594+ unsigned long temp;
5595+
5596+ __asm__ __volatile__(
5597+ " .set mips3 \n"
5598+ "1:" __LL "%1, %2 # local_add_return \n"
5599+ " addu %0, %1, %3 \n"
5600+ __SC "%0, %2 \n"
5601+ " beqzl %0, 1b \n"
5602+ " addu %0, %1, %3 \n"
5603+ " .set mips0 \n"
5604+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
5605+ : "Ir" (i), "m" (l->a.counter)
5606+ : "memory");
5607+ } else if (kernel_uses_llsc) {
5608+ unsigned long temp;
5609+
5610+ __asm__ __volatile__(
5611+ " .set mips3 \n"
5612+ "1:" __LL "%1, %2 # local_add_return \n"
5613+ " addu %0, %1, %3 \n"
5614+ __SC "%0, %2 \n"
5615+ " beqz %0, 1b \n"
5616+ " addu %0, %1, %3 \n"
5617+ " .set mips0 \n"
5618+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
5619+ : "Ir" (i), "m" (l->a.counter)
5620+ : "memory");
5621+ } else {
5622+ unsigned long flags;
5623+
5624+ local_irq_save(flags);
5625+ result = l->a.counter;
5626+ result += i;
5627+ l->a.counter = result;
5628+ local_irq_restore(flags);
5629+ }
5630+
5631+ return result;
5632+}
5633+
5634 static __inline__ long local_sub_return(long i, local_t * l)
5635 {
5636 unsigned long result;
5637@@ -117,6 +172,8 @@ static __inline__ long local_sub_return(long i, local_t * l)
5638
5639 #define local_cmpxchg(l, o, n) \
5640 ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
5641+#define local_cmpxchg_unchecked(l, o, n) \
5642+ ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
5643 #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
5644
5645 /**
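
local_add_return_unchecked() mirrors local_add_return(): LL loads the counter, addu computes the sum, SC attempts the store and leaves 0 in the destination register if the cache line was disturbed, and the branch retries; the R10000_LLSC_WAR path only differs in using the branch-likely form. Since LL/SC isn't portable userspace, a C11 analogue of the retry loop:

#include <stdatomic.h>
#include <stdio.h>

/* C11 analogue of the LL/SC retry loop in local_add_return_unchecked():
 * the weak compare-exchange plays the role of SC failing when the line
 * changed under us, and the loop is the "beqz %0, 1b" retry. */
static long local_add_return_sketch(long i, _Atomic long *counter)
{
	long old, new;
	do {
		old = atomic_load_explicit(counter, memory_order_relaxed);
		new = old + i;		/* " addu %0, %1, %3 " */
	} while (!atomic_compare_exchange_weak(counter, &old, new));
	return new;			/* result value, like the asm path */
}

int main(void)
{
	_Atomic long c = 40;
	printf("%ld\n", local_add_return_sketch(2, &c));	/* 42 */
	return 0;
}
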
5646diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
5647index f59552f..3abe9b9 100644
5648--- a/arch/mips/include/asm/page.h
5649+++ b/arch/mips/include/asm/page.h
5650@@ -95,7 +95,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
5651 #ifdef CONFIG_CPU_MIPS32
5652 typedef struct { unsigned long pte_low, pte_high; } pte_t;
5653 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
5654- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
5655+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
5656 #else
5657 typedef struct { unsigned long long pte; } pte_t;
5658 #define pte_val(x) ((x).pte)
5659diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
5660index 881d18b..cea38bc 100644
5661--- a/arch/mips/include/asm/pgalloc.h
5662+++ b/arch/mips/include/asm/pgalloc.h
5663@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
5664 {
5665 set_pud(pud, __pud((unsigned long)pmd));
5666 }
5667+
5668+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
5669+{
5670+ pud_populate(mm, pud, pmd);
5671+}
5672 #endif
5673
5674 /*
5675diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
5676index 895320e..bf63e10 100644
5677--- a/arch/mips/include/asm/thread_info.h
5678+++ b/arch/mips/include/asm/thread_info.h
5679@@ -115,6 +115,8 @@ static inline struct thread_info *current_thread_info(void)
5680 #define TIF_32BIT_ADDR 23 /* 32-bit address space (o32/n32) */
5681 #define TIF_FPUBOUND 24 /* thread bound to FPU-full CPU set */
5682 #define TIF_LOAD_WATCH 25 /* If set, load watch registers */
5683+/* li takes a 32bit immediate */
5684+#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
5685 #define TIF_SYSCALL_TRACE 31 /* syscall trace active */
5686
5687 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
5688@@ -130,15 +132,18 @@ static inline struct thread_info *current_thread_info(void)
5689 #define _TIF_32BIT_ADDR (1<<TIF_32BIT_ADDR)
5690 #define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
5691 #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
5692+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
5693+
5694+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
5695
5696 /* work to do in syscall_trace_leave() */
5697-#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
5698+#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
5699
5700 /* work to do on interrupt/exception return */
5701 #define _TIF_WORK_MASK \
5702 (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_NOTIFY_RESUME)
5703 /* work to do on any return to u-space */
5704-#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK | _TIF_WORK_SYSCALL_EXIT)
5705+#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK | _TIF_WORK_SYSCALL_EXIT | _TIF_GRSEC_SETXID)
5706
5707 #endif /* __KERNEL__ */
5708
5709diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
5710index 1188e00..41cf144 100644
5711--- a/arch/mips/kernel/binfmt_elfn32.c
5712+++ b/arch/mips/kernel/binfmt_elfn32.c
5713@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
5714 #undef ELF_ET_DYN_BASE
5715 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
5716
5717+#ifdef CONFIG_PAX_ASLR
5718+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
5719+
5720+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5721+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5722+#endif
5723+
5724 #include <asm/processor.h>
5725 #include <linux/module.h>
5726 #include <linux/elfcore.h>
5727diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
5728index 202e581..689ca79 100644
5729--- a/arch/mips/kernel/binfmt_elfo32.c
5730+++ b/arch/mips/kernel/binfmt_elfo32.c
5731@@ -56,6 +56,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
5732 #undef ELF_ET_DYN_BASE
5733 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
5734
5735+#ifdef CONFIG_PAX_ASLR
5736+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
5737+
5738+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5739+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5740+#endif
5741+
5742 #include <asm/processor.h>
5743
5744 /*
5745diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
5746index c6a041d..b3e7318 100644
5747--- a/arch/mips/kernel/process.c
5748+++ b/arch/mips/kernel/process.c
5749@@ -563,15 +563,3 @@ unsigned long get_wchan(struct task_struct *task)
5750 out:
5751 return pc;
5752 }
5753-
5754-/*
5755- * Don't forget that the stack pointer must be aligned on a 8 bytes
5756- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
5757- */
5758-unsigned long arch_align_stack(unsigned long sp)
5759-{
5760- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5761- sp -= get_random_int() & ~PAGE_MASK;
5762-
5763- return sp & ALMASK;
5764-}
5765diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
5766index 9c6299c..2fb4c22 100644
5767--- a/arch/mips/kernel/ptrace.c
5768+++ b/arch/mips/kernel/ptrace.c
5769@@ -528,6 +528,10 @@ static inline int audit_arch(void)
5770 return arch;
5771 }
5772
5773+#ifdef CONFIG_GRKERNSEC_SETXID
5774+extern void gr_delayed_cred_worker(void);
5775+#endif
5776+
5777 /*
5778 * Notification of system call entry/exit
5779 * - triggered by current->work.syscall_trace
5780@@ -537,6 +541,11 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
5781 /* do the secure computing check first */
5782 secure_computing_strict(regs->regs[2]);
5783
5784+#ifdef CONFIG_GRKERNSEC_SETXID
5785+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
5786+ gr_delayed_cred_worker();
5787+#endif
5788+
5789 if (!(current->ptrace & PT_PTRACED))
5790 goto out;
5791
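
GRKERNSEC_SETXID defers credential changes to sibling threads: each thread gets TIF_GRSEC_SETXID set, and the next syscall entry (hence the _TIF_SYSCALL_WORK edits in the scall*.S files below) runs gr_delayed_cred_worker() to apply the pending credentials. A sketch of the test-and-clear-on-entry pattern, all names stand-ins:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define TIF_GRSEC_SETXID 29

static _Atomic unsigned long thread_flags;

/* Hypothetical stand-in for the kernel's test_and_clear_thread_flag(). */
static bool test_and_clear_flag(int bit)
{
	unsigned long mask = 1UL << bit;
	return atomic_fetch_and(&thread_flags, ~mask) & mask;
}

static void gr_delayed_cred_worker(void)
{
	puts("applying deferred uid/gid change to this thread");
}

/* Pattern from syscall_trace_enter(): run the worker at most once per
 * pending request, on the syscall entry path of each flagged thread. */
static void syscall_trace_enter_sketch(void)
{
	if (test_and_clear_flag(TIF_GRSEC_SETXID))
		gr_delayed_cred_worker();
}

int main(void)
{
	atomic_fetch_or(&thread_flags, 1UL << TIF_GRSEC_SETXID);
	syscall_trace_enter_sketch();	/* fires once */
	syscall_trace_enter_sketch();	/* flag already cleared: no-op */
	return 0;
}
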
5792diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
5793index 9b36424..e7f4154 100644
5794--- a/arch/mips/kernel/scall32-o32.S
5795+++ b/arch/mips/kernel/scall32-o32.S
5796@@ -52,7 +52,7 @@ NESTED(handle_sys, PT_SIZE, sp)
5797
5798 stack_done:
5799 lw t0, TI_FLAGS($28) # syscall tracing enabled?
5800- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
5801+ li t1, _TIF_SYSCALL_WORK
5802 and t0, t1
5803 bnez t0, syscall_trace_entry # -> yes
5804
5805diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
5806index 97a5909..59622f8 100644
5807--- a/arch/mips/kernel/scall64-64.S
5808+++ b/arch/mips/kernel/scall64-64.S
5809@@ -54,7 +54,7 @@ NESTED(handle_sys64, PT_SIZE, sp)
5810
5811 sd a3, PT_R26(sp) # save a3 for syscall restarting
5812
5813- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
5814+ li t1, _TIF_SYSCALL_WORK
5815 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
5816 and t0, t1, t0
5817 bnez t0, syscall_trace_entry
5818diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
5819index edcb659..fb2ab09 100644
5820--- a/arch/mips/kernel/scall64-n32.S
5821+++ b/arch/mips/kernel/scall64-n32.S
5822@@ -47,7 +47,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
5823
5824 sd a3, PT_R26(sp) # save a3 for syscall restarting
5825
5826- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
5827+ li t1, _TIF_SYSCALL_WORK
5828 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
5829 and t0, t1, t0
5830 bnez t0, n32_syscall_trace_entry
5831diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
5832index 74f485d..47d2c38 100644
5833--- a/arch/mips/kernel/scall64-o32.S
5834+++ b/arch/mips/kernel/scall64-o32.S
5835@@ -81,7 +81,7 @@ NESTED(handle_sys, PT_SIZE, sp)
5836 PTR 4b, bad_stack
5837 .previous
5838
5839- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
5840+ li t1, _TIF_SYSCALL_WORK
5841 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
5842 and t0, t1, t0
5843 bnez t0, trace_a_syscall
5844diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
5845index 0fead53..eeb00a6 100644
5846--- a/arch/mips/mm/fault.c
5847+++ b/arch/mips/mm/fault.c
5848@@ -27,6 +27,23 @@
5849 #include <asm/highmem.h> /* For VMALLOC_END */
5850 #include <linux/kdebug.h>
5851
5852+#ifdef CONFIG_PAX_PAGEEXEC
5853+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5854+{
5855+ unsigned long i;
5856+
5857+ printk(KERN_ERR "PAX: bytes at PC: ");
5858+ for (i = 0; i < 5; i++) {
5859+ unsigned int c;
5860+ if (get_user(c, (unsigned int *)pc+i))
5861+ printk(KERN_CONT "???????? ");
5862+ else
5863+ printk(KERN_CONT "%08x ", c);
5864+ }
5865+ printk("\n");
5866+}
5867+#endif
5868+
5869 /*
5870 * This routine handles page faults. It determines the address,
5871 * and the problem, and then passes it off to one of the appropriate
5872@@ -196,6 +213,14 @@ bad_area:
5873 bad_area_nosemaphore:
5874 /* User mode accesses just cause a SIGSEGV */
5875 if (user_mode(regs)) {
5876+
5877+#ifdef CONFIG_PAX_PAGEEXEC
5878+ if (cpu_has_rixi && (mm->pax_flags & MF_PAX_PAGEEXEC) && !write && address == instruction_pointer(regs)) {
5879+ pax_report_fault(regs, (void *)address, (void *)user_stack_pointer(regs));
5880+ do_group_exit(SIGKILL);
5881+ }
5882+#endif
5883+
5884 tsk->thread.cp0_badvaddr = address;
5885 tsk->thread.error_code = write;
5886 #if 0
5887diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
5888index 7e5fe27..9656513 100644
5889--- a/arch/mips/mm/mmap.c
5890+++ b/arch/mips/mm/mmap.c
5891@@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
5892 struct vm_area_struct *vma;
5893 unsigned long addr = addr0;
5894 int do_color_align;
5895+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5896 struct vm_unmapped_area_info info;
5897
5898 if (unlikely(len > TASK_SIZE))
5899@@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
5900 do_color_align = 1;
5901
5902 /* requesting a specific address */
5903+
5904+#ifdef CONFIG_PAX_RANDMMAP
5905+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
5906+#endif
5907+
5908 if (addr) {
5909 if (do_color_align)
5910 addr = COLOUR_ALIGN(addr, pgoff);
5911@@ -91,14 +97,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
5912 addr = PAGE_ALIGN(addr);
5913
5914 vma = find_vma(mm, addr);
5915- if (TASK_SIZE - len >= addr &&
5916- (!vma || addr + len <= vma->vm_start))
5917+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
5918 return addr;
5919 }
5920
5921 info.length = len;
5922 info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
5923 info.align_offset = pgoff << PAGE_SHIFT;
5924+ info.threadstack_offset = offset;
5925
5926 if (dir == DOWN) {
5927 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
5928@@ -146,6 +152,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5929 {
5930 unsigned long random_factor = 0UL;
5931
5932+#ifdef CONFIG_PAX_RANDMMAP
5933+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
5934+#endif
5935+
5936 if (current->flags & PF_RANDOMIZE) {
5937 random_factor = get_random_int();
5938 random_factor = random_factor << PAGE_SHIFT;
5939@@ -157,42 +167,27 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5940
5941 if (mmap_is_legacy()) {
5942 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
5943+
5944+#ifdef CONFIG_PAX_RANDMMAP
5945+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5946+ mm->mmap_base += mm->delta_mmap;
5947+#endif
5948+
5949 mm->get_unmapped_area = arch_get_unmapped_area;
5950 mm->unmap_area = arch_unmap_area;
5951 } else {
5952 mm->mmap_base = mmap_base(random_factor);
5953+
5954+#ifdef CONFIG_PAX_RANDMMAP
5955+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5956+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5957+#endif
5958+
5959 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
5960 mm->unmap_area = arch_unmap_area_topdown;
5961 }
5962 }
5963
5964-static inline unsigned long brk_rnd(void)
5965-{
5966- unsigned long rnd = get_random_int();
5967-
5968- rnd = rnd << PAGE_SHIFT;
5969- /* 8MB for 32bit, 256MB for 64bit */
5970- if (TASK_IS_32BIT_ADDR)
5971- rnd = rnd & 0x7ffffful;
5972- else
5973- rnd = rnd & 0xffffffful;
5974-
5975- return rnd;
5976-}
5977-
5978-unsigned long arch_randomize_brk(struct mm_struct *mm)
5979-{
5980- unsigned long base = mm->brk;
5981- unsigned long ret;
5982-
5983- ret = PAGE_ALIGN(base + brk_rnd());
5984-
5985- if (ret < mm->brk)
5986- return mm->brk;
5987-
5988- return ret;
5989-}
5990-
5991 int __virt_addr_valid(const volatile void *kaddr)
5992 {
5993 return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
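
With MF_PAX_RANDMMAP set, the hunks above ignore the caller's address hint, shift the legacy bottom-up base up by delta_mmap, pull the top-down base down by delta_mmap + delta_stack, and drop arch_randomize_brk() since PaX randomizes the brk itself. A toy computation of the two bases (all constants hypothetical):

#include <stdio.h>

#define TASK_UNMAPPED_BASE	0x10000000UL
#define STACK_TOP		0x7fff8000UL	/* illustrative only */

int main(void)
{
	unsigned long delta_mmap  = 0x1a3000UL;	/* randomized at execve */
	unsigned long delta_stack = 0x21000UL;

	/* legacy (bottom-up) layout: the search base moves up */
	unsigned long legacy_base = TASK_UNMAPPED_BASE + delta_mmap;

	/* top-down layout: the base moves down, below the stack gap */
	unsigned long topdown_base = STACK_TOP - (delta_mmap + delta_stack);

	printf("legacy  mmap_base = %#lx\n", legacy_base);
	printf("topdown mmap_base = %#lx\n", topdown_base);
	return 0;
}
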
5994diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
5995index 967d144..db12197 100644
5996--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
5997+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
5998@@ -11,12 +11,14 @@
5999 #ifndef _ASM_PROC_CACHE_H
6000 #define _ASM_PROC_CACHE_H
6001
6002+#include <linux/const.h>
6003+
6004 /* L1 cache */
6005
6006 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
6007 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
6008-#define L1_CACHE_BYTES 16 /* bytes per entry */
6009 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
6010+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
6011 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
6012
6013 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
6014diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
6015index bcb5df2..84fabd2 100644
6016--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
6017+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
6018@@ -16,13 +16,15 @@
6019 #ifndef _ASM_PROC_CACHE_H
6020 #define _ASM_PROC_CACHE_H
6021
6022+#include <linux/const.h>
6023+
6024 /*
6025 * L1 cache
6026 */
6027 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
6028 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
6029-#define L1_CACHE_BYTES 32 /* bytes per entry */
6030 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
6031+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
6032 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
6033
6034 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
6035diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
6036index 4ce7a01..449202a 100644
6037--- a/arch/openrisc/include/asm/cache.h
6038+++ b/arch/openrisc/include/asm/cache.h
6039@@ -19,11 +19,13 @@
6040 #ifndef __ASM_OPENRISC_CACHE_H
6041 #define __ASM_OPENRISC_CACHE_H
6042
6043+#include <linux/const.h>
6044+
6045 /* FIXME: How can we replace these with values from the CPU...
6046 * they shouldn't be hard-coded!
6047 */
6048
6049-#define L1_CACHE_BYTES 16
6050 #define L1_CACHE_SHIFT 4
6051+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6052
6053 #endif /* __ASM_OPENRISC_CACHE_H */
6054diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
6055index 472886c..00e7df9 100644
6056--- a/arch/parisc/include/asm/atomic.h
6057+++ b/arch/parisc/include/asm/atomic.h
6058@@ -252,6 +252,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
6059 return dec;
6060 }
6061
6062+#define atomic64_read_unchecked(v) atomic64_read(v)
6063+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
6064+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
6065+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
6066+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
6067+#define atomic64_inc_unchecked(v) atomic64_inc(v)
6068+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
6069+#define atomic64_dec_unchecked(v) atomic64_dec(v)
6070+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
6071+
6072 #endif /* !CONFIG_64BIT */
6073
6074
6075diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
6076index 47f11c7..3420df2 100644
6077--- a/arch/parisc/include/asm/cache.h
6078+++ b/arch/parisc/include/asm/cache.h
6079@@ -5,6 +5,7 @@
6080 #ifndef __ARCH_PARISC_CACHE_H
6081 #define __ARCH_PARISC_CACHE_H
6082
6083+#include <linux/const.h>
6084
6085 /*
6086 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
6087@@ -15,13 +16,13 @@
6088 * just ruin performance.
6089 */
6090 #ifdef CONFIG_PA20
6091-#define L1_CACHE_BYTES 64
6092 #define L1_CACHE_SHIFT 6
6093 #else
6094-#define L1_CACHE_BYTES 32
6095 #define L1_CACHE_SHIFT 5
6096 #endif
6097
6098+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6099+
6100 #ifndef __ASSEMBLY__
6101
6102 #define SMP_CACHE_BYTES L1_CACHE_BYTES
6103diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
6104index ad2b503..bdf1651 100644
6105--- a/arch/parisc/include/asm/elf.h
6106+++ b/arch/parisc/include/asm/elf.h
6107@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
6108
6109 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
6110
6111+#ifdef CONFIG_PAX_ASLR
6112+#define PAX_ELF_ET_DYN_BASE 0x10000UL
6113+
6114+#define PAX_DELTA_MMAP_LEN 16
6115+#define PAX_DELTA_STACK_LEN 16
6116+#endif
6117+
6118 /* This yields a mask that user programs can use to figure out what
6119 instruction set this CPU supports. This could be done in user space,
6120 but it's not easy, and we've already done it here. */
6121diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
6122index fc987a1..6e068ef 100644
6123--- a/arch/parisc/include/asm/pgalloc.h
6124+++ b/arch/parisc/include/asm/pgalloc.h
6125@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
6126 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
6127 }
6128
6129+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
6130+{
6131+ pgd_populate(mm, pgd, pmd);
6132+}
6133+
6134 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
6135 {
6136 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
6137@@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
6138 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
6139 #define pmd_free(mm, x) do { } while (0)
6140 #define pgd_populate(mm, pmd, pte) BUG()
6141+#define pgd_populate_kernel(mm, pmd, pte) BUG()
6142
6143 #endif
6144
6145diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
6146index 1e40d7f..a3eb445 100644
6147--- a/arch/parisc/include/asm/pgtable.h
6148+++ b/arch/parisc/include/asm/pgtable.h
6149@@ -223,6 +223,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
6150 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
6151 #define PAGE_COPY PAGE_EXECREAD
6152 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
6153+
6154+#ifdef CONFIG_PAX_PAGEEXEC
6155+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
6156+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
6157+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
6158+#else
6159+# define PAGE_SHARED_NOEXEC PAGE_SHARED
6160+# define PAGE_COPY_NOEXEC PAGE_COPY
6161+# define PAGE_READONLY_NOEXEC PAGE_READONLY
6162+#endif
6163+
6164 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
6165 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
6166 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
6167diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
6168index e0a8235..ce2f1e1 100644
6169--- a/arch/parisc/include/asm/uaccess.h
6170+++ b/arch/parisc/include/asm/uaccess.h
6171@@ -245,10 +245,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
6172 const void __user *from,
6173 unsigned long n)
6174 {
6175- int sz = __compiletime_object_size(to);
6176+ size_t sz = __compiletime_object_size(to);
6177 int ret = -EFAULT;
6178
6179- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
6180+ if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
6181 ret = __copy_from_user(to, from, n);
6182 else
6183 copy_from_user_overflow();
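
__compiletime_object_size() reports "unknown" as (size_t)-1; keeping the result in a signed int both mangles that sentinel and truncates object sizes beyond 31 bits, so the patch widens sz to size_t and compares in the unsigned domain. A demonstration of the truncation (LP64 assumed, values illustrative):

#include <stddef.h>
#include <stdio.h>

int main(void)
{
	size_t object = 4UL * 1024 * 1024 * 1024;	/* 4 GiB: bit 32 set */
	size_t n = 4096;

	int sz_int = (int)object;	/* truncates to 0 on LP64 */
	size_t sz = object;		/* preserved */

	printf("int check:    sz >= n -> %d (sz_int=%d, false alarm)\n",
	       (size_t)sz_int >= n, sz_int);
	printf("size_t check: sz >= n -> %d (copy allowed)\n", sz >= n);
	return 0;
}
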
6184diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
6185index 2a625fb..9908930 100644
6186--- a/arch/parisc/kernel/module.c
6187+++ b/arch/parisc/kernel/module.c
6188@@ -98,16 +98,38 @@
6189
6190 /* three functions to determine where in the module core
6191 * or init pieces the location is */
6192+static inline int in_init_rx(struct module *me, void *loc)
6193+{
6194+ return (loc >= me->module_init_rx &&
6195+ loc < (me->module_init_rx + me->init_size_rx));
6196+}
6197+
6198+static inline int in_init_rw(struct module *me, void *loc)
6199+{
6200+ return (loc >= me->module_init_rw &&
6201+ loc < (me->module_init_rw + me->init_size_rw));
6202+}
6203+
6204 static inline int in_init(struct module *me, void *loc)
6205 {
6206- return (loc >= me->module_init &&
6207- loc <= (me->module_init + me->init_size));
6208+ return in_init_rx(me, loc) || in_init_rw(me, loc);
6209+}
6210+
6211+static inline int in_core_rx(struct module *me, void *loc)
6212+{
6213+ return (loc >= me->module_core_rx &&
6214+ loc < (me->module_core_rx + me->core_size_rx));
6215+}
6216+
6217+static inline int in_core_rw(struct module *me, void *loc)
6218+{
6219+ return (loc >= me->module_core_rw &&
6220+ loc < (me->module_core_rw + me->core_size_rw));
6221 }
6222
6223 static inline int in_core(struct module *me, void *loc)
6224 {
6225- return (loc >= me->module_core &&
6226- loc <= (me->module_core + me->core_size));
6227+ return in_core_rx(me, loc) || in_core_rw(me, loc);
6228 }
6229
6230 static inline int in_local(struct module *me, void *loc)
6231@@ -371,13 +393,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
6232 }
6233
6234 /* align things a bit */
6235- me->core_size = ALIGN(me->core_size, 16);
6236- me->arch.got_offset = me->core_size;
6237- me->core_size += gots * sizeof(struct got_entry);
6238+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
6239+ me->arch.got_offset = me->core_size_rw;
6240+ me->core_size_rw += gots * sizeof(struct got_entry);
6241
6242- me->core_size = ALIGN(me->core_size, 16);
6243- me->arch.fdesc_offset = me->core_size;
6244- me->core_size += fdescs * sizeof(Elf_Fdesc);
6245+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
6246+ me->arch.fdesc_offset = me->core_size_rw;
6247+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
6248
6249 me->arch.got_max = gots;
6250 me->arch.fdesc_max = fdescs;
6251@@ -395,7 +417,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
6252
6253 BUG_ON(value == 0);
6254
6255- got = me->module_core + me->arch.got_offset;
6256+ got = me->module_core_rw + me->arch.got_offset;
6257 for (i = 0; got[i].addr; i++)
6258 if (got[i].addr == value)
6259 goto out;
6260@@ -413,7 +435,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
6261 #ifdef CONFIG_64BIT
6262 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
6263 {
6264- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
6265+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
6266
6267 if (!value) {
6268 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
6269@@ -431,7 +453,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
6270
6271 /* Create new one */
6272 fdesc->addr = value;
6273- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
6274+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
6275 return (Elf_Addr)fdesc;
6276 }
6277 #endif /* CONFIG_64BIT */
6278@@ -843,7 +865,7 @@ register_unwind_table(struct module *me,
6279
6280 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
6281 end = table + sechdrs[me->arch.unwind_section].sh_size;
6282- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
6283+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
6284
6285 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
6286 me->arch.unwind_section, table, end, gp);
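
Besides splitting the regions, the parisc helpers fix an off-by-one inherited from the old code: "loc <= base + size" accepted the byte one past the region, while the new tests use a strict "<". The GOT and function descriptors migrate to core_size_rw since they are written at load time. The boundary difference in isolation:

#include <assert.h>
#include <stdint.h>

/* Old (inclusive upper bound, off by one) vs. new (exclusive) test,
 * written as plain pointer arithmetic on a byte region. */
static int in_region_old(const char *loc, const char *base, uintptr_t size)
{
	return loc >= base && loc <= base + size;	/* accepts one-past-end */
}

static int in_region_new(const char *loc, const char *base, uintptr_t size)
{
	return loc >= base && loc < base + size;
}

int main(void)
{
	static char region[64];
	const char *past = region + sizeof(region);	/* one past the end */

	assert(in_region_old(past, region, sizeof(region)) == 1);	/* bug */
	assert(in_region_new(past, region, sizeof(region)) == 0);	/* fixed */
	return 0;
}
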
6287diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
6288index 5dfd248..64914ac 100644
6289--- a/arch/parisc/kernel/sys_parisc.c
6290+++ b/arch/parisc/kernel/sys_parisc.c
6291@@ -33,9 +33,11 @@
6292 #include <linux/utsname.h>
6293 #include <linux/personality.h>
6294
6295-static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
6296+static unsigned long get_unshared_area(struct file *filp, unsigned long addr, unsigned long len,
6297+ unsigned long flags)
6298 {
6299 struct vm_unmapped_area_info info;
6300+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
6301
6302 info.flags = 0;
6303 info.length = len;
6304@@ -43,6 +45,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
6305 info.high_limit = TASK_SIZE;
6306 info.align_mask = 0;
6307 info.align_offset = 0;
6308+ info.threadstack_offset = offset;
6309 return vm_unmapped_area(&info);
6310 }
6311
6312@@ -61,10 +64,11 @@ static int get_offset(struct address_space *mapping)
6313 return (unsigned long) mapping >> 8;
6314 }
6315
6316-static unsigned long get_shared_area(struct address_space *mapping,
6317- unsigned long addr, unsigned long len, unsigned long pgoff)
6318+static unsigned long get_shared_area(struct file *filp, struct address_space *mapping,
6319+ unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
6320 {
6321 struct vm_unmapped_area_info info;
6322+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
6323
6324 info.flags = 0;
6325 info.length = len;
6326@@ -72,6 +76,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
6327 info.high_limit = TASK_SIZE;
6328 info.align_mask = PAGE_MASK & (SHMLBA - 1);
6329 info.align_offset = (get_offset(mapping) + pgoff) << PAGE_SHIFT;
6330+ info.threadstack_offset = offset;
6331 return vm_unmapped_area(&info);
6332 }
6333
6334@@ -86,15 +91,22 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
6335 return -EINVAL;
6336 return addr;
6337 }
6338- if (!addr)
6339+ if (!addr) {
6340 addr = TASK_UNMAPPED_BASE;
6341
6342+#ifdef CONFIG_PAX_RANDMMAP
6343+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
6344+ addr += current->mm->delta_mmap;
6345+#endif
6346+
6347+ }
6348+
6349 if (filp) {
6350- addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
6351+ addr = get_shared_area(filp, filp->f_mapping, addr, len, pgoff, flags);
6352 } else if(flags & MAP_SHARED) {
6353- addr = get_shared_area(NULL, addr, len, pgoff);
6354+ addr = get_shared_area(filp, NULL, addr, len, pgoff, flags);
6355 } else {
6356- addr = get_unshared_area(addr, len);
6357+ addr = get_unshared_area(filp, addr, len, flags);
6358 }
6359 return addr;
6360 }
6361diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
6362index 04e47c6..7a8faf6 100644
6363--- a/arch/parisc/kernel/traps.c
6364+++ b/arch/parisc/kernel/traps.c
6365@@ -727,9 +727,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
6366
6367 down_read(&current->mm->mmap_sem);
6368 vma = find_vma(current->mm,regs->iaoq[0]);
6369- if (vma && (regs->iaoq[0] >= vma->vm_start)
6370- && (vma->vm_flags & VM_EXEC)) {
6371-
6372+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
6373 fault_address = regs->iaoq[0];
6374 fault_space = regs->iasq[0];
6375
6376diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
6377index f247a34..dc0f219 100644
6378--- a/arch/parisc/mm/fault.c
6379+++ b/arch/parisc/mm/fault.c
6380@@ -15,6 +15,7 @@
6381 #include <linux/sched.h>
6382 #include <linux/interrupt.h>
6383 #include <linux/module.h>
6384+#include <linux/unistd.h>
6385
6386 #include <asm/uaccess.h>
6387 #include <asm/traps.h>
6388@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
6389 static unsigned long
6390 parisc_acctyp(unsigned long code, unsigned int inst)
6391 {
6392- if (code == 6 || code == 16)
6393+ if (code == 6 || code == 7 || code == 16)
6394 return VM_EXEC;
6395
6396 switch (inst & 0xf0000000) {
6397@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
6398 }
6399 #endif
6400
6401+#ifdef CONFIG_PAX_PAGEEXEC
6402+/*
6403+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
6404+ *
6405+ * returns 1 when task should be killed
6406+ * 2 when rt_sigreturn trampoline was detected
6407+ * 3 when unpatched PLT trampoline was detected
6408+ */
6409+static int pax_handle_fetch_fault(struct pt_regs *regs)
6410+{
6411+
6412+#ifdef CONFIG_PAX_EMUPLT
6413+ int err;
6414+
6415+ do { /* PaX: unpatched PLT emulation */
6416+ unsigned int bl, depwi;
6417+
6418+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
6419+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
6420+
6421+ if (err)
6422+ break;
6423+
6424+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
6425+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
6426+
6427+ err = get_user(ldw, (unsigned int *)addr);
6428+ err |= get_user(bv, (unsigned int *)(addr+4));
6429+ err |= get_user(ldw2, (unsigned int *)(addr+8));
6430+
6431+ if (err)
6432+ break;
6433+
6434+ if (ldw == 0x0E801096U &&
6435+ bv == 0xEAC0C000U &&
6436+ ldw2 == 0x0E881095U)
6437+ {
6438+ unsigned int resolver, map;
6439+
6440+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
6441+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
6442+ if (err)
6443+ break;
6444+
6445+ regs->gr[20] = instruction_pointer(regs)+8;
6446+ regs->gr[21] = map;
6447+ regs->gr[22] = resolver;
6448+ regs->iaoq[0] = resolver | 3UL;
6449+ regs->iaoq[1] = regs->iaoq[0] + 4;
6450+ return 3;
6451+ }
6452+ }
6453+ } while (0);
6454+#endif
6455+
6456+#ifdef CONFIG_PAX_EMUTRAMP
6457+
6458+#ifndef CONFIG_PAX_EMUSIGRT
6459+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
6460+ return 1;
6461+#endif
6462+
6463+ do { /* PaX: rt_sigreturn emulation */
6464+ unsigned int ldi1, ldi2, bel, nop;
6465+
6466+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
6467+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
6468+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
6469+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
6470+
6471+ if (err)
6472+ break;
6473+
6474+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
6475+ ldi2 == 0x3414015AU &&
6476+ bel == 0xE4008200U &&
6477+ nop == 0x08000240U)
6478+ {
6479+ regs->gr[25] = (ldi1 & 2) >> 1;
6480+ regs->gr[20] = __NR_rt_sigreturn;
6481+ regs->gr[31] = regs->iaoq[1] + 16;
6482+ regs->sr[0] = regs->iasq[1];
6483+ regs->iaoq[0] = 0x100UL;
6484+ regs->iaoq[1] = regs->iaoq[0] + 4;
6485+ regs->iasq[0] = regs->sr[2];
6486+ regs->iasq[1] = regs->sr[2];
6487+ return 2;
6488+ }
6489+ } while (0);
6490+#endif
6491+
6492+ return 1;
6493+}
6494+
6495+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6496+{
6497+ unsigned long i;
6498+
6499+ printk(KERN_ERR "PAX: bytes at PC: ");
6500+ for (i = 0; i < 5; i++) {
6501+ unsigned int c;
6502+ if (get_user(c, (unsigned int *)pc+i))
6503+ printk(KERN_CONT "???????? ");
6504+ else
6505+ printk(KERN_CONT "%08x ", c);
6506+ }
6507+ printk("\n");
6508+}
6509+#endif
6510+
6511 int fixup_exception(struct pt_regs *regs)
6512 {
6513 const struct exception_table_entry *fix;
6514@@ -194,8 +305,33 @@ good_area:
6515
6516 acc_type = parisc_acctyp(code,regs->iir);
6517
6518- if ((vma->vm_flags & acc_type) != acc_type)
6519+ if ((vma->vm_flags & acc_type) != acc_type) {
6520+
6521+#ifdef CONFIG_PAX_PAGEEXEC
6522+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
6523+ (address & ~3UL) == instruction_pointer(regs))
6524+ {
6525+ up_read(&mm->mmap_sem);
6526+ switch (pax_handle_fetch_fault(regs)) {
6527+
6528+#ifdef CONFIG_PAX_EMUPLT
6529+ case 3:
6530+ return;
6531+#endif
6532+
6533+#ifdef CONFIG_PAX_EMUTRAMP
6534+ case 2:
6535+ return;
6536+#endif
6537+
6538+ }
6539+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
6540+ do_group_exit(SIGKILL);
6541+ }
6542+#endif
6543+
6544 goto bad_area;
6545+ }
6546
6547 /*
6548 * If for any reason at all we couldn't handle the fault, make
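
With PAGEEXEC enforced, the unpatched PLT stub and the rt_sigreturn trampoline are legitimate code sitting in non-executable memory, so the fault handler pattern-matches the instruction words at the faulting PC and emulates them rather than killing the task. A sketch of just the matching step, opcode constants copied from the hunk above:

#include <stdint.h>
#include <stdio.h>

/* Pattern-match step of the rt_sigreturn emulation: the four words
 * fetched from the faulting PC must equal the known trampoline. */
static int is_rt_sigreturn_trampoline(const uint32_t insn[4])
{
	return (insn[0] == 0x34190000U || insn[0] == 0x34190002U) &&
	        insn[1] == 0x3414015AU &&
	        insn[2] == 0xE4008200U &&
	        insn[3] == 0x08000240U;
}

int main(void)
{
	uint32_t tramp[4] = { 0x34190002U, 0x3414015AU,
			      0xE4008200U, 0x08000240U };
	uint32_t other[4] = { 0xDEADBEEFU, 0, 0, 0 };

	printf("trampoline: %d\n", is_rt_sigreturn_trampoline(tramp)); /* 1 */
	printf("other:      %d\n", is_rt_sigreturn_trampoline(other)); /* 0 */
	return 0;
}
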
6549diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
6550index e3b1d41..8e81edf 100644
6551--- a/arch/powerpc/include/asm/atomic.h
6552+++ b/arch/powerpc/include/asm/atomic.h
6553@@ -523,6 +523,16 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
6554 return t1;
6555 }
6556
6557+#define atomic64_read_unchecked(v) atomic64_read(v)
6558+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
6559+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
6560+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
6561+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
6562+#define atomic64_inc_unchecked(v) atomic64_inc(v)
6563+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
6564+#define atomic64_dec_unchecked(v) atomic64_dec(v)
6565+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
6566+
6567 #endif /* __powerpc64__ */
6568
6569 #endif /* __KERNEL__ */
6570diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
6571index 9e495c9..b6878e5 100644
6572--- a/arch/powerpc/include/asm/cache.h
6573+++ b/arch/powerpc/include/asm/cache.h
6574@@ -3,6 +3,7 @@
6575
6576 #ifdef __KERNEL__
6577
6578+#include <linux/const.h>
6579
6580 /* bytes per L1 cache line */
6581 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
6582@@ -22,7 +23,7 @@
6583 #define L1_CACHE_SHIFT 7
6584 #endif
6585
6586-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6587+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6588
6589 #define SMP_CACHE_BYTES L1_CACHE_BYTES
6590
6591diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
6592index cc0655a..13eac2e 100644
6593--- a/arch/powerpc/include/asm/elf.h
6594+++ b/arch/powerpc/include/asm/elf.h
6595@@ -28,8 +28,19 @@
6596 the loader. We need to make sure that it is out of the way of the program
6597 that it will "exec", and that there is sufficient room for the brk. */
6598
6599-extern unsigned long randomize_et_dyn(unsigned long base);
6600-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
6601+#define ELF_ET_DYN_BASE (0x20000000)
6602+
6603+#ifdef CONFIG_PAX_ASLR
6604+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
6605+
6606+#ifdef __powerpc64__
6607+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
6608+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
6609+#else
6610+#define PAX_DELTA_MMAP_LEN 15
6611+#define PAX_DELTA_STACK_LEN 15
6612+#endif
6613+#endif
6614
6615 /*
6616 * Our registers are always unsigned longs, whether we're a 32 bit
6617@@ -123,10 +134,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
6618 (0x7ff >> (PAGE_SHIFT - 12)) : \
6619 (0x3ffff >> (PAGE_SHIFT - 12)))
6620
6621-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
6622-#define arch_randomize_brk arch_randomize_brk
6623-
6624-
6625 #ifdef CONFIG_SPU_BASE
6626 /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
6627 #define NT_SPU 1
6628diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
6629index 8196e9c..d83a9f3 100644
6630--- a/arch/powerpc/include/asm/exec.h
6631+++ b/arch/powerpc/include/asm/exec.h
6632@@ -4,6 +4,6 @@
6633 #ifndef _ASM_POWERPC_EXEC_H
6634 #define _ASM_POWERPC_EXEC_H
6635
6636-extern unsigned long arch_align_stack(unsigned long sp);
6637+#define arch_align_stack(x) ((x) & ~0xfUL)
6638
6639 #endif /* _ASM_POWERPC_EXEC_H */
6640diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
6641index 5acabbd..7ea14fa 100644
6642--- a/arch/powerpc/include/asm/kmap_types.h
6643+++ b/arch/powerpc/include/asm/kmap_types.h
6644@@ -10,7 +10,7 @@
6645 * 2 of the License, or (at your option) any later version.
6646 */
6647
6648-#define KM_TYPE_NR 16
6649+#define KM_TYPE_NR 17
6650
6651 #endif /* __KERNEL__ */
6652 #endif /* _ASM_POWERPC_KMAP_TYPES_H */
6653diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
6654index 8565c25..2865190 100644
6655--- a/arch/powerpc/include/asm/mman.h
6656+++ b/arch/powerpc/include/asm/mman.h
6657@@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
6658 }
6659 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
6660
6661-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
6662+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
6663 {
6664 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
6665 }
6666diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
6667index 988c812..63c7d70 100644
6668--- a/arch/powerpc/include/asm/page.h
6669+++ b/arch/powerpc/include/asm/page.h
6670@@ -220,8 +220,9 @@ extern long long virt_phys_offset;
6671 * and needs to be executable. This means the whole heap ends
6672 * up being executable.
6673 */
6674-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
6675- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6676+#define VM_DATA_DEFAULT_FLAGS32 \
6677+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
6678+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6679
6680 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
6681 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6682@@ -249,6 +250,9 @@ extern long long virt_phys_offset;
6683 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
6684 #endif
6685
6686+#define ktla_ktva(addr) (addr)
6687+#define ktva_ktla(addr) (addr)
6688+
6689 #ifndef CONFIG_PPC_BOOK3S_64
6690 /*
6691 * Use the top bit of the higher-level page table entries to indicate whether
6692diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
6693index 88693ce..ac6f9ab 100644
6694--- a/arch/powerpc/include/asm/page_64.h
6695+++ b/arch/powerpc/include/asm/page_64.h
6696@@ -153,15 +153,18 @@ do { \
6697 * stack by default, so in the absence of a PT_GNU_STACK program header
6698 * we turn execute permission off.
6699 */
6700-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
6701- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6702+#define VM_STACK_DEFAULT_FLAGS32 \
6703+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
6704+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6705
6706 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
6707 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6708
6709+#ifndef CONFIG_PAX_PAGEEXEC
6710 #define VM_STACK_DEFAULT_FLAGS \
6711 (is_32bit_task() ? \
6712 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
6713+#endif
6714
6715 #include <asm-generic/getorder.h>
6716
6717diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
6718index b66ae72..4a378cd 100644
6719--- a/arch/powerpc/include/asm/pgalloc-64.h
6720+++ b/arch/powerpc/include/asm/pgalloc-64.h
6721@@ -53,6 +53,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
6722 #ifndef CONFIG_PPC_64K_PAGES
6723
6724 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
6725+#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
6726
6727 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
6728 {
6729@@ -70,6 +71,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6730 pud_set(pud, (unsigned long)pmd);
6731 }
6732
6733+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6734+{
6735+ pud_populate(mm, pud, pmd);
6736+}
6737+
6738 #define pmd_populate(mm, pmd, pte_page) \
6739 pmd_populate_kernel(mm, pmd, page_address(pte_page))
6740 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
6741@@ -171,6 +177,7 @@ extern void __tlb_remove_table(void *_table);
6742 #endif
6743
6744 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
6745+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
6746
6747 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
6748 pte_t *pte)
6749diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
6750index 7aeb955..19f748e 100644
6751--- a/arch/powerpc/include/asm/pgtable.h
6752+++ b/arch/powerpc/include/asm/pgtable.h
6753@@ -2,6 +2,7 @@
6754 #define _ASM_POWERPC_PGTABLE_H
6755 #ifdef __KERNEL__
6756
6757+#include <linux/const.h>
6758 #ifndef __ASSEMBLY__
6759 #include <asm/processor.h> /* For TASK_SIZE */
6760 #include <asm/mmu.h>
6761diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
6762index 4aad413..85d86bf 100644
6763--- a/arch/powerpc/include/asm/pte-hash32.h
6764+++ b/arch/powerpc/include/asm/pte-hash32.h
6765@@ -21,6 +21,7 @@
6766 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
6767 #define _PAGE_USER 0x004 /* usermode access allowed */
6768 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
6769+#define _PAGE_EXEC _PAGE_GUARDED
6770 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
6771 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
6772 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
6773diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
6774index e1fb161..2290d1d 100644
6775--- a/arch/powerpc/include/asm/reg.h
6776+++ b/arch/powerpc/include/asm/reg.h
6777@@ -234,6 +234,7 @@
6778 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
6779 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
6780 #define DSISR_NOHPTE 0x40000000 /* no translation found */
6781+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
6782 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
6783 #define DSISR_ISSTORE 0x02000000 /* access was a store */
6784 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
6785diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
6786index 48cfc85..891382f 100644
6787--- a/arch/powerpc/include/asm/smp.h
6788+++ b/arch/powerpc/include/asm/smp.h
6789@@ -50,7 +50,7 @@ struct smp_ops_t {
6790 int (*cpu_disable)(void);
6791 void (*cpu_die)(unsigned int nr);
6792 int (*cpu_bootable)(unsigned int nr);
6793-};
6794+} __no_const;
6795
6796 extern void smp_send_debugger_break(void);
6797 extern void start_secondary_resume(void);
6798diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
6799index ba7b197..d292e26 100644
6800--- a/arch/powerpc/include/asm/thread_info.h
6801+++ b/arch/powerpc/include/asm/thread_info.h
6802@@ -93,7 +93,6 @@ static inline struct thread_info *current_thread_info(void)
6803 #define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling
6804 TIF_NEED_RESCHED */
6805 #define TIF_32BIT 4 /* 32 bit binary */
6806-#define TIF_PERFMON_WORK 5 /* work for pfm_handle_work() */
6807 #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */
6808 #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
6809 #define TIF_SINGLESTEP 8 /* singlestepping active */
6810@@ -107,6 +106,9 @@ static inline struct thread_info *current_thread_info(void)
6811 #define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation
6812 for stack store? */
6813 #define TIF_MEMDIE 17 /* is terminating due to OOM killer */
6814+#define TIF_PERFMON_WORK 18 /* work for pfm_handle_work() */
6815+/* mask must be expressible within 16 bits to satisfy 'andi' instruction reqs */
6816+#define TIF_GRSEC_SETXID 5 /* update credentials on syscall entry/exit */
6817
6818 /* as above, but as bit values */
6819 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
6820@@ -126,9 +128,10 @@ static inline struct thread_info *current_thread_info(void)
6821 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
6822 #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
6823 #define _TIF_NOHZ (1<<TIF_NOHZ)
6824+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
6825 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
6826 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
6827- _TIF_NOHZ)
6828+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
6829
6830 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
6831 _TIF_NOTIFY_RESUME | _TIF_UPROBE)
6832diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
6833index 4db4959..aba5c41 100644
6834--- a/arch/powerpc/include/asm/uaccess.h
6835+++ b/arch/powerpc/include/asm/uaccess.h
6836@@ -318,52 +318,6 @@ do { \
6837 extern unsigned long __copy_tofrom_user(void __user *to,
6838 const void __user *from, unsigned long size);
6839
6840-#ifndef __powerpc64__
6841-
6842-static inline unsigned long copy_from_user(void *to,
6843- const void __user *from, unsigned long n)
6844-{
6845- unsigned long over;
6846-
6847- if (access_ok(VERIFY_READ, from, n))
6848- return __copy_tofrom_user((__force void __user *)to, from, n);
6849- if ((unsigned long)from < TASK_SIZE) {
6850- over = (unsigned long)from + n - TASK_SIZE;
6851- return __copy_tofrom_user((__force void __user *)to, from,
6852- n - over) + over;
6853- }
6854- return n;
6855-}
6856-
6857-static inline unsigned long copy_to_user(void __user *to,
6858- const void *from, unsigned long n)
6859-{
6860- unsigned long over;
6861-
6862- if (access_ok(VERIFY_WRITE, to, n))
6863- return __copy_tofrom_user(to, (__force void __user *)from, n);
6864- if ((unsigned long)to < TASK_SIZE) {
6865- over = (unsigned long)to + n - TASK_SIZE;
6866- return __copy_tofrom_user(to, (__force void __user *)from,
6867- n - over) + over;
6868- }
6869- return n;
6870-}
6871-
6872-#else /* __powerpc64__ */
6873-
6874-#define __copy_in_user(to, from, size) \
6875- __copy_tofrom_user((to), (from), (size))
6876-
6877-extern unsigned long copy_from_user(void *to, const void __user *from,
6878- unsigned long n);
6879-extern unsigned long copy_to_user(void __user *to, const void *from,
6880- unsigned long n);
6881-extern unsigned long copy_in_user(void __user *to, const void __user *from,
6882- unsigned long n);
6883-
6884-#endif /* __powerpc64__ */
6885-
6886 static inline unsigned long __copy_from_user_inatomic(void *to,
6887 const void __user *from, unsigned long n)
6888 {
6889@@ -387,6 +341,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
6890 if (ret == 0)
6891 return 0;
6892 }
6893+
6894+ if (!__builtin_constant_p(n))
6895+ check_object_size(to, n, false);
6896+
6897 return __copy_tofrom_user((__force void __user *)to, from, n);
6898 }
6899
6900@@ -413,6 +371,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
6901 if (ret == 0)
6902 return 0;
6903 }
6904+
6905+ if (!__builtin_constant_p(n))
6906+ check_object_size(from, n, true);
6907+
6908 return __copy_tofrom_user(to, (__force const void __user *)from, n);
6909 }
6910
6911@@ -430,6 +392,92 @@ static inline unsigned long __copy_to_user(void __user *to,
6912 return __copy_to_user_inatomic(to, from, size);
6913 }
6914
6915+#ifndef __powerpc64__
6916+
6917+static inline unsigned long __must_check copy_from_user(void *to,
6918+ const void __user *from, unsigned long n)
6919+{
6920+ unsigned long over;
6921+
6922+ if ((long)n < 0)
6923+ return n;
6924+
6925+ if (access_ok(VERIFY_READ, from, n)) {
6926+ if (!__builtin_constant_p(n))
6927+ check_object_size(to, n, false);
6928+ return __copy_tofrom_user((__force void __user *)to, from, n);
6929+ }
6930+ if ((unsigned long)from < TASK_SIZE) {
6931+ over = (unsigned long)from + n - TASK_SIZE;
6932+ if (!__builtin_constant_p(n - over))
6933+ check_object_size(to, n - over, false);
6934+ return __copy_tofrom_user((__force void __user *)to, from,
6935+ n - over) + over;
6936+ }
6937+ return n;
6938+}
6939+
6940+static inline unsigned long __must_check copy_to_user(void __user *to,
6941+ const void *from, unsigned long n)
6942+{
6943+ unsigned long over;
6944+
6945+ if ((long)n < 0)
6946+ return n;
6947+
6948+ if (access_ok(VERIFY_WRITE, to, n)) {
6949+ if (!__builtin_constant_p(n))
6950+ check_object_size(from, n, true);
6951+ return __copy_tofrom_user(to, (__force void __user *)from, n);
6952+ }
6953+ if ((unsigned long)to < TASK_SIZE) {
6954+ over = (unsigned long)to + n - TASK_SIZE;
6955+		if (!__builtin_constant_p(n - over))
6956+ check_object_size(from, n - over, true);
6957+ return __copy_tofrom_user(to, (__force void __user *)from,
6958+ n - over) + over;
6959+ }
6960+ return n;
6961+}
6962+
6963+#else /* __powerpc64__ */
6964+
6965+#define __copy_in_user(to, from, size) \
6966+ __copy_tofrom_user((to), (from), (size))
6967+
6968+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
6969+{
6970+ if ((long)n < 0 || n > INT_MAX)
6971+ return n;
6972+
6973+ if (!__builtin_constant_p(n))
6974+ check_object_size(to, n, false);
6975+
6976+ if (likely(access_ok(VERIFY_READ, from, n)))
6977+ n = __copy_from_user(to, from, n);
6978+ else
6979+ memset(to, 0, n);
6980+ return n;
6981+}
6982+
6983+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
6984+{
6985+ if ((long)n < 0 || n > INT_MAX)
6986+ return n;
6987+
6988+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
6989+ if (!__builtin_constant_p(n))
6990+ check_object_size(from, n, true);
6991+ n = __copy_to_user(to, from, n);
6992+ }
6993+ return n;
6994+}
6995+
6996+extern unsigned long copy_in_user(void __user *to, const void __user *from,
6997+ unsigned long n);
6998+
6999+#endif /* __powerpc64__ */
7000+
7001 extern unsigned long __clear_user(void __user *addr, unsigned long size);
7002
7003 static inline unsigned long clear_user(void __user *addr, unsigned long size)
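
The reworked copy helpers add two guards: lengths with the sign bit set (typically a negative int widened to unsigned long) are rejected before any copying, and non-constant lengths are handed to check_object_size() for the PAX_USERCOPY bounds check. A userspace model of the length guard, with check_object_size() stubbed out (memcpy stands in for __copy_tofrom_user):

#include <stdio.h>
#include <string.h>

static void check_object_size(const void *p, unsigned long n, int to_user)
{
	(void)p; (void)n; (void)to_user;   /* stub for the PAX_USERCOPY check */
}

static unsigned long copy_from_user_model(void *to, const void *from,
					  unsigned long n)
{
	if ((long)n < 0)
		return n;                  /* report "nothing copied" */
	check_object_size(to, n, 0);
	memcpy(to, from, n);
	return 0;
}

int main(void)
{
	char src[8] = "data", dst[8];
	int bad_len = -1;                  /* e.g. an unchecked error code */

	/* huge value, sign bit set: rejected outright */
	printf("%lu\n", copy_from_user_model(dst, src, (unsigned long)bad_len));
	printf("%lu %s\n", copy_from_user_model(dst, src, 5), dst);
	return 0;
}
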
7004diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
7005index 645170a..6cf0271 100644
7006--- a/arch/powerpc/kernel/exceptions-64e.S
7007+++ b/arch/powerpc/kernel/exceptions-64e.S
7008@@ -757,6 +757,7 @@ storage_fault_common:
7009 std r14,_DAR(r1)
7010 std r15,_DSISR(r1)
7011 addi r3,r1,STACK_FRAME_OVERHEAD
7012+ bl .save_nvgprs
7013 mr r4,r14
7014 mr r5,r15
7015 ld r14,PACA_EXGEN+EX_R14(r13)
7016@@ -765,8 +766,7 @@ storage_fault_common:
7017 cmpdi r3,0
7018 bne- 1f
7019 b .ret_from_except_lite
7020-1: bl .save_nvgprs
7021- mr r5,r3
7022+1: mr r5,r3
7023 addi r3,r1,STACK_FRAME_OVERHEAD
7024 ld r4,_DAR(r1)
7025 bl .bad_page_fault
7026diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
7027index 902ca3c..e942155 100644
7028--- a/arch/powerpc/kernel/exceptions-64s.S
7029+++ b/arch/powerpc/kernel/exceptions-64s.S
7030@@ -1357,10 +1357,10 @@ handle_page_fault:
7031 11: ld r4,_DAR(r1)
7032 ld r5,_DSISR(r1)
7033 addi r3,r1,STACK_FRAME_OVERHEAD
7034+ bl .save_nvgprs
7035 bl .do_page_fault
7036 cmpdi r3,0
7037 beq+ 12f
7038- bl .save_nvgprs
7039 mr r5,r3
7040 addi r3,r1,STACK_FRAME_OVERHEAD
7041 lwz r4,_DAR(r1)
7042diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
7043index 2e3200c..72095ce 100644
7044--- a/arch/powerpc/kernel/module_32.c
7045+++ b/arch/powerpc/kernel/module_32.c
7046@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
7047 me->arch.core_plt_section = i;
7048 }
7049 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
7050- printk("Module doesn't contain .plt or .init.plt sections.\n");
7051+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
7052 return -ENOEXEC;
7053 }
7054
7055@@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
7056
7057 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
7058 /* Init, or core PLT? */
7059- if (location >= mod->module_core
7060- && location < mod->module_core + mod->core_size)
7061+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
7062+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
7063 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
7064- else
7065+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
7066+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
7067 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
7068+ else {
7069+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
7070+ return ~0UL;
7071+ }
7072
7073 /* Find this entry, or if that fails, the next avail. entry */
7074 while (entry->jump[0]) {
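
Because grsecurity splits each module into executable (rx) and writable (rw) regions, the PLT lookup above must test four address ranges instead of two, and now fails closed on anything outside them. A reduced model of the classification (the field names only mimic the grsec-modified struct module):

#include <stdio.h>
#include <stdint.h>

struct mod_layout {
	uintptr_t core_rx, core_rw, init_rx, init_rw;
	unsigned long core_rx_sz, core_rw_sz, init_rx_sz, init_rw_sz;
};

static int in_range(uintptr_t p, uintptr_t base, unsigned long sz)
{
	return p >= base && p < base + sz;
}

static const char *classify(const struct mod_layout *m, uintptr_t loc)
{
	if (in_range(loc, m->core_rx, m->core_rx_sz) ||
	    in_range(loc, m->core_rw, m->core_rw_sz))
		return "core PLT";
	if (in_range(loc, m->init_rx, m->init_rx_sz) ||
	    in_range(loc, m->init_rw, m->init_rw_sz))
		return "init PLT";
	return "invalid R_PPC_REL24 location";   /* the new fail-closed path */
}

int main(void)
{
	struct mod_layout m = { 0x1000, 0x2000, 0x3000, 0x4000,
				0x100, 0x100, 0x100, 0x100 };

	printf("%s\n", classify(&m, 0x1010));   /* core PLT */
	printf("%s\n", classify(&m, 0x5000));   /* invalid ... */
	return 0;
}
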
7075diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
7076index 7baa27b..f6b394a 100644
7077--- a/arch/powerpc/kernel/process.c
7078+++ b/arch/powerpc/kernel/process.c
7079@@ -884,8 +884,8 @@ void show_regs(struct pt_regs * regs)
7080 * Lookup NIP late so we have the best change of getting the
7081 * above info out without failing
7082 */
7083- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
7084- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
7085+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
7086+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
7087 #endif
7088 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
7089 printk("PACATMSCRATCH [%llx]\n", get_paca()->tm_scratch);
7090@@ -1345,10 +1345,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
7091 newsp = stack[0];
7092 ip = stack[STACK_FRAME_LR_SAVE];
7093 if (!firstframe || ip != lr) {
7094- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
7095+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
7096 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
7097 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
7098- printk(" (%pS)",
7099+ printk(" (%pA)",
7100 (void *)current->ret_stack[curr_frame].ret);
7101 curr_frame--;
7102 }
7103@@ -1368,7 +1368,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
7104 struct pt_regs *regs = (struct pt_regs *)
7105 (sp + STACK_FRAME_OVERHEAD);
7106 lr = regs->link;
7107- printk("--- Exception: %lx at %pS\n LR = %pS\n",
7108+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
7109 regs->trap, (void *)regs->nip, (void *)lr);
7110 firstframe = 1;
7111 }
7112@@ -1404,58 +1404,3 @@ void notrace __ppc64_runlatch_off(void)
7113 mtspr(SPRN_CTRLT, ctrl);
7114 }
7115 #endif /* CONFIG_PPC64 */
7116-
7117-unsigned long arch_align_stack(unsigned long sp)
7118-{
7119- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7120- sp -= get_random_int() & ~PAGE_MASK;
7121- return sp & ~0xf;
7122-}
7123-
7124-static inline unsigned long brk_rnd(void)
7125-{
7126- unsigned long rnd = 0;
7127-
7128- /* 8MB for 32bit, 1GB for 64bit */
7129- if (is_32bit_task())
7130- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
7131- else
7132- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
7133-
7134- return rnd << PAGE_SHIFT;
7135-}
7136-
7137-unsigned long arch_randomize_brk(struct mm_struct *mm)
7138-{
7139- unsigned long base = mm->brk;
7140- unsigned long ret;
7141-
7142-#ifdef CONFIG_PPC_STD_MMU_64
7143- /*
7144- * If we are using 1TB segments and we are allowed to randomise
7145- * the heap, we can put it above 1TB so it is backed by a 1TB
7146- * segment. Otherwise the heap will be in the bottom 1TB
7147- * which always uses 256MB segments and this may result in a
7148- * performance penalty.
7149- */
7150- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
7151- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
7152-#endif
7153-
7154- ret = PAGE_ALIGN(base + brk_rnd());
7155-
7156- if (ret < mm->brk)
7157- return mm->brk;
7158-
7159- return ret;
7160-}
7161-
7162-unsigned long randomize_et_dyn(unsigned long base)
7163-{
7164- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
7165-
7166- if (ret < base)
7167- return base;
7168-
7169- return ret;
7170-}
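
The deleted brk_rnd() drew its randomness as a page count: 2^(23 - PAGE_SHIFT) pages for 32-bit tasks and 2^(30 - PAGE_SHIFT) for 64-bit ones, i.e. 8 MiB and 1 GiB windows, now superseded by PaX's own deltas. A quick arithmetic check (PAGE_SHIFT = 12 assumed):

#include <stdio.h>

int main(void)
{
	unsigned long page_shift = 12;
	unsigned long pages32 = 1UL << (23 - page_shift);  /* 2^23 B = 8 MiB */
	unsigned long pages64 = 1UL << (30 - page_shift);  /* 2^30 B = 1 GiB */

	printf("32-bit: %lu pages = %lu MiB\n", pages32,
	       (pages32 << page_shift) >> 20);
	printf("64-bit: %lu pages = %lu MiB\n", pages64,
	       (pages64 << page_shift) >> 20);
	return 0;
}
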
7171diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
7172index 64f7bd5..8dd550f 100644
7173--- a/arch/powerpc/kernel/ptrace.c
7174+++ b/arch/powerpc/kernel/ptrace.c
7175@@ -1783,6 +1783,10 @@ long arch_ptrace(struct task_struct *child, long request,
7176 return ret;
7177 }
7178
7179+#ifdef CONFIG_GRKERNSEC_SETXID
7180+extern void gr_delayed_cred_worker(void);
7181+#endif
7182+
7183 /*
7184 * We must return the syscall number to actually look up in the table.
7185 * This can be -1L to skip running any syscall at all.
7186@@ -1795,6 +1799,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
7187
7188 secure_computing_strict(regs->gpr[0]);
7189
7190+#ifdef CONFIG_GRKERNSEC_SETXID
7191+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
7192+ gr_delayed_cred_worker();
7193+#endif
7194+
7195 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
7196 tracehook_report_syscall_entry(regs))
7197 /*
7198@@ -1829,6 +1838,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
7199 {
7200 int step;
7201
7202+#ifdef CONFIG_GRKERNSEC_SETXID
7203+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
7204+ gr_delayed_cred_worker();
7205+#endif
7206+
7207 audit_syscall_exit(regs);
7208
7209 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
7210diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
7211index 0f83122..c0aca6a 100644
7212--- a/arch/powerpc/kernel/signal_32.c
7213+++ b/arch/powerpc/kernel/signal_32.c
7214@@ -987,7 +987,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
7215 /* Save user registers on the stack */
7216 frame = &rt_sf->uc.uc_mcontext;
7217 addr = frame;
7218- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
7219+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
7220 sigret = 0;
7221 tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
7222 } else {
7223diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
7224index 887e99d..310bc11 100644
7225--- a/arch/powerpc/kernel/signal_64.c
7226+++ b/arch/powerpc/kernel/signal_64.c
7227@@ -751,7 +751,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
7228 #endif
7229
7230 /* Set up to return from userspace. */
7231- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
7232+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
7233 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
7234 } else {
7235 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
7236diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
7237index e68a845..8b140e6 100644
7238--- a/arch/powerpc/kernel/sysfs.c
7239+++ b/arch/powerpc/kernel/sysfs.c
7240@@ -522,7 +522,7 @@ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
7241 return NOTIFY_OK;
7242 }
7243
7244-static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
7245+static struct notifier_block sysfs_cpu_nb = {
7246 .notifier_call = sysfs_cpu_notify,
7247 };
7248
7249diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
7250index 88929b1..bece8f8 100644
7251--- a/arch/powerpc/kernel/traps.c
7252+++ b/arch/powerpc/kernel/traps.c
7253@@ -141,6 +141,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
7254 return flags;
7255 }
7256
7257+extern void gr_handle_kernel_exploit(void);
7258+
7259 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
7260 int signr)
7261 {
7262@@ -190,6 +192,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
7263 panic("Fatal exception in interrupt");
7264 if (panic_on_oops)
7265 panic("Fatal exception");
7266+
7267+ gr_handle_kernel_exploit();
7268+
7269 do_exit(signr);
7270 }
7271
7272diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
7273index d4f463a..8fb7431 100644
7274--- a/arch/powerpc/kernel/vdso.c
7275+++ b/arch/powerpc/kernel/vdso.c
7276@@ -34,6 +34,7 @@
7277 #include <asm/firmware.h>
7278 #include <asm/vdso.h>
7279 #include <asm/vdso_datapage.h>
7280+#include <asm/mman.h>
7281
7282 #include "setup.h"
7283
7284@@ -222,7 +223,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
7285 vdso_base = VDSO32_MBASE;
7286 #endif
7287
7288- current->mm->context.vdso_base = 0;
7289+ current->mm->context.vdso_base = ~0UL;
7290
7291 /* vDSO has a problem and was disabled, just don't "enable" it for the
7292 * process
7293@@ -242,7 +243,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
7294 vdso_base = get_unmapped_area(NULL, vdso_base,
7295 (vdso_pages << PAGE_SHIFT) +
7296 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
7297- 0, 0);
7298+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
7299 if (IS_ERR_VALUE(vdso_base)) {
7300 rc = vdso_base;
7301 goto fail_mmapsem;
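
Presumably because PaX can use a vdso_base of 0 to request a randomized placement, 0 can no longer double as the "vDSO disabled" marker; the sentinel moves to ~0UL, and the signal-trampoline sites above compare against it explicitly. A trivial sketch of the sentinel convention (VDSO_DISABLED is a made-up name for illustration):

#include <stdio.h>

#define VDSO_DISABLED (~0UL)

int main(void)
{
	unsigned long vdso_base = VDSO_DISABLED;

	if (vdso_base != VDSO_DISABLED)
		printf("use trampoline at vdso_base + offset\n");
	else
		printf("fall back to a stack trampoline\n");
	return 0;
}
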
7302diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
7303index 5eea6f3..5d10396 100644
7304--- a/arch/powerpc/lib/usercopy_64.c
7305+++ b/arch/powerpc/lib/usercopy_64.c
7306@@ -9,22 +9,6 @@
7307 #include <linux/module.h>
7308 #include <asm/uaccess.h>
7309
7310-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
7311-{
7312- if (likely(access_ok(VERIFY_READ, from, n)))
7313- n = __copy_from_user(to, from, n);
7314- else
7315- memset(to, 0, n);
7316- return n;
7317-}
7318-
7319-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
7320-{
7321- if (likely(access_ok(VERIFY_WRITE, to, n)))
7322- n = __copy_to_user(to, from, n);
7323- return n;
7324-}
7325-
7326 unsigned long copy_in_user(void __user *to, const void __user *from,
7327 unsigned long n)
7328 {
7329@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
7330 return n;
7331 }
7332
7333-EXPORT_SYMBOL(copy_from_user);
7334-EXPORT_SYMBOL(copy_to_user);
7335 EXPORT_SYMBOL(copy_in_user);
7336
7337diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
7338index 8726779..a33c512 100644
7339--- a/arch/powerpc/mm/fault.c
7340+++ b/arch/powerpc/mm/fault.c
7341@@ -33,6 +33,10 @@
7342 #include <linux/magic.h>
7343 #include <linux/ratelimit.h>
7344 #include <linux/context_tracking.h>
7345+#include <linux/slab.h>
7346+#include <linux/pagemap.h>
7347+#include <linux/compiler.h>
7348+#include <linux/unistd.h>
7349
7350 #include <asm/firmware.h>
7351 #include <asm/page.h>
7352@@ -69,6 +73,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
7353 }
7354 #endif
7355
7356+#ifdef CONFIG_PAX_PAGEEXEC
7357+/*
7358+ * PaX: decide what to do with offenders (regs->nip = fault address)
7359+ *
7360+ * returns 1 when task should be killed
7361+ */
7362+static int pax_handle_fetch_fault(struct pt_regs *regs)
7363+{
7364+ return 1;
7365+}
7366+
7367+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7368+{
7369+ unsigned long i;
7370+
7371+ printk(KERN_ERR "PAX: bytes at PC: ");
7372+ for (i = 0; i < 5; i++) {
7373+ unsigned int c;
7374+ if (get_user(c, (unsigned int __user *)pc+i))
7375+ printk(KERN_CONT "???????? ");
7376+ else
7377+ printk(KERN_CONT "%08x ", c);
7378+ }
7379+ printk("\n");
7380+}
7381+#endif
7382+
7383 /*
7384 * Check whether the instruction at regs->nip is a store using
7385 * an update addressing form which will update r1.
7386@@ -216,7 +247,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
7387 * indicate errors in DSISR but can validly be set in SRR1.
7388 */
7389 if (trap == 0x400)
7390- error_code &= 0x48200000;
7391+ error_code &= 0x58200000;
7392 else
7393 is_write = error_code & DSISR_ISSTORE;
7394 #else
7395@@ -371,7 +402,7 @@ good_area:
7396 * "undefined". Of those that can be set, this is the only
7397 * one which seems bad.
7398 */
7399- if (error_code & 0x10000000)
7400+ if (error_code & DSISR_GUARDED)
7401 /* Guarded storage error. */
7402 goto bad_area;
7403 #endif /* CONFIG_8xx */
7404@@ -386,7 +417,7 @@ good_area:
7405 * processors use the same I/D cache coherency mechanism
7406 * as embedded.
7407 */
7408- if (error_code & DSISR_PROTFAULT)
7409+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
7410 goto bad_area;
7411 #endif /* CONFIG_PPC_STD_MMU */
7412
7413@@ -471,6 +502,23 @@ bad_area:
7414 bad_area_nosemaphore:
7415 /* User mode accesses cause a SIGSEGV */
7416 if (user_mode(regs)) {
7417+
7418+#ifdef CONFIG_PAX_PAGEEXEC
7419+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
7420+#ifdef CONFIG_PPC_STD_MMU
7421+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
7422+#else
7423+ if (is_exec && regs->nip == address) {
7424+#endif
7425+ switch (pax_handle_fetch_fault(regs)) {
7426+ }
7427+
7428+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
7429+ do_group_exit(SIGKILL);
7430+ }
7431+ }
7432+#endif
7433+
7434 _exception(SIGSEGV, regs, code, address);
7435 goto bail;
7436 }
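
The pax_report_insns() added above dumps the five 32-bit words at the faulting PC, printing "????????" for words it cannot read. A userspace model of the same loop (safe_read() stands in for get_user()):

#include <stdio.h>
#include <stdint.h>

static int safe_read(uint32_t *out, const uint32_t *p)
{
	if (!p)
		return -1;         /* model an inaccessible address */
	*out = *p;
	return 0;
}

static void report_insns(const uint32_t *pc)
{
	printf("PAX: bytes at PC: ");
	for (int i = 0; i < 5; i++) {
		uint32_t c;
		if (safe_read(&c, pc ? pc + i : NULL))
			printf("???????? ");
		else
			printf("%08x ", c);
	}
	printf("\n");
}

int main(void)
{
	uint32_t text[5] = { 0x38600000, 0x4e800020, 0, 0, 0 };
	report_insns(text);   /* five words */
	report_insns(NULL);   /* all placeholders */
	return 0;
}
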
7437diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
7438index 67a42ed..cd463e0 100644
7439--- a/arch/powerpc/mm/mmap_64.c
7440+++ b/arch/powerpc/mm/mmap_64.c
7441@@ -57,6 +57,10 @@ static unsigned long mmap_rnd(void)
7442 {
7443 unsigned long rnd = 0;
7444
7445+#ifdef CONFIG_PAX_RANDMMAP
7446+	if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
7447+#endif
7448+
7449 if (current->flags & PF_RANDOMIZE) {
7450 /* 8MB for 32bit, 1GB for 64bit */
7451 if (is_32bit_task())
7452@@ -91,10 +95,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7453 */
7454 if (mmap_is_legacy()) {
7455 mm->mmap_base = TASK_UNMAPPED_BASE;
7456+
7457+#ifdef CONFIG_PAX_RANDMMAP
7458+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7459+ mm->mmap_base += mm->delta_mmap;
7460+#endif
7461+
7462 mm->get_unmapped_area = arch_get_unmapped_area;
7463 mm->unmap_area = arch_unmap_area;
7464 } else {
7465 mm->mmap_base = mmap_base();
7466+
7467+#ifdef CONFIG_PAX_RANDMMAP
7468+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7469+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7470+#endif
7471+
7472 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
7473 mm->unmap_area = arch_unmap_area_topdown;
7474 }
7475diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
7476index e779642..e5bb889 100644
7477--- a/arch/powerpc/mm/mmu_context_nohash.c
7478+++ b/arch/powerpc/mm/mmu_context_nohash.c
7479@@ -363,7 +363,7 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
7480 return NOTIFY_OK;
7481 }
7482
7483-static struct notifier_block __cpuinitdata mmu_context_cpu_nb = {
7484+static struct notifier_block mmu_context_cpu_nb = {
7485 .notifier_call = mmu_context_cpu_notify,
7486 };
7487
7488diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
7489index cafad40..9cbc0fc 100644
7490--- a/arch/powerpc/mm/numa.c
7491+++ b/arch/powerpc/mm/numa.c
7492@@ -920,7 +920,7 @@ static void __init *careful_zallocation(int nid, unsigned long size,
7493 return ret;
7494 }
7495
7496-static struct notifier_block __cpuinitdata ppc64_numa_nb = {
7497+static struct notifier_block ppc64_numa_nb = {
7498 .notifier_call = cpu_numa_callback,
7499 .priority = 1 /* Must run before sched domains notifier. */
7500 };
7501diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
7502index 3e99c14..f00953c 100644
7503--- a/arch/powerpc/mm/slice.c
7504+++ b/arch/powerpc/mm/slice.c
7505@@ -103,7 +103,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
7506 if ((mm->task_size - len) < addr)
7507 return 0;
7508 vma = find_vma(mm, addr);
7509- return (!vma || (addr + len) <= vma->vm_start);
7510+ return check_heap_stack_gap(vma, addr, len, 0);
7511 }
7512
7513 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
7514@@ -277,6 +277,12 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
7515 info.align_offset = 0;
7516
7517 addr = TASK_UNMAPPED_BASE;
7518+
7519+#ifdef CONFIG_PAX_RANDMMAP
7520+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7521+ addr += mm->delta_mmap;
7522+#endif
7523+
7524 while (addr < TASK_SIZE) {
7525 info.low_limit = addr;
7526 if (!slice_scan_available(addr, available, 1, &addr))
7527@@ -410,6 +416,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
7528 if (fixed && addr > (mm->task_size - len))
7529 return -EINVAL;
7530
7531+#ifdef CONFIG_PAX_RANDMMAP
7532+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
7533+ addr = 0;
7534+#endif
7535+
7536 /* If hint, make sure it matches our alignment restrictions */
7537 if (!fixed && addr) {
7538 addr = _ALIGN_UP(addr, 1ul << pshift);
7539diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
7540index 9098692..3d54cd1 100644
7541--- a/arch/powerpc/platforms/cell/spufs/file.c
7542+++ b/arch/powerpc/platforms/cell/spufs/file.c
7543@@ -280,9 +280,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
7544 return VM_FAULT_NOPAGE;
7545 }
7546
7547-static int spufs_mem_mmap_access(struct vm_area_struct *vma,
7548+static ssize_t spufs_mem_mmap_access(struct vm_area_struct *vma,
7549 unsigned long address,
7550- void *buf, int len, int write)
7551+ void *buf, size_t len, int write)
7552 {
7553 struct spu_context *ctx = vma->vm_file->private_data;
7554 unsigned long offset = address - vma->vm_start;
7555diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
7556index bdb738a..49c9f95 100644
7557--- a/arch/powerpc/platforms/powermac/smp.c
7558+++ b/arch/powerpc/platforms/powermac/smp.c
7559@@ -885,7 +885,7 @@ static int smp_core99_cpu_notify(struct notifier_block *self,
7560 return NOTIFY_OK;
7561 }
7562
7563-static struct notifier_block __cpuinitdata smp_core99_cpu_nb = {
7564+static struct notifier_block smp_core99_cpu_nb = {
7565 .notifier_call = smp_core99_cpu_notify,
7566 };
7567 #endif /* CONFIG_HOTPLUG_CPU */
7568diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
7569index c797832..ce575c8 100644
7570--- a/arch/s390/include/asm/atomic.h
7571+++ b/arch/s390/include/asm/atomic.h
7572@@ -326,6 +326,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
7573 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
7574 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
7575
7576+#define atomic64_read_unchecked(v) atomic64_read(v)
7577+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7578+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7579+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7580+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7581+#define atomic64_inc_unchecked(v) atomic64_inc(v)
7582+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7583+#define atomic64_dec_unchecked(v) atomic64_dec(v)
7584+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7585+
7586 #define smp_mb__before_atomic_dec() smp_mb()
7587 #define smp_mb__after_atomic_dec() smp_mb()
7588 #define smp_mb__before_atomic_inc() smp_mb()
7589diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
7590index 4d7ccac..d03d0ad 100644
7591--- a/arch/s390/include/asm/cache.h
7592+++ b/arch/s390/include/asm/cache.h
7593@@ -9,8 +9,10 @@
7594 #ifndef __ARCH_S390_CACHE_H
7595 #define __ARCH_S390_CACHE_H
7596
7597-#define L1_CACHE_BYTES 256
7598+#include <linux/const.h>
7599+
7600 #define L1_CACHE_SHIFT 8
7601+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7602 #define NET_SKB_PAD 32
7603
7604 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
7605diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
7606index 78f4f87..598ce39 100644
7607--- a/arch/s390/include/asm/elf.h
7608+++ b/arch/s390/include/asm/elf.h
7609@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
7610 the loader. We need to make sure that it is out of the way of the program
7611 that it will "exec", and that there is sufficient room for the brk. */
7612
7613-extern unsigned long randomize_et_dyn(unsigned long base);
7614-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
7615+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
7616+
7617+#ifdef CONFIG_PAX_ASLR
7618+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
7619+
7620+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
7621+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
7622+#endif
7623
7624 /* This yields a mask that user programs can use to figure out what
7625 instruction set this CPU supports. */
7626@@ -222,9 +228,6 @@ struct linux_binprm;
7627 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
7628 int arch_setup_additional_pages(struct linux_binprm *, int);
7629
7630-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7631-#define arch_randomize_brk arch_randomize_brk
7632-
7633 void *fill_cpu_elf_notes(void *ptr, struct save_area *sa);
7634
7635 #endif
7636diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
7637index c4a93d6..4d2a9b4 100644
7638--- a/arch/s390/include/asm/exec.h
7639+++ b/arch/s390/include/asm/exec.h
7640@@ -7,6 +7,6 @@
7641 #ifndef __ASM_EXEC_H
7642 #define __ASM_EXEC_H
7643
7644-extern unsigned long arch_align_stack(unsigned long sp);
7645+#define arch_align_stack(x) ((x) & ~0xfUL)
7646
7647 #endif /* __ASM_EXEC_H */
7648diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
7649index b75d7d6..6d6d92b 100644
7650--- a/arch/s390/include/asm/tlb.h
7651+++ b/arch/s390/include/asm/tlb.h
7652@@ -32,6 +32,7 @@ struct mmu_gather {
7653 struct mm_struct *mm;
7654 struct mmu_table_batch *batch;
7655 unsigned int fullmm;
7656+ unsigned long start, end;
7657 };
7658
7659 struct mmu_table_batch {
7660@@ -48,10 +49,13 @@ extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
7661
7662 static inline void tlb_gather_mmu(struct mmu_gather *tlb,
7663 struct mm_struct *mm,
7664- unsigned int full_mm_flush)
7665+ unsigned long start,
7666+ unsigned long end)
7667 {
7668 tlb->mm = mm;
7669- tlb->fullmm = full_mm_flush;
7670+ tlb->start = start;
7671+ tlb->end = end;
7672+ tlb->fullmm = !(start | (end+1));
7673 tlb->batch = NULL;
7674 if (tlb->fullmm)
7675 __tlb_flush_mm(mm);
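
The expression !(start | (end + 1)) is nonzero exactly when start == 0 and end == ~0UL, the conventional arguments for a full-mm teardown, so fullmm keeps its old meaning while the range bounds now travel with the gather. A two-line check:

#include <stdio.h>

static int fullmm(unsigned long start, unsigned long end)
{
	return !(start | (end + 1));
}

int main(void)
{
	printf("%d\n", fullmm(0, ~0UL));          /* 1: whole mm */
	printf("%d\n", fullmm(0x1000, 0x2000));   /* 0: partial range */
	return 0;
}
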
7676diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
7677index 9c33ed4..e40cbef 100644
7678--- a/arch/s390/include/asm/uaccess.h
7679+++ b/arch/s390/include/asm/uaccess.h
7680@@ -252,6 +252,10 @@ static inline unsigned long __must_check
7681 copy_to_user(void __user *to, const void *from, unsigned long n)
7682 {
7683 might_fault();
7684+
7685+ if ((long)n < 0)
7686+ return n;
7687+
7688 return __copy_to_user(to, from, n);
7689 }
7690
7691@@ -275,6 +279,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
7692 static inline unsigned long __must_check
7693 __copy_from_user(void *to, const void __user *from, unsigned long n)
7694 {
7695+ if ((long)n < 0)
7696+ return n;
7697+
7698 if (__builtin_constant_p(n) && (n <= 256))
7699 return uaccess.copy_from_user_small(n, from, to);
7700 else
7701@@ -306,10 +313,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
7702 static inline unsigned long __must_check
7703 copy_from_user(void *to, const void __user *from, unsigned long n)
7704 {
7705- unsigned int sz = __compiletime_object_size(to);
7706+ size_t sz = __compiletime_object_size(to);
7707
7708 might_fault();
7709- if (unlikely(sz != -1 && sz < n)) {
7710+
7711+ if ((long)n < 0)
7712+ return n;
7713+
7714+ if (unlikely(sz != (size_t)-1 && sz < n)) {
7715 copy_from_user_overflow();
7716 return n;
7717 }
7718diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
7719index 7845e15..59c4353 100644
7720--- a/arch/s390/kernel/module.c
7721+++ b/arch/s390/kernel/module.c
7722@@ -169,11 +169,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
7723
7724 /* Increase core size by size of got & plt and set start
7725 offsets for got and plt. */
7726- me->core_size = ALIGN(me->core_size, 4);
7727- me->arch.got_offset = me->core_size;
7728- me->core_size += me->arch.got_size;
7729- me->arch.plt_offset = me->core_size;
7730- me->core_size += me->arch.plt_size;
7731+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
7732+ me->arch.got_offset = me->core_size_rw;
7733+ me->core_size_rw += me->arch.got_size;
7734+ me->arch.plt_offset = me->core_size_rx;
7735+ me->core_size_rx += me->arch.plt_size;
7736 return 0;
7737 }
7738
7739@@ -289,7 +289,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7740 if (info->got_initialized == 0) {
7741 Elf_Addr *gotent;
7742
7743- gotent = me->module_core + me->arch.got_offset +
7744+ gotent = me->module_core_rw + me->arch.got_offset +
7745 info->got_offset;
7746 *gotent = val;
7747 info->got_initialized = 1;
7748@@ -312,7 +312,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7749 rc = apply_rela_bits(loc, val, 0, 64, 0);
7750 else if (r_type == R_390_GOTENT ||
7751 r_type == R_390_GOTPLTENT) {
7752- val += (Elf_Addr) me->module_core - loc;
7753+ val += (Elf_Addr) me->module_core_rw - loc;
7754 rc = apply_rela_bits(loc, val, 1, 32, 1);
7755 }
7756 break;
7757@@ -325,7 +325,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7758 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
7759 if (info->plt_initialized == 0) {
7760 unsigned int *ip;
7761- ip = me->module_core + me->arch.plt_offset +
7762+ ip = me->module_core_rx + me->arch.plt_offset +
7763 info->plt_offset;
7764 #ifndef CONFIG_64BIT
7765 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
7766@@ -350,7 +350,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7767 val - loc + 0xffffUL < 0x1ffffeUL) ||
7768 (r_type == R_390_PLT32DBL &&
7769 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
7770- val = (Elf_Addr) me->module_core +
7771+ val = (Elf_Addr) me->module_core_rx +
7772 me->arch.plt_offset +
7773 info->plt_offset;
7774 val += rela->r_addend - loc;
7775@@ -372,7 +372,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7776 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
7777 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
7778 val = val + rela->r_addend -
7779- ((Elf_Addr) me->module_core + me->arch.got_offset);
7780+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
7781 if (r_type == R_390_GOTOFF16)
7782 rc = apply_rela_bits(loc, val, 0, 16, 0);
7783 else if (r_type == R_390_GOTOFF32)
7784@@ -382,7 +382,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7785 break;
7786 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
7787 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
7788- val = (Elf_Addr) me->module_core + me->arch.got_offset +
7789+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
7790 rela->r_addend - loc;
7791 if (r_type == R_390_GOTPC)
7792 rc = apply_rela_bits(loc, val, 1, 32, 0);
7793diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
7794index 2bc3edd..ab9d598 100644
7795--- a/arch/s390/kernel/process.c
7796+++ b/arch/s390/kernel/process.c
7797@@ -236,39 +236,3 @@ unsigned long get_wchan(struct task_struct *p)
7798 }
7799 return 0;
7800 }
7801-
7802-unsigned long arch_align_stack(unsigned long sp)
7803-{
7804- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7805- sp -= get_random_int() & ~PAGE_MASK;
7806- return sp & ~0xf;
7807-}
7808-
7809-static inline unsigned long brk_rnd(void)
7810-{
7811- /* 8MB for 32bit, 1GB for 64bit */
7812- if (is_32bit_task())
7813- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
7814- else
7815- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
7816-}
7817-
7818-unsigned long arch_randomize_brk(struct mm_struct *mm)
7819-{
7820- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
7821-
7822- if (ret < mm->brk)
7823- return mm->brk;
7824- return ret;
7825-}
7826-
7827-unsigned long randomize_et_dyn(unsigned long base)
7828-{
7829- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
7830-
7831- if (!(current->flags & PF_RANDOMIZE))
7832- return base;
7833- if (ret < base)
7834- return base;
7835- return ret;
7836-}
7837diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
7838index 06bafec..2bca531 100644
7839--- a/arch/s390/mm/mmap.c
7840+++ b/arch/s390/mm/mmap.c
7841@@ -90,10 +90,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7842 */
7843 if (mmap_is_legacy()) {
7844 mm->mmap_base = TASK_UNMAPPED_BASE;
7845+
7846+#ifdef CONFIG_PAX_RANDMMAP
7847+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7848+ mm->mmap_base += mm->delta_mmap;
7849+#endif
7850+
7851 mm->get_unmapped_area = arch_get_unmapped_area;
7852 mm->unmap_area = arch_unmap_area;
7853 } else {
7854 mm->mmap_base = mmap_base();
7855+
7856+#ifdef CONFIG_PAX_RANDMMAP
7857+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7858+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7859+#endif
7860+
7861 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
7862 mm->unmap_area = arch_unmap_area_topdown;
7863 }
7864@@ -175,10 +187,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7865 */
7866 if (mmap_is_legacy()) {
7867 mm->mmap_base = TASK_UNMAPPED_BASE;
7868+
7869+#ifdef CONFIG_PAX_RANDMMAP
7870+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7871+ mm->mmap_base += mm->delta_mmap;
7872+#endif
7873+
7874 mm->get_unmapped_area = s390_get_unmapped_area;
7875 mm->unmap_area = arch_unmap_area;
7876 } else {
7877 mm->mmap_base = mmap_base();
7878+
7879+#ifdef CONFIG_PAX_RANDMMAP
7880+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7881+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7882+#endif
7883+
7884 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
7885 mm->unmap_area = arch_unmap_area_topdown;
7886 }
7887diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
7888index ae3d59f..f65f075 100644
7889--- a/arch/score/include/asm/cache.h
7890+++ b/arch/score/include/asm/cache.h
7891@@ -1,7 +1,9 @@
7892 #ifndef _ASM_SCORE_CACHE_H
7893 #define _ASM_SCORE_CACHE_H
7894
7895+#include <linux/const.h>
7896+
7897 #define L1_CACHE_SHIFT 4
7898-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7899+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7900
7901 #endif /* _ASM_SCORE_CACHE_H */
7902diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
7903index f9f3cd5..58ff438 100644
7904--- a/arch/score/include/asm/exec.h
7905+++ b/arch/score/include/asm/exec.h
7906@@ -1,6 +1,6 @@
7907 #ifndef _ASM_SCORE_EXEC_H
7908 #define _ASM_SCORE_EXEC_H
7909
7910-extern unsigned long arch_align_stack(unsigned long sp);
7911+#define arch_align_stack(x) (x)
7912
7913 #endif /* _ASM_SCORE_EXEC_H */
7914diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
7915index f4c6d02..e9355c3 100644
7916--- a/arch/score/kernel/process.c
7917+++ b/arch/score/kernel/process.c
7918@@ -116,8 +116,3 @@ unsigned long get_wchan(struct task_struct *task)
7919
7920 return task_pt_regs(task)->cp0_epc;
7921 }
7922-
7923-unsigned long arch_align_stack(unsigned long sp)
7924-{
7925- return sp;
7926-}
7927diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
7928index ef9e555..331bd29 100644
7929--- a/arch/sh/include/asm/cache.h
7930+++ b/arch/sh/include/asm/cache.h
7931@@ -9,10 +9,11 @@
7932 #define __ASM_SH_CACHE_H
7933 #ifdef __KERNEL__
7934
7935+#include <linux/const.h>
7936 #include <linux/init.h>
7937 #include <cpu/cache.h>
7938
7939-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7940+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7941
7942 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
7943
7944diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
7945index e61d43d..362192e 100644
7946--- a/arch/sh/include/asm/tlb.h
7947+++ b/arch/sh/include/asm/tlb.h
7948@@ -36,10 +36,12 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
7949 }
7950
7951 static inline void
7952-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
7953+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
7954 {
7955 tlb->mm = mm;
7956- tlb->fullmm = full_mm_flush;
7957+ tlb->start = start;
7958+ tlb->end = end;
7959+ tlb->fullmm = !(start | (end+1));
7960
7961 init_tlb_gather(tlb);
7962 }
7963diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
7964index 03f2b55..b0270327 100644
7965--- a/arch/sh/kernel/cpu/sh4a/smp-shx3.c
7966+++ b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
7967@@ -143,7 +143,7 @@ shx3_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
7968 return NOTIFY_OK;
7969 }
7970
7971-static struct notifier_block __cpuinitdata shx3_cpu_notifier = {
7972+static struct notifier_block shx3_cpu_notifier = {
7973 .notifier_call = shx3_cpu_callback,
7974 };
7975
7976diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
7977index 6777177..cb5e44f 100644
7978--- a/arch/sh/mm/mmap.c
7979+++ b/arch/sh/mm/mmap.c
7980@@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7981 struct mm_struct *mm = current->mm;
7982 struct vm_area_struct *vma;
7983 int do_colour_align;
7984+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
7985 struct vm_unmapped_area_info info;
7986
7987 if (flags & MAP_FIXED) {
7988@@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7989 if (filp || (flags & MAP_SHARED))
7990 do_colour_align = 1;
7991
7992+#ifdef CONFIG_PAX_RANDMMAP
7993+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7994+#endif
7995+
7996 if (addr) {
7997 if (do_colour_align)
7998 addr = COLOUR_ALIGN(addr, pgoff);
7999@@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
8000 addr = PAGE_ALIGN(addr);
8001
8002 vma = find_vma(mm, addr);
8003- if (TASK_SIZE - len >= addr &&
8004- (!vma || addr + len <= vma->vm_start))
8005+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
8006 return addr;
8007 }
8008
8009 info.flags = 0;
8010 info.length = len;
8011- info.low_limit = TASK_UNMAPPED_BASE;
8012+ info.low_limit = mm->mmap_base;
8013 info.high_limit = TASK_SIZE;
8014 info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
8015 info.align_offset = pgoff << PAGE_SHIFT;
8016@@ -85,6 +89,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8017 struct mm_struct *mm = current->mm;
8018 unsigned long addr = addr0;
8019 int do_colour_align;
8020+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
8021 struct vm_unmapped_area_info info;
8022
8023 if (flags & MAP_FIXED) {
8024@@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8025 if (filp || (flags & MAP_SHARED))
8026 do_colour_align = 1;
8027
8028+#ifdef CONFIG_PAX_RANDMMAP
8029+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8030+#endif
8031+
8032 /* requesting a specific address */
8033 if (addr) {
8034 if (do_colour_align)
8035@@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8036 addr = PAGE_ALIGN(addr);
8037
8038 vma = find_vma(mm, addr);
8039- if (TASK_SIZE - len >= addr &&
8040- (!vma || addr + len <= vma->vm_start))
8041+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
8042 return addr;
8043 }
8044
8045@@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8046 VM_BUG_ON(addr != -ENOMEM);
8047 info.flags = 0;
8048 info.low_limit = TASK_UNMAPPED_BASE;
8049+
8050+#ifdef CONFIG_PAX_RANDMMAP
8051+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8052+ info.low_limit += mm->delta_mmap;
8053+#endif
8054+
8055 info.high_limit = TASK_SIZE;
8056 addr = vm_unmapped_area(&info);
8057 }
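
check_heap_stack_gap() replaces the open-coded "!vma || addr + len <= vma->vm_start" test and additionally enforces a guard gap below stack-like VMAs, with the offset argument carrying gr_rand_threadstack_offset()'s per-thread slack. A reduced model under those assumptions (the struct and helper names here are stand-ins, not the grsec implementation):

#include <stdio.h>

struct vma { unsigned long vm_start; int grows_down; };

static int check_gap(const struct vma *next, unsigned long addr,
		     unsigned long len, unsigned long gap)
{
	if (!next)
		return 1;                          /* nothing above us */
	if (!next->grows_down)
		return addr + len <= next->vm_start;
	return addr + len + gap <= next->vm_start; /* keep a guard gap */
}

int main(void)
{
	struct vma stack = { 0x10000, 1 };

	printf("%d\n", check_gap(&stack, 0x8000, 0x1000, 0x1000)); /* 1: fits */
	printf("%d\n", check_gap(&stack, 0xe000, 0x2000, 0x1000)); /* 0: no gap */
	return 0;
}
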
8058diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
8059index be56a24..443328f 100644
8060--- a/arch/sparc/include/asm/atomic_64.h
8061+++ b/arch/sparc/include/asm/atomic_64.h
8062@@ -14,18 +14,40 @@
8063 #define ATOMIC64_INIT(i) { (i) }
8064
8065 #define atomic_read(v) (*(volatile int *)&(v)->counter)
8066+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
8067+{
8068+ return v->counter;
8069+}
8070 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
8071+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
8072+{
8073+ return v->counter;
8074+}
8075
8076 #define atomic_set(v, i) (((v)->counter) = i)
8077+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
8078+{
8079+ v->counter = i;
8080+}
8081 #define atomic64_set(v, i) (((v)->counter) = i)
8082+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
8083+{
8084+ v->counter = i;
8085+}
8086
8087 extern void atomic_add(int, atomic_t *);
8088+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
8089 extern void atomic64_add(long, atomic64_t *);
8090+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
8091 extern void atomic_sub(int, atomic_t *);
8092+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
8093 extern void atomic64_sub(long, atomic64_t *);
8094+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
8095
8096 extern int atomic_add_ret(int, atomic_t *);
8097+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
8098 extern long atomic64_add_ret(long, atomic64_t *);
8099+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
8100 extern int atomic_sub_ret(int, atomic_t *);
8101 extern long atomic64_sub_ret(long, atomic64_t *);
8102
8103@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
8104 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
8105
8106 #define atomic_inc_return(v) atomic_add_ret(1, v)
8107+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
8108+{
8109+ return atomic_add_ret_unchecked(1, v);
8110+}
8111 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
8112+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
8113+{
8114+ return atomic64_add_ret_unchecked(1, v);
8115+}
8116
8117 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
8118 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
8119
8120 #define atomic_add_return(i, v) atomic_add_ret(i, v)
8121+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
8122+{
8123+ return atomic_add_ret_unchecked(i, v);
8124+}
8125 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
8126+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
8127+{
8128+ return atomic64_add_ret_unchecked(i, v);
8129+}
8130
8131 /*
8132 * atomic_inc_and_test - increment and test
8133@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
8134 * other cases.
8135 */
8136 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
8137+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
8138+{
8139+ return atomic_inc_return_unchecked(v) == 0;
8140+}
8141 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
8142
8143 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
8144@@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
8145 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
8146
8147 #define atomic_inc(v) atomic_add(1, v)
8148+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
8149+{
8150+ atomic_add_unchecked(1, v);
8151+}
8152 #define atomic64_inc(v) atomic64_add(1, v)
8153+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
8154+{
8155+ atomic64_add_unchecked(1, v);
8156+}
8157
8158 #define atomic_dec(v) atomic_sub(1, v)
8159+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
8160+{
8161+ atomic_sub_unchecked(1, v);
8162+}
8163 #define atomic64_dec(v) atomic64_sub(1, v)
8164+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
8165+{
8166+ atomic64_sub_unchecked(1, v);
8167+}
8168
8169 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
8170 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
8171
8172 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
8173+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
8174+{
8175+ return cmpxchg(&v->counter, old, new);
8176+}
8177 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
8178+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
8179+{
8180+ return xchg(&v->counter, new);
8181+}
8182
8183 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
8184 {
8185- int c, old;
8186+ int c, old, new;
8187 c = atomic_read(v);
8188 for (;;) {
8189- if (unlikely(c == (u)))
8190+ if (unlikely(c == u))
8191 break;
8192- old = atomic_cmpxchg((v), c, c + (a));
8193+
8194+ asm volatile("addcc %2, %0, %0\n"
8195+
8196+#ifdef CONFIG_PAX_REFCOUNT
8197+ "tvs %%icc, 6\n"
8198+#endif
8199+
8200+ : "=r" (new)
8201+ : "0" (c), "ir" (a)
8202+ : "cc");
8203+
8204+ old = atomic_cmpxchg(v, c, new);
8205 if (likely(old == c))
8206 break;
8207 c = old;
8208@@ -88,20 +165,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
8209 #define atomic64_cmpxchg(v, o, n) \
8210 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
8211 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
8212+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
8213+{
8214+ return xchg(&v->counter, new);
8215+}
8216
8217 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
8218 {
8219- long c, old;
8220+ long c, old, new;
8221 c = atomic64_read(v);
8222 for (;;) {
8223- if (unlikely(c == (u)))
8224+ if (unlikely(c == u))
8225 break;
8226- old = atomic64_cmpxchg((v), c, c + (a));
8227+
8228+ asm volatile("addcc %2, %0, %0\n"
8229+
8230+#ifdef CONFIG_PAX_REFCOUNT
8231+ "tvs %%xcc, 6\n"
8232+#endif
8233+
8234+ : "=r" (new)
8235+ : "0" (c), "ir" (a)
8236+ : "cc");
8237+
8238+ old = atomic64_cmpxchg(v, c, new);
8239 if (likely(old == c))
8240 break;
8241 c = old;
8242 }
8243- return c != (u);
8244+ return c != u;
8245 }
8246
8247 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
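[Editorial note] The hunk above is the core of PAX_REFCOUNT on sparc: checked atomics do their arithmetic with addcc/subcc so the integer condition codes are set, then execute tvs ("trap on overflow set"), which raises software trap 6; the new *_unchecked variants keep the plain add/sub for counters that are allowed to wrap. A minimal sketch of the trapping add, mirroring the inline asm added to __atomic_add_unless above (illustrative, not part of the patch):

    static inline int add_trap_overflow(int a, int b)
    {
        int sum;
        asm volatile("addcc %2, %1, %0\n\t"
    #ifdef CONFIG_PAX_REFCOUNT
                     "tvs %%icc, 6\n\t"   /* raise software trap 6 on signed overflow */
    #endif
                     : "=r" (sum)
                     : "r" (a), "r" (b)
                     : "cc");             /* the asm clobbers the condition codes */
        return sum;
    }

Trap 6 is picked up later in this same patch by bad_trap() in arch/sparc/kernel/traps_64.c, which routes it to pax_report_refcount_overflow().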
8248diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
8249index 5bb6991..5c2132e 100644
8250--- a/arch/sparc/include/asm/cache.h
8251+++ b/arch/sparc/include/asm/cache.h
8252@@ -7,10 +7,12 @@
8253 #ifndef _SPARC_CACHE_H
8254 #define _SPARC_CACHE_H
8255
8256+#include <linux/const.h>
8257+
8258 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
8259
8260 #define L1_CACHE_SHIFT 5
8261-#define L1_CACHE_BYTES 32
8262+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8263
8264 #ifdef CONFIG_SPARC32
8265 #define SMP_CACHE_BYTES_SHIFT 5
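[Editorial note] The rewritten L1_CACHE_BYTES uses _AC() from the newly included <linux/const.h> so the constant carries an unsigned-long type in C (avoiding signed-int surprises in alignment arithmetic) while remaining usable from assembly. The macro's definition, paraphrased from include/linux/const.h:

    #ifdef __ASSEMBLY__
    #define _AC(X,Y)   X          /* assembly: no type suffix */
    #else
    #define __AC(X,Y)  (X##Y)
    #define _AC(X,Y)   __AC(X,Y)  /* C: paste the UL suffix on */
    #endif

So L1_CACHE_BYTES is (1UL << 5) == 32UL in C and (1 << 5) in .S files.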
8266diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
8267index a24e41f..47677ff 100644
8268--- a/arch/sparc/include/asm/elf_32.h
8269+++ b/arch/sparc/include/asm/elf_32.h
8270@@ -114,6 +114,13 @@ typedef struct {
8271
8272 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
8273
8274+#ifdef CONFIG_PAX_ASLR
8275+#define PAX_ELF_ET_DYN_BASE 0x10000UL
8276+
8277+#define PAX_DELTA_MMAP_LEN 16
8278+#define PAX_DELTA_STACK_LEN 16
8279+#endif
8280+
8281 /* This yields a mask that user programs can use to figure out what
8282 instruction set this cpu supports. This can NOT be done in userspace
8283 on Sparc. */
8284diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
8285index 370ca1e..d4f4a98 100644
8286--- a/arch/sparc/include/asm/elf_64.h
8287+++ b/arch/sparc/include/asm/elf_64.h
8288@@ -189,6 +189,13 @@ typedef struct {
8289 #define ELF_ET_DYN_BASE 0x0000010000000000UL
8290 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
8291
8292+#ifdef CONFIG_PAX_ASLR
8293+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
8294+
8295+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
8296+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
8297+#endif
8298+
8299 extern unsigned long sparc64_elf_hwcap;
8300 #define ELF_HWCAP sparc64_elf_hwcap
8301
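[Editorial note] PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN give the number of random bits PaX mixes into the mmap and stack bases: 14/15 bits for 32-bit tasks, 28/29 bits for 64-bit ones. A sketch of how a bit count like this typically becomes a byte offset; the helper and the pax_get_random_long() entropy source are stand-ins, not code from this hunk:

    static unsigned long pax_delta(unsigned int len_bits)
    {
        /* e.g. len_bits == 28 gives 2^28 page-sized steps, a
         * 2^41-byte range with sparc64's 8K pages */
        return (pax_get_random_long() & ((1UL << len_bits) - 1)) << PAGE_SHIFT;
    }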
8302diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
8303index 9b1c36d..209298b 100644
8304--- a/arch/sparc/include/asm/pgalloc_32.h
8305+++ b/arch/sparc/include/asm/pgalloc_32.h
8306@@ -33,6 +33,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
8307 }
8308
8309 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
8310+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
8311
8312 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
8313 unsigned long address)
8314diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
8315index bcfe063..b333142 100644
8316--- a/arch/sparc/include/asm/pgalloc_64.h
8317+++ b/arch/sparc/include/asm/pgalloc_64.h
8318@@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
8319 }
8320
8321 #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
8322+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
8323
8324 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
8325 {
8326diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
8327index 6fc1348..390c50a 100644
8328--- a/arch/sparc/include/asm/pgtable_32.h
8329+++ b/arch/sparc/include/asm/pgtable_32.h
8330@@ -50,6 +50,9 @@ extern unsigned long calc_highpages(void);
8331 #define PAGE_SHARED SRMMU_PAGE_SHARED
8332 #define PAGE_COPY SRMMU_PAGE_COPY
8333 #define PAGE_READONLY SRMMU_PAGE_RDONLY
8334+#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
8335+#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
8336+#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
8337 #define PAGE_KERNEL SRMMU_PAGE_KERNEL
8338
8339 /* Top-level page directory - dummy used by init-mm.
8340@@ -62,18 +65,18 @@ extern unsigned long ptr_in_current_pgd;
8341
8342 /* xwr */
8343 #define __P000 PAGE_NONE
8344-#define __P001 PAGE_READONLY
8345-#define __P010 PAGE_COPY
8346-#define __P011 PAGE_COPY
8347+#define __P001 PAGE_READONLY_NOEXEC
8348+#define __P010 PAGE_COPY_NOEXEC
8349+#define __P011 PAGE_COPY_NOEXEC
8350 #define __P100 PAGE_READONLY
8351 #define __P101 PAGE_READONLY
8352 #define __P110 PAGE_COPY
8353 #define __P111 PAGE_COPY
8354
8355 #define __S000 PAGE_NONE
8356-#define __S001 PAGE_READONLY
8357-#define __S010 PAGE_SHARED
8358-#define __S011 PAGE_SHARED
8359+#define __S001 PAGE_READONLY_NOEXEC
8360+#define __S010 PAGE_SHARED_NOEXEC
8361+#define __S011 PAGE_SHARED_NOEXEC
8362 #define __S100 PAGE_READONLY
8363 #define __S101 PAGE_READONLY
8364 #define __S110 PAGE_SHARED
8365diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
8366index 79da178..c2eede8 100644
8367--- a/arch/sparc/include/asm/pgtsrmmu.h
8368+++ b/arch/sparc/include/asm/pgtsrmmu.h
8369@@ -115,6 +115,11 @@
8370 SRMMU_EXEC | SRMMU_REF)
8371 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
8372 SRMMU_EXEC | SRMMU_REF)
8373+
8374+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
8375+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
8376+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
8377+
8378 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
8379 SRMMU_DIRTY | SRMMU_REF)
8380
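[Editorial note] The new SRMMU_PAGE_*_NOEXEC protections simply drop SRMMU_EXEC; they become visible through the __Pxxx/__Sxxx tables rewritten in pgtable_32.h above, which the generic mm code indexes by the mapping's read/write/exec (and shared) bits. A sketch of that lookup convention, following the kernel's usual protection_map layout (illustrative):

    /* vm_flags: bit 0 = read, bit 1 = write, bit 2 = exec, bit 3 = shared;
     * the __P entries fill the private rows, __S the shared rows. */
    pgprot_t prot = protection_map[vm_flags &
                        (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];

So a private PROT_READ|PROT_WRITE mapping now lands on PAGE_COPY_NOEXEC instead of an implicitly executable PAGE_COPY.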
8381diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
8382index 9689176..63c18ea 100644
8383--- a/arch/sparc/include/asm/spinlock_64.h
8384+++ b/arch/sparc/include/asm/spinlock_64.h
8385@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
8386
8387 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
8388
8389-static void inline arch_read_lock(arch_rwlock_t *lock)
8390+static inline void arch_read_lock(arch_rwlock_t *lock)
8391 {
8392 unsigned long tmp1, tmp2;
8393
8394 __asm__ __volatile__ (
8395 "1: ldsw [%2], %0\n"
8396 " brlz,pn %0, 2f\n"
8397-"4: add %0, 1, %1\n"
8398+"4: addcc %0, 1, %1\n"
8399+
8400+#ifdef CONFIG_PAX_REFCOUNT
8401+" tvs %%icc, 6\n"
8402+#endif
8403+
8404 " cas [%2], %0, %1\n"
8405 " cmp %0, %1\n"
8406 " bne,pn %%icc, 1b\n"
8407@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
8408 " .previous"
8409 : "=&r" (tmp1), "=&r" (tmp2)
8410 : "r" (lock)
8411- : "memory");
8412+ : "memory", "cc");
8413 }
8414
8415-static int inline arch_read_trylock(arch_rwlock_t *lock)
8416+static inline int arch_read_trylock(arch_rwlock_t *lock)
8417 {
8418 int tmp1, tmp2;
8419
8420@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
8421 "1: ldsw [%2], %0\n"
8422 " brlz,a,pn %0, 2f\n"
8423 " mov 0, %0\n"
8424-" add %0, 1, %1\n"
8425+" addcc %0, 1, %1\n"
8426+
8427+#ifdef CONFIG_PAX_REFCOUNT
8428+" tvs %%icc, 6\n"
8429+#endif
8430+
8431 " cas [%2], %0, %1\n"
8432 " cmp %0, %1\n"
8433 " bne,pn %%icc, 1b\n"
8434@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
8435 return tmp1;
8436 }
8437
8438-static void inline arch_read_unlock(arch_rwlock_t *lock)
8439+static inline void arch_read_unlock(arch_rwlock_t *lock)
8440 {
8441 unsigned long tmp1, tmp2;
8442
8443 __asm__ __volatile__(
8444 "1: lduw [%2], %0\n"
8445-" sub %0, 1, %1\n"
8446+" subcc %0, 1, %1\n"
8447+
8448+#ifdef CONFIG_PAX_REFCOUNT
8449+" tvs %%icc, 6\n"
8450+#endif
8451+
8452 " cas [%2], %0, %1\n"
8453 " cmp %0, %1\n"
8454 " bne,pn %%xcc, 1b\n"
8455@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
8456 : "memory");
8457 }
8458
8459-static void inline arch_write_lock(arch_rwlock_t *lock)
8460+static inline void arch_write_lock(arch_rwlock_t *lock)
8461 {
8462 unsigned long mask, tmp1, tmp2;
8463
8464@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
8465 : "memory");
8466 }
8467
8468-static void inline arch_write_unlock(arch_rwlock_t *lock)
8469+static inline void arch_write_unlock(arch_rwlock_t *lock)
8470 {
8471 __asm__ __volatile__(
8472 " stw %%g0, [%0]"
8473@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
8474 : "memory");
8475 }
8476
8477-static int inline arch_write_trylock(arch_rwlock_t *lock)
8478+static inline int arch_write_trylock(arch_rwlock_t *lock)
8479 {
8480 unsigned long mask, tmp1, tmp2, result;
8481
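[Editorial note] The rwlock changes apply the same addcc/subcc + tvs technique to the reader count, and they also extend the clobber lists with "cc". That last part matters on its own: once the asm alters the condition codes, the compiler must be told not to keep flag-dependent state live across it. A minimal illustration of the idiom:

    /* addcc modifies %icc, so "cc" must appear in the clobbers */
    __asm__ __volatile__("addcc %0, 1, %0" : "+r" (x) : : "cc");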
8482diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
8483index dd38075..e7cac83 100644
8484--- a/arch/sparc/include/asm/thread_info_32.h
8485+++ b/arch/sparc/include/asm/thread_info_32.h
8486@@ -49,6 +49,8 @@ struct thread_info {
8487 unsigned long w_saved;
8488
8489 struct restart_block restart_block;
8490+
8491+ unsigned long lowest_stack;
8492 };
8493
8494 /*
8495diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
8496index d5e5042..9bfee76 100644
8497--- a/arch/sparc/include/asm/thread_info_64.h
8498+++ b/arch/sparc/include/asm/thread_info_64.h
8499@@ -63,6 +63,8 @@ struct thread_info {
8500 struct pt_regs *kern_una_regs;
8501 unsigned int kern_una_insn;
8502
8503+ unsigned long lowest_stack;
8504+
8505 unsigned long fpregs[0] __attribute__ ((aligned(64)));
8506 };
8507
8508@@ -192,10 +194,11 @@ register struct thread_info *current_thread_info_reg asm("g6");
8509 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
8510 /* flag bit 6 is available */
8511 #define TIF_32BIT 7 /* 32-bit binary */
8512-/* flag bit 8 is available */
8513+#define TIF_GRSEC_SETXID 8 /* update credentials on syscall entry/exit */
8514 #define TIF_SECCOMP 9 /* secure computing */
8515 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
8516 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
8517+
8518 /* NOTE: Thread flags >= 12 should be ones we have no interest
8519 * in using in assembly, else we can't use the mask as
8520 * an immediate value in instructions such as andcc.
8521@@ -214,12 +217,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
8522 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
8523 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
8524 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
8525+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
8526
8527 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
8528 _TIF_DO_NOTIFY_RESUME_MASK | \
8529 _TIF_NEED_RESCHED)
8530 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
8531
8532+#define _TIF_WORK_SYSCALL \
8533+ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
8534+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
8535+
8536+
8537 /*
8538 * Thread-synchronous status.
8539 *
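[Editorial note] _TIF_WORK_SYSCALL folds every syscall-entry/exit work flag, including the new TIF_GRSEC_SETXID, into a single mask. As the NOTE above explains, all of these flags sit below bit 12, so the combined mask still fits the 13-bit signed immediate that sparc's andcc accepts; that is exactly how arch/sparc/kernel/syscalls.S tests it later in this patch. The C-level equivalent of that one andcc (illustrative):

    /* one test against the combined mask replaces four separate tests */
    if (current_thread_info()->flags & _TIF_WORK_SYSCALL)
        syscall_trace_enter(regs);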
8540diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
8541index 0167d26..767bb0c 100644
8542--- a/arch/sparc/include/asm/uaccess.h
8543+++ b/arch/sparc/include/asm/uaccess.h
8544@@ -1,5 +1,6 @@
8545 #ifndef ___ASM_SPARC_UACCESS_H
8546 #define ___ASM_SPARC_UACCESS_H
8547+
8548 #if defined(__sparc__) && defined(__arch64__)
8549 #include <asm/uaccess_64.h>
8550 #else
8551diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
8552index 53a28dd..50c38c3 100644
8553--- a/arch/sparc/include/asm/uaccess_32.h
8554+++ b/arch/sparc/include/asm/uaccess_32.h
8555@@ -250,27 +250,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
8556
8557 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
8558 {
8559- if (n && __access_ok((unsigned long) to, n))
8560+ if ((long)n < 0)
8561+ return n;
8562+
8563+ if (n && __access_ok((unsigned long) to, n)) {
8564+ if (!__builtin_constant_p(n))
8565+ check_object_size(from, n, true);
8566 return __copy_user(to, (__force void __user *) from, n);
8567- else
8568+ } else
8569 return n;
8570 }
8571
8572 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
8573 {
8574+ if ((long)n < 0)
8575+ return n;
8576+
8577+ if (!__builtin_constant_p(n))
8578+ check_object_size(from, n, true);
8579+
8580 return __copy_user(to, (__force void __user *) from, n);
8581 }
8582
8583 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
8584 {
8585- if (n && __access_ok((unsigned long) from, n))
8586+ if ((long)n < 0)
8587+ return n;
8588+
8589+ if (n && __access_ok((unsigned long) from, n)) {
8590+ if (!__builtin_constant_p(n))
8591+ check_object_size(to, n, false);
8592 return __copy_user((__force void __user *) to, from, n);
8593- else
8594+ } else
8595 return n;
8596 }
8597
8598 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
8599 {
8600+ if ((long)n < 0)
8601+ return n;
8602+
8603 return __copy_user((__force void __user *) to, from, n);
8604 }
8605
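[Editorial note] The added guards implement two PaX hardenings: a huge n (a negative value cast to unsigned long) is rejected up front and reported back as "n bytes not copied", and for sizes that are not compile-time constants, check_object_size() (the USERCOPY slab/stack bounds checker) validates the kernel-side object before data crosses the user/kernel boundary. A condensed sketch of the pattern, with raw_copy() standing in for __copy_user() (illustrative):

    static inline unsigned long
    hardened_copy_to_user(void __user *to, const void *from, unsigned long n)
    {
        if ((long)n < 0)                /* negative size cast to unsigned */
            return n;                   /* pretend nothing was copied */
        if (!__builtin_constant_p(n))
            check_object_size(from, n, true);  /* true = copying out of the kernel */
        return raw_copy(to, from, n);          /* stand-in for __copy_user() */
    }

The 64-bit variant in uaccess_64.h just below additionally rejects size > INT_MAX before reaching ___copy_to_user().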
8606diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
8607index e562d3c..191f176 100644
8608--- a/arch/sparc/include/asm/uaccess_64.h
8609+++ b/arch/sparc/include/asm/uaccess_64.h
8610@@ -10,6 +10,7 @@
8611 #include <linux/compiler.h>
8612 #include <linux/string.h>
8613 #include <linux/thread_info.h>
8614+#include <linux/kernel.h>
8615 #include <asm/asi.h>
8616 #include <asm/spitfire.h>
8617 #include <asm-generic/uaccess-unaligned.h>
8618@@ -214,8 +215,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
8619 static inline unsigned long __must_check
8620 copy_from_user(void *to, const void __user *from, unsigned long size)
8621 {
8622- unsigned long ret = ___copy_from_user(to, from, size);
8623+ unsigned long ret;
8624
8625+ if ((long)size < 0 || size > INT_MAX)
8626+ return size;
8627+
8628+ if (!__builtin_constant_p(size))
8629+ check_object_size(to, size, false);
8630+
8631+ ret = ___copy_from_user(to, from, size);
8632 if (unlikely(ret))
8633 ret = copy_from_user_fixup(to, from, size);
8634
8635@@ -231,8 +239,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
8636 static inline unsigned long __must_check
8637 copy_to_user(void __user *to, const void *from, unsigned long size)
8638 {
8639- unsigned long ret = ___copy_to_user(to, from, size);
8640+ unsigned long ret;
8641
8642+ if ((long)size < 0 || size > INT_MAX)
8643+ return size;
8644+
8645+ if (!__builtin_constant_p(size))
8646+ check_object_size(from, size, true);
8647+
8648+ ret = ___copy_to_user(to, from, size);
8649 if (unlikely(ret))
8650 ret = copy_to_user_fixup(to, from, size);
8651 return ret;
8652diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
8653index d432fb2..6056af1 100644
8654--- a/arch/sparc/kernel/Makefile
8655+++ b/arch/sparc/kernel/Makefile
8656@@ -3,7 +3,7 @@
8657 #
8658
8659 asflags-y := -ansi
8660-ccflags-y := -Werror
8661+#ccflags-y := -Werror
8662
8663 extra-y := head_$(BITS).o
8664
8665diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
8666index 5ef48da..11d460f 100644
8667--- a/arch/sparc/kernel/ds.c
8668+++ b/arch/sparc/kernel/ds.c
8669@@ -783,6 +783,16 @@ void ldom_set_var(const char *var, const char *value)
8670 char *base, *p;
8671 int msg_len, loops;
8672
8673+ if (strlen(var) + strlen(value) + 2 >
8674+ sizeof(pkt) - sizeof(pkt.header)) {
8675+ printk(KERN_ERR PFX
8676+ "contents length: %zu, which more than max: %lu,"
8677+ "so could not set (%s) variable to (%s).\n",
8678+ strlen(var) + strlen(value) + 2,
8679+ sizeof(pkt) - sizeof(pkt.header), var, value);
8680+ return;
8681+ }
8682+
8683 memset(&pkt, 0, sizeof(pkt));
8684 pkt.header.data.tag.type = DS_DATA;
8685 pkt.header.data.handle = cp->handle;
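[Editorial note] The new length check mirrors how ldom_set_var() packs its payload: var and value are stored back-to-back past the header, each with its own NUL terminator, hence the "+ 2". The layout the check guards, reconstructed from the check itself rather than quoted from ds.c:

    strcpy(base, var);                       /* var   + '\0' */
    strcpy(base + strlen(var) + 1, value);   /* value + '\0' */
    /* total payload = strlen(var) + strlen(value) + 2 bytes */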
8686diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
8687index fdd819d..5af08c8 100644
8688--- a/arch/sparc/kernel/process_32.c
8689+++ b/arch/sparc/kernel/process_32.c
8690@@ -116,14 +116,14 @@ void show_regs(struct pt_regs *r)
8691
8692 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
8693 r->psr, r->pc, r->npc, r->y, print_tainted());
8694- printk("PC: <%pS>\n", (void *) r->pc);
8695+ printk("PC: <%pA>\n", (void *) r->pc);
8696 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
8697 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
8698 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
8699 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
8700 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
8701 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
8702- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
8703+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
8704
8705 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
8706 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
8707@@ -160,7 +160,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
8708 rw = (struct reg_window32 *) fp;
8709 pc = rw->ins[7];
8710 printk("[%08lx : ", pc);
8711- printk("%pS ] ", (void *) pc);
8712+ printk("%pA ] ", (void *) pc);
8713 fp = rw->ins[6];
8714 } while (++count < 16);
8715 printk("\n");
8716diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
8717index baebab2..9cd13b1 100644
8718--- a/arch/sparc/kernel/process_64.c
8719+++ b/arch/sparc/kernel/process_64.c
8720@@ -158,7 +158,7 @@ static void show_regwindow(struct pt_regs *regs)
8721 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
8722 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
8723 if (regs->tstate & TSTATE_PRIV)
8724- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
8725+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
8726 }
8727
8728 void show_regs(struct pt_regs *regs)
8729@@ -167,7 +167,7 @@ void show_regs(struct pt_regs *regs)
8730
8731 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
8732 regs->tpc, regs->tnpc, regs->y, print_tainted());
8733- printk("TPC: <%pS>\n", (void *) regs->tpc);
8734+ printk("TPC: <%pA>\n", (void *) regs->tpc);
8735 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
8736 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
8737 regs->u_regs[3]);
8738@@ -180,7 +180,7 @@ void show_regs(struct pt_regs *regs)
8739 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
8740 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
8741 regs->u_regs[15]);
8742- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
8743+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
8744 show_regwindow(regs);
8745 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
8746 }
8747@@ -269,7 +269,7 @@ void arch_trigger_all_cpu_backtrace(void)
8748 ((tp && tp->task) ? tp->task->pid : -1));
8749
8750 if (gp->tstate & TSTATE_PRIV) {
8751- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
8752+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
8753 (void *) gp->tpc,
8754 (void *) gp->o7,
8755 (void *) gp->i7,
8756diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
8757index 79cc0d1..ec62734 100644
8758--- a/arch/sparc/kernel/prom_common.c
8759+++ b/arch/sparc/kernel/prom_common.c
8760@@ -144,7 +144,7 @@ static int __init prom_common_nextprop(phandle node, char *prev, char *buf)
8761
8762 unsigned int prom_early_allocated __initdata;
8763
8764-static struct of_pdt_ops prom_sparc_ops __initdata = {
8765+static struct of_pdt_ops prom_sparc_ops __initconst = {
8766 .nextprop = prom_common_nextprop,
8767 .getproplen = prom_getproplen,
8768 .getproperty = prom_getproperty,
8769diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
8770index 7ff45e4..a58f271 100644
8771--- a/arch/sparc/kernel/ptrace_64.c
8772+++ b/arch/sparc/kernel/ptrace_64.c
8773@@ -1057,6 +1057,10 @@ long arch_ptrace(struct task_struct *child, long request,
8774 return ret;
8775 }
8776
8777+#ifdef CONFIG_GRKERNSEC_SETXID
8778+extern void gr_delayed_cred_worker(void);
8779+#endif
8780+
8781 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
8782 {
8783 int ret = 0;
8784@@ -1064,6 +1068,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
8785 /* do the secure computing check first */
8786 secure_computing_strict(regs->u_regs[UREG_G1]);
8787
8788+#ifdef CONFIG_GRKERNSEC_SETXID
8789+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
8790+ gr_delayed_cred_worker();
8791+#endif
8792+
8793 if (test_thread_flag(TIF_SYSCALL_TRACE))
8794 ret = tracehook_report_syscall_entry(regs);
8795
8796@@ -1084,6 +1093,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
8797
8798 asmlinkage void syscall_trace_leave(struct pt_regs *regs)
8799 {
8800+#ifdef CONFIG_GRKERNSEC_SETXID
8801+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
8802+ gr_delayed_cred_worker();
8803+#endif
8804+
8805 audit_syscall_exit(regs);
8806
8807 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
8808diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
8809index 3a8d184..49498a8 100644
8810--- a/arch/sparc/kernel/sys_sparc_32.c
8811+++ b/arch/sparc/kernel/sys_sparc_32.c
8812@@ -52,7 +52,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8813 if (len > TASK_SIZE - PAGE_SIZE)
8814 return -ENOMEM;
8815 if (!addr)
8816- addr = TASK_UNMAPPED_BASE;
8817+ addr = current->mm->mmap_base;
8818
8819 info.flags = 0;
8820 info.length = len;
8821diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
8822index 2daaaa6..4fb84dc 100644
8823--- a/arch/sparc/kernel/sys_sparc_64.c
8824+++ b/arch/sparc/kernel/sys_sparc_64.c
8825@@ -90,13 +90,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8826 struct vm_area_struct * vma;
8827 unsigned long task_size = TASK_SIZE;
8828 int do_color_align;
8829+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
8830 struct vm_unmapped_area_info info;
8831
8832 if (flags & MAP_FIXED) {
8833 /* We do not accept a shared mapping if it would violate
8834 * cache aliasing constraints.
8835 */
8836- if ((flags & MAP_SHARED) &&
8837+ if ((filp || (flags & MAP_SHARED)) &&
8838 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
8839 return -EINVAL;
8840 return addr;
8841@@ -111,6 +112,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8842 if (filp || (flags & MAP_SHARED))
8843 do_color_align = 1;
8844
8845+#ifdef CONFIG_PAX_RANDMMAP
8846+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8847+#endif
8848+
8849 if (addr) {
8850 if (do_color_align)
8851 addr = COLOR_ALIGN(addr, pgoff);
8852@@ -118,22 +123,28 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8853 addr = PAGE_ALIGN(addr);
8854
8855 vma = find_vma(mm, addr);
8856- if (task_size - len >= addr &&
8857- (!vma || addr + len <= vma->vm_start))
8858+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
8859 return addr;
8860 }
8861
8862 info.flags = 0;
8863 info.length = len;
8864- info.low_limit = TASK_UNMAPPED_BASE;
8865+ info.low_limit = mm->mmap_base;
8866 info.high_limit = min(task_size, VA_EXCLUDE_START);
8867 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
8868 info.align_offset = pgoff << PAGE_SHIFT;
8869+ info.threadstack_offset = offset;
8870 addr = vm_unmapped_area(&info);
8871
8872 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
8873 VM_BUG_ON(addr != -ENOMEM);
8874 info.low_limit = VA_EXCLUDE_END;
8875+
8876+#ifdef CONFIG_PAX_RANDMMAP
8877+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8878+ info.low_limit += mm->delta_mmap;
8879+#endif
8880+
8881 info.high_limit = task_size;
8882 addr = vm_unmapped_area(&info);
8883 }
8884@@ -151,6 +162,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8885 unsigned long task_size = STACK_TOP32;
8886 unsigned long addr = addr0;
8887 int do_color_align;
8888+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
8889 struct vm_unmapped_area_info info;
8890
8891 /* This should only ever run for 32-bit processes. */
8892@@ -160,7 +172,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8893 /* We do not accept a shared mapping if it would violate
8894 * cache aliasing constraints.
8895 */
8896- if ((flags & MAP_SHARED) &&
8897+ if ((filp || (flags & MAP_SHARED)) &&
8898 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
8899 return -EINVAL;
8900 return addr;
8901@@ -173,6 +185,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8902 if (filp || (flags & MAP_SHARED))
8903 do_color_align = 1;
8904
8905+#ifdef CONFIG_PAX_RANDMMAP
8906+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8907+#endif
8908+
8909 /* requesting a specific address */
8910 if (addr) {
8911 if (do_color_align)
8912@@ -181,8 +197,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8913 addr = PAGE_ALIGN(addr);
8914
8915 vma = find_vma(mm, addr);
8916- if (task_size - len >= addr &&
8917- (!vma || addr + len <= vma->vm_start))
8918+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
8919 return addr;
8920 }
8921
8922@@ -192,6 +207,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8923 info.high_limit = mm->mmap_base;
8924 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
8925 info.align_offset = pgoff << PAGE_SHIFT;
8926+ info.threadstack_offset = offset;
8927 addr = vm_unmapped_area(&info);
8928
8929 /*
8930@@ -204,6 +220,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8931 VM_BUG_ON(addr != -ENOMEM);
8932 info.flags = 0;
8933 info.low_limit = TASK_UNMAPPED_BASE;
8934+
8935+#ifdef CONFIG_PAX_RANDMMAP
8936+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8937+ info.low_limit += mm->delta_mmap;
8938+#endif
8939+
8940 info.high_limit = STACK_TOP32;
8941 addr = vm_unmapped_area(&info);
8942 }
8943@@ -260,10 +282,14 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
8944 EXPORT_SYMBOL(get_fb_unmapped_area);
8945
8946 /* Essentially the same as PowerPC. */
8947-static unsigned long mmap_rnd(void)
8948+static unsigned long mmap_rnd(struct mm_struct *mm)
8949 {
8950 unsigned long rnd = 0UL;
8951
8952+#ifdef CONFIG_PAX_RANDMMAP
8953+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8954+#endif
8955+
8956 if (current->flags & PF_RANDOMIZE) {
8957 unsigned long val = get_random_int();
8958 if (test_thread_flag(TIF_32BIT))
8959@@ -276,7 +302,7 @@ static unsigned long mmap_rnd(void)
8960
8961 void arch_pick_mmap_layout(struct mm_struct *mm)
8962 {
8963- unsigned long random_factor = mmap_rnd();
8964+ unsigned long random_factor = mmap_rnd(mm);
8965 unsigned long gap;
8966
8967 /*
8968@@ -289,6 +315,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
8969 gap == RLIM_INFINITY ||
8970 sysctl_legacy_va_layout) {
8971 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
8972+
8973+#ifdef CONFIG_PAX_RANDMMAP
8974+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8975+ mm->mmap_base += mm->delta_mmap;
8976+#endif
8977+
8978 mm->get_unmapped_area = arch_get_unmapped_area;
8979 mm->unmap_area = arch_unmap_area;
8980 } else {
8981@@ -301,6 +333,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
8982 gap = (task_size / 6 * 5);
8983
8984 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
8985+
8986+#ifdef CONFIG_PAX_RANDMMAP
8987+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8988+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
8989+#endif
8990+
8991 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
8992 mm->unmap_area = arch_unmap_area_topdown;
8993 }
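[Editorial note] With MF_PAX_RANDMMAP set, the base chosen by the stock layout logic is shifted again by the per-mm deltas PaX derives from PAX_DELTA_MMAP_LEN/PAX_DELTA_STACK_LEN at exec time: bottom-up layouts move up by delta_mmap, and top-down layouts move down by delta_mmap + delta_stack so the gap below the stack is randomized as well. The net effect of the two hunks above (illustrative):

    /* legacy (bottom-up) */
    base = TASK_UNMAPPED_BASE + random_factor + mm->delta_mmap;
    /* top-down */
    base = PAGE_ALIGN(task_size - gap - random_factor)
               - (mm->delta_mmap + mm->delta_stack);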
8994diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
8995index 22a1098..6255eb9 100644
8996--- a/arch/sparc/kernel/syscalls.S
8997+++ b/arch/sparc/kernel/syscalls.S
8998@@ -52,7 +52,7 @@ sys32_rt_sigreturn:
8999 #endif
9000 .align 32
9001 1: ldx [%g6 + TI_FLAGS], %l5
9002- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
9003+ andcc %l5, _TIF_WORK_SYSCALL, %g0
9004 be,pt %icc, rtrap
9005 nop
9006 call syscall_trace_leave
9007@@ -184,7 +184,7 @@ linux_sparc_syscall32:
9008
9009 srl %i5, 0, %o5 ! IEU1
9010 srl %i2, 0, %o2 ! IEU0 Group
9011- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
9012+ andcc %l0, _TIF_WORK_SYSCALL, %g0
9013 bne,pn %icc, linux_syscall_trace32 ! CTI
9014 mov %i0, %l5 ! IEU1
9015 call %l7 ! CTI Group brk forced
9016@@ -207,7 +207,7 @@ linux_sparc_syscall:
9017
9018 mov %i3, %o3 ! IEU1
9019 mov %i4, %o4 ! IEU0 Group
9020- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
9021+ andcc %l0, _TIF_WORK_SYSCALL, %g0
9022 bne,pn %icc, linux_syscall_trace ! CTI Group
9023 mov %i0, %l5 ! IEU0
9024 2: call %l7 ! CTI Group brk forced
9025@@ -223,7 +223,7 @@ ret_sys_call:
9026
9027 cmp %o0, -ERESTART_RESTARTBLOCK
9028 bgeu,pn %xcc, 1f
9029- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
9030+ andcc %l0, _TIF_WORK_SYSCALL, %g0
9031 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
9032
9033 2:
9034diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c
9035index 654e8aa..45f431b 100644
9036--- a/arch/sparc/kernel/sysfs.c
9037+++ b/arch/sparc/kernel/sysfs.c
9038@@ -266,7 +266,7 @@ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
9039 return NOTIFY_OK;
9040 }
9041
9042-static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
9043+static struct notifier_block sysfs_cpu_nb = {
9044 .notifier_call = sysfs_cpu_notify,
9045 };
9046
9047diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
9048index 6629829..036032d 100644
9049--- a/arch/sparc/kernel/traps_32.c
9050+++ b/arch/sparc/kernel/traps_32.c
9051@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
9052 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
9053 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
9054
9055+extern void gr_handle_kernel_exploit(void);
9056+
9057 void die_if_kernel(char *str, struct pt_regs *regs)
9058 {
9059 static int die_counter;
9060@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
9061 count++ < 30 &&
9062 (((unsigned long) rw) >= PAGE_OFFSET) &&
9063 !(((unsigned long) rw) & 0x7)) {
9064- printk("Caller[%08lx]: %pS\n", rw->ins[7],
9065+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
9066 (void *) rw->ins[7]);
9067 rw = (struct reg_window32 *)rw->ins[6];
9068 }
9069 }
9070 printk("Instruction DUMP:");
9071 instruction_dump ((unsigned long *) regs->pc);
9072- if(regs->psr & PSR_PS)
9073+ if(regs->psr & PSR_PS) {
9074+ gr_handle_kernel_exploit();
9075 do_exit(SIGKILL);
9076+ }
9077 do_exit(SIGSEGV);
9078 }
9079
9080diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
9081index b3f833a..ac74b2d 100644
9082--- a/arch/sparc/kernel/traps_64.c
9083+++ b/arch/sparc/kernel/traps_64.c
9084@@ -76,7 +76,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
9085 i + 1,
9086 p->trapstack[i].tstate, p->trapstack[i].tpc,
9087 p->trapstack[i].tnpc, p->trapstack[i].tt);
9088- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
9089+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
9090 }
9091 }
9092
9093@@ -96,6 +96,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
9094
9095 lvl -= 0x100;
9096 if (regs->tstate & TSTATE_PRIV) {
9097+
9098+#ifdef CONFIG_PAX_REFCOUNT
9099+ if (lvl == 6)
9100+ pax_report_refcount_overflow(regs);
9101+#endif
9102+
9103 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
9104 die_if_kernel(buffer, regs);
9105 }
9106@@ -114,11 +120,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
9107 void bad_trap_tl1(struct pt_regs *regs, long lvl)
9108 {
9109 char buffer[32];
9110-
9111+
9112 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
9113 0, lvl, SIGTRAP) == NOTIFY_STOP)
9114 return;
9115
9116+#ifdef CONFIG_PAX_REFCOUNT
9117+ if (lvl == 6)
9118+ pax_report_refcount_overflow(regs);
9119+#endif
9120+
9121 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
9122
9123 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
9124@@ -1142,7 +1153,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
9125 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
9126 printk("%s" "ERROR(%d): ",
9127 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
9128- printk("TPC<%pS>\n", (void *) regs->tpc);
9129+ printk("TPC<%pA>\n", (void *) regs->tpc);
9130 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
9131 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
9132 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
9133@@ -1749,7 +1760,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
9134 smp_processor_id(),
9135 (type & 0x1) ? 'I' : 'D',
9136 regs->tpc);
9137- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
9138+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
9139 panic("Irrecoverable Cheetah+ parity error.");
9140 }
9141
9142@@ -1757,7 +1768,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
9143 smp_processor_id(),
9144 (type & 0x1) ? 'I' : 'D',
9145 regs->tpc);
9146- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
9147+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
9148 }
9149
9150 struct sun4v_error_entry {
9151@@ -2104,9 +2115,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
9152
9153 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
9154 regs->tpc, tl);
9155- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
9156+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
9157 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
9158- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
9159+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
9160 (void *) regs->u_regs[UREG_I7]);
9161 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
9162 "pte[%lx] error[%lx]\n",
9163@@ -2128,9 +2139,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
9164
9165 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
9166 regs->tpc, tl);
9167- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
9168+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
9169 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
9170- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
9171+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
9172 (void *) regs->u_regs[UREG_I7]);
9173 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
9174 "pte[%lx] error[%lx]\n",
9175@@ -2336,13 +2347,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
9176 fp = (unsigned long)sf->fp + STACK_BIAS;
9177 }
9178
9179- printk(" [%016lx] %pS\n", pc, (void *) pc);
9180+ printk(" [%016lx] %pA\n", pc, (void *) pc);
9181 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
9182 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
9183 int index = tsk->curr_ret_stack;
9184 if (tsk->ret_stack && index >= graph) {
9185 pc = tsk->ret_stack[index - graph].ret;
9186- printk(" [%016lx] %pS\n", pc, (void *) pc);
9187+ printk(" [%016lx] %pA\n", pc, (void *) pc);
9188 graph++;
9189 }
9190 }
9191@@ -2360,6 +2371,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
9192 return (struct reg_window *) (fp + STACK_BIAS);
9193 }
9194
9195+extern void gr_handle_kernel_exploit(void);
9196+
9197 void die_if_kernel(char *str, struct pt_regs *regs)
9198 {
9199 static int die_counter;
9200@@ -2388,7 +2401,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
9201 while (rw &&
9202 count++ < 30 &&
9203 kstack_valid(tp, (unsigned long) rw)) {
9204- printk("Caller[%016lx]: %pS\n", rw->ins[7],
9205+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
9206 (void *) rw->ins[7]);
9207
9208 rw = kernel_stack_up(rw);
9209@@ -2401,8 +2414,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
9210 }
9211 user_instruction_dump ((unsigned int __user *) regs->tpc);
9212 }
9213- if (regs->tstate & TSTATE_PRIV)
9214+ if (regs->tstate & TSTATE_PRIV) {
9215+ gr_handle_kernel_exploit();
9216 do_exit(SIGKILL);
9217+ }
9218 do_exit(SIGSEGV);
9219 }
9220 EXPORT_SYMBOL(die_if_kernel);
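[Editorial note] Both die_if_kernel() paths (sparc32 earlier, sparc64 here) now call gr_handle_kernel_exploit() before do_exit(SIGKILL) whenever the oops happened in privileged mode. The helper lives in grsecurity proper (the GRKERNSEC_KERN_LOCKOUT feature); a hedged sketch of its behaviour, based on that feature's description and not on code in this patch:

    /* hedged sketch; names and details are assumptions */
    void gr_handle_kernel_exploit(void)
    {
        if (uid_eq(current_uid(), GLOBAL_ROOT_UID))
            panic("grsec: kernel crash caused by a root process");
        /* otherwise: kill the offending user's processes and lock
         * the uid out of starting new ones */
    }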
9221diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
9222index 8201c25e..072a2a7 100644
9223--- a/arch/sparc/kernel/unaligned_64.c
9224+++ b/arch/sparc/kernel/unaligned_64.c
9225@@ -286,7 +286,7 @@ static void log_unaligned(struct pt_regs *regs)
9226 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
9227
9228 if (__ratelimit(&ratelimit)) {
9229- printk("Kernel unaligned access at TPC[%lx] %pS\n",
9230+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
9231 regs->tpc, (void *) regs->tpc);
9232 }
9233 }
9234diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
9235index dbe119b..089c7c1 100644
9236--- a/arch/sparc/lib/Makefile
9237+++ b/arch/sparc/lib/Makefile
9238@@ -2,7 +2,7 @@
9239 #
9240
9241 asflags-y := -ansi -DST_DIV0=0x02
9242-ccflags-y := -Werror
9243+#ccflags-y := -Werror
9244
9245 lib-$(CONFIG_SPARC32) += ashrdi3.o
9246 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
9247diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
9248index 85c233d..68500e0 100644
9249--- a/arch/sparc/lib/atomic_64.S
9250+++ b/arch/sparc/lib/atomic_64.S
9251@@ -17,7 +17,12 @@
9252 ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
9253 BACKOFF_SETUP(%o2)
9254 1: lduw [%o1], %g1
9255- add %g1, %o0, %g7
9256+ addcc %g1, %o0, %g7
9257+
9258+#ifdef CONFIG_PAX_REFCOUNT
9259+ tvs %icc, 6
9260+#endif
9261+
9262 cas [%o1], %g1, %g7
9263 cmp %g1, %g7
9264 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
9265@@ -27,10 +32,28 @@ ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
9266 2: BACKOFF_SPIN(%o2, %o3, 1b)
9267 ENDPROC(atomic_add)
9268
9269+ENTRY(atomic_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
9270+ BACKOFF_SETUP(%o2)
9271+1: lduw [%o1], %g1
9272+ add %g1, %o0, %g7
9273+ cas [%o1], %g1, %g7
9274+ cmp %g1, %g7
9275+ bne,pn %icc, 2f
9276+ nop
9277+ retl
9278+ nop
9279+2: BACKOFF_SPIN(%o2, %o3, 1b)
9280+ENDPROC(atomic_add_unchecked)
9281+
9282 ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
9283 BACKOFF_SETUP(%o2)
9284 1: lduw [%o1], %g1
9285- sub %g1, %o0, %g7
9286+ subcc %g1, %o0, %g7
9287+
9288+#ifdef CONFIG_PAX_REFCOUNT
9289+ tvs %icc, 6
9290+#endif
9291+
9292 cas [%o1], %g1, %g7
9293 cmp %g1, %g7
9294 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
9295@@ -40,10 +63,28 @@ ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
9296 2: BACKOFF_SPIN(%o2, %o3, 1b)
9297 ENDPROC(atomic_sub)
9298
9299+ENTRY(atomic_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
9300+ BACKOFF_SETUP(%o2)
9301+1: lduw [%o1], %g1
9302+ sub %g1, %o0, %g7
9303+ cas [%o1], %g1, %g7
9304+ cmp %g1, %g7
9305+ bne,pn %icc, 2f
9306+ nop
9307+ retl
9308+ nop
9309+2: BACKOFF_SPIN(%o2, %o3, 1b)
9310+ENDPROC(atomic_sub_unchecked)
9311+
9312 ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
9313 BACKOFF_SETUP(%o2)
9314 1: lduw [%o1], %g1
9315- add %g1, %o0, %g7
9316+ addcc %g1, %o0, %g7
9317+
9318+#ifdef CONFIG_PAX_REFCOUNT
9319+ tvs %icc, 6
9320+#endif
9321+
9322 cas [%o1], %g1, %g7
9323 cmp %g1, %g7
9324 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
9325@@ -53,10 +94,29 @@ ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
9326 2: BACKOFF_SPIN(%o2, %o3, 1b)
9327 ENDPROC(atomic_add_ret)
9328
9329+ENTRY(atomic_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
9330+ BACKOFF_SETUP(%o2)
9331+1: lduw [%o1], %g1
9332+ addcc %g1, %o0, %g7
9333+ cas [%o1], %g1, %g7
9334+ cmp %g1, %g7
9335+ bne,pn %icc, 2f
9336+ add %g7, %o0, %g7
9337+ sra %g7, 0, %o0
9338+ retl
9339+ nop
9340+2: BACKOFF_SPIN(%o2, %o3, 1b)
9341+ENDPROC(atomic_add_ret_unchecked)
9342+
9343 ENTRY(atomic_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
9344 BACKOFF_SETUP(%o2)
9345 1: lduw [%o1], %g1
9346- sub %g1, %o0, %g7
9347+ subcc %g1, %o0, %g7
9348+
9349+#ifdef CONFIG_PAX_REFCOUNT
9350+ tvs %icc, 6
9351+#endif
9352+
9353 cas [%o1], %g1, %g7
9354 cmp %g1, %g7
9355 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
9356@@ -69,7 +129,12 @@ ENDPROC(atomic_sub_ret)
9357 ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
9358 BACKOFF_SETUP(%o2)
9359 1: ldx [%o1], %g1
9360- add %g1, %o0, %g7
9361+ addcc %g1, %o0, %g7
9362+
9363+#ifdef CONFIG_PAX_REFCOUNT
9364+ tvs %xcc, 6
9365+#endif
9366+
9367 casx [%o1], %g1, %g7
9368 cmp %g1, %g7
9369 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
9370@@ -79,10 +144,28 @@ ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
9371 2: BACKOFF_SPIN(%o2, %o3, 1b)
9372 ENDPROC(atomic64_add)
9373
9374+ENTRY(atomic64_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
9375+ BACKOFF_SETUP(%o2)
9376+1: ldx [%o1], %g1
9377+ addcc %g1, %o0, %g7
9378+ casx [%o1], %g1, %g7
9379+ cmp %g1, %g7
9380+ bne,pn %xcc, 2f
9381+ nop
9382+ retl
9383+ nop
9384+2: BACKOFF_SPIN(%o2, %o3, 1b)
9385+ENDPROC(atomic64_add_unchecked)
9386+
9387 ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
9388 BACKOFF_SETUP(%o2)
9389 1: ldx [%o1], %g1
9390- sub %g1, %o0, %g7
9391+ subcc %g1, %o0, %g7
9392+
9393+#ifdef CONFIG_PAX_REFCOUNT
9394+ tvs %xcc, 6
9395+#endif
9396+
9397 casx [%o1], %g1, %g7
9398 cmp %g1, %g7
9399 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
9400@@ -92,10 +175,28 @@ ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
9401 2: BACKOFF_SPIN(%o2, %o3, 1b)
9402 ENDPROC(atomic64_sub)
9403
9404+ENTRY(atomic64_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
9405+ BACKOFF_SETUP(%o2)
9406+1: ldx [%o1], %g1
9407+ subcc %g1, %o0, %g7
9408+ casx [%o1], %g1, %g7
9409+ cmp %g1, %g7
9410+ bne,pn %xcc, 2f
9411+ nop
9412+ retl
9413+ nop
9414+2: BACKOFF_SPIN(%o2, %o3, 1b)
9415+ENDPROC(atomic64_sub_unchecked)
9416+
9417 ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
9418 BACKOFF_SETUP(%o2)
9419 1: ldx [%o1], %g1
9420- add %g1, %o0, %g7
9421+ addcc %g1, %o0, %g7
9422+
9423+#ifdef CONFIG_PAX_REFCOUNT
9424+ tvs %xcc, 6
9425+#endif
9426+
9427 casx [%o1], %g1, %g7
9428 cmp %g1, %g7
9429 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
9430@@ -105,10 +206,29 @@ ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
9431 2: BACKOFF_SPIN(%o2, %o3, 1b)
9432 ENDPROC(atomic64_add_ret)
9433
9434+ENTRY(atomic64_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
9435+ BACKOFF_SETUP(%o2)
9436+1: ldx [%o1], %g1
9437+ addcc %g1, %o0, %g7
9438+ casx [%o1], %g1, %g7
9439+ cmp %g1, %g7
9440+ bne,pn %xcc, 2f
9441+ add %g7, %o0, %g7
9442+ mov %g7, %o0
9443+ retl
9444+ nop
9445+2: BACKOFF_SPIN(%o2, %o3, 1b)
9446+ENDPROC(atomic64_add_ret_unchecked)
9447+
9448 ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
9449 BACKOFF_SETUP(%o2)
9450 1: ldx [%o1], %g1
9451- sub %g1, %o0, %g7
9452+ subcc %g1, %o0, %g7
9453+
9454+#ifdef CONFIG_PAX_REFCOUNT
9455+ tvs %xcc, 6
9456+#endif
9457+
9458 casx [%o1], %g1, %g7
9459 cmp %g1, %g7
9460 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
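[Editorial note] Every routine in atomic_64.S follows the same lock-free shape: load the counter, compute the new value (with the overflow trap in the checked variants), attempt cas/casx, and spin with exponential backoff if another CPU won the race. The add-and-return variant rendered in C, reusing the add_trap_overflow() sketch from the atomic_64.h note earlier and with the kernel's cmpxchg() standing in for cas (illustrative):

    static int atomic_add_ret_sketch(int i, int *counter)
    {
        int old, new;
        do {
            old = *counter;                   /* lduw [%o1], %g1        */
            new = add_trap_overflow(old, i);  /* addcc (+ tvs)          */
        } while (cmpxchg(counter, old, new) != old);  /* cas + cmp + bne */
        return new;                           /* add %g7, %o0 / sra path */
    }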
9461diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
9462index 0c4e35e..745d3e4 100644
9463--- a/arch/sparc/lib/ksyms.c
9464+++ b/arch/sparc/lib/ksyms.c
9465@@ -109,12 +109,18 @@ EXPORT_SYMBOL(__downgrade_write);
9466
9467 /* Atomic counter implementation. */
9468 EXPORT_SYMBOL(atomic_add);
9469+EXPORT_SYMBOL(atomic_add_unchecked);
9470 EXPORT_SYMBOL(atomic_add_ret);
9471+EXPORT_SYMBOL(atomic_add_ret_unchecked);
9472 EXPORT_SYMBOL(atomic_sub);
9473+EXPORT_SYMBOL(atomic_sub_unchecked);
9474 EXPORT_SYMBOL(atomic_sub_ret);
9475 EXPORT_SYMBOL(atomic64_add);
9476+EXPORT_SYMBOL(atomic64_add_unchecked);
9477 EXPORT_SYMBOL(atomic64_add_ret);
9478+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
9479 EXPORT_SYMBOL(atomic64_sub);
9480+EXPORT_SYMBOL(atomic64_sub_unchecked);
9481 EXPORT_SYMBOL(atomic64_sub_ret);
9482 EXPORT_SYMBOL(atomic64_dec_if_positive);
9483
9484diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
9485index 30c3ecc..736f015 100644
9486--- a/arch/sparc/mm/Makefile
9487+++ b/arch/sparc/mm/Makefile
9488@@ -2,7 +2,7 @@
9489 #
9490
9491 asflags-y := -ansi
9492-ccflags-y := -Werror
9493+#ccflags-y := -Werror
9494
9495 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
9496 obj-y += fault_$(BITS).o
9497diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
9498index e98bfda..ea8d221 100644
9499--- a/arch/sparc/mm/fault_32.c
9500+++ b/arch/sparc/mm/fault_32.c
9501@@ -21,6 +21,9 @@
9502 #include <linux/perf_event.h>
9503 #include <linux/interrupt.h>
9504 #include <linux/kdebug.h>
9505+#include <linux/slab.h>
9506+#include <linux/pagemap.h>
9507+#include <linux/compiler.h>
9508
9509 #include <asm/page.h>
9510 #include <asm/pgtable.h>
9511@@ -159,6 +162,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
9512 return safe_compute_effective_address(regs, insn);
9513 }
9514
9515+#ifdef CONFIG_PAX_PAGEEXEC
9516+#ifdef CONFIG_PAX_DLRESOLVE
9517+static void pax_emuplt_close(struct vm_area_struct *vma)
9518+{
9519+ vma->vm_mm->call_dl_resolve = 0UL;
9520+}
9521+
9522+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
9523+{
9524+ unsigned int *kaddr;
9525+
9526+ vmf->page = alloc_page(GFP_HIGHUSER);
9527+ if (!vmf->page)
9528+ return VM_FAULT_OOM;
9529+
9530+ kaddr = kmap(vmf->page);
9531+ memset(kaddr, 0, PAGE_SIZE);
9532+ kaddr[0] = 0x9DE3BFA8U; /* save */
9533+ flush_dcache_page(vmf->page);
9534+ kunmap(vmf->page);
9535+ return VM_FAULT_MAJOR;
9536+}
9537+
9538+static const struct vm_operations_struct pax_vm_ops = {
9539+ .close = pax_emuplt_close,
9540+ .fault = pax_emuplt_fault
9541+};
9542+
9543+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
9544+{
9545+ int ret;
9546+
9547+ INIT_LIST_HEAD(&vma->anon_vma_chain);
9548+ vma->vm_mm = current->mm;
9549+ vma->vm_start = addr;
9550+ vma->vm_end = addr + PAGE_SIZE;
9551+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
9552+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
9553+ vma->vm_ops = &pax_vm_ops;
9554+
9555+ ret = insert_vm_struct(current->mm, vma);
9556+ if (ret)
9557+ return ret;
9558+
9559+ ++current->mm->total_vm;
9560+ return 0;
9561+}
9562+#endif
9563+
9564+/*
9565+ * PaX: decide what to do with offenders (regs->pc = fault address)
9566+ *
9567+ * returns 1 when task should be killed
9568+ * 2 when patched PLT trampoline was detected
9569+ * 3 when unpatched PLT trampoline was detected
9570+ */
9571+static int pax_handle_fetch_fault(struct pt_regs *regs)
9572+{
9573+
9574+#ifdef CONFIG_PAX_EMUPLT
9575+ int err;
9576+
9577+ do { /* PaX: patched PLT emulation #1 */
9578+ unsigned int sethi1, sethi2, jmpl;
9579+
9580+ err = get_user(sethi1, (unsigned int *)regs->pc);
9581+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
9582+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
9583+
9584+ if (err)
9585+ break;
9586+
9587+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
9588+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
9589+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
9590+ {
9591+ unsigned int addr;
9592+
9593+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
9594+ addr = regs->u_regs[UREG_G1];
9595+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
9596+ regs->pc = addr;
9597+ regs->npc = addr+4;
9598+ return 2;
9599+ }
9600+ } while (0);
9601+
9602+ do { /* PaX: patched PLT emulation #2 */
9603+ unsigned int ba;
9604+
9605+ err = get_user(ba, (unsigned int *)regs->pc);
9606+
9607+ if (err)
9608+ break;
9609+
9610+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
9611+ unsigned int addr;
9612+
9613+ if ((ba & 0xFFC00000U) == 0x30800000U)
9614+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
9615+ else
9616+ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
9617+ regs->pc = addr;
9618+ regs->npc = addr+4;
9619+ return 2;
9620+ }
9621+ } while (0);
9622+
9623+ do { /* PaX: patched PLT emulation #3 */
9624+ unsigned int sethi, bajmpl, nop;
9625+
9626+ err = get_user(sethi, (unsigned int *)regs->pc);
9627+ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
9628+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
9629+
9630+ if (err)
9631+ break;
9632+
9633+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9634+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
9635+ nop == 0x01000000U)
9636+ {
9637+ unsigned int addr;
9638+
9639+ addr = (sethi & 0x003FFFFFU) << 10;
9640+ regs->u_regs[UREG_G1] = addr;
9641+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
9642+ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
9643+ else
9644+ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
9645+ regs->pc = addr;
9646+ regs->npc = addr+4;
9647+ return 2;
9648+ }
9649+ } while (0);
9650+
9651+ do { /* PaX: unpatched PLT emulation step 1 */
9652+ unsigned int sethi, ba, nop;
9653+
9654+ err = get_user(sethi, (unsigned int *)regs->pc);
9655+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
9656+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
9657+
9658+ if (err)
9659+ break;
9660+
9661+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9662+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
9663+ nop == 0x01000000U)
9664+ {
9665+ unsigned int addr, save, call;
9666+
9667+ if ((ba & 0xFFC00000U) == 0x30800000U)
9668+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
9669+ else
9670+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
9671+
9672+ err = get_user(save, (unsigned int *)addr);
9673+ err |= get_user(call, (unsigned int *)(addr+4));
9674+ err |= get_user(nop, (unsigned int *)(addr+8));
9675+ if (err)
9676+ break;
9677+
9678+#ifdef CONFIG_PAX_DLRESOLVE
9679+ if (save == 0x9DE3BFA8U &&
9680+ (call & 0xC0000000U) == 0x40000000U &&
9681+ nop == 0x01000000U)
9682+ {
9683+ struct vm_area_struct *vma;
9684+ unsigned long call_dl_resolve;
9685+
9686+ down_read(&current->mm->mmap_sem);
9687+ call_dl_resolve = current->mm->call_dl_resolve;
9688+ up_read(&current->mm->mmap_sem);
9689+ if (likely(call_dl_resolve))
9690+ goto emulate;
9691+
9692+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
9693+
9694+ down_write(&current->mm->mmap_sem);
9695+ if (current->mm->call_dl_resolve) {
9696+ call_dl_resolve = current->mm->call_dl_resolve;
9697+ up_write(&current->mm->mmap_sem);
9698+ if (vma)
9699+ kmem_cache_free(vm_area_cachep, vma);
9700+ goto emulate;
9701+ }
9702+
9703+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
9704+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
9705+ up_write(&current->mm->mmap_sem);
9706+ if (vma)
9707+ kmem_cache_free(vm_area_cachep, vma);
9708+ return 1;
9709+ }
9710+
9711+ if (pax_insert_vma(vma, call_dl_resolve)) {
9712+ up_write(&current->mm->mmap_sem);
9713+ kmem_cache_free(vm_area_cachep, vma);
9714+ return 1;
9715+ }
9716+
9717+ current->mm->call_dl_resolve = call_dl_resolve;
9718+ up_write(&current->mm->mmap_sem);
9719+
9720+emulate:
9721+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
9722+ regs->pc = call_dl_resolve;
9723+ regs->npc = addr+4;
9724+ return 3;
9725+ }
9726+#endif
9727+
9728+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
9729+ if ((save & 0xFFC00000U) == 0x05000000U &&
9730+ (call & 0xFFFFE000U) == 0x85C0A000U &&
9731+ nop == 0x01000000U)
9732+ {
9733+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
9734+ regs->u_regs[UREG_G2] = addr + 4;
9735+ addr = (save & 0x003FFFFFU) << 10;
9736+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
9737+ regs->pc = addr;
9738+ regs->npc = addr+4;
9739+ return 3;
9740+ }
9741+ }
9742+ } while (0);
9743+
9744+ do { /* PaX: unpatched PLT emulation step 2 */
9745+ unsigned int save, call, nop;
9746+
9747+ err = get_user(save, (unsigned int *)(regs->pc-4));
9748+ err |= get_user(call, (unsigned int *)regs->pc);
9749+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
9750+ if (err)
9751+ break;
9752+
9753+ if (save == 0x9DE3BFA8U &&
9754+ (call & 0xC0000000U) == 0x40000000U &&
9755+ nop == 0x01000000U)
9756+ {
9757+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
9758+
9759+ regs->u_regs[UREG_RETPC] = regs->pc;
9760+ regs->pc = dl_resolve;
9761+ regs->npc = dl_resolve+4;
9762+ return 3;
9763+ }
9764+ } while (0);
9765+#endif
9766+
9767+ return 1;
9768+}
9769+
9770+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
9771+{
9772+ unsigned long i;
9773+
9774+ printk(KERN_ERR "PAX: bytes at PC: ");
9775+ for (i = 0; i < 8; i++) {
9776+ unsigned int c;
9777+ if (get_user(c, (unsigned int *)pc+i))
9778+ printk(KERN_CONT "???????? ");
9779+ else
9780+ printk(KERN_CONT "%08x ", c);
9781+ }
9782+ printk("\n");
9783+}
9784+#endif
9785+
9786 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
9787 int text_fault)
9788 {
9789@@ -230,6 +504,24 @@ good_area:
9790 if (!(vma->vm_flags & VM_WRITE))
9791 goto bad_area;
9792 } else {
9793+
9794+#ifdef CONFIG_PAX_PAGEEXEC
9795+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
9796+ up_read(&mm->mmap_sem);
9797+ switch (pax_handle_fetch_fault(regs)) {
9798+
9799+#ifdef CONFIG_PAX_EMUPLT
9800+ case 2:
9801+ case 3:
9802+ return;
9803+#endif
9804+
9805+ }
9806+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
9807+ do_group_exit(SIGKILL);
9808+ }
9809+#endif
9810+
9811 /* Allow reads even for write-only mappings */
9812 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
9813 goto bad_area;
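[Editorial note] The emulation code above pattern-matches the handful of PLT sequences the sparc toolchains emit and recomputes the branch target from the instruction immediates: sethi carries bits 31..10 of a constant in its 22-bit field, and jmpl adds a sign-extended 13-bit simm13. An equivalent decode using plain shifts in place of the or/xor/add sign-extension trick used in the patch (illustrative):

    static unsigned int plt_target(unsigned int sethi, unsigned int jmpl)
    {
        unsigned int hi = (sethi & 0x003FFFFFU) << 10;  /* imm22 << 10 */
        int lo = ((int)(jmpl << 19)) >> 19;             /* sign-extend simm13 */
        return hi + lo;
    }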
9814diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
9815index 5062ff3..e0b75f3 100644
9816--- a/arch/sparc/mm/fault_64.c
9817+++ b/arch/sparc/mm/fault_64.c
9818@@ -21,6 +21,9 @@
9819 #include <linux/kprobes.h>
9820 #include <linux/kdebug.h>
9821 #include <linux/percpu.h>
9822+#include <linux/slab.h>
9823+#include <linux/pagemap.h>
9824+#include <linux/compiler.h>
9825
9826 #include <asm/page.h>
9827 #include <asm/pgtable.h>
9828@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
9829 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
9830 regs->tpc);
9831 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
9832- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
9833+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
9834 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
9835 dump_stack();
9836 unhandled_fault(regs->tpc, current, regs);
9837@@ -270,6 +273,466 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
9838 show_regs(regs);
9839 }
9840
9841+#ifdef CONFIG_PAX_PAGEEXEC
9842+#ifdef CONFIG_PAX_DLRESOLVE
9843+static void pax_emuplt_close(struct vm_area_struct *vma)
9844+{
9845+ vma->vm_mm->call_dl_resolve = 0UL;
9846+}
9847+
9848+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
9849+{
9850+ unsigned int *kaddr;
9851+
9852+ vmf->page = alloc_page(GFP_HIGHUSER);
9853+ if (!vmf->page)
9854+ return VM_FAULT_OOM;
9855+
9856+ kaddr = kmap(vmf->page);
9857+ memset(kaddr, 0, PAGE_SIZE);
9858+ kaddr[0] = 0x9DE3BFA8U; /* save */
9859+ flush_dcache_page(vmf->page);
9860+ kunmap(vmf->page);
9861+ return VM_FAULT_MAJOR;
9862+}
9863+
9864+static const struct vm_operations_struct pax_vm_ops = {
9865+ .close = pax_emuplt_close,
9866+ .fault = pax_emuplt_fault
9867+};
9868+
9869+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
9870+{
9871+ int ret;
9872+
9873+ INIT_LIST_HEAD(&vma->anon_vma_chain);
9874+ vma->vm_mm = current->mm;
9875+ vma->vm_start = addr;
9876+ vma->vm_end = addr + PAGE_SIZE;
9877+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
9878+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
9879+ vma->vm_ops = &pax_vm_ops;
9880+
9881+ ret = insert_vm_struct(current->mm, vma);
9882+ if (ret)
9883+ return ret;
9884+
9885+ ++current->mm->total_vm;
9886+ return 0;
9887+}
9888+#endif
9889+
9890+/*
9891+ * PaX: decide what to do with offenders (regs->tpc = fault address)
9892+ *
9893+ * returns 1 when task should be killed
9894+ * 2 when patched PLT trampoline was detected
9895+ * 3 when unpatched PLT trampoline was detected
9896+ */
9897+static int pax_handle_fetch_fault(struct pt_regs *regs)
9898+{
9899+
9900+#ifdef CONFIG_PAX_EMUPLT
9901+ int err;
9902+
9903+ do { /* PaX: patched PLT emulation #1 */
9904+ unsigned int sethi1, sethi2, jmpl;
9905+
9906+ err = get_user(sethi1, (unsigned int *)regs->tpc);
9907+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
9908+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
9909+
9910+ if (err)
9911+ break;
9912+
9913+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
9914+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
9915+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
9916+ {
9917+ unsigned long addr;
9918+
9919+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
9920+ addr = regs->u_regs[UREG_G1];
9921+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
9922+
9923+ if (test_thread_flag(TIF_32BIT))
9924+ addr &= 0xFFFFFFFFUL;
9925+
9926+ regs->tpc = addr;
9927+ regs->tnpc = addr+4;
9928+ return 2;
9929+ }
9930+ } while (0);
9931+
9932+ do { /* PaX: patched PLT emulation #2 */
9933+ unsigned int ba;
9934+
9935+ err = get_user(ba, (unsigned int *)regs->tpc);
9936+
9937+ if (err)
9938+ break;
9939+
9940+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
9941+ unsigned long addr;
9942+
9943+ if ((ba & 0xFFC00000U) == 0x30800000U)
9944+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
9945+ else
9946+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
9947+
9948+ if (test_thread_flag(TIF_32BIT))
9949+ addr &= 0xFFFFFFFFUL;
9950+
9951+ regs->tpc = addr;
9952+ regs->tnpc = addr+4;
9953+ return 2;
9954+ }
9955+ } while (0);
9956+
9957+ do { /* PaX: patched PLT emulation #3 */
9958+ unsigned int sethi, bajmpl, nop;
9959+
9960+ err = get_user(sethi, (unsigned int *)regs->tpc);
9961+ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
9962+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
9963+
9964+ if (err)
9965+ break;
9966+
9967+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9968+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
9969+ nop == 0x01000000U)
9970+ {
9971+ unsigned long addr;
9972+
9973+ addr = (sethi & 0x003FFFFFU) << 10;
9974+ regs->u_regs[UREG_G1] = addr;
9975+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
9976+ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
9977+ else
9978+ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
9979+
9980+ if (test_thread_flag(TIF_32BIT))
9981+ addr &= 0xFFFFFFFFUL;
9982+
9983+ regs->tpc = addr;
9984+ regs->tnpc = addr+4;
9985+ return 2;
9986+ }
9987+ } while (0);
9988+
9989+ do { /* PaX: patched PLT emulation #4 */
9990+ unsigned int sethi, mov1, call, mov2;
9991+
9992+ err = get_user(sethi, (unsigned int *)regs->tpc);
9993+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
9994+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
9995+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
9996+
9997+ if (err)
9998+ break;
9999+
10000+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10001+ mov1 == 0x8210000FU &&
10002+ (call & 0xC0000000U) == 0x40000000U &&
10003+ mov2 == 0x9E100001U)
10004+ {
10005+ unsigned long addr;
10006+
10007+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
10008+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
10009+
10010+ if (test_thread_flag(TIF_32BIT))
10011+ addr &= 0xFFFFFFFFUL;
10012+
10013+ regs->tpc = addr;
10014+ regs->tnpc = addr+4;
10015+ return 2;
10016+ }
10017+ } while (0);
10018+
10019+ do { /* PaX: patched PLT emulation #5 */
10020+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
10021+
10022+ err = get_user(sethi, (unsigned int *)regs->tpc);
10023+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
10024+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
10025+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
10026+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
10027+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
10028+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
10029+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
10030+
10031+ if (err)
10032+ break;
10033+
10034+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10035+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
10036+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
10037+ (or1 & 0xFFFFE000U) == 0x82106000U &&
10038+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
10039+ sllx == 0x83287020U &&
10040+ jmpl == 0x81C04005U &&
10041+ nop == 0x01000000U)
10042+ {
10043+ unsigned long addr;
10044+
10045+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
10046+ regs->u_regs[UREG_G1] <<= 32;
10047+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
10048+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
10049+ regs->tpc = addr;
10050+ regs->tnpc = addr+4;
10051+ return 2;
10052+ }
10053+ } while (0);
10054+
10055+ do { /* PaX: patched PLT emulation #6 */
10056+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
10057+
10058+ err = get_user(sethi, (unsigned int *)regs->tpc);
10059+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
10060+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
10061+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
10062+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
10063+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
10064+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
10065+
10066+ if (err)
10067+ break;
10068+
10069+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10070+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
10071+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
10072+ sllx == 0x83287020U &&
10073+ (or & 0xFFFFE000U) == 0x8A116000U &&
10074+ jmpl == 0x81C04005U &&
10075+ nop == 0x01000000U)
10076+ {
10077+ unsigned long addr;
10078+
10079+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
10080+ regs->u_regs[UREG_G1] <<= 32;
10081+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
10082+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
10083+ regs->tpc = addr;
10084+ regs->tnpc = addr+4;
10085+ return 2;
10086+ }
10087+ } while (0);
10088+
10089+ do { /* PaX: unpatched PLT emulation step 1 */
10090+ unsigned int sethi, ba, nop;
10091+
10092+ err = get_user(sethi, (unsigned int *)regs->tpc);
10093+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
10094+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
10095+
10096+ if (err)
10097+ break;
10098+
10099+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10100+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
10101+ nop == 0x01000000U)
10102+ {
10103+ unsigned long addr;
10104+ unsigned int save, call;
10105+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
10106+
10107+ if ((ba & 0xFFC00000U) == 0x30800000U)
10108+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
10109+ else
10110+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
10111+
10112+ if (test_thread_flag(TIF_32BIT))
10113+ addr &= 0xFFFFFFFFUL;
10114+
10115+ err = get_user(save, (unsigned int *)addr);
10116+ err |= get_user(call, (unsigned int *)(addr+4));
10117+ err |= get_user(nop, (unsigned int *)(addr+8));
10118+ if (err)
10119+ break;
10120+
10121+#ifdef CONFIG_PAX_DLRESOLVE
10122+ if (save == 0x9DE3BFA8U &&
10123+ (call & 0xC0000000U) == 0x40000000U &&
10124+ nop == 0x01000000U)
10125+ {
10126+ struct vm_area_struct *vma;
10127+ unsigned long call_dl_resolve;
10128+
10129+ down_read(&current->mm->mmap_sem);
10130+ call_dl_resolve = current->mm->call_dl_resolve;
10131+ up_read(&current->mm->mmap_sem);
10132+ if (likely(call_dl_resolve))
10133+ goto emulate;
10134+
10135+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
10136+
10137+ down_write(&current->mm->mmap_sem);
10138+ if (current->mm->call_dl_resolve) {
10139+ call_dl_resolve = current->mm->call_dl_resolve;
10140+ up_write(&current->mm->mmap_sem);
10141+ if (vma)
10142+ kmem_cache_free(vm_area_cachep, vma);
10143+ goto emulate;
10144+ }
10145+
10146+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
10147+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
10148+ up_write(&current->mm->mmap_sem);
10149+ if (vma)
10150+ kmem_cache_free(vm_area_cachep, vma);
10151+ return 1;
10152+ }
10153+
10154+ if (pax_insert_vma(vma, call_dl_resolve)) {
10155+ up_write(&current->mm->mmap_sem);
10156+ kmem_cache_free(vm_area_cachep, vma);
10157+ return 1;
10158+ }
10159+
10160+ current->mm->call_dl_resolve = call_dl_resolve;
10161+ up_write(&current->mm->mmap_sem);
10162+
10163+emulate:
10164+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
10165+ regs->tpc = call_dl_resolve;
10166+ regs->tnpc = addr+4;
10167+ return 3;
10168+ }
10169+#endif
10170+
10171+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
10172+ if ((save & 0xFFC00000U) == 0x05000000U &&
10173+ (call & 0xFFFFE000U) == 0x85C0A000U &&
10174+ nop == 0x01000000U)
10175+ {
10176+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
10177+ regs->u_regs[UREG_G2] = addr + 4;
10178+ addr = (save & 0x003FFFFFU) << 10;
10179+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
10180+
10181+ if (test_thread_flag(TIF_32BIT))
10182+ addr &= 0xFFFFFFFFUL;
10183+
10184+ regs->tpc = addr;
10185+ regs->tnpc = addr+4;
10186+ return 3;
10187+ }
10188+
10189+ /* PaX: 64-bit PLT stub */
10190+ err = get_user(sethi1, (unsigned int *)addr);
10191+ err |= get_user(sethi2, (unsigned int *)(addr+4));
10192+ err |= get_user(or1, (unsigned int *)(addr+8));
10193+ err |= get_user(or2, (unsigned int *)(addr+12));
10194+ err |= get_user(sllx, (unsigned int *)(addr+16));
10195+ err |= get_user(add, (unsigned int *)(addr+20));
10196+ err |= get_user(jmpl, (unsigned int *)(addr+24));
10197+ err |= get_user(nop, (unsigned int *)(addr+28));
10198+ if (err)
10199+ break;
10200+
10201+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
10202+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
10203+ (or1 & 0xFFFFE000U) == 0x88112000U &&
10204+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
10205+ sllx == 0x89293020U &&
10206+ add == 0x8A010005U &&
10207+ jmpl == 0x89C14000U &&
10208+ nop == 0x01000000U)
10209+ {
10210+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
10211+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
10212+ regs->u_regs[UREG_G4] <<= 32;
10213+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
10214+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
10215+ regs->u_regs[UREG_G4] = addr + 24;
10216+ addr = regs->u_regs[UREG_G5];
10217+ regs->tpc = addr;
10218+ regs->tnpc = addr+4;
10219+ return 3;
10220+ }
10221+ }
10222+ } while (0);
10223+
10224+#ifdef CONFIG_PAX_DLRESOLVE
10225+ do { /* PaX: unpatched PLT emulation step 2 */
10226+ unsigned int save, call, nop;
10227+
10228+ err = get_user(save, (unsigned int *)(regs->tpc-4));
10229+ err |= get_user(call, (unsigned int *)regs->tpc);
10230+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
10231+ if (err)
10232+ break;
10233+
10234+ if (save == 0x9DE3BFA8U &&
10235+ (call & 0xC0000000U) == 0x40000000U &&
10236+ nop == 0x01000000U)
10237+ {
10238+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
10239+
10240+ if (test_thread_flag(TIF_32BIT))
10241+ dl_resolve &= 0xFFFFFFFFUL;
10242+
10243+ regs->u_regs[UREG_RETPC] = regs->tpc;
10244+ regs->tpc = dl_resolve;
10245+ regs->tnpc = dl_resolve+4;
10246+ return 3;
10247+ }
10248+ } while (0);
10249+#endif
10250+
10251+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
10252+ unsigned int sethi, ba, nop;
10253+
10254+ err = get_user(sethi, (unsigned int *)regs->tpc);
10255+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
10256+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
10257+
10258+ if (err)
10259+ break;
10260+
10261+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10262+ (ba & 0xFFF00000U) == 0x30600000U &&
10263+ nop == 0x01000000U)
10264+ {
10265+ unsigned long addr;
10266+
10267+ addr = (sethi & 0x003FFFFFU) << 10;
10268+ regs->u_regs[UREG_G1] = addr;
10269+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
10270+
10271+ if (test_thread_flag(TIF_32BIT))
10272+ addr &= 0xFFFFFFFFUL;
10273+
10274+ regs->tpc = addr;
10275+ regs->tnpc = addr+4;
10276+ return 2;
10277+ }
10278+ } while (0);
10279+
10280+#endif
10281+
10282+ return 1;
10283+}
10284+
10285+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
10286+{
10287+ unsigned long i;
10288+
10289+ printk(KERN_ERR "PAX: bytes at PC: ");
10290+ for (i = 0; i < 8; i++) {
10291+ unsigned int c;
10292+ if (get_user(c, (unsigned int *)pc+i))
10293+ printk(KERN_CONT "???????? ");
10294+ else
10295+ printk(KERN_CONT "%08x ", c);
10296+ }
10297+ printk("\n");
10298+}
10299+#endif
10300+
10301 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
10302 {
10303 struct mm_struct *mm = current->mm;
10304@@ -341,6 +804,29 @@ retry:
10305 if (!vma)
10306 goto bad_area;
10307
10308+#ifdef CONFIG_PAX_PAGEEXEC
10309+ /* PaX: detect ITLB misses on non-exec pages */
10310+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
10311+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
10312+ {
10313+ if (address != regs->tpc)
10314+ goto good_area;
10315+
10316+ up_read(&mm->mmap_sem);
10317+ switch (pax_handle_fetch_fault(regs)) {
10318+
10319+#ifdef CONFIG_PAX_EMUPLT
10320+ case 2:
10321+ case 3:
10322+ return;
10323+#endif
10324+
10325+ }
10326+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
10327+ do_group_exit(SIGKILL);
10328+ }
10329+#endif
10330+
10331 /* Pure DTLB misses do not tell us whether the fault causing
10332 * load/store/atomic was a write or not, it only says that there
10333 * was no match. So in such a case we (carefully) read the
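
The decoding above leans on one idiom worth spelling out: expressions such as (((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2 sign-extend the 19-bit (or, with the wider masks, 22-bit and 30-bit) displacement field of a SPARC branch or call without any conditional. The OR forces every bit above the field to 1; the XOR/ADD pair then clears them again exactly when the sign bit is 0. A stand-alone sketch that checks the idiom against a shift-based reference (the function names are ours):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t sext_pax(uint64_t x, unsigned int width)
{
	uint64_t sign = 1ULL << (width - 1);
	uint64_t mask = ~(uint64_t)0 << width; /* 0xFFFFFFFFFFF80000 for 19 */

	/* works even when x carries opcode bits above the field: the OR
	 * absorbs them into the mask before the sign bit is fixed up */
	return ((x | mask) ^ sign) + sign;
}

static uint64_t sext_ref(uint64_t x, unsigned int width)
{
	unsigned int shift = 64 - width;

	return (uint64_t)((int64_t)(x << shift) >> shift);
}

int main(void)
{
	uint64_t d;

	/* sweep the 19-bit displacement field used by emulation #2 */
	for (d = 0; d < (1ULL << 19); d += 4097)
		assert(sext_pax(d, 19) == sext_ref(d, 19));

	/* a -1 displacement scaled to instruction words, as the emulation does */
	printf("%016llx\n", (unsigned long long)(sext_pax(0x7FFFF, 19) << 2));
	return 0;
}
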
10334diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
10335index d2b5944..d878f3c 100644
10336--- a/arch/sparc/mm/hugetlbpage.c
10337+++ b/arch/sparc/mm/hugetlbpage.c
10338@@ -28,7 +28,8 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
10339 unsigned long addr,
10340 unsigned long len,
10341 unsigned long pgoff,
10342- unsigned long flags)
10343+ unsigned long flags,
10344+ unsigned long offset)
10345 {
10346 unsigned long task_size = TASK_SIZE;
10347 struct vm_unmapped_area_info info;
10348@@ -38,15 +39,22 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
10349
10350 info.flags = 0;
10351 info.length = len;
10352- info.low_limit = TASK_UNMAPPED_BASE;
10353+ info.low_limit = mm->mmap_base;
10354 info.high_limit = min(task_size, VA_EXCLUDE_START);
10355 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
10356 info.align_offset = 0;
10357+ info.threadstack_offset = offset;
10358 addr = vm_unmapped_area(&info);
10359
10360 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
10361 VM_BUG_ON(addr != -ENOMEM);
10362 info.low_limit = VA_EXCLUDE_END;
10363+
10364+#ifdef CONFIG_PAX_RANDMMAP
10365+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10366+ info.low_limit += mm->delta_mmap;
10367+#endif
10368+
10369 info.high_limit = task_size;
10370 addr = vm_unmapped_area(&info);
10371 }
10372@@ -58,7 +66,8 @@ static unsigned long
10373 hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10374 const unsigned long len,
10375 const unsigned long pgoff,
10376- const unsigned long flags)
10377+ const unsigned long flags,
10378+ const unsigned long offset)
10379 {
10380 struct mm_struct *mm = current->mm;
10381 unsigned long addr = addr0;
10382@@ -73,6 +82,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10383 info.high_limit = mm->mmap_base;
10384 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
10385 info.align_offset = 0;
10386+ info.threadstack_offset = offset;
10387 addr = vm_unmapped_area(&info);
10388
10389 /*
10390@@ -85,6 +95,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10391 VM_BUG_ON(addr != -ENOMEM);
10392 info.flags = 0;
10393 info.low_limit = TASK_UNMAPPED_BASE;
10394+
10395+#ifdef CONFIG_PAX_RANDMMAP
10396+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10397+ info.low_limit += mm->delta_mmap;
10398+#endif
10399+
10400 info.high_limit = STACK_TOP32;
10401 addr = vm_unmapped_area(&info);
10402 }
10403@@ -99,6 +115,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
10404 struct mm_struct *mm = current->mm;
10405 struct vm_area_struct *vma;
10406 unsigned long task_size = TASK_SIZE;
10407+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
10408
10409 if (test_thread_flag(TIF_32BIT))
10410 task_size = STACK_TOP32;
10411@@ -114,19 +131,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
10412 return addr;
10413 }
10414
10415+#ifdef CONFIG_PAX_RANDMMAP
10416+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10417+#endif
10418+
10419 if (addr) {
10420 addr = ALIGN(addr, HPAGE_SIZE);
10421 vma = find_vma(mm, addr);
10422- if (task_size - len >= addr &&
10423- (!vma || addr + len <= vma->vm_start))
10424+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10425 return addr;
10426 }
10427 if (mm->get_unmapped_area == arch_get_unmapped_area)
10428 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
10429- pgoff, flags);
10430+ pgoff, flags, offset);
10431 else
10432 return hugetlb_get_unmapped_area_topdown(file, addr, len,
10433- pgoff, flags);
10434+ pgoff, flags, offset);
10435 }
10436
10437 pte_t *huge_pte_alloc(struct mm_struct *mm,
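
The hugetlb changes follow the same pattern as the rest of the RANDMMAP support: every vm_unmapped_area_info gains a threadstack_offset, and each fallback search raises its floor by the per-mm delta_mmap so the retry window stays randomised too. A sketch of just that floor computation (the constants are invented for illustration):

#include <stdio.h>

static unsigned long retry_low_limit(unsigned long va_exclude_end,
				     int randmmap, unsigned long delta_mmap)
{
	unsigned long low = va_exclude_end;	/* past the VA hole */

	if (randmmap)
		low += delta_mmap;		/* PAX_RANDMMAP bump */
	return low;
}

int main(void)
{
	printf("%#lx\n", retry_low_limit(0xfffff80000000000UL, 1, 0x1000000UL));
	return 0;
}
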
10438diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
10439index f4500c6..889656c 100644
10440--- a/arch/tile/include/asm/atomic_64.h
10441+++ b/arch/tile/include/asm/atomic_64.h
10442@@ -143,6 +143,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
10443
10444 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
10445
10446+#define atomic64_read_unchecked(v) atomic64_read(v)
10447+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
10448+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
10449+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
10450+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
10451+#define atomic64_inc_unchecked(v) atomic64_inc(v)
10452+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
10453+#define atomic64_dec_unchecked(v) atomic64_dec(v)
10454+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
10455+
10456 /* Atomic dec and inc don't implement barrier, so provide them if needed. */
10457 #define smp_mb__before_atomic_dec() smp_mb()
10458 #define smp_mb__after_atomic_dec() smp_mb()
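
The block of *_unchecked aliases exists because PaX's REFCOUNT feature makes the regular atomic ops trap on signed overflow; counters that may legitimately wrap need an explicit opt-out, and on architectures without the instrumentation, tile included, the opt-out collapses to the plain operation. A user-space model of the two flavours (the overflow handling is illustrative, not PaX's exact response):

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

static void atomic_add_checked(atomic_int *v, int a)
{
	int old = atomic_load(v);

	for (;;) {
		if ((a > 0 && old > INT_MAX - a) ||
		    (a < 0 && old < INT_MIN - a)) {
			fprintf(stderr, "refcount overflow detected\n");
			return;		/* PaX reports and saturates instead */
		}
		if (atomic_compare_exchange_weak(v, &old, old + a))
			return;		/* on failure, old was reloaded */
	}
}

static void atomic_add_unchecked(atomic_int *v, int a)
{
	atomic_fetch_add(v, a);		/* silent two's-complement wrap */
}

int main(void)
{
	atomic_int v = INT_MAX;

	atomic_add_checked(&v, 1);	/* caught */
	atomic_add_unchecked(&v, 1);	/* wraps to INT_MIN */
	printf("%d\n", atomic_load(&v));
	return 0;
}
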
10459diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
10460index a9a5299..0fce79e 100644
10461--- a/arch/tile/include/asm/cache.h
10462+++ b/arch/tile/include/asm/cache.h
10463@@ -15,11 +15,12 @@
10464 #ifndef _ASM_TILE_CACHE_H
10465 #define _ASM_TILE_CACHE_H
10466
10467+#include <linux/const.h>
10468 #include <arch/chip.h>
10469
10470 /* bytes per L1 data cache line */
10471 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
10472-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
10473+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10474
10475 /* bytes per L2 cache line */
10476 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
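
The cache.h edit recurs throughout the patch (um and unicore32 get the same one below): (1 << L1_CACHE_SHIFT) is a signed int, and masks derived from it only align 64-bit addresses correctly by the accident of sign extension; any unsigned 32-bit variant truncates silently. _AC(1,UL) makes the constant unsigned long on the C side and, because the macro drops the suffix under __ASSEMBLY__, still usable from .S files. A demonstration of the truncation hazard (assumes an LP64 target):

#include <stdint.h>
#include <stdio.h>

#define _AC(X, Y)	(X##Y)		/* simplified C-side <linux/const.h> */

#define SHIFT		6
#define BYTES_U		(1U << SHIFT)		/* unsigned int */
#define BYTES_UL	(_AC(1, UL) << SHIFT)	/* unsigned long */

int main(void)
{
	uint64_t addr = 0xffff880012345678ULL;

	/* the 32-bit mask zero-extends and clears the top half: */
	printf("%016llx\n", (unsigned long long)(addr & ~(BYTES_U - 1)));
	/* the unsigned long mask aligns the address as intended: */
	printf("%016llx\n", (unsigned long long)(addr & ~(BYTES_UL - 1)));
	return 0;
}
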
10477diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
10478index 8a082bc..7a6bf87 100644
10479--- a/arch/tile/include/asm/uaccess.h
10480+++ b/arch/tile/include/asm/uaccess.h
10481@@ -408,9 +408,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
10482 const void __user *from,
10483 unsigned long n)
10484 {
10485- int sz = __compiletime_object_size(to);
10486+ size_t sz = __compiletime_object_size(to);
10487
10488- if (likely(sz == -1 || sz >= n))
10489+ if (likely(sz == (size_t)-1 || sz >= n))
10490 n = _copy_from_user(to, from, n);
10491 else
10492 copy_from_user_overflow();
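
The uaccess.h change keeps the object size in the type __compiletime_object_size() actually returns: a size_t whose "unknown" value is (size_t)-1. Holding it in a plain int relied on the sz == -1 test firing before the mixed signed/unsigned sz >= n comparison, which converts -1 to ULONG_MAX. The conversion surprise in isolation:

#include <stddef.h>
#include <stdio.h>

int main(void)
{
	int sz_int = -1;		/* "unknown size", old representation */
	size_t sz = (size_t)-1;		/* the fixed representation */
	unsigned long n = 16;

	/* -1 converts to ULONG_MAX, so this holds for any n: */
	printf("sz_int >= n: %d\n", sz_int >= n);		/* 1 */
	/* the size_t form says what it means, with no conversion: */
	printf("sz == (size_t)-1: %d\n", sz == (size_t)-1);	/* 1 */
	return 0;
}
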
10493diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
10494index 650ccff..45fe2d6 100644
10495--- a/arch/tile/mm/hugetlbpage.c
10496+++ b/arch/tile/mm/hugetlbpage.c
10497@@ -239,6 +239,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
10498 info.high_limit = TASK_SIZE;
10499 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
10500 info.align_offset = 0;
10501+ info.threadstack_offset = 0;
10502 return vm_unmapped_area(&info);
10503 }
10504
10505@@ -256,6 +257,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
10506 info.high_limit = current->mm->mmap_base;
10507 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
10508 info.align_offset = 0;
10509+ info.threadstack_offset = 0;
10510 addr = vm_unmapped_area(&info);
10511
10512 /*
10513diff --git a/arch/um/Makefile b/arch/um/Makefile
10514index 133f7de..1d6f2f1 100644
10515--- a/arch/um/Makefile
10516+++ b/arch/um/Makefile
10517@@ -62,6 +62,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
10518 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
10519 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
10520
10521+ifdef CONSTIFY_PLUGIN
10522+USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
10523+endif
10524+
10525 #This will adjust *FLAGS accordingly to the platform.
10526 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
10527
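
USER_CFLAGS covers the objects UML builds as ordinary user-space code, so the constify gcc plugin, which forces structures made up of function pointers into .rodata kernel-wide, has to be switched off for them here. The hardening idea itself is easy to show by hand; everything below is illustrative rather than plugin output:

#include <stdio.h>

struct file_ops {
	int (*open)(const char *name);
};

static int real_open(const char *name)
{
	printf("open(%s)\n", name);
	return 0;
}

/* "constified": placed in .rodata, so a stray or malicious write to
 * fops.open faults instead of redirecting control flow */
static const struct file_ops fops = {
	.open = real_open,
};

int main(void)
{
	return fops.open("/dev/null");
}
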
10528diff --git a/arch/um/defconfig b/arch/um/defconfig
10529index 08107a7..ab22afe 100644
10530--- a/arch/um/defconfig
10531+++ b/arch/um/defconfig
10532@@ -51,7 +51,6 @@ CONFIG_X86_CMPXCHG=y
10533 CONFIG_X86_L1_CACHE_SHIFT=5
10534 CONFIG_X86_XADD=y
10535 CONFIG_X86_PPRO_FENCE=y
10536-CONFIG_X86_WP_WORKS_OK=y
10537 CONFIG_X86_INVLPG=y
10538 CONFIG_X86_BSWAP=y
10539 CONFIG_X86_POPAD_OK=y
10540diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
10541index 19e1bdd..3665b77 100644
10542--- a/arch/um/include/asm/cache.h
10543+++ b/arch/um/include/asm/cache.h
10544@@ -1,6 +1,7 @@
10545 #ifndef __UM_CACHE_H
10546 #define __UM_CACHE_H
10547
10548+#include <linux/const.h>
10549
10550 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
10551 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
10552@@ -12,6 +13,6 @@
10553 # define L1_CACHE_SHIFT 5
10554 #endif
10555
10556-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
10557+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10558
10559 #endif
10560diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
10561index 2e0a6b1..a64d0f5 100644
10562--- a/arch/um/include/asm/kmap_types.h
10563+++ b/arch/um/include/asm/kmap_types.h
10564@@ -8,6 +8,6 @@
10565
10566 /* No more #include "asm/arch/kmap_types.h" ! */
10567
10568-#define KM_TYPE_NR 14
10569+#define KM_TYPE_NR 15
10570
10571 #endif
10572diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
10573index 5ff53d9..5850cdf 100644
10574--- a/arch/um/include/asm/page.h
10575+++ b/arch/um/include/asm/page.h
10576@@ -14,6 +14,9 @@
10577 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
10578 #define PAGE_MASK (~(PAGE_SIZE-1))
10579
10580+#define ktla_ktva(addr) (addr)
10581+#define ktva_ktla(addr) (addr)
10582+
10583 #ifndef __ASSEMBLY__
10584
10585 struct page;
10586diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
10587index 0032f92..cd151e0 100644
10588--- a/arch/um/include/asm/pgtable-3level.h
10589+++ b/arch/um/include/asm/pgtable-3level.h
10590@@ -58,6 +58,7 @@
10591 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
10592 #define pud_populate(mm, pud, pmd) \
10593 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
10594+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
10595
10596 #ifdef CONFIG_64BIT
10597 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
10598diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h
10599index 4febacd..29b0301 100644
10600--- a/arch/um/include/asm/tlb.h
10601+++ b/arch/um/include/asm/tlb.h
10602@@ -45,10 +45,12 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
10603 }
10604
10605 static inline void
10606-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
10607+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
10608 {
10609 tlb->mm = mm;
10610- tlb->fullmm = full_mm_flush;
10611+ tlb->start = start;
10612+ tlb->end = end;
10613+ tlb->fullmm = !(start | (end+1));
10614
10615 init_tlb_gather(tlb);
10616 }
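
The new tlb_gather_mmu() signature encodes "flush the whole mm" in the range itself: start == 0 and end == ~0UL is the only pair for which start | (end + 1) is zero, so fullmm needs no separate flag. A quick check of that predicate:

#include <assert.h>
#include <stdio.h>

static int fullmm(unsigned long start, unsigned long end)
{
	return !(start | (end + 1));
}

int main(void)
{
	assert(fullmm(0UL, ~0UL) == 1);		/* whole address space */
	assert(fullmm(0UL, 0x7fffUL) == 0);	/* partial range */
	assert(fullmm(0x1000UL, ~0UL) == 0);	/* partial range */
	puts("ok");
	return 0;
}
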
10617diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
10618index bbcef52..6a2a483 100644
10619--- a/arch/um/kernel/process.c
10620+++ b/arch/um/kernel/process.c
10621@@ -367,22 +367,6 @@ int singlestepping(void * t)
10622 return 2;
10623 }
10624
10625-/*
10626- * Only x86 and x86_64 have an arch_align_stack().
10627- * All other arches have "#define arch_align_stack(x) (x)"
10628- * in their asm/system.h
10629- * As this is included in UML from asm-um/system-generic.h,
10630- * we can use it to behave as the subarch does.
10631- */
10632-#ifndef arch_align_stack
10633-unsigned long arch_align_stack(unsigned long sp)
10634-{
10635- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
10636- sp -= get_random_int() % 8192;
10637- return sp & ~0xf;
10638-}
10639-#endif
10640-
10641 unsigned long get_wchan(struct task_struct *p)
10642 {
10643 unsigned long stack_page, sp, ip;
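
The deleted helper randomised the initial stack pointer by up to 8 KiB and re-aligned it to 16 bytes; removing it lets UML inherit the subarch's x86 implementation instead of carrying a duplicate. A user-space sketch of the same arithmetic, with rand() standing in for get_random_int():

#include <stdio.h>
#include <stdlib.h>

static unsigned long align_stack(unsigned long sp, int randomize)
{
	if (randomize)
		sp -= (unsigned long)rand() % 8192;	/* up to 8 KiB gap */
	return sp & ~0xfUL;				/* 16-byte alignment */
}

int main(void)
{
	srand(42);
	printf("%#lx\n", align_stack(0x7ffffffff000UL, 1));
	return 0;
}
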
10644diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
10645index ad8f795..2c7eec6 100644
10646--- a/arch/unicore32/include/asm/cache.h
10647+++ b/arch/unicore32/include/asm/cache.h
10648@@ -12,8 +12,10 @@
10649 #ifndef __UNICORE_CACHE_H__
10650 #define __UNICORE_CACHE_H__
10651
10652-#define L1_CACHE_SHIFT (5)
10653-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
10654+#include <linux/const.h>
10655+
10656+#define L1_CACHE_SHIFT 5
10657+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10658
10659 /*
10660 * Memory returned by kmalloc() may be used for DMA, so we must make
10661diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
10662index fe120da..24177f7 100644
10663--- a/arch/x86/Kconfig
10664+++ b/arch/x86/Kconfig
10665@@ -239,7 +239,7 @@ config X86_HT
10666
10667 config X86_32_LAZY_GS
10668 def_bool y
10669- depends on X86_32 && !CC_STACKPROTECTOR
10670+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
10671
10672 config ARCH_HWEIGHT_CFLAGS
10673 string
10674@@ -1073,6 +1073,7 @@ config MICROCODE_EARLY
10675
10676 config X86_MSR
10677 tristate "/dev/cpu/*/msr - Model-specific register support"
10678+ depends on !GRKERNSEC_KMEM
10679 ---help---
10680 This device gives privileged processes access to the x86
10681 Model-Specific Registers (MSRs). It is a character device with
10682@@ -1096,7 +1097,7 @@ choice
10683
10684 config NOHIGHMEM
10685 bool "off"
10686- depends on !X86_NUMAQ
10687+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10688 ---help---
10689 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
10690 However, the address space of 32-bit x86 processors is only 4
10691@@ -1133,7 +1134,7 @@ config NOHIGHMEM
10692
10693 config HIGHMEM4G
10694 bool "4GB"
10695- depends on !X86_NUMAQ
10696+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10697 ---help---
10698 Select this if you have a 32-bit processor and between 1 and 4
10699 gigabytes of physical RAM.
10700@@ -1186,7 +1187,7 @@ config PAGE_OFFSET
10701 hex
10702 default 0xB0000000 if VMSPLIT_3G_OPT
10703 default 0x80000000 if VMSPLIT_2G
10704- default 0x78000000 if VMSPLIT_2G_OPT
10705+ default 0x70000000 if VMSPLIT_2G_OPT
10706 default 0x40000000 if VMSPLIT_1G
10707 default 0xC0000000
10708 depends on X86_32
10709@@ -1584,6 +1585,7 @@ config SECCOMP
10710
10711 config CC_STACKPROTECTOR
10712 bool "Enable -fstack-protector buffer overflow detection"
10713+ depends on X86_64 || !PAX_MEMORY_UDEREF
10714 ---help---
10715 This option turns on the -fstack-protector GCC feature. This
10716 feature puts, at the beginning of functions, a canary value on
10717@@ -1703,6 +1705,8 @@ config X86_NEED_RELOCS
10718 config PHYSICAL_ALIGN
10719 hex "Alignment value to which kernel should be aligned" if X86_32
10720 default "0x1000000"
10721+ range 0x200000 0x1000000 if PAX_KERNEXEC && X86_PAE
10722+ range 0x400000 0x1000000 if PAX_KERNEXEC && !X86_PAE
10723 range 0x2000 0x1000000
10724 ---help---
10725 This value puts the alignment restrictions on physical address
10726@@ -1778,9 +1782,10 @@ config DEBUG_HOTPLUG_CPU0
10727 If unsure, say N.
10728
10729 config COMPAT_VDSO
10730- def_bool y
10731+ def_bool n
10732 prompt "Compat VDSO support"
10733 depends on X86_32 || IA32_EMULATION
10734+ depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
10735 ---help---
10736 Map the 32-bit VDSO to the predictable old-style address too.
10737
10738diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
10739index c026cca..14657ae 100644
10740--- a/arch/x86/Kconfig.cpu
10741+++ b/arch/x86/Kconfig.cpu
10742@@ -319,7 +319,7 @@ config X86_PPRO_FENCE
10743
10744 config X86_F00F_BUG
10745 def_bool y
10746- depends on M586MMX || M586TSC || M586 || M486
10747+ depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC
10748
10749 config X86_INVD_BUG
10750 def_bool y
10751@@ -327,7 +327,7 @@ config X86_INVD_BUG
10752
10753 config X86_ALIGNMENT_16
10754 def_bool y
10755- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10756+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10757
10758 config X86_INTEL_USERCOPY
10759 def_bool y
10760@@ -373,7 +373,7 @@ config X86_CMPXCHG64
10761 # generates cmov.
10762 config X86_CMOV
10763 def_bool y
10764- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10765+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10766
10767 config X86_MINIMUM_CPU_FAMILY
10768 int
10769diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
10770index c198b7e..63eea60 100644
10771--- a/arch/x86/Kconfig.debug
10772+++ b/arch/x86/Kconfig.debug
10773@@ -84,7 +84,7 @@ config X86_PTDUMP
10774 config DEBUG_RODATA
10775 bool "Write protect kernel read-only data structures"
10776 default y
10777- depends on DEBUG_KERNEL
10778+ depends on DEBUG_KERNEL && BROKEN
10779 ---help---
10780 Mark the kernel read-only data as write-protected in the pagetables,
10781 in order to catch accidental (and incorrect) writes to such const
10782@@ -102,7 +102,7 @@ config DEBUG_RODATA_TEST
10783
10784 config DEBUG_SET_MODULE_RONX
10785 bool "Set loadable kernel module data as NX and text as RO"
10786- depends on MODULES
10787+ depends on MODULES && BROKEN
10788 ---help---
10789 This option helps catch unintended modifications to loadable
10790 kernel module's text and read-only data. It also prevents execution
10791diff --git a/arch/x86/Makefile b/arch/x86/Makefile
10792index 5c47726..8c4fa67 100644
10793--- a/arch/x86/Makefile
10794+++ b/arch/x86/Makefile
10795@@ -54,6 +54,7 @@ else
10796 UTS_MACHINE := x86_64
10797 CHECKFLAGS += -D__x86_64__ -m64
10798
10799+ biarch := $(call cc-option,-m64)
10800 KBUILD_AFLAGS += -m64
10801 KBUILD_CFLAGS += -m64
10802
10803@@ -234,3 +235,12 @@ define archhelp
10804 echo ' FDARGS="..." arguments for the booted kernel'
10805 echo ' FDINITRD=file initrd for the booted kernel'
10806 endef
10807+
10808+define OLD_LD
10809+
10810+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
10811+*** Please upgrade your binutils to 2.18 or newer
10812+endef
10813+
10814+archprepare:
10815+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
10816diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
10817index 379814b..add62ce 100644
10818--- a/arch/x86/boot/Makefile
10819+++ b/arch/x86/boot/Makefile
10820@@ -65,6 +65,9 @@ KBUILD_CFLAGS := $(USERINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
10821 $(call cc-option, -fno-stack-protector) \
10822 $(call cc-option, -mpreferred-stack-boundary=2)
10823 KBUILD_CFLAGS += $(call cc-option, -m32)
10824+ifdef CONSTIFY_PLUGIN
10825+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
10826+endif
10827 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
10828 GCOV_PROFILE := n
10829
10830diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
10831index 878e4b9..20537ab 100644
10832--- a/arch/x86/boot/bitops.h
10833+++ b/arch/x86/boot/bitops.h
10834@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
10835 u8 v;
10836 const u32 *p = (const u32 *)addr;
10837
10838- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
10839+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
10840 return v;
10841 }
10842
10843@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
10844
10845 static inline void set_bit(int nr, void *addr)
10846 {
10847- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
10848+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
10849 }
10850
10851 #endif /* BOOT_BITOPS_H */
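
bitops.h is the first of several boot files below (boot.h, cpucheck.c) where plain asm statements with output operands gain a volatile qualifier. Without it, GCC is free to hoist such an asm out of a loop, merge two identical ones, or delete one whose outputs look dead, none of which is safe for instructions that read changing hardware state. A user-space illustration on x86-64 (requires GCC or Clang):

#include <stdint.h>
#include <stdio.h>

static inline uint64_t rdtsc_plain(void)
{
	uint32_t lo, hi;

	asm("rdtsc" : "=a" (lo), "=d" (hi));		/* may be CSE'd */
	return ((uint64_t)hi << 32) | lo;
}

static inline uint64_t rdtsc_volatile(void)
{
	uint32_t lo, hi;

	asm volatile("rdtsc" : "=a" (lo), "=d" (hi));	/* always executes */
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	/* with optimisation the two plain reads may collapse into one
	 * rdtsc, printing 0; the volatile pair cannot be merged */
	uint64_t a = rdtsc_plain(), b = rdtsc_plain();
	uint64_t c = rdtsc_volatile(), d = rdtsc_volatile();

	printf("plain: %llu  volatile: %llu\n",
	       (unsigned long long)(b - a), (unsigned long long)(d - c));
	return 0;
}
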
10852diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
10853index 5b75319..331a4ca 100644
10854--- a/arch/x86/boot/boot.h
10855+++ b/arch/x86/boot/boot.h
10856@@ -85,7 +85,7 @@ static inline void io_delay(void)
10857 static inline u16 ds(void)
10858 {
10859 u16 seg;
10860- asm("movw %%ds,%0" : "=rm" (seg));
10861+ asm volatile("movw %%ds,%0" : "=rm" (seg));
10862 return seg;
10863 }
10864
10865@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
10866 static inline int memcmp(const void *s1, const void *s2, size_t len)
10867 {
10868 u8 diff;
10869- asm("repe; cmpsb; setnz %0"
10870+ asm volatile("repe; cmpsb; setnz %0"
10871 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
10872 return diff;
10873 }
10874diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
10875index 5ef205c..342191d 100644
10876--- a/arch/x86/boot/compressed/Makefile
10877+++ b/arch/x86/boot/compressed/Makefile
10878@@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
10879 KBUILD_CFLAGS += $(cflags-y)
10880 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
10881 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
10882+ifdef CONSTIFY_PLUGIN
10883+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
10884+endif
10885
10886 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
10887 GCOV_PROFILE := n
10888diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
10889index d606463..b887794 100644
10890--- a/arch/x86/boot/compressed/eboot.c
10891+++ b/arch/x86/boot/compressed/eboot.c
10892@@ -150,7 +150,6 @@ again:
10893 *addr = max_addr;
10894 }
10895
10896-free_pool:
10897 efi_call_phys1(sys_table->boottime->free_pool, map);
10898
10899 fail:
10900@@ -214,7 +213,6 @@ static efi_status_t low_alloc(unsigned long size, unsigned long align,
10901 if (i == map_size / desc_size)
10902 status = EFI_NOT_FOUND;
10903
10904-free_pool:
10905 efi_call_phys1(sys_table->boottime->free_pool, map);
10906 fail:
10907 return status;
10908diff --git a/arch/x86/boot/compressed/efi_stub_32.S b/arch/x86/boot/compressed/efi_stub_32.S
10909index a53440e..c3dbf1e 100644
10910--- a/arch/x86/boot/compressed/efi_stub_32.S
10911+++ b/arch/x86/boot/compressed/efi_stub_32.S
10912@@ -46,16 +46,13 @@ ENTRY(efi_call_phys)
10913 * parameter 2, ..., param n. To make things easy, we save the return
10914 * address of efi_call_phys in a global variable.
10915 */
10916- popl %ecx
10917- movl %ecx, saved_return_addr(%edx)
10918- /* get the function pointer into ECX*/
10919- popl %ecx
10920- movl %ecx, efi_rt_function_ptr(%edx)
10921+ popl saved_return_addr(%edx)
10922+ popl efi_rt_function_ptr(%edx)
10923
10924 /*
10925 * 3. Call the physical function.
10926 */
10927- call *%ecx
10928+ call *efi_rt_function_ptr(%edx)
10929
10930 /*
10931 * 4. Balance the stack. And because EAX contains the return value,
10932@@ -67,15 +64,12 @@ ENTRY(efi_call_phys)
10933 1: popl %edx
10934 subl $1b, %edx
10935
10936- movl efi_rt_function_ptr(%edx), %ecx
10937- pushl %ecx
10938+ pushl efi_rt_function_ptr(%edx)
10939
10940 /*
10941 * 10. Push the saved return address onto the stack and return.
10942 */
10943- movl saved_return_addr(%edx), %ecx
10944- pushl %ecx
10945- ret
10946+ jmpl *saved_return_addr(%edx)
10947 ENDPROC(efi_call_phys)
10948 .previous
10949
10950diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
10951index 1e3184f..0d11e2e 100644
10952--- a/arch/x86/boot/compressed/head_32.S
10953+++ b/arch/x86/boot/compressed/head_32.S
10954@@ -118,7 +118,7 @@ preferred_addr:
10955 notl %eax
10956 andl %eax, %ebx
10957 #else
10958- movl $LOAD_PHYSICAL_ADDR, %ebx
10959+ movl $____LOAD_PHYSICAL_ADDR, %ebx
10960 #endif
10961
10962 /* Target address to relocate to for decompression */
10963@@ -204,7 +204,7 @@ relocated:
10964 * and where it was actually loaded.
10965 */
10966 movl %ebp, %ebx
10967- subl $LOAD_PHYSICAL_ADDR, %ebx
10968+ subl $____LOAD_PHYSICAL_ADDR, %ebx
10969 jz 2f /* Nothing to be done if loaded at compiled addr. */
10970 /*
10971 * Process relocations.
10972@@ -212,8 +212,7 @@ relocated:
10973
10974 1: subl $4, %edi
10975 movl (%edi), %ecx
10976- testl %ecx, %ecx
10977- jz 2f
10978+ jecxz 2f
10979 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
10980 jmp 1b
10981 2:
10982diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
10983index 16f24e6..47491a3 100644
10984--- a/arch/x86/boot/compressed/head_64.S
10985+++ b/arch/x86/boot/compressed/head_64.S
10986@@ -97,7 +97,7 @@ ENTRY(startup_32)
10987 notl %eax
10988 andl %eax, %ebx
10989 #else
10990- movl $LOAD_PHYSICAL_ADDR, %ebx
10991+ movl $____LOAD_PHYSICAL_ADDR, %ebx
10992 #endif
10993
10994 /* Target address to relocate to for decompression */
10995@@ -272,7 +272,7 @@ preferred_addr:
10996 notq %rax
10997 andq %rax, %rbp
10998 #else
10999- movq $LOAD_PHYSICAL_ADDR, %rbp
11000+ movq $____LOAD_PHYSICAL_ADDR, %rbp
11001 #endif
11002
11003 /* Target address to relocate to for decompression */
11004@@ -363,8 +363,8 @@ gdt:
11005 .long gdt
11006 .word 0
11007 .quad 0x0000000000000000 /* NULL descriptor */
11008- .quad 0x00af9a000000ffff /* __KERNEL_CS */
11009- .quad 0x00cf92000000ffff /* __KERNEL_DS */
11010+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
11011+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
11012 .quad 0x0080890000000000 /* TS descriptor */
11013 .quad 0x0000000000000000 /* TS continued */
11014 gdt_end:
11015diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
11016index 7cb56c6..d382d84 100644
11017--- a/arch/x86/boot/compressed/misc.c
11018+++ b/arch/x86/boot/compressed/misc.c
11019@@ -303,7 +303,7 @@ static void parse_elf(void *output)
11020 case PT_LOAD:
11021 #ifdef CONFIG_RELOCATABLE
11022 dest = output;
11023- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
11024+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
11025 #else
11026 dest = (void *)(phdr->p_paddr);
11027 #endif
11028@@ -354,7 +354,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
11029 error("Destination address too large");
11030 #endif
11031 #ifndef CONFIG_RELOCATABLE
11032- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
11033+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
11034 error("Wrong destination address");
11035 #endif
11036
11037diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
11038index 4d3ff03..e4972ff 100644
11039--- a/arch/x86/boot/cpucheck.c
11040+++ b/arch/x86/boot/cpucheck.c
11041@@ -74,7 +74,7 @@ static int has_fpu(void)
11042 u16 fcw = -1, fsw = -1;
11043 u32 cr0;
11044
11045- asm("movl %%cr0,%0" : "=r" (cr0));
11046+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
11047 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
11048 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
11049 asm volatile("movl %0,%%cr0" : : "r" (cr0));
11050@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
11051 {
11052 u32 f0, f1;
11053
11054- asm("pushfl ; "
11055+ asm volatile("pushfl ; "
11056 "pushfl ; "
11057 "popl %0 ; "
11058 "movl %0,%1 ; "
11059@@ -115,7 +115,7 @@ static void get_flags(void)
11060 set_bit(X86_FEATURE_FPU, cpu.flags);
11061
11062 if (has_eflag(X86_EFLAGS_ID)) {
11063- asm("cpuid"
11064+ asm volatile("cpuid"
11065 : "=a" (max_intel_level),
11066 "=b" (cpu_vendor[0]),
11067 "=d" (cpu_vendor[1]),
11068@@ -124,7 +124,7 @@ static void get_flags(void)
11069
11070 if (max_intel_level >= 0x00000001 &&
11071 max_intel_level <= 0x0000ffff) {
11072- asm("cpuid"
11073+ asm volatile("cpuid"
11074 : "=a" (tfms),
11075 "=c" (cpu.flags[4]),
11076 "=d" (cpu.flags[0])
11077@@ -136,7 +136,7 @@ static void get_flags(void)
11078 cpu.model += ((tfms >> 16) & 0xf) << 4;
11079 }
11080
11081- asm("cpuid"
11082+ asm volatile("cpuid"
11083 : "=a" (max_amd_level)
11084 : "a" (0x80000000)
11085 : "ebx", "ecx", "edx");
11086@@ -144,7 +144,7 @@ static void get_flags(void)
11087 if (max_amd_level >= 0x80000001 &&
11088 max_amd_level <= 0x8000ffff) {
11089 u32 eax = 0x80000001;
11090- asm("cpuid"
11091+ asm volatile("cpuid"
11092 : "+a" (eax),
11093 "=c" (cpu.flags[6]),
11094 "=d" (cpu.flags[1])
11095@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
11096 u32 ecx = MSR_K7_HWCR;
11097 u32 eax, edx;
11098
11099- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
11100+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
11101 eax &= ~(1 << 15);
11102- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
11103+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
11104
11105 get_flags(); /* Make sure it really did something */
11106 err = check_flags();
11107@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
11108 u32 ecx = MSR_VIA_FCR;
11109 u32 eax, edx;
11110
11111- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
11112+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
11113 eax |= (1<<1)|(1<<7);
11114- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
11115+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
11116
11117 set_bit(X86_FEATURE_CX8, cpu.flags);
11118 err = check_flags();
11119@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
11120 u32 eax, edx;
11121 u32 level = 1;
11122
11123- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
11124- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
11125- asm("cpuid"
11126+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
11127+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
11128+ asm volatile("cpuid"
11129 : "+a" (level), "=d" (cpu.flags[0])
11130 : : "ecx", "ebx");
11131- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
11132+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
11133
11134 err = check_flags();
11135 }
11136diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
11137index 9ec06a1..2c25e79 100644
11138--- a/arch/x86/boot/header.S
11139+++ b/arch/x86/boot/header.S
11140@@ -409,10 +409,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
11141 # single linked list of
11142 # struct setup_data
11143
11144-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
11145+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
11146
11147 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
11148+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11149+#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
11150+#else
11151 #define VO_INIT_SIZE (VO__end - VO__text)
11152+#endif
11153 #if ZO_INIT_SIZE > VO_INIT_SIZE
11154 #define INIT_SIZE ZO_INIT_SIZE
11155 #else
11156diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
11157index db75d07..8e6d0af 100644
11158--- a/arch/x86/boot/memory.c
11159+++ b/arch/x86/boot/memory.c
11160@@ -19,7 +19,7 @@
11161
11162 static int detect_memory_e820(void)
11163 {
11164- int count = 0;
11165+ unsigned int count = 0;
11166 struct biosregs ireg, oreg;
11167 struct e820entry *desc = boot_params.e820_map;
11168 static struct e820entry buf; /* static so it is zeroed */
11169diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
11170index 11e8c6e..fdbb1ed 100644
11171--- a/arch/x86/boot/video-vesa.c
11172+++ b/arch/x86/boot/video-vesa.c
11173@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
11174
11175 boot_params.screen_info.vesapm_seg = oreg.es;
11176 boot_params.screen_info.vesapm_off = oreg.di;
11177+ boot_params.screen_info.vesapm_size = oreg.cx;
11178 }
11179
11180 /*
11181diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
11182index 43eda28..5ab5fdb 100644
11183--- a/arch/x86/boot/video.c
11184+++ b/arch/x86/boot/video.c
11185@@ -96,7 +96,7 @@ static void store_mode_params(void)
11186 static unsigned int get_entry(void)
11187 {
11188 char entry_buf[4];
11189- int i, len = 0;
11190+ unsigned int i, len = 0;
11191 int key;
11192 unsigned int v;
11193
11194diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
11195index 9105655..5e37f27 100644
11196--- a/arch/x86/crypto/aes-x86_64-asm_64.S
11197+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
11198@@ -8,6 +8,8 @@
11199 * including this sentence is retained in full.
11200 */
11201
11202+#include <asm/alternative-asm.h>
11203+
11204 .extern crypto_ft_tab
11205 .extern crypto_it_tab
11206 .extern crypto_fl_tab
11207@@ -70,6 +72,8 @@
11208 je B192; \
11209 leaq 32(r9),r9;
11210
11211+#define ret pax_force_retaddr 0, 1; ret
11212+
11213 #define epilogue(FUNC,r1,r2,r3,r4,r5,r6,r7,r8,r9) \
11214 movq r1,r2; \
11215 movq r3,r4; \
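
The redefinition of ret to pax_force_retaddr 0, 1; ret, repeated in each crypto file that follows, is the KERNEXEC plugin's return-address hardening: before every return the saved address on the stack gets its top bit forced on, a no-op for genuine kernel text on x86-64 but fatal for an overwritten slot pointing into user space. A user-space model of that invariant (the real macro operates on (%rsp) in assembler; the constants here are illustrative):

#include <stdint.h>
#include <stdio.h>

static uint64_t force_retaddr(uint64_t ra)
{
	return ra | (1ULL << 63);	/* the bts on the saved slot */
}

int main(void)
{
	uint64_t kernel_ra = 0xffffffff81000000ULL;	/* bit 63 already set */
	uint64_t forged_ra = 0x0000000000401000ULL;	/* user-space target */

	printf("%016llx\n", (unsigned long long)force_retaddr(kernel_ra));
	/* the forged address becomes non-canonical and faults on return: */
	printf("%016llx\n", (unsigned long long)force_retaddr(forged_ra));
	return 0;
}
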
11216diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
11217index 477e9d7..3ab339f 100644
11218--- a/arch/x86/crypto/aesni-intel_asm.S
11219+++ b/arch/x86/crypto/aesni-intel_asm.S
11220@@ -31,6 +31,7 @@
11221
11222 #include <linux/linkage.h>
11223 #include <asm/inst.h>
11224+#include <asm/alternative-asm.h>
11225
11226 #ifdef __x86_64__
11227 .data
11228@@ -1441,6 +1442,7 @@ _return_T_done_decrypt:
11229 pop %r14
11230 pop %r13
11231 pop %r12
11232+ pax_force_retaddr 0, 1
11233 ret
11234 ENDPROC(aesni_gcm_dec)
11235
11236@@ -1705,6 +1707,7 @@ _return_T_done_encrypt:
11237 pop %r14
11238 pop %r13
11239 pop %r12
11240+ pax_force_retaddr 0, 1
11241 ret
11242 ENDPROC(aesni_gcm_enc)
11243
11244@@ -1722,6 +1725,7 @@ _key_expansion_256a:
11245 pxor %xmm1, %xmm0
11246 movaps %xmm0, (TKEYP)
11247 add $0x10, TKEYP
11248+ pax_force_retaddr_bts
11249 ret
11250 ENDPROC(_key_expansion_128)
11251 ENDPROC(_key_expansion_256a)
11252@@ -1748,6 +1752,7 @@ _key_expansion_192a:
11253 shufps $0b01001110, %xmm2, %xmm1
11254 movaps %xmm1, 0x10(TKEYP)
11255 add $0x20, TKEYP
11256+ pax_force_retaddr_bts
11257 ret
11258 ENDPROC(_key_expansion_192a)
11259
11260@@ -1768,6 +1773,7 @@ _key_expansion_192b:
11261
11262 movaps %xmm0, (TKEYP)
11263 add $0x10, TKEYP
11264+ pax_force_retaddr_bts
11265 ret
11266 ENDPROC(_key_expansion_192b)
11267
11268@@ -1781,6 +1787,7 @@ _key_expansion_256b:
11269 pxor %xmm1, %xmm2
11270 movaps %xmm2, (TKEYP)
11271 add $0x10, TKEYP
11272+ pax_force_retaddr_bts
11273 ret
11274 ENDPROC(_key_expansion_256b)
11275
11276@@ -1894,6 +1901,7 @@ ENTRY(aesni_set_key)
11277 #ifndef __x86_64__
11278 popl KEYP
11279 #endif
11280+ pax_force_retaddr 0, 1
11281 ret
11282 ENDPROC(aesni_set_key)
11283
11284@@ -1916,6 +1924,7 @@ ENTRY(aesni_enc)
11285 popl KLEN
11286 popl KEYP
11287 #endif
11288+ pax_force_retaddr 0, 1
11289 ret
11290 ENDPROC(aesni_enc)
11291
11292@@ -1974,6 +1983,7 @@ _aesni_enc1:
11293 AESENC KEY STATE
11294 movaps 0x70(TKEYP), KEY
11295 AESENCLAST KEY STATE
11296+ pax_force_retaddr_bts
11297 ret
11298 ENDPROC(_aesni_enc1)
11299
11300@@ -2083,6 +2093,7 @@ _aesni_enc4:
11301 AESENCLAST KEY STATE2
11302 AESENCLAST KEY STATE3
11303 AESENCLAST KEY STATE4
11304+ pax_force_retaddr_bts
11305 ret
11306 ENDPROC(_aesni_enc4)
11307
11308@@ -2106,6 +2117,7 @@ ENTRY(aesni_dec)
11309 popl KLEN
11310 popl KEYP
11311 #endif
11312+ pax_force_retaddr 0, 1
11313 ret
11314 ENDPROC(aesni_dec)
11315
11316@@ -2164,6 +2176,7 @@ _aesni_dec1:
11317 AESDEC KEY STATE
11318 movaps 0x70(TKEYP), KEY
11319 AESDECLAST KEY STATE
11320+ pax_force_retaddr_bts
11321 ret
11322 ENDPROC(_aesni_dec1)
11323
11324@@ -2273,6 +2286,7 @@ _aesni_dec4:
11325 AESDECLAST KEY STATE2
11326 AESDECLAST KEY STATE3
11327 AESDECLAST KEY STATE4
11328+ pax_force_retaddr_bts
11329 ret
11330 ENDPROC(_aesni_dec4)
11331
11332@@ -2331,6 +2345,7 @@ ENTRY(aesni_ecb_enc)
11333 popl KEYP
11334 popl LEN
11335 #endif
11336+ pax_force_retaddr 0, 1
11337 ret
11338 ENDPROC(aesni_ecb_enc)
11339
11340@@ -2390,6 +2405,7 @@ ENTRY(aesni_ecb_dec)
11341 popl KEYP
11342 popl LEN
11343 #endif
11344+ pax_force_retaddr 0, 1
11345 ret
11346 ENDPROC(aesni_ecb_dec)
11347
11348@@ -2432,6 +2448,7 @@ ENTRY(aesni_cbc_enc)
11349 popl LEN
11350 popl IVP
11351 #endif
11352+ pax_force_retaddr 0, 1
11353 ret
11354 ENDPROC(aesni_cbc_enc)
11355
11356@@ -2523,6 +2540,7 @@ ENTRY(aesni_cbc_dec)
11357 popl LEN
11358 popl IVP
11359 #endif
11360+ pax_force_retaddr 0, 1
11361 ret
11362 ENDPROC(aesni_cbc_dec)
11363
11364@@ -2550,6 +2568,7 @@ _aesni_inc_init:
11365 mov $1, TCTR_LOW
11366 MOVQ_R64_XMM TCTR_LOW INC
11367 MOVQ_R64_XMM CTR TCTR_LOW
11368+ pax_force_retaddr_bts
11369 ret
11370 ENDPROC(_aesni_inc_init)
11371
11372@@ -2579,6 +2598,7 @@ _aesni_inc:
11373 .Linc_low:
11374 movaps CTR, IV
11375 PSHUFB_XMM BSWAP_MASK IV
11376+ pax_force_retaddr_bts
11377 ret
11378 ENDPROC(_aesni_inc)
11379
11380@@ -2640,6 +2660,7 @@ ENTRY(aesni_ctr_enc)
11381 .Lctr_enc_ret:
11382 movups IV, (IVP)
11383 .Lctr_enc_just_ret:
11384+ pax_force_retaddr 0, 1
11385 ret
11386 ENDPROC(aesni_ctr_enc)
11387
11388@@ -2766,6 +2787,7 @@ ENTRY(aesni_xts_crypt8)
11389 pxor INC, STATE4
11390 movdqu STATE4, 0x70(OUTP)
11391
11392+ pax_force_retaddr 0, 1
11393 ret
11394 ENDPROC(aesni_xts_crypt8)
11395
11396diff --git a/arch/x86/crypto/blowfish-avx2-asm_64.S b/arch/x86/crypto/blowfish-avx2-asm_64.S
11397index 784452e..46982c7 100644
11398--- a/arch/x86/crypto/blowfish-avx2-asm_64.S
11399+++ b/arch/x86/crypto/blowfish-avx2-asm_64.S
11400@@ -221,6 +221,7 @@ __blowfish_enc_blk32:
11401
11402 write_block(RXl, RXr);
11403
11404+ pax_force_retaddr 0, 1
11405 ret;
11406 ENDPROC(__blowfish_enc_blk32)
11407
11408@@ -250,6 +251,7 @@ __blowfish_dec_blk32:
11409
11410 write_block(RXl, RXr);
11411
11412+ pax_force_retaddr 0, 1
11413 ret;
11414 ENDPROC(__blowfish_dec_blk32)
11415
11416@@ -284,6 +286,7 @@ ENTRY(blowfish_ecb_enc_32way)
11417
11418 vzeroupper;
11419
11420+ pax_force_retaddr 0, 1
11421 ret;
11422 ENDPROC(blowfish_ecb_enc_32way)
11423
11424@@ -318,6 +321,7 @@ ENTRY(blowfish_ecb_dec_32way)
11425
11426 vzeroupper;
11427
11428+ pax_force_retaddr 0, 1
11429 ret;
11430 ENDPROC(blowfish_ecb_dec_32way)
11431
11432@@ -365,6 +369,7 @@ ENTRY(blowfish_cbc_dec_32way)
11433
11434 vzeroupper;
11435
11436+ pax_force_retaddr 0, 1
11437 ret;
11438 ENDPROC(blowfish_cbc_dec_32way)
11439
11440@@ -445,5 +450,6 @@ ENTRY(blowfish_ctr_32way)
11441
11442 vzeroupper;
11443
11444+ pax_force_retaddr 0, 1
11445 ret;
11446 ENDPROC(blowfish_ctr_32way)
11447diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
11448index 246c670..4d1ed00 100644
11449--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
11450+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
11451@@ -21,6 +21,7 @@
11452 */
11453
11454 #include <linux/linkage.h>
11455+#include <asm/alternative-asm.h>
11456
11457 .file "blowfish-x86_64-asm.S"
11458 .text
11459@@ -149,9 +150,11 @@ ENTRY(__blowfish_enc_blk)
11460 jnz .L__enc_xor;
11461
11462 write_block();
11463+ pax_force_retaddr 0, 1
11464 ret;
11465 .L__enc_xor:
11466 xor_block();
11467+ pax_force_retaddr 0, 1
11468 ret;
11469 ENDPROC(__blowfish_enc_blk)
11470
11471@@ -183,6 +186,7 @@ ENTRY(blowfish_dec_blk)
11472
11473 movq %r11, %rbp;
11474
11475+ pax_force_retaddr 0, 1
11476 ret;
11477 ENDPROC(blowfish_dec_blk)
11478
11479@@ -334,6 +338,7 @@ ENTRY(__blowfish_enc_blk_4way)
11480
11481 popq %rbx;
11482 popq %rbp;
11483+ pax_force_retaddr 0, 1
11484 ret;
11485
11486 .L__enc_xor4:
11487@@ -341,6 +346,7 @@ ENTRY(__blowfish_enc_blk_4way)
11488
11489 popq %rbx;
11490 popq %rbp;
11491+ pax_force_retaddr 0, 1
11492 ret;
11493 ENDPROC(__blowfish_enc_blk_4way)
11494
11495@@ -375,5 +381,6 @@ ENTRY(blowfish_dec_blk_4way)
11496 popq %rbx;
11497 popq %rbp;
11498
11499+ pax_force_retaddr 0, 1
11500 ret;
11501 ENDPROC(blowfish_dec_blk_4way)
11502diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
11503index ce71f92..2dd5b1e 100644
11504--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
11505+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
11506@@ -16,6 +16,7 @@
11507 */
11508
11509 #include <linux/linkage.h>
11510+#include <asm/alternative-asm.h>
11511
11512 #define CAMELLIA_TABLE_BYTE_LEN 272
11513
11514@@ -191,6 +192,7 @@ roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
11515 roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
11516 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15,
11517 %rcx, (%r9));
11518+ pax_force_retaddr_bts
11519 ret;
11520 ENDPROC(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
11521
11522@@ -199,6 +201,7 @@ roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
11523 roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3,
11524 %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11,
11525 %rax, (%r9));
11526+ pax_force_retaddr_bts
11527 ret;
11528 ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
11529
11530@@ -780,6 +783,7 @@ __camellia_enc_blk16:
11531 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
11532 %xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax));
11533
11534+ pax_force_retaddr_bts
11535 ret;
11536
11537 .align 8
11538@@ -865,6 +869,7 @@ __camellia_dec_blk16:
11539 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
11540 %xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax));
11541
11542+ pax_force_retaddr_bts
11543 ret;
11544
11545 .align 8
11546@@ -904,6 +909,7 @@ ENTRY(camellia_ecb_enc_16way)
11547 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
11548 %xmm8, %rsi);
11549
11550+ pax_force_retaddr 0, 1
11551 ret;
11552 ENDPROC(camellia_ecb_enc_16way)
11553
11554@@ -932,6 +938,7 @@ ENTRY(camellia_ecb_dec_16way)
11555 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
11556 %xmm8, %rsi);
11557
11558+ pax_force_retaddr 0, 1
11559 ret;
11560 ENDPROC(camellia_ecb_dec_16way)
11561
11562@@ -981,6 +988,7 @@ ENTRY(camellia_cbc_dec_16way)
11563 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
11564 %xmm8, %rsi);
11565
11566+ pax_force_retaddr 0, 1
11567 ret;
11568 ENDPROC(camellia_cbc_dec_16way)
11569
11570@@ -1092,6 +1100,7 @@ ENTRY(camellia_ctr_16way)
11571 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
11572 %xmm8, %rsi);
11573
11574+ pax_force_retaddr 0, 1
11575 ret;
11576 ENDPROC(camellia_ctr_16way)
11577
11578@@ -1234,6 +1243,7 @@ camellia_xts_crypt_16way:
11579 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
11580 %xmm8, %rsi);
11581
11582+ pax_force_retaddr 0, 1
11583 ret;
11584 ENDPROC(camellia_xts_crypt_16way)
11585
11586diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
11587index 91a1878..bcf340a 100644
11588--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
11589+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
11590@@ -11,6 +11,7 @@
11591 */
11592
11593 #include <linux/linkage.h>
11594+#include <asm/alternative-asm.h>
11595
11596 #define CAMELLIA_TABLE_BYTE_LEN 272
11597
11598@@ -212,6 +213,7 @@ roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
11599 roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
11600 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15,
11601 %rcx, (%r9));
11602+ pax_force_retaddr_bts
11603 ret;
11604 ENDPROC(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
11605
11606@@ -220,6 +222,7 @@ roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
11607 roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3,
11608 %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11,
11609 %rax, (%r9));
11610+ pax_force_retaddr_bts
11611 ret;
11612 ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
11613
11614@@ -802,6 +805,7 @@ __camellia_enc_blk32:
11615 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
11616 %ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax));
11617
11618+ pax_force_retaddr_bts
11619 ret;
11620
11621 .align 8
11622@@ -887,6 +891,7 @@ __camellia_dec_blk32:
11623 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
11624 %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax));
11625
11626+ pax_force_retaddr_bts
11627 ret;
11628
11629 .align 8
11630@@ -930,6 +935,7 @@ ENTRY(camellia_ecb_enc_32way)
11631
11632 vzeroupper;
11633
11634+ pax_force_retaddr 0, 1
11635 ret;
11636 ENDPROC(camellia_ecb_enc_32way)
11637
11638@@ -962,6 +968,7 @@ ENTRY(camellia_ecb_dec_32way)
11639
11640 vzeroupper;
11641
11642+ pax_force_retaddr 0, 1
11643 ret;
11644 ENDPROC(camellia_ecb_dec_32way)
11645
11646@@ -1028,6 +1035,7 @@ ENTRY(camellia_cbc_dec_32way)
11647
11648 vzeroupper;
11649
11650+ pax_force_retaddr 0, 1
11651 ret;
11652 ENDPROC(camellia_cbc_dec_32way)
11653
11654@@ -1166,6 +1174,7 @@ ENTRY(camellia_ctr_32way)
11655
11656 vzeroupper;
11657
11658+ pax_force_retaddr 0, 1
11659 ret;
11660 ENDPROC(camellia_ctr_32way)
11661
11662@@ -1331,6 +1340,7 @@ camellia_xts_crypt_32way:
11663
11664 vzeroupper;
11665
11666+ pax_force_retaddr 0, 1
11667 ret;
11668 ENDPROC(camellia_xts_crypt_32way)
11669
11670diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
11671index 310319c..ce174a4 100644
11672--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
11673+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
11674@@ -21,6 +21,7 @@
11675 */
11676
11677 #include <linux/linkage.h>
11678+#include <asm/alternative-asm.h>
11679
11680 .file "camellia-x86_64-asm_64.S"
11681 .text
11682@@ -228,12 +229,14 @@ ENTRY(__camellia_enc_blk)
11683 enc_outunpack(mov, RT1);
11684
11685 movq RRBP, %rbp;
11686+ pax_force_retaddr 0, 1
11687 ret;
11688
11689 .L__enc_xor:
11690 enc_outunpack(xor, RT1);
11691
11692 movq RRBP, %rbp;
11693+ pax_force_retaddr 0, 1
11694 ret;
11695 ENDPROC(__camellia_enc_blk)
11696
11697@@ -272,6 +275,7 @@ ENTRY(camellia_dec_blk)
11698 dec_outunpack();
11699
11700 movq RRBP, %rbp;
11701+ pax_force_retaddr 0, 1
11702 ret;
11703 ENDPROC(camellia_dec_blk)
11704
11705@@ -463,6 +467,7 @@ ENTRY(__camellia_enc_blk_2way)
11706
11707 movq RRBP, %rbp;
11708 popq %rbx;
11709+ pax_force_retaddr 0, 1
11710 ret;
11711
11712 .L__enc2_xor:
11713@@ -470,6 +475,7 @@ ENTRY(__camellia_enc_blk_2way)
11714
11715 movq RRBP, %rbp;
11716 popq %rbx;
11717+ pax_force_retaddr 0, 1
11718 ret;
11719 ENDPROC(__camellia_enc_blk_2way)
11720
11721@@ -510,5 +516,6 @@ ENTRY(camellia_dec_blk_2way)
11722
11723 movq RRBP, %rbp;
11724 movq RXOR, %rbx;
11725+ pax_force_retaddr 0, 1
11726 ret;
11727 ENDPROC(camellia_dec_blk_2way)
11728diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
11729index c35fd5d..c1ee236 100644
11730--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
11731+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
11732@@ -24,6 +24,7 @@
11733 */
11734
11735 #include <linux/linkage.h>
11736+#include <asm/alternative-asm.h>
11737
11738 .file "cast5-avx-x86_64-asm_64.S"
11739
11740@@ -281,6 +282,7 @@ __cast5_enc_blk16:
11741 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
11742 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
11743
11744+ pax_force_retaddr 0, 1
11745 ret;
11746 ENDPROC(__cast5_enc_blk16)
11747
11748@@ -352,6 +354,7 @@ __cast5_dec_blk16:
11749 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
11750 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
11751
11752+ pax_force_retaddr 0, 1
11753 ret;
11754
11755 .L__skip_dec:
11756@@ -388,6 +391,7 @@ ENTRY(cast5_ecb_enc_16way)
11757 vmovdqu RR4, (6*4*4)(%r11);
11758 vmovdqu RL4, (7*4*4)(%r11);
11759
11760+ pax_force_retaddr
11761 ret;
11762 ENDPROC(cast5_ecb_enc_16way)
11763
11764@@ -420,6 +424,7 @@ ENTRY(cast5_ecb_dec_16way)
11765 vmovdqu RR4, (6*4*4)(%r11);
11766 vmovdqu RL4, (7*4*4)(%r11);
11767
11768+ pax_force_retaddr
11769 ret;
11770 ENDPROC(cast5_ecb_dec_16way)
11771
11772@@ -469,6 +474,7 @@ ENTRY(cast5_cbc_dec_16way)
11773
11774 popq %r12;
11775
11776+ pax_force_retaddr
11777 ret;
11778 ENDPROC(cast5_cbc_dec_16way)
11779
11780@@ -542,5 +548,6 @@ ENTRY(cast5_ctr_16way)
11781
11782 popq %r12;
11783
11784+ pax_force_retaddr
11785 ret;
11786 ENDPROC(cast5_ctr_16way)
11787diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
11788index e3531f8..18ded3a 100644
11789--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
11790+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
11791@@ -24,6 +24,7 @@
11792 */
11793
11794 #include <linux/linkage.h>
11795+#include <asm/alternative-asm.h>
11796 #include "glue_helper-asm-avx.S"
11797
11798 .file "cast6-avx-x86_64-asm_64.S"
11799@@ -295,6 +296,7 @@ __cast6_enc_blk8:
11800 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
11801 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
11802
11803+ pax_force_retaddr 0, 1
11804 ret;
11805 ENDPROC(__cast6_enc_blk8)
11806
11807@@ -340,6 +342,7 @@ __cast6_dec_blk8:
11808 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
11809 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
11810
11811+ pax_force_retaddr 0, 1
11812 ret;
11813 ENDPROC(__cast6_dec_blk8)
11814
11815@@ -358,6 +361,7 @@ ENTRY(cast6_ecb_enc_8way)
11816
11817 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11818
11819+ pax_force_retaddr
11820 ret;
11821 ENDPROC(cast6_ecb_enc_8way)
11822
11823@@ -376,6 +380,7 @@ ENTRY(cast6_ecb_dec_8way)
11824
11825 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11826
11827+ pax_force_retaddr
11828 ret;
11829 ENDPROC(cast6_ecb_dec_8way)
11830
11831@@ -399,6 +404,7 @@ ENTRY(cast6_cbc_dec_8way)
11832
11833 popq %r12;
11834
11835+ pax_force_retaddr
11836 ret;
11837 ENDPROC(cast6_cbc_dec_8way)
11838
11839@@ -424,6 +430,7 @@ ENTRY(cast6_ctr_8way)
11840
11841 popq %r12;
11842
11843+ pax_force_retaddr
11844 ret;
11845 ENDPROC(cast6_ctr_8way)
11846
11847@@ -446,6 +453,7 @@ ENTRY(cast6_xts_enc_8way)
11848 /* dst <= regs xor IVs(in dst) */
11849 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11850
11851+ pax_force_retaddr
11852 ret;
11853 ENDPROC(cast6_xts_enc_8way)
11854
11855@@ -468,5 +476,6 @@ ENTRY(cast6_xts_dec_8way)
11856 /* dst <= regs xor IVs(in dst) */
11857 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11858
11859+ pax_force_retaddr
11860 ret;
11861 ENDPROC(cast6_xts_dec_8way)
11862diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
11863index dbc4339..3d868c5 100644
11864--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
11865+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
11866@@ -45,6 +45,7 @@
11867
11868 #include <asm/inst.h>
11869 #include <linux/linkage.h>
11870+#include <asm/alternative-asm.h>
11871
11872 ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
11873
11874@@ -312,6 +313,7 @@ do_return:
11875 popq %rsi
11876 popq %rdi
11877 popq %rbx
11878+ pax_force_retaddr 0, 1
11879 ret
11880
11881 ################################################################
11882diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
11883index 586f41a..d02851e 100644
11884--- a/arch/x86/crypto/ghash-clmulni-intel_asm.S
11885+++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
11886@@ -18,6 +18,7 @@
11887
11888 #include <linux/linkage.h>
11889 #include <asm/inst.h>
11890+#include <asm/alternative-asm.h>
11891
11892 .data
11893
11894@@ -93,6 +94,7 @@ __clmul_gf128mul_ble:
11895 psrlq $1, T2
11896 pxor T2, T1
11897 pxor T1, DATA
11898+ pax_force_retaddr
11899 ret
11900 ENDPROC(__clmul_gf128mul_ble)
11901
11902@@ -105,6 +107,7 @@ ENTRY(clmul_ghash_mul)
11903 call __clmul_gf128mul_ble
11904 PSHUFB_XMM BSWAP DATA
11905 movups DATA, (%rdi)
11906+ pax_force_retaddr
11907 ret
11908 ENDPROC(clmul_ghash_mul)
11909
11910@@ -132,6 +135,7 @@ ENTRY(clmul_ghash_update)
11911 PSHUFB_XMM BSWAP DATA
11912 movups DATA, (%rdi)
11913 .Lupdate_just_ret:
11914+ pax_force_retaddr
11915 ret
11916 ENDPROC(clmul_ghash_update)
11917
11918@@ -157,5 +161,6 @@ ENTRY(clmul_ghash_setkey)
11919 pand .Lpoly, %xmm1
11920 pxor %xmm1, %xmm0
11921 movups %xmm0, (%rdi)
11922+ pax_force_retaddr
11923 ret
11924 ENDPROC(clmul_ghash_setkey)
11925diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
11926index 9279e0b..9270820 100644
11927--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
11928+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
11929@@ -1,4 +1,5 @@
11930 #include <linux/linkage.h>
11931+#include <asm/alternative-asm.h>
11932
11933 # enter salsa20_encrypt_bytes
11934 ENTRY(salsa20_encrypt_bytes)
11935@@ -789,6 +790,7 @@ ENTRY(salsa20_encrypt_bytes)
11936 add %r11,%rsp
11937 mov %rdi,%rax
11938 mov %rsi,%rdx
11939+ pax_force_retaddr 0, 1
11940 ret
11941 # bytesatleast65:
11942 ._bytesatleast65:
11943@@ -889,6 +891,7 @@ ENTRY(salsa20_keysetup)
11944 add %r11,%rsp
11945 mov %rdi,%rax
11946 mov %rsi,%rdx
11947+ pax_force_retaddr
11948 ret
11949 ENDPROC(salsa20_keysetup)
11950
11951@@ -914,5 +917,6 @@ ENTRY(salsa20_ivsetup)
11952 add %r11,%rsp
11953 mov %rdi,%rax
11954 mov %rsi,%rdx
11955+ pax_force_retaddr
11956 ret
11957 ENDPROC(salsa20_ivsetup)
11958diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
11959index 2f202f4..d9164d6 100644
11960--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
11961+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
11962@@ -24,6 +24,7 @@
11963 */
11964
11965 #include <linux/linkage.h>
11966+#include <asm/alternative-asm.h>
11967 #include "glue_helper-asm-avx.S"
11968
11969 .file "serpent-avx-x86_64-asm_64.S"
11970@@ -618,6 +619,7 @@ __serpent_enc_blk8_avx:
11971 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
11972 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
11973
11974+ pax_force_retaddr
11975 ret;
11976 ENDPROC(__serpent_enc_blk8_avx)
11977
11978@@ -672,6 +674,7 @@ __serpent_dec_blk8_avx:
11979 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
11980 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
11981
11982+ pax_force_retaddr
11983 ret;
11984 ENDPROC(__serpent_dec_blk8_avx)
11985
11986@@ -688,6 +691,7 @@ ENTRY(serpent_ecb_enc_8way_avx)
11987
11988 store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11989
11990+ pax_force_retaddr
11991 ret;
11992 ENDPROC(serpent_ecb_enc_8way_avx)
11993
11994@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_8way_avx)
11995
11996 store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
11997
11998+ pax_force_retaddr
11999 ret;
12000 ENDPROC(serpent_ecb_dec_8way_avx)
12001
12002@@ -720,6 +725,7 @@ ENTRY(serpent_cbc_dec_8way_avx)
12003
12004 store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
12005
12006+ pax_force_retaddr
12007 ret;
12008 ENDPROC(serpent_cbc_dec_8way_avx)
12009
12010@@ -738,6 +744,7 @@ ENTRY(serpent_ctr_8way_avx)
12011
12012 store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
12013
12014+ pax_force_retaddr
12015 ret;
12016 ENDPROC(serpent_ctr_8way_avx)
12017
12018@@ -758,6 +765,7 @@ ENTRY(serpent_xts_enc_8way_avx)
12019 /* dst <= regs xor IVs(in dst) */
12020 store_xts_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
12021
12022+ pax_force_retaddr
12023 ret;
12024 ENDPROC(serpent_xts_enc_8way_avx)
12025
12026@@ -778,5 +786,6 @@ ENTRY(serpent_xts_dec_8way_avx)
12027 /* dst <= regs xor IVs(in dst) */
12028 store_xts_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
12029
12030+ pax_force_retaddr
12031 ret;
12032 ENDPROC(serpent_xts_dec_8way_avx)
12033diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S
12034index b222085..abd483c 100644
12035--- a/arch/x86/crypto/serpent-avx2-asm_64.S
12036+++ b/arch/x86/crypto/serpent-avx2-asm_64.S
12037@@ -15,6 +15,7 @@
12038 */
12039
12040 #include <linux/linkage.h>
12041+#include <asm/alternative-asm.h>
12042 #include "glue_helper-asm-avx2.S"
12043
12044 .file "serpent-avx2-asm_64.S"
12045@@ -610,6 +611,7 @@ __serpent_enc_blk16:
12046 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
12047 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
12048
12049+ pax_force_retaddr
12050 ret;
12051 ENDPROC(__serpent_enc_blk16)
12052
12053@@ -664,6 +666,7 @@ __serpent_dec_blk16:
12054 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
12055 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
12056
12057+ pax_force_retaddr
12058 ret;
12059 ENDPROC(__serpent_dec_blk16)
12060
12061@@ -684,6 +687,7 @@ ENTRY(serpent_ecb_enc_16way)
12062
12063 vzeroupper;
12064
12065+ pax_force_retaddr
12066 ret;
12067 ENDPROC(serpent_ecb_enc_16way)
12068
12069@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_16way)
12070
12071 vzeroupper;
12072
12073+ pax_force_retaddr
12074 ret;
12075 ENDPROC(serpent_ecb_dec_16way)
12076
12077@@ -725,6 +730,7 @@ ENTRY(serpent_cbc_dec_16way)
12078
12079 vzeroupper;
12080
12081+ pax_force_retaddr
12082 ret;
12083 ENDPROC(serpent_cbc_dec_16way)
12084
12085@@ -748,6 +754,7 @@ ENTRY(serpent_ctr_16way)
12086
12087 vzeroupper;
12088
12089+ pax_force_retaddr
12090 ret;
12091 ENDPROC(serpent_ctr_16way)
12092
12093@@ -772,6 +779,7 @@ ENTRY(serpent_xts_enc_16way)
12094
12095 vzeroupper;
12096
12097+ pax_force_retaddr
12098 ret;
12099 ENDPROC(serpent_xts_enc_16way)
12100
12101@@ -796,5 +804,6 @@ ENTRY(serpent_xts_dec_16way)
12102
12103 vzeroupper;
12104
12105+ pax_force_retaddr
12106 ret;
12107 ENDPROC(serpent_xts_dec_16way)
12108diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
12109index acc066c..1559cc4 100644
12110--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
12111+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
12112@@ -25,6 +25,7 @@
12113 */
12114
12115 #include <linux/linkage.h>
12116+#include <asm/alternative-asm.h>
12117
12118 .file "serpent-sse2-x86_64-asm_64.S"
12119 .text
12120@@ -690,12 +691,14 @@ ENTRY(__serpent_enc_blk_8way)
12121 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
12122 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
12123
12124+ pax_force_retaddr
12125 ret;
12126
12127 .L__enc_xor8:
12128 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
12129 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
12130
12131+ pax_force_retaddr
12132 ret;
12133 ENDPROC(__serpent_enc_blk_8way)
12134
12135@@ -750,5 +753,6 @@ ENTRY(serpent_dec_blk_8way)
12136 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
12137 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
12138
12139+ pax_force_retaddr
12140 ret;
12141 ENDPROC(serpent_dec_blk_8way)
12142diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
12143index a410950..3356d42 100644
12144--- a/arch/x86/crypto/sha1_ssse3_asm.S
12145+++ b/arch/x86/crypto/sha1_ssse3_asm.S
12146@@ -29,6 +29,7 @@
12147 */
12148
12149 #include <linux/linkage.h>
12150+#include <asm/alternative-asm.h>
12151
12152 #define CTX %rdi // arg1
12153 #define BUF %rsi // arg2
12154@@ -104,6 +105,7 @@
12155 pop %r12
12156 pop %rbp
12157 pop %rbx
12158+ pax_force_retaddr 0, 1
12159 ret
12160
12161 ENDPROC(\name)
12162diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S
12163index 642f156..4ab07b9 100644
12164--- a/arch/x86/crypto/sha256-avx-asm.S
12165+++ b/arch/x86/crypto/sha256-avx-asm.S
12166@@ -49,6 +49,7 @@
12167
12168 #ifdef CONFIG_AS_AVX
12169 #include <linux/linkage.h>
12170+#include <asm/alternative-asm.h>
12171
12172 ## assume buffers not aligned
12173 #define VMOVDQ vmovdqu
12174@@ -460,6 +461,7 @@ done_hash:
12175 popq %r13
12176 popq %rbp
12177 popq %rbx
12178+ pax_force_retaddr 0, 1
12179 ret
12180 ENDPROC(sha256_transform_avx)
12181
12182diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
12183index 9e86944..2e7f95a 100644
12184--- a/arch/x86/crypto/sha256-avx2-asm.S
12185+++ b/arch/x86/crypto/sha256-avx2-asm.S
12186@@ -50,6 +50,7 @@
12187
12188 #ifdef CONFIG_AS_AVX2
12189 #include <linux/linkage.h>
12190+#include <asm/alternative-asm.h>
12191
12192 ## assume buffers not aligned
12193 #define VMOVDQ vmovdqu
12194@@ -720,6 +721,7 @@ done_hash:
12195 popq %r12
12196 popq %rbp
12197 popq %rbx
12198+ pax_force_retaddr 0, 1
12199 ret
12200 ENDPROC(sha256_transform_rorx)
12201
12202diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S
12203index f833b74..c36ed14 100644
12204--- a/arch/x86/crypto/sha256-ssse3-asm.S
12205+++ b/arch/x86/crypto/sha256-ssse3-asm.S
12206@@ -47,6 +47,7 @@
12207 ########################################################################
12208
12209 #include <linux/linkage.h>
12210+#include <asm/alternative-asm.h>
12211
12212 ## assume buffers not aligned
12213 #define MOVDQ movdqu
12214@@ -471,6 +472,7 @@ done_hash:
12215 popq %rbp
12216 popq %rbx
12217
12218+ pax_force_retaddr 0, 1
12219 ret
12220 ENDPROC(sha256_transform_ssse3)
12221
12222diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S
12223index 974dde9..4533d34 100644
12224--- a/arch/x86/crypto/sha512-avx-asm.S
12225+++ b/arch/x86/crypto/sha512-avx-asm.S
12226@@ -49,6 +49,7 @@
12227
12228 #ifdef CONFIG_AS_AVX
12229 #include <linux/linkage.h>
12230+#include <asm/alternative-asm.h>
12231
12232 .text
12233
12234@@ -364,6 +365,7 @@ updateblock:
12235 mov frame_RSPSAVE(%rsp), %rsp
12236
12237 nowork:
12238+ pax_force_retaddr 0, 1
12239 ret
12240 ENDPROC(sha512_transform_avx)
12241
12242diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
12243index 568b961..061ef1d 100644
12244--- a/arch/x86/crypto/sha512-avx2-asm.S
12245+++ b/arch/x86/crypto/sha512-avx2-asm.S
12246@@ -51,6 +51,7 @@
12247
12248 #ifdef CONFIG_AS_AVX2
12249 #include <linux/linkage.h>
12250+#include <asm/alternative-asm.h>
12251
12252 .text
12253
12254@@ -678,6 +679,7 @@ done_hash:
12255
12256 # Restore Stack Pointer
12257 mov frame_RSPSAVE(%rsp), %rsp
12258+ pax_force_retaddr 0, 1
12259 ret
12260 ENDPROC(sha512_transform_rorx)
12261
12262diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S
12263index fb56855..e23914f 100644
12264--- a/arch/x86/crypto/sha512-ssse3-asm.S
12265+++ b/arch/x86/crypto/sha512-ssse3-asm.S
12266@@ -48,6 +48,7 @@
12267 ########################################################################
12268
12269 #include <linux/linkage.h>
12270+#include <asm/alternative-asm.h>
12271
12272 .text
12273
12274@@ -363,6 +364,7 @@ updateblock:
12275 mov frame_RSPSAVE(%rsp), %rsp
12276
12277 nowork:
12278+ pax_force_retaddr 0, 1
12279 ret
12280 ENDPROC(sha512_transform_ssse3)
12281
12282diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
12283index 0505813..63b1d00 100644
12284--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
12285+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
12286@@ -24,6 +24,7 @@
12287 */
12288
12289 #include <linux/linkage.h>
12290+#include <asm/alternative-asm.h>
12291 #include "glue_helper-asm-avx.S"
12292
12293 .file "twofish-avx-x86_64-asm_64.S"
12294@@ -284,6 +285,7 @@ __twofish_enc_blk8:
12295 outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
12296 outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
12297
12298+ pax_force_retaddr 0, 1
12299 ret;
12300 ENDPROC(__twofish_enc_blk8)
12301
12302@@ -324,6 +326,7 @@ __twofish_dec_blk8:
12303 outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
12304 outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
12305
12306+ pax_force_retaddr 0, 1
12307 ret;
12308 ENDPROC(__twofish_dec_blk8)
12309
12310@@ -342,6 +345,7 @@ ENTRY(twofish_ecb_enc_8way)
12311
12312 store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
12313
12314+ pax_force_retaddr 0, 1
12315 ret;
12316 ENDPROC(twofish_ecb_enc_8way)
12317
12318@@ -360,6 +364,7 @@ ENTRY(twofish_ecb_dec_8way)
12319
12320 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
12321
12322+ pax_force_retaddr 0, 1
12323 ret;
12324 ENDPROC(twofish_ecb_dec_8way)
12325
12326@@ -383,6 +388,7 @@ ENTRY(twofish_cbc_dec_8way)
12327
12328 popq %r12;
12329
12330+ pax_force_retaddr 0, 1
12331 ret;
12332 ENDPROC(twofish_cbc_dec_8way)
12333
12334@@ -408,6 +414,7 @@ ENTRY(twofish_ctr_8way)
12335
12336 popq %r12;
12337
12338+ pax_force_retaddr 0, 1
12339 ret;
12340 ENDPROC(twofish_ctr_8way)
12341
12342@@ -430,6 +437,7 @@ ENTRY(twofish_xts_enc_8way)
12343 /* dst <= regs xor IVs(in dst) */
12344 store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
12345
12346+ pax_force_retaddr 0, 1
12347 ret;
12348 ENDPROC(twofish_xts_enc_8way)
12349
12350@@ -452,5 +460,6 @@ ENTRY(twofish_xts_dec_8way)
12351 /* dst <= regs xor IVs(in dst) */
12352 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
12353
12354+ pax_force_retaddr 0, 1
12355 ret;
12356 ENDPROC(twofish_xts_dec_8way)
12357diff --git a/arch/x86/crypto/twofish-avx2-asm_64.S b/arch/x86/crypto/twofish-avx2-asm_64.S
12358index e1a83b9..33006b9 100644
12359--- a/arch/x86/crypto/twofish-avx2-asm_64.S
12360+++ b/arch/x86/crypto/twofish-avx2-asm_64.S
12361@@ -11,6 +11,7 @@
12362 */
12363
12364 #include <linux/linkage.h>
12365+#include <asm/alternative-asm.h>
12366 #include "glue_helper-asm-avx2.S"
12367
12368 .file "twofish-avx2-asm_64.S"
12369@@ -422,6 +423,7 @@ __twofish_enc_blk16:
12370 outunpack_enc16(RA, RB, RC, RD);
12371 write_blocks16(RA, RB, RC, RD);
12372
12373+ pax_force_retaddr_bts
12374 ret;
12375 ENDPROC(__twofish_enc_blk16)
12376
12377@@ -454,6 +456,7 @@ __twofish_dec_blk16:
12378 outunpack_dec16(RA, RB, RC, RD);
12379 write_blocks16(RA, RB, RC, RD);
12380
12381+ pax_force_retaddr_bts
12382 ret;
12383 ENDPROC(__twofish_dec_blk16)
12384
12385@@ -476,6 +479,7 @@ ENTRY(twofish_ecb_enc_16way)
12386 popq %r12;
12387 vzeroupper;
12388
12389+ pax_force_retaddr 0, 1
12390 ret;
12391 ENDPROC(twofish_ecb_enc_16way)
12392
12393@@ -498,6 +502,7 @@ ENTRY(twofish_ecb_dec_16way)
12394 popq %r12;
12395 vzeroupper;
12396
12397+ pax_force_retaddr 0, 1
12398 ret;
12399 ENDPROC(twofish_ecb_dec_16way)
12400
12401@@ -521,6 +526,7 @@ ENTRY(twofish_cbc_dec_16way)
12402 popq %r12;
12403 vzeroupper;
12404
12405+ pax_force_retaddr 0, 1
12406 ret;
12407 ENDPROC(twofish_cbc_dec_16way)
12408
12409@@ -546,6 +552,7 @@ ENTRY(twofish_ctr_16way)
12410 popq %r12;
12411 vzeroupper;
12412
12413+ pax_force_retaddr 0, 1
12414 ret;
12415 ENDPROC(twofish_ctr_16way)
12416
12417@@ -574,6 +581,7 @@ twofish_xts_crypt_16way:
12418 popq %r12;
12419 vzeroupper;
12420
12421+ pax_force_retaddr 0, 1
12422 ret;
12423 ENDPROC(twofish_xts_crypt_16way)
12424
12425diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
12426index 1c3b7ce..b365c5e 100644
12427--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
12428+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
12429@@ -21,6 +21,7 @@
12430 */
12431
12432 #include <linux/linkage.h>
12433+#include <asm/alternative-asm.h>
12434
12435 .file "twofish-x86_64-asm-3way.S"
12436 .text
12437@@ -258,6 +259,7 @@ ENTRY(__twofish_enc_blk_3way)
12438 popq %r13;
12439 popq %r14;
12440 popq %r15;
12441+ pax_force_retaddr 0, 1
12442 ret;
12443
12444 .L__enc_xor3:
12445@@ -269,6 +271,7 @@ ENTRY(__twofish_enc_blk_3way)
12446 popq %r13;
12447 popq %r14;
12448 popq %r15;
12449+ pax_force_retaddr 0, 1
12450 ret;
12451 ENDPROC(__twofish_enc_blk_3way)
12452
12453@@ -308,5 +311,6 @@ ENTRY(twofish_dec_blk_3way)
12454 popq %r13;
12455 popq %r14;
12456 popq %r15;
12457+ pax_force_retaddr 0, 1
12458 ret;
12459 ENDPROC(twofish_dec_blk_3way)
12460diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
12461index a039d21..29e7615 100644
12462--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
12463+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
12464@@ -22,6 +22,7 @@
12465
12466 #include <linux/linkage.h>
12467 #include <asm/asm-offsets.h>
12468+#include <asm/alternative-asm.h>
12469
12470 #define a_offset 0
12471 #define b_offset 4
12472@@ -265,6 +266,7 @@ ENTRY(twofish_enc_blk)
12473
12474 popq R1
12475 movq $1,%rax
12476+ pax_force_retaddr 0, 1
12477 ret
12478 ENDPROC(twofish_enc_blk)
12479
12480@@ -317,5 +319,6 @@ ENTRY(twofish_dec_blk)
12481
12482 popq R1
12483 movq $1,%rax
12484+ pax_force_retaddr 0, 1
12485 ret
12486 ENDPROC(twofish_dec_blk)
12487diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
12488index 52ff81c..98af645 100644
12489--- a/arch/x86/ia32/ia32_aout.c
12490+++ b/arch/x86/ia32/ia32_aout.c
12491@@ -159,6 +159,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
12492 unsigned long dump_start, dump_size;
12493 struct user32 dump;
12494
12495+ memset(&dump, 0, sizeof(dump));
12496+
12497 fs = get_fs();
12498 set_fs(KERNEL_DS);
12499 has_dumped = 1;
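The single change to aout_core_dump() zeroes the on-stack struct user32 before it is filled in; without it, compiler padding and any fields the dump path never writes would carry stale kernel stack bytes into the core file. The same pattern in isolation, with a hypothetical struct standing in for user32:

    #include <string.h>
    #include <stdio.h>

    /* hypothetical stand-in for struct user32: the compiler may insert
     * padding after the char, and pad2 may never be written explicitly */
    struct dump_hdr {
        char magic;
        long start;
        long pad2;
    };

    static void fill_dump(struct dump_hdr *d)
    {
        memset(d, 0, sizeof(*d));   /* the added line: no stale stack bytes */
        d->magic = 'D';
        d->start = 0x1000;
        /* pad2 and the padding after magic are now guaranteed zero */
    }

    int main(void)
    {
        struct dump_hdr d;
        fill_dump(&d);
        printf("%ld\n", d.pad2);
        return 0;
    }
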
12500diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
12501index cf1a471..5ba2673 100644
12502--- a/arch/x86/ia32/ia32_signal.c
12503+++ b/arch/x86/ia32/ia32_signal.c
12504@@ -340,7 +340,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
12505 sp -= frame_size;
12506 /* Align the stack pointer according to the i386 ABI,
12507 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
12508- sp = ((sp + 4) & -16ul) - 4;
12509+ sp = ((sp - 12) & -16ul) - 4;
12510 return (void __user *) sp;
12511 }
12512
12513@@ -398,7 +398,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
12514 * These are actually not used anymore, but left because some
12515 * gdb versions depend on them as a marker.
12516 */
12517- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
12518+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
12519 } put_user_catch(err);
12520
12521 if (err)
12522@@ -440,7 +440,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
12523 0xb8,
12524 __NR_ia32_rt_sigreturn,
12525 0x80cd,
12526- 0,
12527+ 0
12528 };
12529
12530 frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate);
12531@@ -459,20 +459,22 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
12532 else
12533 put_user_ex(0, &frame->uc.uc_flags);
12534 put_user_ex(0, &frame->uc.uc_link);
12535- err |= __compat_save_altstack(&frame->uc.uc_stack, regs->sp);
12536+ __compat_save_altstack_ex(&frame->uc.uc_stack, regs->sp);
12537
12538 if (ksig->ka.sa.sa_flags & SA_RESTORER)
12539 restorer = ksig->ka.sa.sa_restorer;
12540+ else if (current->mm->context.vdso)
12541+ /* Return stub is in 32bit vsyscall page */
12542+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
12543 else
12544- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
12545- rt_sigreturn);
12546+ restorer = &frame->retcode;
12547 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
12548
12549 /*
12550 * Not actually used anymore, but left because some gdb
12551 * versions need it.
12552 */
12553- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
12554+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
12555 } put_user_catch(err);
12556
12557 err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
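Two of the ia32_signal.c hunks are cosmetic (const-qualifying the retcode copy, dropping a trailing comma); the functional ones re-route the sigreturn restorer and change the frame placement from ((sp + 4) & -16ul) - 4 to ((sp - 12) & -16ul) - 4. Both expressions satisfy the i386 ABI entry condition ((sp + 4) & 15) == 0; the checkable difference is that the new form always leaves 16-31 bytes of slack below the incoming stack pointer where the old one left 0-15. The patch does not state the rationale, so the sketch below only demonstrates the invariants, not the motivation:

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        for (unsigned long sp = 4096; sp < 4096 + 64; sp++) {
            unsigned long oldf = ((sp + 4) & -16ul) - 4;
            unsigned long newf = ((sp - 12) & -16ul) - 4;

            /* both keep the ABI alignment at function entry */
            assert(((oldf + 4) & 15) == 0);
            assert(((newf + 4) & 15) == 0);

            /* old form may place the frame flush with sp; new form cannot */
            assert(sp - oldf <= 15);
            assert(sp - newf >= 16 && sp - newf <= 31);
        }
        printf("alignment and slack invariants hold\n");
        return 0;
    }
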
12558diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
12559index 474dc1b..9297c58 100644
12560--- a/arch/x86/ia32/ia32entry.S
12561+++ b/arch/x86/ia32/ia32entry.S
12562@@ -15,8 +15,10 @@
12563 #include <asm/irqflags.h>
12564 #include <asm/asm.h>
12565 #include <asm/smap.h>
12566+#include <asm/pgtable.h>
12567 #include <linux/linkage.h>
12568 #include <linux/err.h>
12569+#include <asm/alternative-asm.h>
12570
12571 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
12572 #include <linux/elf-em.h>
12573@@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit)
12574 ENDPROC(native_irq_enable_sysexit)
12575 #endif
12576
12577+ .macro pax_enter_kernel_user
12578+ pax_set_fptr_mask
12579+#ifdef CONFIG_PAX_MEMORY_UDEREF
12580+ call pax_enter_kernel_user
12581+#endif
12582+ .endm
12583+
12584+ .macro pax_exit_kernel_user
12585+#ifdef CONFIG_PAX_MEMORY_UDEREF
12586+ call pax_exit_kernel_user
12587+#endif
12588+#ifdef CONFIG_PAX_RANDKSTACK
12589+ pushq %rax
12590+ pushq %r11
12591+ call pax_randomize_kstack
12592+ popq %r11
12593+ popq %rax
12594+#endif
12595+ .endm
12596+
12597+ .macro pax_erase_kstack
12598+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12599+ call pax_erase_kstack
12600+#endif
12601+ .endm
12602+
12603 /*
12604 * 32bit SYSENTER instruction entry.
12605 *
12606@@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target)
12607 CFI_REGISTER rsp,rbp
12608 SWAPGS_UNSAFE_STACK
12609 movq PER_CPU_VAR(kernel_stack), %rsp
12610- addq $(KERNEL_STACK_OFFSET),%rsp
12611- /*
12612- * No need to follow this irqs on/off section: the syscall
12613- * disabled irqs, here we enable it straight after entry:
12614- */
12615- ENABLE_INTERRUPTS(CLBR_NONE)
12616 movl %ebp,%ebp /* zero extension */
12617 pushq_cfi $__USER32_DS
12618 /*CFI_REL_OFFSET ss,0*/
12619@@ -135,24 +157,49 @@ ENTRY(ia32_sysenter_target)
12620 CFI_REL_OFFSET rsp,0
12621 pushfq_cfi
12622 /*CFI_REL_OFFSET rflags,0*/
12623- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
12624- CFI_REGISTER rip,r10
12625+ orl $X86_EFLAGS_IF,(%rsp)
12626+ GET_THREAD_INFO(%r11)
12627+ movl TI_sysenter_return(%r11), %r11d
12628+ CFI_REGISTER rip,r11
12629 pushq_cfi $__USER32_CS
12630 /*CFI_REL_OFFSET cs,0*/
12631 movl %eax, %eax
12632- pushq_cfi %r10
12633+ pushq_cfi %r11
12634 CFI_REL_OFFSET rip,0
12635 pushq_cfi %rax
12636 cld
12637 SAVE_ARGS 0,1,0
12638+ pax_enter_kernel_user
12639+
12640+#ifdef CONFIG_PAX_RANDKSTACK
12641+ pax_erase_kstack
12642+#endif
12643+
12644+ /*
12645+ * No need to follow this irqs on/off section: the syscall
12646+ * disabled irqs, here we enable it straight after entry:
12647+ */
12648+ ENABLE_INTERRUPTS(CLBR_NONE)
12649 /* no need to do an access_ok check here because rbp has been
12650 32bit zero extended */
12651+
12652+#ifdef CONFIG_PAX_MEMORY_UDEREF
12653+ addq pax_user_shadow_base,%rbp
12654+ ASM_PAX_OPEN_USERLAND
12655+#endif
12656+
12657 ASM_STAC
12658 1: movl (%rbp),%ebp
12659 _ASM_EXTABLE(1b,ia32_badarg)
12660 ASM_CLAC
12661- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
12662- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
12663+
12664+#ifdef CONFIG_PAX_MEMORY_UDEREF
12665+ ASM_PAX_CLOSE_USERLAND
12666+#endif
12667+
12668+ GET_THREAD_INFO(%r11)
12669+ orl $TS_COMPAT,TI_status(%r11)
12670+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
12671 CFI_REMEMBER_STATE
12672 jnz sysenter_tracesys
12673 cmpq $(IA32_NR_syscalls-1),%rax
12674@@ -162,12 +209,15 @@ sysenter_do_call:
12675 sysenter_dispatch:
12676 call *ia32_sys_call_table(,%rax,8)
12677 movq %rax,RAX-ARGOFFSET(%rsp)
12678+ GET_THREAD_INFO(%r11)
12679 DISABLE_INTERRUPTS(CLBR_NONE)
12680 TRACE_IRQS_OFF
12681- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
12682+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
12683 jnz sysexit_audit
12684 sysexit_from_sys_call:
12685- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
12686+ pax_exit_kernel_user
12687+ pax_erase_kstack
12688+ andl $~TS_COMPAT,TI_status(%r11)
12689 /* clear IF, that popfq doesn't enable interrupts early */
12690 andl $~0x200,EFLAGS-R11(%rsp)
12691 movl RIP-R11(%rsp),%edx /* User %eip */
12692@@ -193,6 +243,9 @@ sysexit_from_sys_call:
12693 movl %eax,%esi /* 2nd arg: syscall number */
12694 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
12695 call __audit_syscall_entry
12696+
12697+ pax_erase_kstack
12698+
12699 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
12700 cmpq $(IA32_NR_syscalls-1),%rax
12701 ja ia32_badsys
12702@@ -204,7 +257,7 @@ sysexit_from_sys_call:
12703 .endm
12704
12705 .macro auditsys_exit exit
12706- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
12707+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
12708 jnz ia32_ret_from_sys_call
12709 TRACE_IRQS_ON
12710 ENABLE_INTERRUPTS(CLBR_NONE)
12711@@ -215,11 +268,12 @@ sysexit_from_sys_call:
12712 1: setbe %al /* 1 if error, 0 if not */
12713 movzbl %al,%edi /* zero-extend that into %edi */
12714 call __audit_syscall_exit
12715+ GET_THREAD_INFO(%r11)
12716 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
12717 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
12718 DISABLE_INTERRUPTS(CLBR_NONE)
12719 TRACE_IRQS_OFF
12720- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
12721+ testl %edi,TI_flags(%r11)
12722 jz \exit
12723 CLEAR_RREGS -ARGOFFSET
12724 jmp int_with_check
12725@@ -237,7 +291,7 @@ sysexit_audit:
12726
12727 sysenter_tracesys:
12728 #ifdef CONFIG_AUDITSYSCALL
12729- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
12730+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
12731 jz sysenter_auditsys
12732 #endif
12733 SAVE_REST
12734@@ -249,6 +303,9 @@ sysenter_tracesys:
12735 RESTORE_REST
12736 cmpq $(IA32_NR_syscalls-1),%rax
12737 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
12738+
12739+ pax_erase_kstack
12740+
12741 jmp sysenter_do_call
12742 CFI_ENDPROC
12743 ENDPROC(ia32_sysenter_target)
12744@@ -276,19 +333,25 @@ ENDPROC(ia32_sysenter_target)
12745 ENTRY(ia32_cstar_target)
12746 CFI_STARTPROC32 simple
12747 CFI_SIGNAL_FRAME
12748- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
12749+ CFI_DEF_CFA rsp,0
12750 CFI_REGISTER rip,rcx
12751 /*CFI_REGISTER rflags,r11*/
12752 SWAPGS_UNSAFE_STACK
12753 movl %esp,%r8d
12754 CFI_REGISTER rsp,r8
12755 movq PER_CPU_VAR(kernel_stack),%rsp
12756+ SAVE_ARGS 8*6,0,0
12757+ pax_enter_kernel_user
12758+
12759+#ifdef CONFIG_PAX_RANDKSTACK
12760+ pax_erase_kstack
12761+#endif
12762+
12763 /*
12764 * No need to follow this irqs on/off section: the syscall
12765 * disabled irqs and here we enable it straight after entry:
12766 */
12767 ENABLE_INTERRUPTS(CLBR_NONE)
12768- SAVE_ARGS 8,0,0
12769 movl %eax,%eax /* zero extension */
12770 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
12771 movq %rcx,RIP-ARGOFFSET(%rsp)
12772@@ -304,12 +367,25 @@ ENTRY(ia32_cstar_target)
12773 /* no need to do an access_ok check here because r8 has been
12774 32bit zero extended */
12775 /* hardware stack frame is complete now */
12776+
12777+#ifdef CONFIG_PAX_MEMORY_UDEREF
12778+ ASM_PAX_OPEN_USERLAND
12779+ movq pax_user_shadow_base,%r8
12780+ addq RSP-ARGOFFSET(%rsp),%r8
12781+#endif
12782+
12783 ASM_STAC
12784 1: movl (%r8),%r9d
12785 _ASM_EXTABLE(1b,ia32_badarg)
12786 ASM_CLAC
12787- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
12788- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
12789+
12790+#ifdef CONFIG_PAX_MEMORY_UDEREF
12791+ ASM_PAX_CLOSE_USERLAND
12792+#endif
12793+
12794+ GET_THREAD_INFO(%r11)
12795+ orl $TS_COMPAT,TI_status(%r11)
12796+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
12797 CFI_REMEMBER_STATE
12798 jnz cstar_tracesys
12799 cmpq $IA32_NR_syscalls-1,%rax
12800@@ -319,12 +395,15 @@ cstar_do_call:
12801 cstar_dispatch:
12802 call *ia32_sys_call_table(,%rax,8)
12803 movq %rax,RAX-ARGOFFSET(%rsp)
12804+ GET_THREAD_INFO(%r11)
12805 DISABLE_INTERRUPTS(CLBR_NONE)
12806 TRACE_IRQS_OFF
12807- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
12808+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
12809 jnz sysretl_audit
12810 sysretl_from_sys_call:
12811- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
12812+ pax_exit_kernel_user
12813+ pax_erase_kstack
12814+ andl $~TS_COMPAT,TI_status(%r11)
12815 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
12816 movl RIP-ARGOFFSET(%rsp),%ecx
12817 CFI_REGISTER rip,rcx
12818@@ -352,7 +431,7 @@ sysretl_audit:
12819
12820 cstar_tracesys:
12821 #ifdef CONFIG_AUDITSYSCALL
12822- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
12823+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
12824 jz cstar_auditsys
12825 #endif
12826 xchgl %r9d,%ebp
12827@@ -366,11 +445,19 @@ cstar_tracesys:
12828 xchgl %ebp,%r9d
12829 cmpq $(IA32_NR_syscalls-1),%rax
12830 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
12831+
12832+ pax_erase_kstack
12833+
12834 jmp cstar_do_call
12835 END(ia32_cstar_target)
12836
12837 ia32_badarg:
12838 ASM_CLAC
12839+
12840+#ifdef CONFIG_PAX_MEMORY_UDEREF
12841+ ASM_PAX_CLOSE_USERLAND
12842+#endif
12843+
12844 movq $-EFAULT,%rax
12845 jmp ia32_sysret
12846 CFI_ENDPROC
12847@@ -407,19 +494,26 @@ ENTRY(ia32_syscall)
12848 CFI_REL_OFFSET rip,RIP-RIP
12849 PARAVIRT_ADJUST_EXCEPTION_FRAME
12850 SWAPGS
12851- /*
12852- * No need to follow this irqs on/off section: the syscall
12853- * disabled irqs and here we enable it straight after entry:
12854- */
12855- ENABLE_INTERRUPTS(CLBR_NONE)
12856 movl %eax,%eax
12857 pushq_cfi %rax
12858 cld
12859 /* note the registers are not zero extended to the sf.
12860 this could be a problem. */
12861 SAVE_ARGS 0,1,0
12862- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
12863- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
12864+ pax_enter_kernel_user
12865+
12866+#ifdef CONFIG_PAX_RANDKSTACK
12867+ pax_erase_kstack
12868+#endif
12869+
12870+ /*
12871+ * No need to follow this irqs on/off section: the syscall
12872+ * disabled irqs and here we enable it straight after entry:
12873+ */
12874+ ENABLE_INTERRUPTS(CLBR_NONE)
12875+ GET_THREAD_INFO(%r11)
12876+ orl $TS_COMPAT,TI_status(%r11)
12877+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
12878 jnz ia32_tracesys
12879 cmpq $(IA32_NR_syscalls-1),%rax
12880 ja ia32_badsys
12881@@ -442,6 +536,9 @@ ia32_tracesys:
12882 RESTORE_REST
12883 cmpq $(IA32_NR_syscalls-1),%rax
12884 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
12885+
12886+ pax_erase_kstack
12887+
12888 jmp ia32_do_call
12889 END(ia32_syscall)
12890
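The ia32entry.S changes thread a set of hook macros (pax_enter_kernel_user, pax_exit_kernel_user, pax_erase_kstack) through every 32-bit entry path, each expanding to nothing unless the matching PAX feature is configured, and rewrite the TI_flags accesses to go through GET_THREAD_INFO(%r11) now that the stack offset bookkeeping has changed. The erase hook is the STACKLEAK idea: scrub the syscall-dirtied part of the kernel stack on exit so a later uninitialized-memory leak can only disclose zeroes. A userspace model of that scrubbing, with illustrative names:

    #include <string.h>

    #define KSTACK_SIZE 8192

    /* model: the stack grows down from stack_low + KSTACK_SIZE; everything
     * between the low end and the current stack pointer is dead data left
     * behind by the deepest call chain of the last syscall */
    static void erase_kstack(char *stack_low, char *current_sp)
    {
        memset(stack_low, 0, (size_t)(current_sp - stack_low));
    }

    int main(void)
    {
        static char stack[KSTACK_SIZE];
        char *sp = stack + KSTACK_SIZE - 512;   /* 512 bytes still live */

        stack[0] = 0x41;                        /* pretend leftover secret */
        erase_kstack(stack, sp);
        return stack[0];                        /* now zero */
    }
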
12891diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
12892index 8e0ceec..af13504 100644
12893--- a/arch/x86/ia32/sys_ia32.c
12894+++ b/arch/x86/ia32/sys_ia32.c
12895@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
12896 */
12897 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
12898 {
12899- typeof(ubuf->st_uid) uid = 0;
12900- typeof(ubuf->st_gid) gid = 0;
12901+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
12902+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
12903 SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
12904 SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
12905 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
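The cp_stat64() change swaps typeof(ubuf->st_uid) for typeof(((struct stat64 *)0)->st_uid). Both name the same member type and neither evaluates its operand, but the second form never mentions the __user-qualified pointer, which (presumably; the patch does not say) keeps address-space checkers and the grsecurity GCC plugins from seeing what looks like a userland dereference. The null-pointer idiom in isolation, with a stand-in struct:

    #include <stdio.h>
    #include <sys/types.h>

    struct stat64_like { uid_t st_uid; gid_t st_gid; };

    int main(void)
    {
        /* typeof's operand is unevaluated, so the null pointer is never
         * dereferenced; this merely names the type of the st_uid member */
        typeof(((struct stat64_like *)0)->st_uid) uid = 0;

        printf("%zu\n", sizeof(uid));
        return 0;
    }
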
12906diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
12907index 372231c..a5aa1a1 100644
12908--- a/arch/x86/include/asm/alternative-asm.h
12909+++ b/arch/x86/include/asm/alternative-asm.h
12910@@ -18,6 +18,45 @@
12911 .endm
12912 #endif
12913
12914+#ifdef KERNEXEC_PLUGIN
12915+ .macro pax_force_retaddr_bts rip=0
12916+ btsq $63,\rip(%rsp)
12917+ .endm
12918+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
12919+ .macro pax_force_retaddr rip=0, reload=0
12920+ btsq $63,\rip(%rsp)
12921+ .endm
12922+ .macro pax_force_fptr ptr
12923+ btsq $63,\ptr
12924+ .endm
12925+ .macro pax_set_fptr_mask
12926+ .endm
12927+#endif
12928+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
12929+ .macro pax_force_retaddr rip=0, reload=0
12930+ .if \reload
12931+ pax_set_fptr_mask
12932+ .endif
12933+ orq %r10,\rip(%rsp)
12934+ .endm
12935+ .macro pax_force_fptr ptr
12936+ orq %r10,\ptr
12937+ .endm
12938+ .macro pax_set_fptr_mask
12939+ movabs $0x8000000000000000,%r10
12940+ .endm
12941+#endif
12942+#else
12943+ .macro pax_force_retaddr rip=0, reload=0
12944+ .endm
12945+ .macro pax_force_fptr ptr
12946+ .endm
12947+ .macro pax_force_retaddr_bts rip=0
12948+ .endm
12949+ .macro pax_set_fptr_mask
12950+ .endm
12951+#endif
12952+
12953 .macro altinstruction_entry orig alt feature orig_len alt_len
12954 .long \orig - .
12955 .long \alt - .
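This hunk is where the pax_force_retaddr family used throughout the crypto files is actually defined: outside KERNEXEC_PLUGIN builds every macro is empty, and with the plugin there are two methods, BTS (btsq $63 applied directly to the saved slot) and OR (a mask preloaded into %r10 by pax_set_fptr_mask, applied with orq). The OR variant trades an occasional reload of %r10 (the reload argument) for a cheaper or at each ret. A compilable sketch of the two taggings, assuming x86-64 GCC inline asm:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t tag_bts(uint64_t ret)
    {
        asm("btsq $63, %0" : "+r"(ret));            /* BTS method */
        return ret;
    }

    static uint64_t tag_or(uint64_t ret)
    {
        uint64_t mask = 0x8000000000000000ULL;      /* pax_set_fptr_mask */
        asm("orq %1, %0" : "+r"(ret) : "r"(mask));  /* OR method */
        return ret;
    }

    int main(void)
    {
        uint64_t r = 0x400000;  /* a userland-looking return address */
        printf("%#llx %#llx\n", (unsigned long long)tag_bts(r),
               (unsigned long long)tag_or(r));      /* identical results */
        return 0;
    }
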
12956diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
12957index 58ed6d9..f1cbe58 100644
12958--- a/arch/x86/include/asm/alternative.h
12959+++ b/arch/x86/include/asm/alternative.h
12960@@ -105,7 +105,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
12961 ".pushsection .discard,\"aw\",@progbits\n" \
12962 DISCARD_ENTRY(1) \
12963 ".popsection\n" \
12964- ".pushsection .altinstr_replacement, \"ax\"\n" \
12965+ ".pushsection .altinstr_replacement, \"a\"\n" \
12966 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
12967 ".popsection"
12968
12969@@ -119,7 +119,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
12970 DISCARD_ENTRY(1) \
12971 DISCARD_ENTRY(2) \
12972 ".popsection\n" \
12973- ".pushsection .altinstr_replacement, \"ax\"\n" \
12974+ ".pushsection .altinstr_replacement, \"a\"\n" \
12975 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
12976 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
12977 ".popsection"
12978diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
12979index 3388034..050f0b9 100644
12980--- a/arch/x86/include/asm/apic.h
12981+++ b/arch/x86/include/asm/apic.h
12982@@ -44,7 +44,7 @@ static inline void generic_apic_probe(void)
12983
12984 #ifdef CONFIG_X86_LOCAL_APIC
12985
12986-extern unsigned int apic_verbosity;
12987+extern int apic_verbosity;
12988 extern int local_apic_timer_c2_ok;
12989
12990 extern int disable_apic;
12991diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
12992index 20370c6..a2eb9b0 100644
12993--- a/arch/x86/include/asm/apm.h
12994+++ b/arch/x86/include/asm/apm.h
12995@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
12996 __asm__ __volatile__(APM_DO_ZERO_SEGS
12997 "pushl %%edi\n\t"
12998 "pushl %%ebp\n\t"
12999- "lcall *%%cs:apm_bios_entry\n\t"
13000+ "lcall *%%ss:apm_bios_entry\n\t"
13001 "setc %%al\n\t"
13002 "popl %%ebp\n\t"
13003 "popl %%edi\n\t"
13004@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
13005 __asm__ __volatile__(APM_DO_ZERO_SEGS
13006 "pushl %%edi\n\t"
13007 "pushl %%ebp\n\t"
13008- "lcall *%%cs:apm_bios_entry\n\t"
13009+ "lcall *%%ss:apm_bios_entry\n\t"
13010 "setc %%bl\n\t"
13011 "popl %%ebp\n\t"
13012 "popl %%edi\n\t"
13013diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
13014index 722aa3b..3a0bb27 100644
13015--- a/arch/x86/include/asm/atomic.h
13016+++ b/arch/x86/include/asm/atomic.h
13017@@ -22,7 +22,18 @@
13018 */
13019 static inline int atomic_read(const atomic_t *v)
13020 {
13021- return (*(volatile int *)&(v)->counter);
13022+ return (*(volatile const int *)&(v)->counter);
13023+}
13024+
13025+/**
13026+ * atomic_read_unchecked - read atomic variable
13027+ * @v: pointer of type atomic_unchecked_t
13028+ *
13029+ * Atomically reads the value of @v.
13030+ */
13031+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
13032+{
13033+ return (*(volatile const int *)&(v)->counter);
13034 }
13035
13036 /**
13037@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
13038 }
13039
13040 /**
13041+ * atomic_set_unchecked - set atomic variable
13042+ * @v: pointer of type atomic_unchecked_t
13043+ * @i: required value
13044+ *
13045+ * Atomically sets the value of @v to @i.
13046+ */
13047+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
13048+{
13049+ v->counter = i;
13050+}
13051+
13052+/**
13053 * atomic_add - add integer to atomic variable
13054 * @i: integer value to add
13055 * @v: pointer of type atomic_t
13056@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
13057 */
13058 static inline void atomic_add(int i, atomic_t *v)
13059 {
13060- asm volatile(LOCK_PREFIX "addl %1,%0"
13061+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
13062+
13063+#ifdef CONFIG_PAX_REFCOUNT
13064+ "jno 0f\n"
13065+ LOCK_PREFIX "subl %1,%0\n"
13066+ "int $4\n0:\n"
13067+ _ASM_EXTABLE(0b, 0b)
13068+#endif
13069+
13070+ : "+m" (v->counter)
13071+ : "ir" (i));
13072+}
13073+
13074+/**
13075+ * atomic_add_unchecked - add integer to atomic variable
13076+ * @i: integer value to add
13077+ * @v: pointer of type atomic_unchecked_t
13078+ *
13079+ * Atomically adds @i to @v.
13080+ */
13081+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
13082+{
13083+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
13084 : "+m" (v->counter)
13085 : "ir" (i));
13086 }
13087@@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
13088 */
13089 static inline void atomic_sub(int i, atomic_t *v)
13090 {
13091- asm volatile(LOCK_PREFIX "subl %1,%0"
13092+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
13093+
13094+#ifdef CONFIG_PAX_REFCOUNT
13095+ "jno 0f\n"
13096+ LOCK_PREFIX "addl %1,%0\n"
13097+ "int $4\n0:\n"
13098+ _ASM_EXTABLE(0b, 0b)
13099+#endif
13100+
13101+ : "+m" (v->counter)
13102+ : "ir" (i));
13103+}
13104+
13105+/**
13106+ * atomic_sub_unchecked - subtract integer from atomic variable
13107+ * @i: integer value to subtract
13108+ * @v: pointer of type atomic_unchecked_t
13109+ *
13110+ * Atomically subtracts @i from @v.
13111+ */
13112+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
13113+{
13114+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
13115 : "+m" (v->counter)
13116 : "ir" (i));
13117 }
13118@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
13119 {
13120 unsigned char c;
13121
13122- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
13123+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
13124+
13125+#ifdef CONFIG_PAX_REFCOUNT
13126+ "jno 0f\n"
13127+ LOCK_PREFIX "addl %2,%0\n"
13128+ "int $4\n0:\n"
13129+ _ASM_EXTABLE(0b, 0b)
13130+#endif
13131+
13132+ "sete %1\n"
13133 : "+m" (v->counter), "=qm" (c)
13134 : "ir" (i) : "memory");
13135 return c;
13136@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
13137 */
13138 static inline void atomic_inc(atomic_t *v)
13139 {
13140- asm volatile(LOCK_PREFIX "incl %0"
13141+ asm volatile(LOCK_PREFIX "incl %0\n"
13142+
13143+#ifdef CONFIG_PAX_REFCOUNT
13144+ "jno 0f\n"
13145+ LOCK_PREFIX "decl %0\n"
13146+ "int $4\n0:\n"
13147+ _ASM_EXTABLE(0b, 0b)
13148+#endif
13149+
13150+ : "+m" (v->counter));
13151+}
13152+
13153+/**
13154+ * atomic_inc_unchecked - increment atomic variable
13155+ * @v: pointer of type atomic_unchecked_t
13156+ *
13157+ * Atomically increments @v by 1.
13158+ */
13159+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
13160+{
13161+ asm volatile(LOCK_PREFIX "incl %0\n"
13162 : "+m" (v->counter));
13163 }
13164
13165@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
13166 */
13167 static inline void atomic_dec(atomic_t *v)
13168 {
13169- asm volatile(LOCK_PREFIX "decl %0"
13170+ asm volatile(LOCK_PREFIX "decl %0\n"
13171+
13172+#ifdef CONFIG_PAX_REFCOUNT
13173+ "jno 0f\n"
13174+ LOCK_PREFIX "incl %0\n"
13175+ "int $4\n0:\n"
13176+ _ASM_EXTABLE(0b, 0b)
13177+#endif
13178+
13179+ : "+m" (v->counter));
13180+}
13181+
13182+/**
13183+ * atomic_dec_unchecked - decrement atomic variable
13184+ * @v: pointer of type atomic_unchecked_t
13185+ *
13186+ * Atomically decrements @v by 1.
13187+ */
13188+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
13189+{
13190+ asm volatile(LOCK_PREFIX "decl %0\n"
13191 : "+m" (v->counter));
13192 }
13193
13194@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
13195 {
13196 unsigned char c;
13197
13198- asm volatile(LOCK_PREFIX "decl %0; sete %1"
13199+ asm volatile(LOCK_PREFIX "decl %0\n"
13200+
13201+#ifdef CONFIG_PAX_REFCOUNT
13202+ "jno 0f\n"
13203+ LOCK_PREFIX "incl %0\n"
13204+ "int $4\n0:\n"
13205+ _ASM_EXTABLE(0b, 0b)
13206+#endif
13207+
13208+ "sete %1\n"
13209 : "+m" (v->counter), "=qm" (c)
13210 : : "memory");
13211 return c != 0;
13212@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
13213 {
13214 unsigned char c;
13215
13216- asm volatile(LOCK_PREFIX "incl %0; sete %1"
13217+ asm volatile(LOCK_PREFIX "incl %0\n"
13218+
13219+#ifdef CONFIG_PAX_REFCOUNT
13220+ "jno 0f\n"
13221+ LOCK_PREFIX "decl %0\n"
13222+ "int $4\n0:\n"
13223+ _ASM_EXTABLE(0b, 0b)
13224+#endif
13225+
13226+ "sete %1\n"
13227+ : "+m" (v->counter), "=qm" (c)
13228+ : : "memory");
13229+ return c != 0;
13230+}
13231+
13232+/**
13233+ * atomic_inc_and_test_unchecked - increment and test
13234+ * @v: pointer of type atomic_unchecked_t
13235+ *
13236+ * Atomically increments @v by 1
13237+ * and returns true if the result is zero, or false for all
13238+ * other cases.
13239+ */
13240+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
13241+{
13242+ unsigned char c;
13243+
13244+ asm volatile(LOCK_PREFIX "incl %0\n"
13245+ "sete %1\n"
13246 : "+m" (v->counter), "=qm" (c)
13247 : : "memory");
13248 return c != 0;
13249@@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
13250 {
13251 unsigned char c;
13252
13253- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
13254+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
13255+
13256+#ifdef CONFIG_PAX_REFCOUNT
13257+ "jno 0f\n"
13258+ LOCK_PREFIX "subl %2,%0\n"
13259+ "int $4\n0:\n"
13260+ _ASM_EXTABLE(0b, 0b)
13261+#endif
13262+
13263+ "sets %1\n"
13264 : "+m" (v->counter), "=qm" (c)
13265 : "ir" (i) : "memory");
13266 return c;
13267@@ -172,6 +334,18 @@ static inline int atomic_add_negative(int i, atomic_t *v)
13268 */
13269 static inline int atomic_add_return(int i, atomic_t *v)
13270 {
13271+ return i + xadd_check_overflow(&v->counter, i);
13272+}
13273+
13274+/**
13275+ * atomic_add_return_unchecked - add integer and return
13276+ * @i: integer value to add
13277+ * @v: pointer of type atomic_unchecked_t
13278+ *
13279+ * Atomically adds @i to @v and returns @i + @v
13280+ */
13281+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
13282+{
13283 return i + xadd(&v->counter, i);
13284 }
13285
13286@@ -188,6 +362,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
13287 }
13288
13289 #define atomic_inc_return(v) (atomic_add_return(1, v))
13290+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
13291+{
13292+ return atomic_add_return_unchecked(1, v);
13293+}
13294 #define atomic_dec_return(v) (atomic_sub_return(1, v))
13295
13296 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
13297@@ -195,11 +373,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
13298 return cmpxchg(&v->counter, old, new);
13299 }
13300
13301+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
13302+{
13303+ return cmpxchg(&v->counter, old, new);
13304+}
13305+
13306 static inline int atomic_xchg(atomic_t *v, int new)
13307 {
13308 return xchg(&v->counter, new);
13309 }
13310
13311+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
13312+{
13313+ return xchg(&v->counter, new);
13314+}
13315+
13316 /**
13317 * __atomic_add_unless - add unless the number is already a given value
13318 * @v: pointer of type atomic_t
13319@@ -211,12 +399,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
13320 */
13321 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
13322 {
13323- int c, old;
13324+ int c, old, new;
13325 c = atomic_read(v);
13326 for (;;) {
13327- if (unlikely(c == (u)))
13328+ if (unlikely(c == u))
13329 break;
13330- old = atomic_cmpxchg((v), c, c + (a));
13331+
13332+ asm volatile("addl %2,%0\n"
13333+
13334+#ifdef CONFIG_PAX_REFCOUNT
13335+ "jno 0f\n"
13336+ "subl %2,%0\n"
13337+ "int $4\n0:\n"
13338+ _ASM_EXTABLE(0b, 0b)
13339+#endif
13340+
13341+ : "=r" (new)
13342+ : "0" (c), "ir" (a));
13343+
13344+ old = atomic_cmpxchg(v, c, new);
13345 if (likely(old == c))
13346 break;
13347 c = old;
13348@@ -225,6 +426,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
13349 }
13350
13351 /**
13352+ * atomic_inc_not_zero_hint - increment if not null
13353+ * @v: pointer of type atomic_t
13354+ * @hint: probable value of the atomic before the increment
13355+ *
13356+ * This version of atomic_inc_not_zero() gives a hint of probable
13357+ * value of the atomic. This helps processor to not read the memory
13358+ * before doing the atomic read/modify/write cycle, lowering
13359+ * number of bus transactions on some arches.
13360+ *
13361+ * Returns: 0 if increment was not done, 1 otherwise.
13362+ */
13363+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
13364+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
13365+{
13366+ int val, c = hint, new;
13367+
13368+ /* sanity test, should be removed by compiler if hint is a constant */
13369+ if (!hint)
13370+ return __atomic_add_unless(v, 1, 0);
13371+
13372+ do {
13373+ asm volatile("incl %0\n"
13374+
13375+#ifdef CONFIG_PAX_REFCOUNT
13376+ "jno 0f\n"
13377+ "decl %0\n"
13378+ "int $4\n0:\n"
13379+ _ASM_EXTABLE(0b, 0b)
13380+#endif
13381+
13382+ : "=r" (new)
13383+ : "0" (c));
13384+
13385+ val = atomic_cmpxchg(v, c, new);
13386+ if (val == c)
13387+ return 1;
13388+ c = val;
13389+ } while (c);
13390+
13391+ return 0;
13392+}
13393+
13394+/**
13395 * atomic_inc_short - increment of a short integer
13396 * @v: pointer to type int
13397 *
13398@@ -253,14 +497,37 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
13399 #endif
13400
13401 /* These are x86-specific, used by some header files */
13402-#define atomic_clear_mask(mask, addr) \
13403- asm volatile(LOCK_PREFIX "andl %0,%1" \
13404- : : "r" (~(mask)), "m" (*(addr)) : "memory")
13405+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
13406+{
13407+ asm volatile(LOCK_PREFIX "andl %1,%0"
13408+ : "+m" (v->counter)
13409+ : "r" (~(mask))
13410+ : "memory");
13411+}
13412
13413-#define atomic_set_mask(mask, addr) \
13414- asm volatile(LOCK_PREFIX "orl %0,%1" \
13415- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
13416- : "memory")
13417+static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
13418+{
13419+ asm volatile(LOCK_PREFIX "andl %1,%0"
13420+ : "+m" (v->counter)
13421+ : "r" (~(mask))
13422+ : "memory");
13423+}
13424+
13425+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
13426+{
13427+ asm volatile(LOCK_PREFIX "orl %1,%0"
13428+ : "+m" (v->counter)
13429+ : "r" (mask)
13430+ : "memory");
13431+}
13432+
13433+static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
13434+{
13435+ asm volatile(LOCK_PREFIX "orl %1,%0"
13436+ : "+m" (v->counter)
13437+ : "r" (mask)
13438+ : "memory");
13439+}
13440
13441 /* Atomic operations are already serializing on x86 */
13442 #define smp_mb__before_atomic_dec() barrier()
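The atomic.h rewrite has one recurring shape: under CONFIG_PAX_REFCOUNT each arithmetic op is followed by jno 0f, an undo of the op, and int $4, so a signed overflow of an atomic_t raises the overflow trap with the counter already restored, closing the classic refcount-wrap-to-use-after-free window; every op also gains an *_unchecked twin that skips the check. A portable userspace model of the check-and-undo semantics (the kernel does it in the inline asm itself, branching on the CPU's overflow flag):

    #include <stdio.h>
    #include <stdbool.h>
    #include <limits.h>

    /* model of the PAX_REFCOUNT pattern: perform the add, and if the signed
     * result would overflow, leave the counter untouched and report instead
     * of letting it wrap; the kernel raises the trap at that point */
    static bool checked_add(int *counter, int i)
    {
        int newval;
        if (__builtin_add_overflow(*counter, i, &newval))
            return false;       /* kernel: undo op, then int $4 */
        *counter = newval;
        return true;
    }

    int main(void)
    {
        int c = INT_MAX - 1;
        printf("%d\n", checked_add(&c, 1));   /* 1: reaches INT_MAX */
        printf("%d\n", checked_add(&c, 1));   /* 0: would wrap, refused */
        return 0;
    }
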
13443diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
13444index b154de7..aadebd8 100644
13445--- a/arch/x86/include/asm/atomic64_32.h
13446+++ b/arch/x86/include/asm/atomic64_32.h
13447@@ -12,6 +12,14 @@ typedef struct {
13448 u64 __aligned(8) counter;
13449 } atomic64_t;
13450
13451+#ifdef CONFIG_PAX_REFCOUNT
13452+typedef struct {
13453+ u64 __aligned(8) counter;
13454+} atomic64_unchecked_t;
13455+#else
13456+typedef atomic64_t atomic64_unchecked_t;
13457+#endif
13458+
13459 #define ATOMIC64_INIT(val) { (val) }
13460
13461 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
13462@@ -37,21 +45,31 @@ typedef struct {
13463 ATOMIC64_DECL_ONE(sym##_386)
13464
13465 ATOMIC64_DECL_ONE(add_386);
13466+ATOMIC64_DECL_ONE(add_unchecked_386);
13467 ATOMIC64_DECL_ONE(sub_386);
13468+ATOMIC64_DECL_ONE(sub_unchecked_386);
13469 ATOMIC64_DECL_ONE(inc_386);
13470+ATOMIC64_DECL_ONE(inc_unchecked_386);
13471 ATOMIC64_DECL_ONE(dec_386);
13472+ATOMIC64_DECL_ONE(dec_unchecked_386);
13473 #endif
13474
13475 #define alternative_atomic64(f, out, in...) \
13476 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
13477
13478 ATOMIC64_DECL(read);
13479+ATOMIC64_DECL(read_unchecked);
13480 ATOMIC64_DECL(set);
13481+ATOMIC64_DECL(set_unchecked);
13482 ATOMIC64_DECL(xchg);
13483 ATOMIC64_DECL(add_return);
13484+ATOMIC64_DECL(add_return_unchecked);
13485 ATOMIC64_DECL(sub_return);
13486+ATOMIC64_DECL(sub_return_unchecked);
13487 ATOMIC64_DECL(inc_return);
13488+ATOMIC64_DECL(inc_return_unchecked);
13489 ATOMIC64_DECL(dec_return);
13490+ATOMIC64_DECL(dec_return_unchecked);
13491 ATOMIC64_DECL(dec_if_positive);
13492 ATOMIC64_DECL(inc_not_zero);
13493 ATOMIC64_DECL(add_unless);
13494@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
13495 }
13496
13497 /**
13498+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
13499+ * @p: pointer to type atomic64_unchecked_t
13500+ * @o: expected value
13501+ * @n: new value
13502+ *
13503+ * Atomically sets @v to @n if it was equal to @o and returns
13504+ * the old value.
13505+ */
13506+
13507+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
13508+{
13509+ return cmpxchg64(&v->counter, o, n);
13510+}
13511+
13512+/**
13513 * atomic64_xchg - xchg atomic64 variable
13514 * @v: pointer to type atomic64_t
13515 * @n: value to assign
13516@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
13517 }
13518
13519 /**
13520+ * atomic64_set_unchecked - set atomic64 variable
13521+ * @v: pointer to type atomic64_unchecked_t
13522+ * @n: value to assign
13523+ *
13524+ * Atomically sets the value of @v to @n.
13525+ */
13526+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
13527+{
13528+ unsigned high = (unsigned)(i >> 32);
13529+ unsigned low = (unsigned)i;
13530+ alternative_atomic64(set, /* no output */,
13531+ "S" (v), "b" (low), "c" (high)
13532+ : "eax", "edx", "memory");
13533+}
13534+
13535+/**
13536 * atomic64_read - read atomic64 variable
13537 * @v: pointer to type atomic64_t
13538 *
13539@@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
13540 }
13541
13542 /**
13543+ * atomic64_read_unchecked - read atomic64 variable
13544+ * @v: pointer to type atomic64_unchecked_t
13545+ *
13546+ * Atomically reads the value of @v and returns it.
13547+ */
13548+static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
13549+{
13550+ long long r;
13551+ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
13552+ return r;
13553+}
13554+
13555+/**
13556 * atomic64_add_return - add and return
13557 * @i: integer value to add
13558 * @v: pointer to type atomic64_t
13559@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
13560 return i;
13561 }
13562
13563+/**
13564+ * atomic64_add_return_unchecked - add and return
13565+ * @i: integer value to add
13566+ * @v: pointer to type atomic64_unchecked_t
13567+ *
13568+ * Atomically adds @i to @v and returns @i + *@v
13569+ */
13570+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
13571+{
13572+ alternative_atomic64(add_return_unchecked,
13573+ ASM_OUTPUT2("+A" (i), "+c" (v)),
13574+ ASM_NO_INPUT_CLOBBER("memory"));
13575+ return i;
13576+}
13577+
13578 /*
13579 * Other variants with different arithmetic operators:
13580 */
13581@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
13582 return a;
13583 }
13584
13585+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
13586+{
13587+ long long a;
13588+ alternative_atomic64(inc_return_unchecked, "=&A" (a),
13589+ "S" (v) : "memory", "ecx");
13590+ return a;
13591+}
13592+
13593 static inline long long atomic64_dec_return(atomic64_t *v)
13594 {
13595 long long a;
13596@@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
13597 }
13598
13599 /**
13600+ * atomic64_add_unchecked - add integer to atomic64 variable
13601+ * @i: integer value to add
13602+ * @v: pointer to type atomic64_unchecked_t
13603+ *
13604+ * Atomically adds @i to @v.
13605+ */
13606+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
13607+{
13608+ __alternative_atomic64(add_unchecked, add_return_unchecked,
13609+ ASM_OUTPUT2("+A" (i), "+c" (v)),
13610+ ASM_NO_INPUT_CLOBBER("memory"));
13611+ return i;
13612+}
13613+
13614+/**
13615 * atomic64_sub - subtract the atomic64 variable
13616 * @i: integer value to subtract
13617 * @v: pointer to type atomic64_t
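
atomic64_t on 32-bit x86 cannot use lock-prefixed 64-bit arithmetic directly, so every operation above dispatches through out-of-line helpers selected by alternative_atomic64() (a cmpxchg8b implementation with a plain-386 fallback), and the patch declares an _unchecked helper beside each checked one. The helpers take 64-bit values split across 32-bit register pairs, as the "b" (low)/"c" (high) operands of atomic64_set_unchecked() show; a minimal sketch of that split (illustrative only):

#include <stdio.h>

int main(void)
{
        long long i = 0x123456789abcdef0LL;
        /* the same halving atomic64_set_unchecked() performs before
         * handing the value to the out-of-line helper */
        unsigned int low  = (unsigned int)i;
        unsigned int high = (unsigned int)(i >> 32);

        printf("high=%#x low=%#x\n", high, low);
        return 0;
}
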
13618diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
13619index 0e1cbfc..5623683 100644
13620--- a/arch/x86/include/asm/atomic64_64.h
13621+++ b/arch/x86/include/asm/atomic64_64.h
13622@@ -18,7 +18,19 @@
13623 */
13624 static inline long atomic64_read(const atomic64_t *v)
13625 {
13626- return (*(volatile long *)&(v)->counter);
13627+ return (*(volatile const long *)&(v)->counter);
13628+}
13629+
13630+/**
13631+ * atomic64_read_unchecked - read atomic64 variable
13632+ * @v: pointer of type atomic64_unchecked_t
13633+ *
13634+ * Atomically reads the value of @v.
13635+ * Doesn't imply a read memory barrier.
13636+ */
13637+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
13638+{
13639+ return (*(volatile const long *)&(v)->counter);
13640 }
13641
13642 /**
13643@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
13644 }
13645
13646 /**
13647+ * atomic64_set_unchecked - set atomic64 variable
13648+ * @v: pointer to type atomic64_unchecked_t
13649+ * @i: required value
13650+ *
13651+ * Atomically sets the value of @v to @i.
13652+ */
13653+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
13654+{
13655+ v->counter = i;
13656+}
13657+
13658+/**
13659 * atomic64_add - add integer to atomic64 variable
13660 * @i: integer value to add
13661 * @v: pointer to type atomic64_t
13662@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
13663 */
13664 static inline void atomic64_add(long i, atomic64_t *v)
13665 {
13666+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
13667+
13668+#ifdef CONFIG_PAX_REFCOUNT
13669+ "jno 0f\n"
13670+ LOCK_PREFIX "subq %1,%0\n"
13671+ "int $4\n0:\n"
13672+ _ASM_EXTABLE(0b, 0b)
13673+#endif
13674+
13675+ : "=m" (v->counter)
13676+ : "er" (i), "m" (v->counter));
13677+}
13678+
13679+/**
13680+ * atomic64_add_unchecked - add integer to atomic64 variable
13681+ * @i: integer value to add
13682+ * @v: pointer to type atomic64_unchecked_t
13683+ *
13684+ * Atomically adds @i to @v.
13685+ */
13686+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
13687+{
13688 asm volatile(LOCK_PREFIX "addq %1,%0"
13689 : "=m" (v->counter)
13690 : "er" (i), "m" (v->counter));
13691@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
13692 */
13693 static inline void atomic64_sub(long i, atomic64_t *v)
13694 {
13695- asm volatile(LOCK_PREFIX "subq %1,%0"
13696+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
13697+
13698+#ifdef CONFIG_PAX_REFCOUNT
13699+ "jno 0f\n"
13700+ LOCK_PREFIX "addq %1,%0\n"
13701+ "int $4\n0:\n"
13702+ _ASM_EXTABLE(0b, 0b)
13703+#endif
13704+
13705+ : "=m" (v->counter)
13706+ : "er" (i), "m" (v->counter));
13707+}
13708+
13709+/**
13710+ * atomic64_sub_unchecked - subtract the atomic64 variable
13711+ * @i: integer value to subtract
13712+ * @v: pointer to type atomic64_unchecked_t
13713+ *
13714+ * Atomically subtracts @i from @v.
13715+ */
13716+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
13717+{
13718+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
13719 : "=m" (v->counter)
13720 : "er" (i), "m" (v->counter));
13721 }
13722@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
13723 {
13724 unsigned char c;
13725
13726- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
13727+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
13728+
13729+#ifdef CONFIG_PAX_REFCOUNT
13730+ "jno 0f\n"
13731+ LOCK_PREFIX "addq %2,%0\n"
13732+ "int $4\n0:\n"
13733+ _ASM_EXTABLE(0b, 0b)
13734+#endif
13735+
13736+ "sete %1\n"
13737 : "=m" (v->counter), "=qm" (c)
13738 : "er" (i), "m" (v->counter) : "memory");
13739 return c;
13740@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
13741 */
13742 static inline void atomic64_inc(atomic64_t *v)
13743 {
13744+ asm volatile(LOCK_PREFIX "incq %0\n"
13745+
13746+#ifdef CONFIG_PAX_REFCOUNT
13747+ "jno 0f\n"
13748+ LOCK_PREFIX "decq %0\n"
13749+ "int $4\n0:\n"
13750+ _ASM_EXTABLE(0b, 0b)
13751+#endif
13752+
13753+ : "=m" (v->counter)
13754+ : "m" (v->counter));
13755+}
13756+
13757+/**
13758+ * atomic64_inc_unchecked - increment atomic64 variable
13759+ * @v: pointer to type atomic64_unchecked_t
13760+ *
13761+ * Atomically increments @v by 1.
13762+ */
13763+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
13764+{
13765 asm volatile(LOCK_PREFIX "incq %0"
13766 : "=m" (v->counter)
13767 : "m" (v->counter));
13768@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
13769 */
13770 static inline void atomic64_dec(atomic64_t *v)
13771 {
13772- asm volatile(LOCK_PREFIX "decq %0"
13773+ asm volatile(LOCK_PREFIX "decq %0\n"
13774+
13775+#ifdef CONFIG_PAX_REFCOUNT
13776+ "jno 0f\n"
13777+ LOCK_PREFIX "incq %0\n"
13778+ "int $4\n0:\n"
13779+ _ASM_EXTABLE(0b, 0b)
13780+#endif
13781+
13782+ : "=m" (v->counter)
13783+ : "m" (v->counter));
13784+}
13785+
13786+/**
13787+ * atomic64_dec_unchecked - decrement atomic64 variable
13788+ * @v: pointer to type atomic64_unchecked_t
13789+ *
13790+ * Atomically decrements @v by 1.
13791+ */
13792+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
13793+{
13794+ asm volatile(LOCK_PREFIX "decq %0\n"
13795 : "=m" (v->counter)
13796 : "m" (v->counter));
13797 }
13798@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
13799 {
13800 unsigned char c;
13801
13802- asm volatile(LOCK_PREFIX "decq %0; sete %1"
13803+ asm volatile(LOCK_PREFIX "decq %0\n"
13804+
13805+#ifdef CONFIG_PAX_REFCOUNT
13806+ "jno 0f\n"
13807+ LOCK_PREFIX "incq %0\n"
13808+ "int $4\n0:\n"
13809+ _ASM_EXTABLE(0b, 0b)
13810+#endif
13811+
13812+ "sete %1\n"
13813 : "=m" (v->counter), "=qm" (c)
13814 : "m" (v->counter) : "memory");
13815 return c != 0;
13816@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
13817 {
13818 unsigned char c;
13819
13820- asm volatile(LOCK_PREFIX "incq %0; sete %1"
13821+ asm volatile(LOCK_PREFIX "incq %0\n"
13822+
13823+#ifdef CONFIG_PAX_REFCOUNT
13824+ "jno 0f\n"
13825+ LOCK_PREFIX "decq %0\n"
13826+ "int $4\n0:\n"
13827+ _ASM_EXTABLE(0b, 0b)
13828+#endif
13829+
13830+ "sete %1\n"
13831 : "=m" (v->counter), "=qm" (c)
13832 : "m" (v->counter) : "memory");
13833 return c != 0;
13834@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
13835 {
13836 unsigned char c;
13837
13838- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
13839+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
13840+
13841+#ifdef CONFIG_PAX_REFCOUNT
13842+ "jno 0f\n"
13843+ LOCK_PREFIX "subq %2,%0\n"
13844+ "int $4\n0:\n"
13845+ _ASM_EXTABLE(0b, 0b)
13846+#endif
13847+
13848+ "sets %1\n"
13849 : "=m" (v->counter), "=qm" (c)
13850 : "er" (i), "m" (v->counter) : "memory");
13851 return c;
13852@@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
13853 */
13854 static inline long atomic64_add_return(long i, atomic64_t *v)
13855 {
13856+ return i + xadd_check_overflow(&v->counter, i);
13857+}
13858+
13859+/**
13860+ * atomic64_add_return_unchecked - add and return
13861+ * @i: integer value to add
13862+ * @v: pointer to type atomic64_unchecked_t
13863+ *
13864+ * Atomically adds @i to @v and returns @i + @v
13865+ */
13866+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
13867+{
13868 return i + xadd(&v->counter, i);
13869 }
13870
13871@@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
13872 }
13873
13874 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
13875+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
13876+{
13877+ return atomic64_add_return_unchecked(1, v);
13878+}
13879 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
13880
13881 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
13882@@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
13883 return cmpxchg(&v->counter, old, new);
13884 }
13885
13886+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
13887+{
13888+ return cmpxchg(&v->counter, old, new);
13889+}
13890+
13891 static inline long atomic64_xchg(atomic64_t *v, long new)
13892 {
13893 return xchg(&v->counter, new);
13894@@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
13895 */
13896 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
13897 {
13898- long c, old;
13899+ long c, old, new;
13900 c = atomic64_read(v);
13901 for (;;) {
13902- if (unlikely(c == (u)))
13903+ if (unlikely(c == u))
13904 break;
13905- old = atomic64_cmpxchg((v), c, c + (a));
13906+
13907+ asm volatile("add %2,%0\n"
13908+
13909+#ifdef CONFIG_PAX_REFCOUNT
13910+ "jno 0f\n"
13911+ "sub %2,%0\n"
13912+ "int $4\n0:\n"
13913+ _ASM_EXTABLE(0b, 0b)
13914+#endif
13915+
13916+ : "=r" (new)
13917+ : "0" (c), "ir" (a));
13918+
13919+ old = atomic64_cmpxchg(v, c, new);
13920 if (likely(old == c))
13921 break;
13922 c = old;
13923 }
13924- return c != (u);
13925+ return c != u;
13926 }
13927
13928 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
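
Every checked operation in atomic64_64.h follows one PAX_REFCOUNT pattern: do the update, "jno 0f" past a recovery block if the sign did not overflow, otherwise undo the update and raise "int $4" (the x86 overflow exception); _ASM_EXTABLE(0b, 0b) maps the faulting address to itself so the trap handler, after reporting the refcount overflow, resumes right after the int. A userspace sketch of the detect-and-undo half, with a seto flag capture standing in for the trap (assumptions: x86-64, GCC/Clang inline asm):

#include <stdio.h>
#include <limits.h>

static int checked_inc(long *counter)
{
        unsigned char overflowed;

        asm volatile("lock; incq %0\n\t"
                     "seto %1\n\t"          /* capture OF from the inc */
                     "jno 1f\n\t"
                     "lock; decq %0\n"      /* overflow: undo, as the patch does */
                     "1:"
                     : "+m" (*counter), "=q" (overflowed)
                     : : "memory", "cc");
        return overflowed;
}

int main(void)
{
        long c = LONG_MAX;                  /* the next increment wraps negative */

        if (checked_inc(&c))
                puts("overflow detected, counter restored");
        printf("counter = %ld\n", c);
        return 0;
}

The undo keeps the counter saturated at its pre-overflow value instead of letting it wrap, which is what turns a reference-count overflow from an exploitable use-after-free primitive into a detectable event.
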
13929diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
13930index 6dfd019..28e188d 100644
13931--- a/arch/x86/include/asm/bitops.h
13932+++ b/arch/x86/include/asm/bitops.h
13933@@ -40,7 +40,7 @@
13934 * a mask operation on a byte.
13935 */
13936 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
13937-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
13938+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
13939 #define CONST_MASK(nr) (1 << ((nr) & 7))
13940
13941 /**
13942@@ -486,7 +486,7 @@ static inline int fls(int x)
13943 * at position 64.
13944 */
13945 #ifdef CONFIG_X86_64
13946-static __always_inline int fls64(__u64 x)
13947+static __always_inline long fls64(__u64 x)
13948 {
13949 int bitpos = -1;
13950 /*
13951diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
13952index 4fa687a..60f2d39 100644
13953--- a/arch/x86/include/asm/boot.h
13954+++ b/arch/x86/include/asm/boot.h
13955@@ -6,10 +6,15 @@
13956 #include <uapi/asm/boot.h>
13957
13958 /* Physical address where kernel should be loaded. */
13959-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
13960+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
13961 + (CONFIG_PHYSICAL_ALIGN - 1)) \
13962 & ~(CONFIG_PHYSICAL_ALIGN - 1))
13963
13964+#ifndef __ASSEMBLY__
13965+extern unsigned char __LOAD_PHYSICAL_ADDR[];
13966+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
13967+#endif
13968+
13969 /* Minimum kernel alignment, as a power of two */
13970 #ifdef CONFIG_X86_64
13971 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
13972diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
13973index 48f99f1..d78ebf9 100644
13974--- a/arch/x86/include/asm/cache.h
13975+++ b/arch/x86/include/asm/cache.h
13976@@ -5,12 +5,13 @@
13977
13978 /* L1 cache line size */
13979 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
13980-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
13981+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
13982
13983 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
13984+#define __read_only __attribute__((__section__(".data..read_only")))
13985
13986 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
13987-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
13988+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
13989
13990 #ifdef CONFIG_X86_VSMP
13991 #ifdef CONFIG_SMP
13992diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
13993index 9863ee3..4a1f8e1 100644
13994--- a/arch/x86/include/asm/cacheflush.h
13995+++ b/arch/x86/include/asm/cacheflush.h
13996@@ -27,7 +27,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
13997 unsigned long pg_flags = pg->flags & _PGMT_MASK;
13998
13999 if (pg_flags == _PGMT_DEFAULT)
14000- return -1;
14001+ return ~0UL;
14002 else if (pg_flags == _PGMT_WC)
14003 return _PAGE_CACHE_WC;
14004 else if (pg_flags == _PGMT_UC_MINUS)
14005diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
14006index 46fc474..b02b0f9 100644
14007--- a/arch/x86/include/asm/checksum_32.h
14008+++ b/arch/x86/include/asm/checksum_32.h
14009@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
14010 int len, __wsum sum,
14011 int *src_err_ptr, int *dst_err_ptr);
14012
14013+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
14014+ int len, __wsum sum,
14015+ int *src_err_ptr, int *dst_err_ptr);
14016+
14017+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
14018+ int len, __wsum sum,
14019+ int *src_err_ptr, int *dst_err_ptr);
14020+
14021 /*
14022 * Note: when you get a NULL pointer exception here this means someone
14023 * passed in an incorrect kernel address to one of these functions.
14024@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
14025 int *err_ptr)
14026 {
14027 might_sleep();
14028- return csum_partial_copy_generic((__force void *)src, dst,
14029+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
14030 len, sum, err_ptr, NULL);
14031 }
14032
14033@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
14034 {
14035 might_sleep();
14036 if (access_ok(VERIFY_WRITE, dst, len))
14037- return csum_partial_copy_generic(src, (__force void *)dst,
14038+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
14039 len, sum, NULL, err_ptr);
14040
14041 if (len)
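
The _to_user/_from_user split exists so the assembly implementations can apply the userland segment override to exactly one side of the copy under UDEREF. What the routine computes is the IP one's-complement checksum, folded while copying; a plain-C sketch of that idea (conceptual only; the real csum_partial_copy_generic is hand-written asm with exception-table fixups and returns an unfolded partial sum):

#include <stdint.h>
#include <stdio.h>

static uint16_t csum_copy(void *dst, const void *src, size_t len)
{
        const uint8_t *s = src;
        uint8_t *d = dst;
        uint32_t sum = 0;
        size_t i;

        for (i = 0; i + 1 < len; i += 2) {      /* sum 16-bit words */
                d[i] = s[i];
                d[i + 1] = s[i + 1];
                sum += (uint32_t)s[i] << 8 | s[i + 1];
        }
        if (i < len) {                          /* trailing odd byte */
                d[i] = s[i];
                sum += (uint32_t)s[i] << 8;
        }
        while (sum >> 16)                       /* fold the carries */
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}

int main(void)
{
        char in[] = "example payload", out[sizeof(in)] = { 0 };

        printf("csum=%#x\n", csum_copy(out, in, sizeof(in) - 1));
        return 0;
}
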
14042diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
14043index d47786a..ce1b05d 100644
14044--- a/arch/x86/include/asm/cmpxchg.h
14045+++ b/arch/x86/include/asm/cmpxchg.h
14046@@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
14047 __compiletime_error("Bad argument size for cmpxchg");
14048 extern void __xadd_wrong_size(void)
14049 __compiletime_error("Bad argument size for xadd");
14050+extern void __xadd_check_overflow_wrong_size(void)
14051+ __compiletime_error("Bad argument size for xadd_check_overflow");
14052 extern void __add_wrong_size(void)
14053 __compiletime_error("Bad argument size for add");
14054+extern void __add_check_overflow_wrong_size(void)
14055+ __compiletime_error("Bad argument size for add_check_overflow");
14056
14057 /*
14058 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
14059@@ -67,6 +71,34 @@ extern void __add_wrong_size(void)
14060 __ret; \
14061 })
14062
14063+#define __xchg_op_check_overflow(ptr, arg, op, lock) \
14064+ ({ \
14065+ __typeof__ (*(ptr)) __ret = (arg); \
14066+ switch (sizeof(*(ptr))) { \
14067+ case __X86_CASE_L: \
14068+ asm volatile (lock #op "l %0, %1\n" \
14069+ "jno 0f\n" \
14070+ "mov %0,%1\n" \
14071+ "int $4\n0:\n" \
14072+ _ASM_EXTABLE(0b, 0b) \
14073+ : "+r" (__ret), "+m" (*(ptr)) \
14074+ : : "memory", "cc"); \
14075+ break; \
14076+ case __X86_CASE_Q: \
14077+ asm volatile (lock #op "q %q0, %1\n" \
14078+ "jno 0f\n" \
14079+ "mov %0,%1\n" \
14080+ "int $4\n0:\n" \
14081+ _ASM_EXTABLE(0b, 0b) \
14082+ : "+r" (__ret), "+m" (*(ptr)) \
14083+ : : "memory", "cc"); \
14084+ break; \
14085+ default: \
14086+ __ ## op ## _check_overflow_wrong_size(); \
14087+ } \
14088+ __ret; \
14089+ })
14090+
14091 /*
14092 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
14093 * Since this is generally used to protect other memory information, we
14094@@ -167,6 +199,9 @@ extern void __add_wrong_size(void)
14095 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
14096 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
14097
14098+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
14099+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
14100+
14101 #define __add(ptr, inc, lock) \
14102 ({ \
14103 __typeof__ (*(ptr)) __ret = (inc); \
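
__xchg_op_check_overflow() applies the refcount pattern to xadd: after the locked xadd, the register operand still holds the pre-add value, so on overflow "mov %0,%1" writes that old value back over the wrapped memory before trapping. A portable analogue built from a cmpxchg loop and __builtin_add_overflow (a sketch of the semantics only; the kernel macro is a single locked instruction, this is not):

#include <stdatomic.h>
#include <stdio.h>
#include <limits.h>

static int fetch_add_checked(_Atomic long *p, long inc, long *old)
{
        long cur = atomic_load(p), next;

        do {
                if (__builtin_add_overflow(cur, inc, &next))
                        return -1;              /* would wrap: refuse */
        } while (!atomic_compare_exchange_weak(p, &cur, next));
        *old = cur;
        return 0;
}

int main(void)
{
        _Atomic long v = LONG_MAX - 1;
        long old;

        printf("%d\n", fetch_add_checked(&v, 1, &old));   /* 0: ok */
        printf("%d\n", fetch_add_checked(&v, 1, &old));   /* -1: overflow */
        return 0;
}
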
14104diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
14105index 59c6c40..5e0b22c 100644
14106--- a/arch/x86/include/asm/compat.h
14107+++ b/arch/x86/include/asm/compat.h
14108@@ -41,7 +41,7 @@ typedef s64 __attribute__((aligned(4))) compat_s64;
14109 typedef u32 compat_uint_t;
14110 typedef u32 compat_ulong_t;
14111 typedef u64 __attribute__((aligned(4))) compat_u64;
14112-typedef u32 compat_uptr_t;
14113+typedef u32 __user compat_uptr_t;
14114
14115 struct compat_timespec {
14116 compat_time_t tv_sec;
14117diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
14118index e99ac27..10d834e 100644
14119--- a/arch/x86/include/asm/cpufeature.h
14120+++ b/arch/x86/include/asm/cpufeature.h
14121@@ -203,7 +203,7 @@
14122 #define X86_FEATURE_DECODEASSISTS (8*32+12) /* AMD Decode Assists support */
14123 #define X86_FEATURE_PAUSEFILTER (8*32+13) /* AMD filtered pause intercept */
14124 #define X86_FEATURE_PFTHRESHOLD (8*32+14) /* AMD pause filter threshold */
14125-
14126+#define X86_FEATURE_STRONGUDEREF (8*32+31) /* PaX PCID based strong UDEREF */
14127
14128 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
14129 #define X86_FEATURE_FSGSBASE (9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
14130@@ -211,7 +211,7 @@
14131 #define X86_FEATURE_BMI1 (9*32+ 3) /* 1st group bit manipulation extensions */
14132 #define X86_FEATURE_HLE (9*32+ 4) /* Hardware Lock Elision */
14133 #define X86_FEATURE_AVX2 (9*32+ 5) /* AVX2 instructions */
14134-#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Protection */
14135+#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Prevention */
14136 #define X86_FEATURE_BMI2 (9*32+ 8) /* 2nd group bit manipulation extensions */
14137 #define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */
14138 #define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */
14139@@ -353,6 +353,7 @@ extern const char * const x86_power_flags[32];
14140 #undef cpu_has_centaur_mcr
14141 #define cpu_has_centaur_mcr 0
14142
14143+#define cpu_has_pcid boot_cpu_has(X86_FEATURE_PCID)
14144 #endif /* CONFIG_X86_64 */
14145
14146 #if __GNUC__ >= 4
14147@@ -394,7 +395,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
14148 ".section .discard,\"aw\",@progbits\n"
14149 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
14150 ".previous\n"
14151- ".section .altinstr_replacement,\"ax\"\n"
14152+ ".section .altinstr_replacement,\"a\"\n"
14153 "3: movb $1,%0\n"
14154 "4:\n"
14155 ".previous\n"
14156diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
14157index 8bf1c06..b6ae785 100644
14158--- a/arch/x86/include/asm/desc.h
14159+++ b/arch/x86/include/asm/desc.h
14160@@ -4,6 +4,7 @@
14161 #include <asm/desc_defs.h>
14162 #include <asm/ldt.h>
14163 #include <asm/mmu.h>
14164+#include <asm/pgtable.h>
14165
14166 #include <linux/smp.h>
14167 #include <linux/percpu.h>
14168@@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
14169
14170 desc->type = (info->read_exec_only ^ 1) << 1;
14171 desc->type |= info->contents << 2;
14172+ desc->type |= info->seg_not_present ^ 1;
14173
14174 desc->s = 1;
14175 desc->dpl = 0x3;
14176@@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
14177 }
14178
14179 extern struct desc_ptr idt_descr;
14180-extern gate_desc idt_table[];
14181 extern struct desc_ptr nmi_idt_descr;
14182-extern gate_desc nmi_idt_table[];
14183-
14184-struct gdt_page {
14185- struct desc_struct gdt[GDT_ENTRIES];
14186-} __attribute__((aligned(PAGE_SIZE)));
14187-
14188-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
14189+extern gate_desc idt_table[256];
14190+extern gate_desc nmi_idt_table[256];
14191
14192+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
14193 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
14194 {
14195- return per_cpu(gdt_page, cpu).gdt;
14196+ return cpu_gdt_table[cpu];
14197 }
14198
14199 #ifdef CONFIG_X86_64
14200@@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
14201 unsigned long base, unsigned dpl, unsigned flags,
14202 unsigned short seg)
14203 {
14204- gate->a = (seg << 16) | (base & 0xffff);
14205- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
14206+ gate->gate.offset_low = base;
14207+ gate->gate.seg = seg;
14208+ gate->gate.reserved = 0;
14209+ gate->gate.type = type;
14210+ gate->gate.s = 0;
14211+ gate->gate.dpl = dpl;
14212+ gate->gate.p = 1;
14213+ gate->gate.offset_high = base >> 16;
14214 }
14215
14216 #endif
14217@@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
14218
14219 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
14220 {
14221+ pax_open_kernel();
14222 memcpy(&idt[entry], gate, sizeof(*gate));
14223+ pax_close_kernel();
14224 }
14225
14226 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
14227 {
14228+ pax_open_kernel();
14229 memcpy(&ldt[entry], desc, 8);
14230+ pax_close_kernel();
14231 }
14232
14233 static inline void
14234@@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
14235 default: size = sizeof(*gdt); break;
14236 }
14237
14238+ pax_open_kernel();
14239 memcpy(&gdt[entry], desc, size);
14240+ pax_close_kernel();
14241 }
14242
14243 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
14244@@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
14245
14246 static inline void native_load_tr_desc(void)
14247 {
14248+ pax_open_kernel();
14249 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
14250+ pax_close_kernel();
14251 }
14252
14253 static inline void native_load_gdt(const struct desc_ptr *dtr)
14254@@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
14255 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
14256 unsigned int i;
14257
14258+ pax_open_kernel();
14259 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
14260 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
14261+ pax_close_kernel();
14262 }
14263
14264 #define _LDT_empty(info) \
14265@@ -287,7 +300,7 @@ static inline void load_LDT(mm_context_t *pc)
14266 preempt_enable();
14267 }
14268
14269-static inline unsigned long get_desc_base(const struct desc_struct *desc)
14270+static inline unsigned long __intentional_overflow(-1) get_desc_base(const struct desc_struct *desc)
14271 {
14272 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
14273 }
14274@@ -311,7 +324,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
14275 }
14276
14277 #ifdef CONFIG_X86_64
14278-static inline void set_nmi_gate(int gate, void *addr)
14279+static inline void set_nmi_gate(int gate, const void *addr)
14280 {
14281 gate_desc s;
14282
14283@@ -320,7 +333,7 @@ static inline void set_nmi_gate(int gate, void *addr)
14284 }
14285 #endif
14286
14287-static inline void _set_gate(int gate, unsigned type, void *addr,
14288+static inline void _set_gate(int gate, unsigned type, const void *addr,
14289 unsigned dpl, unsigned ist, unsigned seg)
14290 {
14291 gate_desc s;
14292@@ -339,7 +352,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
14293 * Pentium F0 0F bugfix can have resulted in the mapped
14294 * IDT being write-protected.
14295 */
14296-static inline void set_intr_gate(unsigned int n, void *addr)
14297+static inline void set_intr_gate(unsigned int n, const void *addr)
14298 {
14299 BUG_ON((unsigned)n > 0xFF);
14300 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
14301@@ -369,19 +382,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
14302 /*
14303 * This routine sets up an interrupt gate at directory privilege level 3.
14304 */
14305-static inline void set_system_intr_gate(unsigned int n, void *addr)
14306+static inline void set_system_intr_gate(unsigned int n, const void *addr)
14307 {
14308 BUG_ON((unsigned)n > 0xFF);
14309 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
14310 }
14311
14312-static inline void set_system_trap_gate(unsigned int n, void *addr)
14313+static inline void set_system_trap_gate(unsigned int n, const void *addr)
14314 {
14315 BUG_ON((unsigned)n > 0xFF);
14316 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
14317 }
14318
14319-static inline void set_trap_gate(unsigned int n, void *addr)
14320+static inline void set_trap_gate(unsigned int n, const void *addr)
14321 {
14322 BUG_ON((unsigned)n > 0xFF);
14323 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
14324@@ -390,19 +403,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
14325 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
14326 {
14327 BUG_ON((unsigned)n > 0xFF);
14328- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
14329+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
14330 }
14331
14332-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
14333+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
14334 {
14335 BUG_ON((unsigned)n > 0xFF);
14336 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
14337 }
14338
14339-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
14340+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
14341 {
14342 BUG_ON((unsigned)n > 0xFF);
14343 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
14344 }
14345
14346+#ifdef CONFIG_X86_32
14347+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
14348+{
14349+ struct desc_struct d;
14350+
14351+ if (likely(limit))
14352+ limit = (limit - 1UL) >> PAGE_SHIFT;
14353+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
14354+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
14355+}
14356+#endif
14357+
14358 #endif /* _ASM_X86_DESC_H */
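
Every descriptor-table write above is bracketed by pax_open_kernel()/pax_close_kernel() because elsewhere in the patch the GDT, IDT and LDT become read-only; the pair briefly lifts write protection for the update and restores it immediately afterwards. A userspace analogue using mprotect() in place of the kernel's write-protection toggle (a sketch assuming Linux and 4 KiB pages; the comments map the steps to the patch):

#include <sys/mman.h>
#include <stdio.h>

static long table[512] __attribute__((aligned(4096)));   /* exactly one page */

int main(void)
{
        size_t len = sizeof(table);

        mprotect(table, len, PROT_READ);                /* read-only, like the GDT */

        mprotect(table, len, PROT_READ | PROT_WRITE);   /* pax_open_kernel() */
        table[7] = 42;                                  /* write_gdt_entry() */
        mprotect(table, len, PROT_READ);                /* pax_close_kernel() */

        printf("%ld\n", table[7]);
        return 0;
}
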
14359diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
14360index 278441f..b95a174 100644
14361--- a/arch/x86/include/asm/desc_defs.h
14362+++ b/arch/x86/include/asm/desc_defs.h
14363@@ -31,6 +31,12 @@ struct desc_struct {
14364 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
14365 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
14366 };
14367+ struct {
14368+ u16 offset_low;
14369+ u16 seg;
14370+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
14371+ unsigned offset_high: 16;
14372+ } gate;
14373 };
14374 } __attribute__((packed));
14375
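
The new gate view lets pack_gate() assign named fields instead of the old open-coded a/b word arithmetic. A standalone sketch of the same 32-bit layout, splitting a handler address across offset_low/offset_high (the selector and address values here are hypothetical):

#include <stdint.h>
#include <stdio.h>

struct gate {
        uint16_t offset_low;
        uint16_t seg;
        unsigned reserved : 8, type : 4, s : 1, dpl : 2, p : 1;
        uint16_t offset_high;
} __attribute__((packed));

int main(void)
{
        uint32_t handler = 0xc01abcde;          /* hypothetical handler address */
        struct gate g = {
                .offset_low  = handler & 0xffff,
                .offset_high = handler >> 16,
                .seg  = 0x60,                   /* hypothetical kernel CS */
                .type = 0xe,                    /* 32-bit interrupt gate */
                .p    = 1,
        };

        printf("gate %04x:%04x%04x type=%x dpl=%u\n",
               g.seg, g.offset_high, g.offset_low, g.type, g.dpl);
        return 0;
}
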
14376diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
14377index ced283a..ffe04cc 100644
14378--- a/arch/x86/include/asm/div64.h
14379+++ b/arch/x86/include/asm/div64.h
14380@@ -39,7 +39,7 @@
14381 __mod; \
14382 })
14383
14384-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
14385+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
14386 {
14387 union {
14388 u64 v64;
14389diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
14390index 9c999c1..3860cb8 100644
14391--- a/arch/x86/include/asm/elf.h
14392+++ b/arch/x86/include/asm/elf.h
14393@@ -243,7 +243,25 @@ extern int force_personality32;
14394 the loader. We need to make sure that it is out of the way of the program
14395 that it will "exec", and that there is sufficient room for the brk. */
14396
14397+#ifdef CONFIG_PAX_SEGMEXEC
14398+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
14399+#else
14400 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
14401+#endif
14402+
14403+#ifdef CONFIG_PAX_ASLR
14404+#ifdef CONFIG_X86_32
14405+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
14406+
14407+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
14408+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
14409+#else
14410+#define PAX_ELF_ET_DYN_BASE 0x400000UL
14411+
14412+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
14413+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
14414+#endif
14415+#endif
14416
14417 /* This yields a mask that user programs can use to figure out what
14418 instruction set this CPU supports. This could be done in user space,
14419@@ -296,16 +314,12 @@ do { \
14420
14421 #define ARCH_DLINFO \
14422 do { \
14423- if (vdso_enabled) \
14424- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
14425- (unsigned long)current->mm->context.vdso); \
14426+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
14427 } while (0)
14428
14429 #define ARCH_DLINFO_X32 \
14430 do { \
14431- if (vdso_enabled) \
14432- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
14433- (unsigned long)current->mm->context.vdso); \
14434+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
14435 } while (0)
14436
14437 #define AT_SYSINFO 32
14438@@ -320,7 +334,7 @@ else \
14439
14440 #endif /* !CONFIG_X86_32 */
14441
14442-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
14443+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
14444
14445 #define VDSO_ENTRY \
14446 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
14447@@ -336,9 +350,6 @@ extern int x32_setup_additional_pages(struct linux_binprm *bprm,
14448 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
14449 #define compat_arch_setup_additional_pages syscall32_setup_pages
14450
14451-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
14452-#define arch_randomize_brk arch_randomize_brk
14453-
14454 /*
14455 * True on X86_32 or when emulating IA32 on X86_64
14456 */
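
The PAX_DELTA_*_LEN values are bit widths: the loader draws that many bits of entropy and shifts them up by PAGE_SHIFT, displacing the mmap and stack bases above PAX_ELF_ET_DYN_BASE in page-sized steps. The arithmetic for the 32-bit non-SEGMEXEC case (a sketch; rand() stands in for the kernel's entropy source, so seed it before relying on the output):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT          12
#define PAX_ELF_ET_DYN_BASE 0x10000000UL        /* 32-bit value from the patch */
#define PAX_DELTA_MMAP_LEN  16                  /* non-SEGMEXEC case */

int main(void)
{
        unsigned long delta = (unsigned long)rand() &
                              ((1UL << PAX_DELTA_MMAP_LEN) - 1);
        unsigned long base  = PAX_ELF_ET_DYN_BASE + (delta << PAGE_SHIFT);

        printf("randomized ET_DYN base: %#lx\n", base);
        return 0;
}
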
14457diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
14458index 75ce3f4..882e801 100644
14459--- a/arch/x86/include/asm/emergency-restart.h
14460+++ b/arch/x86/include/asm/emergency-restart.h
14461@@ -13,6 +13,6 @@ enum reboot_type {
14462
14463 extern enum reboot_type reboot_type;
14464
14465-extern void machine_emergency_restart(void);
14466+extern void machine_emergency_restart(void) __noreturn;
14467
14468 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
14469diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
14470index e25cc33..7d3ec01 100644
14471--- a/arch/x86/include/asm/fpu-internal.h
14472+++ b/arch/x86/include/asm/fpu-internal.h
14473@@ -126,8 +126,11 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
14474 #define user_insn(insn, output, input...) \
14475 ({ \
14476 int err; \
14477+ pax_open_userland(); \
14478 asm volatile(ASM_STAC "\n" \
14479- "1:" #insn "\n\t" \
14480+ "1:" \
14481+ __copyuser_seg \
14482+ #insn "\n\t" \
14483 "2: " ASM_CLAC "\n" \
14484 ".section .fixup,\"ax\"\n" \
14485 "3: movl $-1,%[err]\n" \
14486@@ -136,6 +139,7 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
14487 _ASM_EXTABLE(1b, 3b) \
14488 : [err] "=r" (err), output \
14489 : "0"(0), input); \
14490+ pax_close_userland(); \
14491 err; \
14492 })
14493
14494@@ -300,7 +304,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
14495 "emms\n\t" /* clear stack tags */
14496 "fildl %P[addr]", /* set F?P to defined value */
14497 X86_FEATURE_FXSAVE_LEAK,
14498- [addr] "m" (tsk->thread.fpu.has_fpu));
14499+ [addr] "m" (init_tss[raw_smp_processor_id()].x86_tss.sp0));
14500
14501 return fpu_restore_checking(&tsk->thread.fpu);
14502 }
14503diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
14504index be27ba1..04a8801 100644
14505--- a/arch/x86/include/asm/futex.h
14506+++ b/arch/x86/include/asm/futex.h
14507@@ -12,6 +12,7 @@
14508 #include <asm/smap.h>
14509
14510 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
14511+ typecheck(u32 __user *, uaddr); \
14512 asm volatile("\t" ASM_STAC "\n" \
14513 "1:\t" insn "\n" \
14514 "2:\t" ASM_CLAC "\n" \
14515@@ -20,15 +21,16 @@
14516 "\tjmp\t2b\n" \
14517 "\t.previous\n" \
14518 _ASM_EXTABLE(1b, 3b) \
14519- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
14520+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr)) \
14521 : "i" (-EFAULT), "0" (oparg), "1" (0))
14522
14523 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
14524+ typecheck(u32 __user *, uaddr); \
14525 asm volatile("\t" ASM_STAC "\n" \
14526 "1:\tmovl %2, %0\n" \
14527 "\tmovl\t%0, %3\n" \
14528 "\t" insn "\n" \
14529- "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
14530+ "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \
14531 "\tjnz\t1b\n" \
14532 "3:\t" ASM_CLAC "\n" \
14533 "\t.section .fixup,\"ax\"\n" \
14534@@ -38,7 +40,7 @@
14535 _ASM_EXTABLE(1b, 4b) \
14536 _ASM_EXTABLE(2b, 4b) \
14537 : "=&a" (oldval), "=&r" (ret), \
14538- "+m" (*uaddr), "=&r" (tem) \
14539+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
14540 : "r" (oparg), "i" (-EFAULT), "1" (0))
14541
14542 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
14543@@ -57,12 +59,13 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
14544
14545 pagefault_disable();
14546
14547+ pax_open_userland();
14548 switch (op) {
14549 case FUTEX_OP_SET:
14550- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
14551+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
14552 break;
14553 case FUTEX_OP_ADD:
14554- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
14555+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
14556 uaddr, oparg);
14557 break;
14558 case FUTEX_OP_OR:
14559@@ -77,6 +80,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
14560 default:
14561 ret = -ENOSYS;
14562 }
14563+ pax_close_userland();
14564
14565 pagefault_enable();
14566
14567@@ -115,18 +119,20 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
14568 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
14569 return -EFAULT;
14570
14571+ pax_open_userland();
14572 asm volatile("\t" ASM_STAC "\n"
14573- "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
14574+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
14575 "2:\t" ASM_CLAC "\n"
14576 "\t.section .fixup, \"ax\"\n"
14577 "3:\tmov %3, %0\n"
14578 "\tjmp 2b\n"
14579 "\t.previous\n"
14580 _ASM_EXTABLE(1b, 3b)
14581- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
14582+ : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
14583 : "i" (-EFAULT), "r" (newval), "1" (oldval)
14584 : "memory"
14585 );
14586+ pax_close_userland();
14587
14588 *uval = oldval;
14589 return ret;
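
__futex_atomic_op2() encodes a load/modify/cmpxchg retry loop in asm, now with pax_open_userland()/pax_close_userland() around it and __copyuser_seg on each access so UDEREF can reach the user segment. The same loop written with C11 atomics, for the FUTEX_OP_OR case (a sketch of the loop only; the kernel version additionally carries exception-table fixups for faulting user addresses):

#include <stdatomic.h>
#include <stdio.h>

static int futex_op_or(_Atomic int *uaddr, int oparg)
{
        int oldval = atomic_load(uaddr);

        while (!atomic_compare_exchange_weak(uaddr, &oldval,
                                             oldval | oparg))
                ;       /* oldval was reloaded on failure; retry */
        return oldval;
}

int main(void)
{
        _Atomic int word = 0x10;

        printf("old=%#x new=%#x\n", futex_op_or(&word, 0x1),
               atomic_load(&word));
        return 0;
}
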
14590diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
14591index 1da97ef..9c2ebff 100644
14592--- a/arch/x86/include/asm/hw_irq.h
14593+++ b/arch/x86/include/asm/hw_irq.h
14594@@ -148,8 +148,8 @@ extern void setup_ioapic_dest(void);
14595 extern void enable_IO_APIC(void);
14596
14597 /* Statistics */
14598-extern atomic_t irq_err_count;
14599-extern atomic_t irq_mis_count;
14600+extern atomic_unchecked_t irq_err_count;
14601+extern atomic_unchecked_t irq_mis_count;
14602
14603 /* EISA */
14604 extern void eisa_set_level_irq(unsigned int irq);
14605diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
14606index a203659..9889f1c 100644
14607--- a/arch/x86/include/asm/i8259.h
14608+++ b/arch/x86/include/asm/i8259.h
14609@@ -62,7 +62,7 @@ struct legacy_pic {
14610 void (*init)(int auto_eoi);
14611 int (*irq_pending)(unsigned int irq);
14612 void (*make_irq)(unsigned int irq);
14613-};
14614+} __do_const;
14615
14616 extern struct legacy_pic *legacy_pic;
14617 extern struct legacy_pic null_legacy_pic;
14618diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
14619index d8e8eef..1765f78 100644
14620--- a/arch/x86/include/asm/io.h
14621+++ b/arch/x86/include/asm/io.h
14622@@ -51,12 +51,12 @@ static inline void name(type val, volatile void __iomem *addr) \
14623 "m" (*(volatile type __force *)addr) barrier); }
14624
14625 build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
14626-build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
14627-build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
14628+build_mmio_read(__intentional_overflow(-1) readw, "w", unsigned short, "=r", :"memory")
14629+build_mmio_read(__intentional_overflow(-1) readl, "l", unsigned int, "=r", :"memory")
14630
14631 build_mmio_read(__readb, "b", unsigned char, "=q", )
14632-build_mmio_read(__readw, "w", unsigned short, "=r", )
14633-build_mmio_read(__readl, "l", unsigned int, "=r", )
14634+build_mmio_read(__intentional_overflow(-1) __readw, "w", unsigned short, "=r", )
14635+build_mmio_read(__intentional_overflow(-1) __readl, "l", unsigned int, "=r", )
14636
14637 build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
14638 build_mmio_write(writew, "w", unsigned short, "r", :"memory")
14639@@ -184,7 +184,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
14640 return ioremap_nocache(offset, size);
14641 }
14642
14643-extern void iounmap(volatile void __iomem *addr);
14644+extern void iounmap(const volatile void __iomem *addr);
14645
14646 extern void set_iounmap_nonlazy(void);
14647
14648@@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
14649
14650 #include <linux/vmalloc.h>
14651
14652+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
14653+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
14654+{
14655+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
14656+}
14657+
14658+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
14659+{
14660+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
14661+}
14662+
14663 /*
14664 * Convert a virtual cached pointer to an uncached pointer
14665 */
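
The two checks added above bound /dev/mem-style accesses by the CPU's reported physical address width: the last page frame touched must lie below 2^(x86_phys_bits - PAGE_SHIFT). Restated as standalone C with the width as a parameter (a sketch; in the kernel, boot_cpu_data.x86_phys_bits supplies it):

#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static int valid_phys_addr_range(unsigned long addr, size_t count,
                                 unsigned int phys_bits)
{
        unsigned long last = (addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT;

        return last < (1ULL << (phys_bits - PAGE_SHIFT));
}

int main(void)
{
        /* 36 physical address bits, as on a PAE-era CPU */
        printf("%d\n", valid_phys_addr_range(0x1000, 4096, 36));     /* 1 */
        printf("%d\n", valid_phys_addr_range(1UL << 36, 4096, 36));  /* 0 */
        return 0;
}
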
14666diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
14667index bba3cf8..06bc8da 100644
14668--- a/arch/x86/include/asm/irqflags.h
14669+++ b/arch/x86/include/asm/irqflags.h
14670@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
14671 sti; \
14672 sysexit
14673
14674+#define GET_CR0_INTO_RDI mov %cr0, %rdi
14675+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
14676+#define GET_CR3_INTO_RDI mov %cr3, %rdi
14677+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
14678+
14679 #else
14680 #define INTERRUPT_RETURN iret
14681 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
14682diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
14683index 5a6d287..f815789 100644
14684--- a/arch/x86/include/asm/kprobes.h
14685+++ b/arch/x86/include/asm/kprobes.h
14686@@ -38,13 +38,8 @@ typedef u8 kprobe_opcode_t;
14687 #define RELATIVEJUMP_SIZE 5
14688 #define RELATIVECALL_OPCODE 0xe8
14689 #define RELATIVE_ADDR_SIZE 4
14690-#define MAX_STACK_SIZE 64
14691-#define MIN_STACK_SIZE(ADDR) \
14692- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
14693- THREAD_SIZE - (unsigned long)(ADDR))) \
14694- ? (MAX_STACK_SIZE) \
14695- : (((unsigned long)current_thread_info()) + \
14696- THREAD_SIZE - (unsigned long)(ADDR)))
14697+#define MAX_STACK_SIZE 64UL
14698+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
14699
14700 #define flush_insn_slot(p) do { } while (0)
14701
14702diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
14703index 2d89e39..baee879 100644
14704--- a/arch/x86/include/asm/local.h
14705+++ b/arch/x86/include/asm/local.h
14706@@ -10,33 +10,97 @@ typedef struct {
14707 atomic_long_t a;
14708 } local_t;
14709
14710+typedef struct {
14711+ atomic_long_unchecked_t a;
14712+} local_unchecked_t;
14713+
14714 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
14715
14716 #define local_read(l) atomic_long_read(&(l)->a)
14717+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
14718 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
14719+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
14720
14721 static inline void local_inc(local_t *l)
14722 {
14723- asm volatile(_ASM_INC "%0"
14724+ asm volatile(_ASM_INC "%0\n"
14725+
14726+#ifdef CONFIG_PAX_REFCOUNT
14727+ "jno 0f\n"
14728+ _ASM_DEC "%0\n"
14729+ "int $4\n0:\n"
14730+ _ASM_EXTABLE(0b, 0b)
14731+#endif
14732+
14733+ : "+m" (l->a.counter));
14734+}
14735+
14736+static inline void local_inc_unchecked(local_unchecked_t *l)
14737+{
14738+ asm volatile(_ASM_INC "%0\n"
14739 : "+m" (l->a.counter));
14740 }
14741
14742 static inline void local_dec(local_t *l)
14743 {
14744- asm volatile(_ASM_DEC "%0"
14745+ asm volatile(_ASM_DEC "%0\n"
14746+
14747+#ifdef CONFIG_PAX_REFCOUNT
14748+ "jno 0f\n"
14749+ _ASM_INC "%0\n"
14750+ "int $4\n0:\n"
14751+ _ASM_EXTABLE(0b, 0b)
14752+#endif
14753+
14754+ : "+m" (l->a.counter));
14755+}
14756+
14757+static inline void local_dec_unchecked(local_unchecked_t *l)
14758+{
14759+ asm volatile(_ASM_DEC "%0\n"
14760 : "+m" (l->a.counter));
14761 }
14762
14763 static inline void local_add(long i, local_t *l)
14764 {
14765- asm volatile(_ASM_ADD "%1,%0"
14766+ asm volatile(_ASM_ADD "%1,%0\n"
14767+
14768+#ifdef CONFIG_PAX_REFCOUNT
14769+ "jno 0f\n"
14770+ _ASM_SUB "%1,%0\n"
14771+ "int $4\n0:\n"
14772+ _ASM_EXTABLE(0b, 0b)
14773+#endif
14774+
14775+ : "+m" (l->a.counter)
14776+ : "ir" (i));
14777+}
14778+
14779+static inline void local_add_unchecked(long i, local_unchecked_t *l)
14780+{
14781+ asm volatile(_ASM_ADD "%1,%0\n"
14782 : "+m" (l->a.counter)
14783 : "ir" (i));
14784 }
14785
14786 static inline void local_sub(long i, local_t *l)
14787 {
14788- asm volatile(_ASM_SUB "%1,%0"
14789+ asm volatile(_ASM_SUB "%1,%0\n"
14790+
14791+#ifdef CONFIG_PAX_REFCOUNT
14792+ "jno 0f\n"
14793+ _ASM_ADD "%1,%0\n"
14794+ "int $4\n0:\n"
14795+ _ASM_EXTABLE(0b, 0b)
14796+#endif
14797+
14798+ : "+m" (l->a.counter)
14799+ : "ir" (i));
14800+}
14801+
14802+static inline void local_sub_unchecked(long i, local_unchecked_t *l)
14803+{
14804+ asm volatile(_ASM_SUB "%1,%0\n"
14805 : "+m" (l->a.counter)
14806 : "ir" (i));
14807 }
14808@@ -54,7 +118,16 @@ static inline int local_sub_and_test(long i, local_t *l)
14809 {
14810 unsigned char c;
14811
14812- asm volatile(_ASM_SUB "%2,%0; sete %1"
14813+ asm volatile(_ASM_SUB "%2,%0\n"
14814+
14815+#ifdef CONFIG_PAX_REFCOUNT
14816+ "jno 0f\n"
14817+ _ASM_ADD "%2,%0\n"
14818+ "int $4\n0:\n"
14819+ _ASM_EXTABLE(0b, 0b)
14820+#endif
14821+
14822+ "sete %1\n"
14823 : "+m" (l->a.counter), "=qm" (c)
14824 : "ir" (i) : "memory");
14825 return c;
14826@@ -72,7 +145,16 @@ static inline int local_dec_and_test(local_t *l)
14827 {
14828 unsigned char c;
14829
14830- asm volatile(_ASM_DEC "%0; sete %1"
14831+ asm volatile(_ASM_DEC "%0\n"
14832+
14833+#ifdef CONFIG_PAX_REFCOUNT
14834+ "jno 0f\n"
14835+ _ASM_INC "%0\n"
14836+ "int $4\n0:\n"
14837+ _ASM_EXTABLE(0b, 0b)
14838+#endif
14839+
14840+ "sete %1\n"
14841 : "+m" (l->a.counter), "=qm" (c)
14842 : : "memory");
14843 return c != 0;
14844@@ -90,7 +172,16 @@ static inline int local_inc_and_test(local_t *l)
14845 {
14846 unsigned char c;
14847
14848- asm volatile(_ASM_INC "%0; sete %1"
14849+ asm volatile(_ASM_INC "%0\n"
14850+
14851+#ifdef CONFIG_PAX_REFCOUNT
14852+ "jno 0f\n"
14853+ _ASM_DEC "%0\n"
14854+ "int $4\n0:\n"
14855+ _ASM_EXTABLE(0b, 0b)
14856+#endif
14857+
14858+ "sete %1\n"
14859 : "+m" (l->a.counter), "=qm" (c)
14860 : : "memory");
14861 return c != 0;
14862@@ -109,7 +200,16 @@ static inline int local_add_negative(long i, local_t *l)
14863 {
14864 unsigned char c;
14865
14866- asm volatile(_ASM_ADD "%2,%0; sets %1"
14867+ asm volatile(_ASM_ADD "%2,%0\n"
14868+
14869+#ifdef CONFIG_PAX_REFCOUNT
14870+ "jno 0f\n"
14871+ _ASM_SUB "%2,%0\n"
14872+ "int $4\n0:\n"
14873+ _ASM_EXTABLE(0b, 0b)
14874+#endif
14875+
14876+ "sets %1\n"
14877 : "+m" (l->a.counter), "=qm" (c)
14878 : "ir" (i) : "memory");
14879 return c;
14880@@ -125,6 +225,30 @@ static inline int local_add_negative(long i, local_t *l)
14881 static inline long local_add_return(long i, local_t *l)
14882 {
14883 long __i = i;
14884+ asm volatile(_ASM_XADD "%0, %1\n"
14885+
14886+#ifdef CONFIG_PAX_REFCOUNT
14887+ "jno 0f\n"
14888+ _ASM_MOV "%0,%1\n"
14889+ "int $4\n0:\n"
14890+ _ASM_EXTABLE(0b, 0b)
14891+#endif
14892+
14893+ : "+r" (i), "+m" (l->a.counter)
14894+ : : "memory");
14895+ return i + __i;
14896+}
14897+
14898+/**
14899+ * local_add_return_unchecked - add and return
14900+ * @i: integer value to add
14901+ * @l: pointer to type local_unchecked_t
14902+ *
14903+ * Atomically adds @i to @l and returns @i + @l
14904+ */
14905+static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
14906+{
14907+ long __i = i;
14908 asm volatile(_ASM_XADD "%0, %1;"
14909 : "+r" (i), "+m" (l->a.counter)
14910 : : "memory");
14911@@ -141,6 +265,8 @@ static inline long local_sub_return(long i, local_t *l)
14912
14913 #define local_cmpxchg(l, o, n) \
14914 (cmpxchg_local(&((l)->a.counter), (o), (n)))
14915+#define local_cmpxchg_unchecked(l, o, n) \
14916+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
14917 /* Always has a lock prefix */
14918 #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
14919
14920diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
14921new file mode 100644
14922index 0000000..2bfd3ba
14923--- /dev/null
14924+++ b/arch/x86/include/asm/mman.h
14925@@ -0,0 +1,15 @@
14926+#ifndef _X86_MMAN_H
14927+#define _X86_MMAN_H
14928+
14929+#include <uapi/asm/mman.h>
14930+
14931+#ifdef __KERNEL__
14932+#ifndef __ASSEMBLY__
14933+#ifdef CONFIG_X86_32
14934+#define arch_mmap_check i386_mmap_check
14935+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
14936+#endif
14937+#endif
14938+#endif
14939+
14940+#endif /* _X86_MMAN_H */
14941diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
14942index 5f55e69..e20bfb1 100644
14943--- a/arch/x86/include/asm/mmu.h
14944+++ b/arch/x86/include/asm/mmu.h
14945@@ -9,7 +9,7 @@
14946 * we put the segment information here.
14947 */
14948 typedef struct {
14949- void *ldt;
14950+ struct desc_struct *ldt;
14951 int size;
14952
14953 #ifdef CONFIG_X86_64
14954@@ -18,7 +18,19 @@ typedef struct {
14955 #endif
14956
14957 struct mutex lock;
14958- void *vdso;
14959+ unsigned long vdso;
14960+
14961+#ifdef CONFIG_X86_32
14962+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
14963+ unsigned long user_cs_base;
14964+ unsigned long user_cs_limit;
14965+
14966+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
14967+ cpumask_t cpu_user_cs_mask;
14968+#endif
14969+
14970+#endif
14971+#endif
14972 } mm_context_t;
14973
14974 #ifdef CONFIG_SMP
14975diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
14976index cdbf367..4c73c9e 100644
14977--- a/arch/x86/include/asm/mmu_context.h
14978+++ b/arch/x86/include/asm/mmu_context.h
14979@@ -24,6 +24,20 @@ void destroy_context(struct mm_struct *mm);
14980
14981 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
14982 {
14983+
14984+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
14985+ if (!(static_cpu_has(X86_FEATURE_PCID))) {
14986+ unsigned int i;
14987+ pgd_t *pgd;
14988+
14989+ pax_open_kernel();
14990+ pgd = get_cpu_pgd(smp_processor_id(), kernel);
14991+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
14992+ set_pgd_batched(pgd+i, native_make_pgd(0));
14993+ pax_close_kernel();
14994+ }
14995+#endif
14996+
14997 #ifdef CONFIG_SMP
14998 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
14999 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
15000@@ -34,16 +48,55 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
15001 struct task_struct *tsk)
15002 {
15003 unsigned cpu = smp_processor_id();
15004+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
15005+ int tlbstate = TLBSTATE_OK;
15006+#endif
15007
15008 if (likely(prev != next)) {
15009 #ifdef CONFIG_SMP
15010+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
15011+ tlbstate = this_cpu_read(cpu_tlbstate.state);
15012+#endif
15013 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
15014 this_cpu_write(cpu_tlbstate.active_mm, next);
15015 #endif
15016 cpumask_set_cpu(cpu, mm_cpumask(next));
15017
15018 /* Re-load page tables */
15019+#ifdef CONFIG_PAX_PER_CPU_PGD
15020+ pax_open_kernel();
15021+
15022+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15023+ if (static_cpu_has(X86_FEATURE_PCID))
15024+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
15025+ else
15026+#endif
15027+
15028+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
15029+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
15030+ pax_close_kernel();
15031+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
15032+
15033+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15034+ if (static_cpu_has(X86_FEATURE_PCID)) {
15035+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
15036+ unsigned long descriptor[2];
15037+ descriptor[0] = PCID_USER;
15038+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
15039+ } else {
15040+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
15041+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
15042+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
15043+ else
15044+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
15045+ }
15046+ } else
15047+#endif
15048+
15049+ load_cr3(get_cpu_pgd(cpu, kernel));
15050+#else
15051 load_cr3(next->pgd);
15052+#endif
15053
15054 /* stop flush ipis for the previous mm */
15055 cpumask_clear_cpu(cpu, mm_cpumask(prev));
15056@@ -53,9 +106,63 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
15057 */
15058 if (unlikely(prev->context.ldt != next->context.ldt))
15059 load_LDT_nolock(&next->context);
15060- }
15061+
15062+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
15063+ if (!(__supported_pte_mask & _PAGE_NX)) {
15064+ smp_mb__before_clear_bit();
15065+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
15066+ smp_mb__after_clear_bit();
15067+ cpu_set(cpu, next->context.cpu_user_cs_mask);
15068+ }
15069+#endif
15070+
15071+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
15072+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
15073+ prev->context.user_cs_limit != next->context.user_cs_limit))
15074+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
15075 #ifdef CONFIG_SMP
15076+ else if (unlikely(tlbstate != TLBSTATE_OK))
15077+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
15078+#endif
15079+#endif
15080+
15081+ }
15082 else {
15083+
15084+#ifdef CONFIG_PAX_PER_CPU_PGD
15085+ pax_open_kernel();
15086+
15087+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15088+ if (static_cpu_has(X86_FEATURE_PCID))
15089+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
15090+ else
15091+#endif
15092+
15093+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
15094+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
15095+ pax_close_kernel();
15096+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
15097+
15098+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15099+ if (static_cpu_has(X86_FEATURE_PCID)) {
15100+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
15101+ unsigned long descriptor[2];
15102+ descriptor[0] = PCID_USER;
15103+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
15104+ } else {
15105+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
15106+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
15107+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
15108+ else
15109+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
15110+ }
15111+ } else
15112+#endif
15113+
15114+ load_cr3(get_cpu_pgd(cpu, kernel));
15115+#endif
15116+
15117+#ifdef CONFIG_SMP
15118 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
15119 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
15120
15121@@ -64,11 +171,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
15122 * tlb flush IPI delivery. We must reload CR3
15123 * to make sure to use no freed page tables.
15124 */
15125+
15126+#ifndef CONFIG_PAX_PER_CPU_PGD
15127 load_cr3(next->pgd);
15128+#endif
15129+
15130 load_LDT_nolock(&next->context);
15131+
15132+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
15133+ if (!(__supported_pte_mask & _PAGE_NX))
15134+ cpu_set(cpu, next->context.cpu_user_cs_mask);
15135+#endif
15136+
15137+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
15138+#ifdef CONFIG_PAX_PAGEEXEC
15139+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
15140+#endif
15141+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
15142+#endif
15143+
15144 }
15145+#endif
15146 }
15147-#endif
15148 }
15149
15150 #define activate_mm(prev, next) \
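The UDEREF/PCID path above keeps two PGDs per CPU and switches between them by loading CR3 values tagged with different PCIDs, setting bit 63 (PCID_NOFLUSH) when returning to the kernel PGD so the kernel PCID's TLB entries survive the reload. A minimal user-space sketch of that CR3 composition, reusing the PCID_* constants this patch adds to processor.h (the PGD addresses are hypothetical):

#include <stdint.h>
#include <stdio.h>

#define PCID_KERNEL   0ULL
#define PCID_USER     1ULL
#define PCID_NOFLUSH  (1ULL << 63)   /* CR3 bit 63: keep this PCID's TLB entries */

static uint64_t make_cr3(uint64_t pgd_pa, uint64_t pcid, int noflush)
{
	/* bits 0-11 of CR3 carry the PCID; bit 63 is the no-flush hint */
	return pgd_pa | pcid | (noflush ? PCID_NOFLUSH : 0);
}

int main(void)
{
	uint64_t kernel_pgd = 0x1234000;   /* hypothetical page-aligned PGDs */
	uint64_t user_pgd   = 0x1235000;

	/* strong UDEREF: load the user PGD, then return to the kernel PGD
	 * without flushing the kernel PCID's TLB entries */
	printf("user   cr3 = %#llx\n", (unsigned long long)make_cr3(user_pgd, PCID_USER, 0));
	printf("kernel cr3 = %#llx\n", (unsigned long long)make_cr3(kernel_pgd, PCID_KERNEL, 1));
	return 0;
}

On CPUs without PCID the same switch_mm() code falls back to the plain load_cr3(get_cpu_pgd(cpu, kernel)) path, which flushes the TLB on every switch.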
15151diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
15152index e3b7819..b257c64 100644
15153--- a/arch/x86/include/asm/module.h
15154+++ b/arch/x86/include/asm/module.h
15155@@ -5,6 +5,7 @@
15156
15157 #ifdef CONFIG_X86_64
15158 /* X86_64 does not define MODULE_PROC_FAMILY */
15159+#define MODULE_PROC_FAMILY ""
15160 #elif defined CONFIG_M486
15161 #define MODULE_PROC_FAMILY "486 "
15162 #elif defined CONFIG_M586
15163@@ -57,8 +58,20 @@
15164 #error unknown processor family
15165 #endif
15166
15167-#ifdef CONFIG_X86_32
15168-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
15169+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
15170+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
15171+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
15172+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
15173+#else
15174+#define MODULE_PAX_KERNEXEC ""
15175 #endif
15176
15177+#ifdef CONFIG_PAX_MEMORY_UDEREF
15178+#define MODULE_PAX_UDEREF "UDEREF "
15179+#else
15180+#define MODULE_PAX_UDEREF ""
15181+#endif
15182+
15183+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
15184+
15185 #endif /* _ASM_X86_MODULE_H */
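MODULE_ARCH_VERMAGIC above is assembled by C string-literal concatenation, so enabling KERNEXEC or UDEREF changes the vermagic string and makes modules built under a different PaX configuration fail to load. A small illustration of the concatenation for one assumed configuration (x86-64, BTS method, UDEREF on):

#include <stdio.h>

/* e.g. on x86-64 with both features enabled, the three fragments
 *   ""  "KERNEXEC_BTS "  "UDEREF "
 * concatenate to the vermagic fragment "KERNEXEC_BTS UDEREF ". */
#define MODULE_PROC_FAMILY   ""
#define MODULE_PAX_KERNEXEC  "KERNEXEC_BTS "
#define MODULE_PAX_UDEREF    "UDEREF "
#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF

int main(void)
{
	puts("vermagic fragment: \"" MODULE_ARCH_VERMAGIC "\"");
	return 0;
}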
15186diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
15187index 86f9301..b365cda 100644
15188--- a/arch/x86/include/asm/nmi.h
15189+++ b/arch/x86/include/asm/nmi.h
15190@@ -40,11 +40,11 @@ struct nmiaction {
15191 nmi_handler_t handler;
15192 unsigned long flags;
15193 const char *name;
15194-};
15195+} __do_const;
15196
15197 #define register_nmi_handler(t, fn, fg, n, init...) \
15198 ({ \
15199- static struct nmiaction init fn##_na = { \
15200+ static const struct nmiaction init fn##_na = { \
15201 .handler = (fn), \
15202 .name = (n), \
15203 .flags = (fg), \
15204@@ -52,7 +52,7 @@ struct nmiaction {
15205 __register_nmi_handler((t), &fn##_na); \
15206 })
15207
15208-int __register_nmi_handler(unsigned int, struct nmiaction *);
15209+int __register_nmi_handler(unsigned int, const struct nmiaction *);
15210
15211 void unregister_nmi_handler(unsigned int, const char *);
15212
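With struct nmiaction constified, the descriptor that register_nmi_handler generates lives in read-only memory. A hedged usage sketch (kernel-module context; the handler name and body are hypothetical, while NMI_UNKNOWN and NMI_DONE are the stock constants of this kernel era):

#include <linux/init.h>
#include <asm/nmi.h>

/* hypothetical handler: claims nothing, defers to the rest of the chain */
static int my_nmi(unsigned int type, struct pt_regs *regs)
{
	return NMI_DONE;
}

static int __init my_nmi_init(void)
{
	/* the macro above now emits a const struct nmiaction for this call */
	return register_nmi_handler(NMI_UNKNOWN, my_nmi, 0, "my_nmi");
}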
15213diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
15214index c878924..21f4889 100644
15215--- a/arch/x86/include/asm/page.h
15216+++ b/arch/x86/include/asm/page.h
15217@@ -52,6 +52,7 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
15218 __phys_addr_symbol(__phys_reloc_hide((unsigned long)(x)))
15219
15220 #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
15221+#define __early_va(x) ((void *)((unsigned long)(x)+__START_KERNEL_map - phys_base))
15222
15223 #define __boot_va(x) __va(x)
15224 #define __boot_pa(x) __pa(x)
15225diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
15226index 0f1ddee..e2fc3d1 100644
15227--- a/arch/x86/include/asm/page_64.h
15228+++ b/arch/x86/include/asm/page_64.h
15229@@ -7,9 +7,9 @@
15230
15231 /* duplicated to the one in bootmem.h */
15232 extern unsigned long max_pfn;
15233-extern unsigned long phys_base;
15234+extern const unsigned long phys_base;
15235
15236-static inline unsigned long __phys_addr_nodebug(unsigned long x)
15237+static inline unsigned long __intentional_overflow(-1) __phys_addr_nodebug(unsigned long x)
15238 {
15239 unsigned long y = x - __START_KERNEL_map;
15240
15241diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
15242index cfdc9ee..3f7b5d6 100644
15243--- a/arch/x86/include/asm/paravirt.h
15244+++ b/arch/x86/include/asm/paravirt.h
15245@@ -560,7 +560,7 @@ static inline pmd_t __pmd(pmdval_t val)
15246 return (pmd_t) { ret };
15247 }
15248
15249-static inline pmdval_t pmd_val(pmd_t pmd)
15250+static inline __intentional_overflow(-1) pmdval_t pmd_val(pmd_t pmd)
15251 {
15252 pmdval_t ret;
15253
15254@@ -626,6 +626,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
15255 val);
15256 }
15257
15258+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
15259+{
15260+ pgdval_t val = native_pgd_val(pgd);
15261+
15262+ if (sizeof(pgdval_t) > sizeof(long))
15263+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
15264+ val, (u64)val >> 32);
15265+ else
15266+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
15267+ val);
15268+}
15269+
15270 static inline void pgd_clear(pgd_t *pgdp)
15271 {
15272 set_pgd(pgdp, __pgd(0));
15273@@ -710,6 +722,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
15274 pv_mmu_ops.set_fixmap(idx, phys, flags);
15275 }
15276
15277+#ifdef CONFIG_PAX_KERNEXEC
15278+static inline unsigned long pax_open_kernel(void)
15279+{
15280+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
15281+}
15282+
15283+static inline unsigned long pax_close_kernel(void)
15284+{
15285+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
15286+}
15287+#else
15288+static inline unsigned long pax_open_kernel(void) { return 0; }
15289+static inline unsigned long pax_close_kernel(void) { return 0; }
15290+#endif
15291+
15292 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
15293
15294 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
15295@@ -926,7 +953,7 @@ extern void default_banner(void);
15296
15297 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
15298 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
15299-#define PARA_INDIRECT(addr) *%cs:addr
15300+#define PARA_INDIRECT(addr) *%ss:addr
15301 #endif
15302
15303 #define INTERRUPT_RETURN \
15304@@ -1001,6 +1028,21 @@ extern void default_banner(void);
15305 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
15306 CLBR_NONE, \
15307 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
15308+
15309+#define GET_CR0_INTO_RDI \
15310+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
15311+ mov %rax,%rdi
15312+
15313+#define SET_RDI_INTO_CR0 \
15314+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
15315+
15316+#define GET_CR3_INTO_RDI \
15317+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
15318+ mov %rax,%rdi
15319+
15320+#define SET_RDI_INTO_CR3 \
15321+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
15322+
15323 #endif /* CONFIG_X86_32 */
15324
15325 #endif /* __ASSEMBLY__ */
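Under CONFIG_PARAVIRT the pax_open_kernel()/pax_close_kernel() wrappers above reduce to indirect calls through pv_mmu_ops, which native boot code points at the CR0.WP togglers added to pgtable.h below. A user-space model of that dispatch, with the native backends stubbed out (all names here are illustrative):

#include <stdio.h>

struct pv_mmu_ops_model {
	unsigned long (*pax_open_kernel)(void);
	unsigned long (*pax_close_kernel)(void);
};

static unsigned long native_open_stub(void)  { puts("CR0.WP cleared"); return 0; }
static unsigned long native_close_stub(void) { puts("CR0.WP set");     return 0; }

static struct pv_mmu_ops_model pv_mmu_ops = {
	.pax_open_kernel  = native_open_stub,
	.pax_close_kernel = native_close_stub,
};

int main(void)
{
	pv_mmu_ops.pax_open_kernel();   /* write otherwise read-only kernel data here */
	pv_mmu_ops.pax_close_kernel();
	return 0;
}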
15326diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
15327index 0db1fca..52310cc 100644
15328--- a/arch/x86/include/asm/paravirt_types.h
15329+++ b/arch/x86/include/asm/paravirt_types.h
15330@@ -84,7 +84,7 @@ struct pv_init_ops {
15331 */
15332 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
15333 unsigned long addr, unsigned len);
15334-};
15335+} __no_const;
15336
15337
15338 struct pv_lazy_ops {
15339@@ -98,7 +98,7 @@ struct pv_time_ops {
15340 unsigned long long (*sched_clock)(void);
15341 unsigned long long (*steal_clock)(int cpu);
15342 unsigned long (*get_tsc_khz)(void);
15343-};
15344+} __no_const;
15345
15346 struct pv_cpu_ops {
15347 /* hooks for various privileged instructions */
15348@@ -192,7 +192,7 @@ struct pv_cpu_ops {
15349
15350 void (*start_context_switch)(struct task_struct *prev);
15351 void (*end_context_switch)(struct task_struct *next);
15352-};
15353+} __no_const;
15354
15355 struct pv_irq_ops {
15356 /*
15357@@ -223,7 +223,7 @@ struct pv_apic_ops {
15358 unsigned long start_eip,
15359 unsigned long start_esp);
15360 #endif
15361-};
15362+} __no_const;
15363
15364 struct pv_mmu_ops {
15365 unsigned long (*read_cr2)(void);
15366@@ -313,6 +313,7 @@ struct pv_mmu_ops {
15367 struct paravirt_callee_save make_pud;
15368
15369 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
15370+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
15371 #endif /* PAGETABLE_LEVELS == 4 */
15372 #endif /* PAGETABLE_LEVELS >= 3 */
15373
15374@@ -324,6 +325,12 @@ struct pv_mmu_ops {
15375 an mfn. We can tell which is which from the index. */
15376 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
15377 phys_addr_t phys, pgprot_t flags);
15378+
15379+#ifdef CONFIG_PAX_KERNEXEC
15380+ unsigned long (*pax_open_kernel)(void);
15381+ unsigned long (*pax_close_kernel)(void);
15382+#endif
15383+
15384 };
15385
15386 struct arch_spinlock;
15387@@ -334,7 +341,7 @@ struct pv_lock_ops {
15388 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
15389 int (*spin_trylock)(struct arch_spinlock *lock);
15390 void (*spin_unlock)(struct arch_spinlock *lock);
15391-};
15392+} __no_const;
15393
15394 /* This contains all the paravirt structures: we get a convenient
15395 * number for each function using the offset which we use to indicate
15396diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
15397index b4389a4..7024269 100644
15398--- a/arch/x86/include/asm/pgalloc.h
15399+++ b/arch/x86/include/asm/pgalloc.h
15400@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
15401 pmd_t *pmd, pte_t *pte)
15402 {
15403 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
15404+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
15405+}
15406+
15407+static inline void pmd_populate_user(struct mm_struct *mm,
15408+ pmd_t *pmd, pte_t *pte)
15409+{
15410+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
15411 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
15412 }
15413
15414@@ -99,12 +106,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
15415
15416 #ifdef CONFIG_X86_PAE
15417 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
15418+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
15419+{
15420+ pud_populate(mm, pudp, pmd);
15421+}
15422 #else /* !CONFIG_X86_PAE */
15423 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
15424 {
15425 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
15426 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
15427 }
15428+
15429+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
15430+{
15431+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
15432+ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
15433+}
15434 #endif /* CONFIG_X86_PAE */
15435
15436 #if PAGETABLE_LEVELS > 3
15437@@ -114,6 +131,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
15438 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
15439 }
15440
15441+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
15442+{
15443+ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
15444+ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
15445+}
15446+
15447 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
15448 {
15449 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
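The new *_populate_kernel() variants above differ from the stock helpers in exactly one bit: _KERNPG_TABLE omits _PAGE_USER, so upper-level entries created for kernel-only mappings are never walkable from user mode. The flag arithmetic, with values mirroring pgtable_types.h:

#include <stdio.h>

#define _PAGE_PRESENT   0x001
#define _PAGE_RW        0x002
#define _PAGE_USER      0x004
#define _PAGE_ACCESSED  0x020
#define _PAGE_DIRTY     0x040

#define _KERNPG_TABLE   (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_TABLE     (_KERNPG_TABLE | _PAGE_USER)

int main(void)
{
	printf("_KERNPG_TABLE = %#x\n", _KERNPG_TABLE);  /* 0x63: no USER bit */
	printf("_PAGE_TABLE   = %#x\n", _PAGE_TABLE);    /* 0x67: user-walkable */
	return 0;
}

This is the same 0x063-versus-0x067 distinction the PTE_IDENT_ATTR/PDE_IDENT_ATTR hunk below applies to the early identity mappings.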
15450diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
15451index f2b489c..4f7e2e5 100644
15452--- a/arch/x86/include/asm/pgtable-2level.h
15453+++ b/arch/x86/include/asm/pgtable-2level.h
15454@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
15455
15456 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
15457 {
15458+ pax_open_kernel();
15459 *pmdp = pmd;
15460+ pax_close_kernel();
15461 }
15462
15463 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
15464diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
15465index 4cc9f2b..5fd9226 100644
15466--- a/arch/x86/include/asm/pgtable-3level.h
15467+++ b/arch/x86/include/asm/pgtable-3level.h
15468@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
15469
15470 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
15471 {
15472+ pax_open_kernel();
15473 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
15474+ pax_close_kernel();
15475 }
15476
15477 static inline void native_set_pud(pud_t *pudp, pud_t pud)
15478 {
15479+ pax_open_kernel();
15480 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
15481+ pax_close_kernel();
15482 }
15483
15484 /*
15485diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
15486index 1e67223..92a9585 100644
15487--- a/arch/x86/include/asm/pgtable.h
15488+++ b/arch/x86/include/asm/pgtable.h
15489@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
15490
15491 #ifndef __PAGETABLE_PUD_FOLDED
15492 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
15493+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
15494 #define pgd_clear(pgd) native_pgd_clear(pgd)
15495 #endif
15496
15497@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
15498
15499 #define arch_end_context_switch(prev) do {} while(0)
15500
15501+#define pax_open_kernel() native_pax_open_kernel()
15502+#define pax_close_kernel() native_pax_close_kernel()
15503 #endif /* CONFIG_PARAVIRT */
15504
15505+#define __HAVE_ARCH_PAX_OPEN_KERNEL
15506+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
15507+
15508+#ifdef CONFIG_PAX_KERNEXEC
15509+static inline unsigned long native_pax_open_kernel(void)
15510+{
15511+ unsigned long cr0;
15512+
15513+ preempt_disable();
15514+ barrier();
15515+ cr0 = read_cr0() ^ X86_CR0_WP;
15516+ BUG_ON(cr0 & X86_CR0_WP);
15517+ write_cr0(cr0);
15518+ return cr0 ^ X86_CR0_WP;
15519+}
15520+
15521+static inline unsigned long native_pax_close_kernel(void)
15522+{
15523+ unsigned long cr0;
15524+
15525+ cr0 = read_cr0() ^ X86_CR0_WP;
15526+ BUG_ON(!(cr0 & X86_CR0_WP));
15527+ write_cr0(cr0);
15528+ barrier();
15529+ preempt_enable_no_resched();
15530+ return cr0 ^ X86_CR0_WP;
15531+}
15532+#else
15533+static inline unsigned long native_pax_open_kernel(void) { return 0; }
15534+static inline unsigned long native_pax_close_kernel(void) { return 0; }
15535+#endif
15536+
15537 /*
15538 * The following only work if pte_present() is true.
15539 * Undefined behaviour if not..
15540 */
15541+static inline int pte_user(pte_t pte)
15542+{
15543+ return pte_val(pte) & _PAGE_USER;
15544+}
15545+
15546 static inline int pte_dirty(pte_t pte)
15547 {
15548 return pte_flags(pte) & _PAGE_DIRTY;
15549@@ -147,6 +187,11 @@ static inline unsigned long pud_pfn(pud_t pud)
15550 return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
15551 }
15552
15553+static inline unsigned long pgd_pfn(pgd_t pgd)
15554+{
15555+ return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
15556+}
15557+
15558 #define pte_page(pte) pfn_to_page(pte_pfn(pte))
15559
15560 static inline int pmd_large(pmd_t pte)
15561@@ -200,9 +245,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
15562 return pte_clear_flags(pte, _PAGE_RW);
15563 }
15564
15565+static inline pte_t pte_mkread(pte_t pte)
15566+{
15567+ return __pte(pte_val(pte) | _PAGE_USER);
15568+}
15569+
15570 static inline pte_t pte_mkexec(pte_t pte)
15571 {
15572- return pte_clear_flags(pte, _PAGE_NX);
15573+#ifdef CONFIG_X86_PAE
15574+ if (__supported_pte_mask & _PAGE_NX)
15575+ return pte_clear_flags(pte, _PAGE_NX);
15576+ else
15577+#endif
15578+ return pte_set_flags(pte, _PAGE_USER);
15579+}
15580+
15581+static inline pte_t pte_exprotect(pte_t pte)
15582+{
15583+#ifdef CONFIG_X86_PAE
15584+ if (__supported_pte_mask & _PAGE_NX)
15585+ return pte_set_flags(pte, _PAGE_NX);
15586+ else
15587+#endif
15588+ return pte_clear_flags(pte, _PAGE_USER);
15589 }
15590
15591 static inline pte_t pte_mkdirty(pte_t pte)
15592@@ -394,6 +459,16 @@ pte_t *populate_extra_pte(unsigned long vaddr);
15593 #endif
15594
15595 #ifndef __ASSEMBLY__
15596+
15597+#ifdef CONFIG_PAX_PER_CPU_PGD
15598+extern pgd_t cpu_pgd[NR_CPUS][2][PTRS_PER_PGD];
15599+enum cpu_pgd_type {kernel = 0, user = 1};
15600+static inline pgd_t *get_cpu_pgd(unsigned int cpu, enum cpu_pgd_type type)
15601+{
15602+ return cpu_pgd[cpu][type];
15603+}
15604+#endif
15605+
15606 #include <linux/mm_types.h>
15607 #include <linux/log2.h>
15608
15609@@ -529,7 +604,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
15610 * Currently stuck as a macro due to indirect forward reference to
15611 * linux/mmzone.h's __section_mem_map_addr() definition:
15612 */
15613-#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
15614+#define pud_page(pud) pfn_to_page((pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT)
15615
15616 /* Find an entry in the second-level page table.. */
15617 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
15618@@ -569,7 +644,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
15619 * Currently stuck as a macro due to indirect forward reference to
15620 * linux/mmzone.h's __section_mem_map_addr() definition:
15621 */
15622-#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
15623+#define pgd_page(pgd) pfn_to_page((pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT)
15624
15625 /* to find an entry in a page-table-directory. */
15626 static inline unsigned long pud_index(unsigned long address)
15627@@ -584,7 +659,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
15628
15629 static inline int pgd_bad(pgd_t pgd)
15630 {
15631- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
15632+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
15633 }
15634
15635 static inline int pgd_none(pgd_t pgd)
15636@@ -607,7 +682,12 @@ static inline int pgd_none(pgd_t pgd)
15637 * pgd_offset() returns a (pgd_t *)
15638 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
15639 */
15640-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
15641+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
15642+
15643+#ifdef CONFIG_PAX_PER_CPU_PGD
15644+#define pgd_offset_cpu(cpu, type, address) (get_cpu_pgd(cpu, type) + pgd_index(address))
15645+#endif
15646+
15647 /*
15648 * a shortcut which implies the use of the kernel's pgd, instead
15649 * of a process's
15650@@ -618,6 +698,23 @@ static inline int pgd_none(pgd_t pgd)
15651 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
15652 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
15653
15654+#ifdef CONFIG_X86_32
15655+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
15656+#else
15657+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
15658+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
15659+
15660+#ifdef CONFIG_PAX_MEMORY_UDEREF
15661+#ifdef __ASSEMBLY__
15662+#define pax_user_shadow_base pax_user_shadow_base(%rip)
15663+#else
15664+extern unsigned long pax_user_shadow_base;
15665+extern pgdval_t clone_pgd_mask;
15666+#endif
15667+#endif
15668+
15669+#endif
15670+
15671 #ifndef __ASSEMBLY__
15672
15673 extern int direct_gbpages;
15674@@ -784,11 +881,24 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
15675 * dst and src can be on the same page, but the range must not overlap,
15676 * and must not cross a page boundary.
15677 */
15678-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
15679+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
15680 {
15681- memcpy(dst, src, count * sizeof(pgd_t));
15682+ pax_open_kernel();
15683+ while (count--)
15684+ *dst++ = *src++;
15685+ pax_close_kernel();
15686 }
15687
15688+#ifdef CONFIG_PAX_PER_CPU_PGD
15689+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
15690+#endif
15691+
15692+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15693+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
15694+#else
15695+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
15696+#endif
15697+
15698 #define PTE_SHIFT ilog2(PTRS_PER_PTE)
15699 static inline int page_level_shift(enum pg_level level)
15700 {
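native_pax_open_kernel()/native_pax_close_kernel() above gate writes to read-only kernel memory by toggling CR0.WP with preemption disabled, and BUG() if the bit is not in the expected state on entry. A user-space model of that discipline (assert stands in for BUG_ON; nothing here touches real control registers):

#include <assert.h>
#include <stdio.h>

#define X86_CR0_WP (1UL << 16)

static unsigned long cr0 = X86_CR0_WP;          /* WP starts set, as at runtime */

static unsigned long pax_open_kernel_model(void)
{
	cr0 ^= X86_CR0_WP;
	assert(!(cr0 & X86_CR0_WP));            /* mirrors BUG_ON(cr0 & X86_CR0_WP) */
	return cr0 ^ X86_CR0_WP;
}

static unsigned long pax_close_kernel_model(void)
{
	cr0 ^= X86_CR0_WP;
	assert(cr0 & X86_CR0_WP);               /* mirrors BUG_ON(!(cr0 & X86_CR0_WP)) */
	return cr0 ^ X86_CR0_WP;
}

int main(void)
{
	pax_open_kernel_model();
	puts("CR0.WP clear: writes to read-only pages allowed (e.g. *pmdp = pmd)");
	pax_close_kernel_model();
	return 0;
}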
15701diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
15702index 9ee3221..b979c6b 100644
15703--- a/arch/x86/include/asm/pgtable_32.h
15704+++ b/arch/x86/include/asm/pgtable_32.h
15705@@ -25,9 +25,6 @@
15706 struct mm_struct;
15707 struct vm_area_struct;
15708
15709-extern pgd_t swapper_pg_dir[1024];
15710-extern pgd_t initial_page_table[1024];
15711-
15712 static inline void pgtable_cache_init(void) { }
15713 static inline void check_pgt_cache(void) { }
15714 void paging_init(void);
15715@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
15716 # include <asm/pgtable-2level.h>
15717 #endif
15718
15719+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
15720+extern pgd_t initial_page_table[PTRS_PER_PGD];
15721+#ifdef CONFIG_X86_PAE
15722+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
15723+#endif
15724+
15725 #if defined(CONFIG_HIGHPTE)
15726 #define pte_offset_map(dir, address) \
15727 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
15728@@ -62,12 +65,17 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
15729 /* Clear a kernel PTE and flush it from the TLB */
15730 #define kpte_clear_flush(ptep, vaddr) \
15731 do { \
15732+ pax_open_kernel(); \
15733 pte_clear(&init_mm, (vaddr), (ptep)); \
15734+ pax_close_kernel(); \
15735 __flush_tlb_one((vaddr)); \
15736 } while (0)
15737
15738 #endif /* !__ASSEMBLY__ */
15739
15740+#define HAVE_ARCH_UNMAPPED_AREA
15741+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
15742+
15743 /*
15744 * kern_addr_valid() is (1) for FLATMEM and (0) for
15745 * SPARSEMEM and DISCONTIGMEM
15746diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
15747index ed5903b..c7fe163 100644
15748--- a/arch/x86/include/asm/pgtable_32_types.h
15749+++ b/arch/x86/include/asm/pgtable_32_types.h
15750@@ -8,7 +8,7 @@
15751 */
15752 #ifdef CONFIG_X86_PAE
15753 # include <asm/pgtable-3level_types.h>
15754-# define PMD_SIZE (1UL << PMD_SHIFT)
15755+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
15756 # define PMD_MASK (~(PMD_SIZE - 1))
15757 #else
15758 # include <asm/pgtable-2level_types.h>
15759@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
15760 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
15761 #endif
15762
15763+#ifdef CONFIG_PAX_KERNEXEC
15764+#ifndef __ASSEMBLY__
15765+extern unsigned char MODULES_EXEC_VADDR[];
15766+extern unsigned char MODULES_EXEC_END[];
15767+#endif
15768+#include <asm/boot.h>
15769+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
15770+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
15771+#else
15772+#define ktla_ktva(addr) (addr)
15773+#define ktva_ktla(addr) (addr)
15774+#endif
15775+
15776 #define MODULES_VADDR VMALLOC_START
15777 #define MODULES_END VMALLOC_END
15778 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
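ktla_ktva()/ktva_ktla() above translate between a kernel-text linear address and its KERNEXEC alias by adding or subtracting LOAD_PHYSICAL_ADDR + PAGE_OFFSET, relying on 32-bit wraparound. A worked example assuming the common defaults (PAGE_OFFSET 0xc0000000, LOAD_PHYSICAL_ADDR 0x1000000; both are Kconfig-dependent), with uint32_t modelling i386 address arithmetic:

#include <inttypes.h>
#include <stdio.h>

#define PAGE_OFFSET         UINT32_C(0xc0000000)
#define LOAD_PHYSICAL_ADDR  UINT32_C(0x01000000)

static uint32_t ktla_ktva(uint32_t addr) { return addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET; }
static uint32_t ktva_ktla(uint32_t addr) { return addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET; }

int main(void)
{
	uint32_t text  = UINT32_C(0xc1000000);  /* linear address inside kernel text */
	uint32_t alias = ktla_ktva(text);       /* 0x82000000 after 32-bit wraparound */
	printf("ktla %#" PRIx32 " -> ktva %#" PRIx32 " -> back %#" PRIx32 "\n",
	       text, alias, ktva_ktla(alias));
	return 0;
}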
15779diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
15780index e22c1db..23a625a 100644
15781--- a/arch/x86/include/asm/pgtable_64.h
15782+++ b/arch/x86/include/asm/pgtable_64.h
15783@@ -16,10 +16,14 @@
15784
15785 extern pud_t level3_kernel_pgt[512];
15786 extern pud_t level3_ident_pgt[512];
15787+extern pud_t level3_vmalloc_start_pgt[512];
15788+extern pud_t level3_vmalloc_end_pgt[512];
15789+extern pud_t level3_vmemmap_pgt[512];
15790+extern pud_t level2_vmemmap_pgt[512];
15791 extern pmd_t level2_kernel_pgt[512];
15792 extern pmd_t level2_fixmap_pgt[512];
15793-extern pmd_t level2_ident_pgt[512];
15794-extern pgd_t init_level4_pgt[];
15795+extern pmd_t level2_ident_pgt[512*2];
15796+extern pgd_t init_level4_pgt[512];
15797
15798 #define swapper_pg_dir init_level4_pgt
15799
15800@@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
15801
15802 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
15803 {
15804+ pax_open_kernel();
15805 *pmdp = pmd;
15806+ pax_close_kernel();
15807 }
15808
15809 static inline void native_pmd_clear(pmd_t *pmd)
15810@@ -97,7 +103,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
15811
15812 static inline void native_set_pud(pud_t *pudp, pud_t pud)
15813 {
15814+ pax_open_kernel();
15815 *pudp = pud;
15816+ pax_close_kernel();
15817 }
15818
15819 static inline void native_pud_clear(pud_t *pud)
15820@@ -107,6 +115,13 @@ static inline void native_pud_clear(pud_t *pud)
15821
15822 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
15823 {
15824+ pax_open_kernel();
15825+ *pgdp = pgd;
15826+ pax_close_kernel();
15827+}
15828+
15829+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
15830+{
15831 *pgdp = pgd;
15832 }
15833
15834diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
15835index 2d88344..4679fc3 100644
15836--- a/arch/x86/include/asm/pgtable_64_types.h
15837+++ b/arch/x86/include/asm/pgtable_64_types.h
15838@@ -61,6 +61,11 @@ typedef struct { pteval_t pte; } pte_t;
15839 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
15840 #define MODULES_END _AC(0xffffffffff000000, UL)
15841 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
15842+#define MODULES_EXEC_VADDR MODULES_VADDR
15843+#define MODULES_EXEC_END MODULES_END
15844+
15845+#define ktla_ktva(addr) (addr)
15846+#define ktva_ktla(addr) (addr)
15847
15848 #define EARLY_DYNAMIC_PAGE_TABLES 64
15849
15850diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
15851index e642300..0ef8f31 100644
15852--- a/arch/x86/include/asm/pgtable_types.h
15853+++ b/arch/x86/include/asm/pgtable_types.h
15854@@ -16,13 +16,12 @@
15855 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
15856 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
15857 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
15858-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
15859+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
15860 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
15861 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
15862 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
15863-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
15864-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
15865-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
15866+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
15867+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
15868 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
15869
15870 /* If _PAGE_BIT_PRESENT is clear, we use these: */
15871@@ -40,7 +39,6 @@
15872 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
15873 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
15874 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
15875-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
15876 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
15877 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
15878 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
15879@@ -57,8 +55,10 @@
15880
15881 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
15882 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
15883-#else
15884+#elif defined(CONFIG_KMEMCHECK)
15885 #define _PAGE_NX (_AT(pteval_t, 0))
15886+#else
15887+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
15888 #endif
15889
15890 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
15891@@ -116,6 +116,9 @@
15892 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
15893 _PAGE_ACCESSED)
15894
15895+#define PAGE_READONLY_NOEXEC PAGE_READONLY
15896+#define PAGE_SHARED_NOEXEC PAGE_SHARED
15897+
15898 #define __PAGE_KERNEL_EXEC \
15899 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
15900 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
15901@@ -126,7 +129,7 @@
15902 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
15903 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
15904 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
15905-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
15906+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
15907 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
15908 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
15909 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
15910@@ -188,8 +191,8 @@
15911 * bits are combined, this will alow user to access the high address mapped
15912 * VDSO in the presence of CONFIG_COMPAT_VDSO
15913 */
15914-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
15915-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
15916+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
15917+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
15918 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
15919 #endif
15920
15921@@ -227,7 +230,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
15922 {
15923 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
15924 }
15925+#endif
15926
15927+#if PAGETABLE_LEVELS == 3
15928+#include <asm-generic/pgtable-nopud.h>
15929+#endif
15930+
15931+#if PAGETABLE_LEVELS == 2
15932+#include <asm-generic/pgtable-nopmd.h>
15933+#endif
15934+
15935+#ifndef __ASSEMBLY__
15936 #if PAGETABLE_LEVELS > 3
15937 typedef struct { pudval_t pud; } pud_t;
15938
15939@@ -241,8 +254,6 @@ static inline pudval_t native_pud_val(pud_t pud)
15940 return pud.pud;
15941 }
15942 #else
15943-#include <asm-generic/pgtable-nopud.h>
15944-
15945 static inline pudval_t native_pud_val(pud_t pud)
15946 {
15947 return native_pgd_val(pud.pgd);
15948@@ -262,8 +273,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
15949 return pmd.pmd;
15950 }
15951 #else
15952-#include <asm-generic/pgtable-nopmd.h>
15953-
15954 static inline pmdval_t native_pmd_val(pmd_t pmd)
15955 {
15956 return native_pgd_val(pmd.pud.pgd);
15957@@ -303,7 +312,6 @@ typedef struct page *pgtable_t;
15958
15959 extern pteval_t __supported_pte_mask;
15960 extern void set_nx(void);
15961-extern int nx_enabled;
15962
15963 #define pgprot_writecombine pgprot_writecombine
15964 extern pgprot_t pgprot_writecombine(pgprot_t prot);
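The three-way _PAGE_NX definition above gives even 2-level (non-PAE) x86 a usable NX marker: with no hardware NX bit available, the patch repurposes software bit 11 (_PAGE_BIT_HIDDEN) instead of defining _PAGE_NX to 0, except under kmemcheck, which already owns that bit. The three cases, modelled as plain values:

#include <stdio.h>

#define _PAGE_BIT_HIDDEN 11
#define _PAGE_BIT_NX     63

static unsigned long long page_nx(int pae_or_64, int kmemcheck)
{
	if (pae_or_64)
		return 1ULL << _PAGE_BIT_NX;    /* real hardware NX */
	if (kmemcheck)
		return 0;                       /* bit 11 taken by kmemcheck */
	return 1ULL << _PAGE_BIT_HIDDEN;        /* software NX for 2-level paging */
}

int main(void)
{
	printf("PAE/64-bit : %#llx\n", page_nx(1, 0));
	printf("kmemcheck  : %#llx\n", page_nx(0, 1));
	printf("soft NX    : %#llx\n", page_nx(0, 0));
	return 0;
}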
15965diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
15966index 22224b3..c5d8d7d 100644
15967--- a/arch/x86/include/asm/processor.h
15968+++ b/arch/x86/include/asm/processor.h
15969@@ -198,9 +198,21 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
15970 : "memory");
15971 }
15972
15973+/* invpcid (%rdx),%rax */
15974+#define __ASM_INVPCID ".byte 0x66,0x0f,0x38,0x82,0x02"
15975+
15976+#define INVPCID_SINGLE_ADDRESS 0UL
15977+#define INVPCID_SINGLE_CONTEXT 1UL
15978+#define INVPCID_ALL_GLOBAL 2UL
15979+#define INVPCID_ALL_MONGLOBAL 3UL
15980+
15981+#define PCID_KERNEL 0UL
15982+#define PCID_USER 1UL
15983+#define PCID_NOFLUSH (1UL << 63)
15984+
15985 static inline void load_cr3(pgd_t *pgdir)
15986 {
15987- write_cr3(__pa(pgdir));
15988+ write_cr3(__pa(pgdir) | PCID_KERNEL);
15989 }
15990
15991 #ifdef CONFIG_X86_32
15992@@ -282,7 +294,7 @@ struct tss_struct {
15993
15994 } ____cacheline_aligned;
15995
15996-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
15997+extern struct tss_struct init_tss[NR_CPUS];
15998
15999 /*
16000 * Save the original ist values for checking stack pointers during debugging
16001@@ -452,6 +464,7 @@ struct thread_struct {
16002 unsigned short ds;
16003 unsigned short fsindex;
16004 unsigned short gsindex;
16005+ unsigned short ss;
16006 #endif
16007 #ifdef CONFIG_X86_32
16008 unsigned long ip;
16009@@ -823,11 +836,18 @@ static inline void spin_lock_prefetch(const void *x)
16010 */
16011 #define TASK_SIZE PAGE_OFFSET
16012 #define TASK_SIZE_MAX TASK_SIZE
16013+
16014+#ifdef CONFIG_PAX_SEGMEXEC
16015+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
16016+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
16017+#else
16018 #define STACK_TOP TASK_SIZE
16019-#define STACK_TOP_MAX STACK_TOP
16020+#endif
16021+
16022+#define STACK_TOP_MAX TASK_SIZE
16023
16024 #define INIT_THREAD { \
16025- .sp0 = sizeof(init_stack) + (long)&init_stack, \
16026+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
16027 .vm86_info = NULL, \
16028 .sysenter_cs = __KERNEL_CS, \
16029 .io_bitmap_ptr = NULL, \
16030@@ -841,7 +861,7 @@ static inline void spin_lock_prefetch(const void *x)
16031 */
16032 #define INIT_TSS { \
16033 .x86_tss = { \
16034- .sp0 = sizeof(init_stack) + (long)&init_stack, \
16035+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
16036 .ss0 = __KERNEL_DS, \
16037 .ss1 = __KERNEL_CS, \
16038 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
16039@@ -852,11 +872,7 @@ static inline void spin_lock_prefetch(const void *x)
16040 extern unsigned long thread_saved_pc(struct task_struct *tsk);
16041
16042 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
16043-#define KSTK_TOP(info) \
16044-({ \
16045- unsigned long *__ptr = (unsigned long *)(info); \
16046- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
16047-})
16048+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
16049
16050 /*
16051 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
16052@@ -871,7 +887,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
16053 #define task_pt_regs(task) \
16054 ({ \
16055 struct pt_regs *__regs__; \
16056- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
16057+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
16058 __regs__ - 1; \
16059 })
16060
16061@@ -881,13 +897,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
16062 /*
16063 * User space process size. 47bits minus one guard page.
16064 */
16065-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
16066+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
16067
16068 /* This decides where the kernel will search for a free chunk of vm
16069 * space during mmap's.
16070 */
16071 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
16072- 0xc0000000 : 0xFFFFe000)
16073+ 0xc0000000 : 0xFFFFf000)
16074
16075 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
16076 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
16077@@ -898,11 +914,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
16078 #define STACK_TOP_MAX TASK_SIZE_MAX
16079
16080 #define INIT_THREAD { \
16081- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
16082+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
16083 }
16084
16085 #define INIT_TSS { \
16086- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
16087+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
16088 }
16089
16090 /*
16091@@ -930,6 +946,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
16092 */
16093 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
16094
16095+#ifdef CONFIG_PAX_SEGMEXEC
16096+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
16097+#endif
16098+
16099 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
16100
16101 /* Get/set a process' ability to use the timestamp counter instruction */
16102@@ -942,7 +962,8 @@ extern int set_tsc_mode(unsigned int val);
16103 extern u16 amd_get_nb_id(int cpu);
16104
16105 struct aperfmperf {
16106- u64 aperf, mperf;
16107+ u64 aperf __intentional_overflow(0);
16108+ u64 mperf __intentional_overflow(0);
16109 };
16110
16111 static inline void get_aperfmperf(struct aperfmperf *am)
16112@@ -970,7 +991,7 @@ unsigned long calc_aperfmperf_ratio(struct aperfmperf *old,
16113 return ratio;
16114 }
16115
16116-extern unsigned long arch_align_stack(unsigned long sp);
16117+#define arch_align_stack(x) ((x) & ~0xfUL)
16118 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
16119
16120 void default_idle(void);
16121@@ -980,6 +1001,6 @@ bool xen_set_default_idle(void);
16122 #define xen_set_default_idle 0
16123 #endif
16124
16125-void stop_this_cpu(void *dummy);
16126+void stop_this_cpu(void *dummy) __noreturn;
16127
16128 #endif /* _ASM_X86_PROCESSOR_H */
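__ASM_INVPCID above hard-codes the opcode bytes for invpcid (%rdx),%rax, presumably because assemblers of this era may not know the mnemonic. The memory operand is a 16-byte descriptor whose first quadword carries the PCID; the register operand selects the invalidation type. A sketch of the call pattern used by the switch_mm() hunk earlier in this patch (x86-64 kernel context only; requires X86_FEATURE_INVPCID):

#define __ASM_INVPCID ".byte 0x66,0x0f,0x38,0x82,0x02"  /* invpcid (%rdx),%rax */
#define INVPCID_SINGLE_CONTEXT 1UL

static inline void invpcid_flush_single_context(unsigned long pcid)
{
	unsigned long descriptor[2];

	descriptor[0] = pcid;   /* bits 0-11: PCID whose entries to drop */
	descriptor[1] = 0;      /* linear address, unused for this type */
	asm volatile(__ASM_INVPCID
		     : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT)
		     : "memory");
}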
16129diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
16130index 942a086..6c26446 100644
16131--- a/arch/x86/include/asm/ptrace.h
16132+++ b/arch/x86/include/asm/ptrace.h
16133@@ -85,28 +85,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
16134 }
16135
16136 /*
16137- * user_mode_vm(regs) determines whether a register set came from user mode.
16138+ * user_mode(regs) determines whether a register set came from user mode.
16139 * This is true if V8086 mode was enabled OR if the register set was from
16140 * protected mode with RPL-3 CS value. This tricky test checks that with
16141 * one comparison. Many places in the kernel can bypass this full check
16142- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
16143+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
16144+ * be used.
16145 */
16146-static inline int user_mode(struct pt_regs *regs)
16147+static inline int user_mode_novm(struct pt_regs *regs)
16148 {
16149 #ifdef CONFIG_X86_32
16150 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
16151 #else
16152- return !!(regs->cs & 3);
16153+ return !!(regs->cs & SEGMENT_RPL_MASK);
16154 #endif
16155 }
16156
16157-static inline int user_mode_vm(struct pt_regs *regs)
16158+static inline int user_mode(struct pt_regs *regs)
16159 {
16160 #ifdef CONFIG_X86_32
16161 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
16162 USER_RPL;
16163 #else
16164- return user_mode(regs);
16165+ return user_mode_novm(regs);
16166 #endif
16167 }
16168
16169@@ -122,15 +123,16 @@ static inline int v8086_mode(struct pt_regs *regs)
16170 #ifdef CONFIG_X86_64
16171 static inline bool user_64bit_mode(struct pt_regs *regs)
16172 {
16173+ unsigned long cs = regs->cs & 0xffff;
16174 #ifndef CONFIG_PARAVIRT
16175 /*
16176 * On non-paravirt systems, this is the only long mode CPL 3
16177 * selector. We do not allow long mode selectors in the LDT.
16178 */
16179- return regs->cs == __USER_CS;
16180+ return cs == __USER_CS;
16181 #else
16182 /* Headers are too twisted for this to go in paravirt.h. */
16183- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
16184+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
16185 #endif
16186 }
16187
16188@@ -181,9 +183,11 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
16189 * Traps from the kernel do not save sp and ss.
16190 * Use the helper function to retrieve sp.
16191 */
16192- if (offset == offsetof(struct pt_regs, sp) &&
16193- regs->cs == __KERNEL_CS)
16194- return kernel_stack_pointer(regs);
16195+ if (offset == offsetof(struct pt_regs, sp)) {
16196+ unsigned long cs = regs->cs & 0xffff;
16197+ if (cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS)
16198+ return kernel_stack_pointer(regs);
16199+ }
16200 #endif
16201 return *(unsigned long *)((unsigned long)regs + offset);
16202 }
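The renamed user_mode() above folds the VM86 check and the CS RPL check into a single unsigned comparison: OR-ing EFLAGS.VM into the RPL bits makes any VM86 frame compare >= USER_RPL regardless of its CS. A self-contained illustration of that trick:

#include <stdio.h>

#define SEGMENT_RPL_MASK 0x3
#define USER_RPL         0x3
#define X86_VM_MASK      (1UL << 17)   /* EFLAGS.VM */

/* the one-comparison test from the 32-bit user_mode() above */
static int user_mode32(unsigned long cs, unsigned long flags)
{
	return ((cs & SEGMENT_RPL_MASK) | (flags & X86_VM_MASK)) >= USER_RPL;
}

int main(void)
{
	printf("kernel, no VM86: %d\n", user_mode32(0x10, 0));            /* 0 */
	printf("user,   no VM86: %d\n", user_mode32(0x73, 0));            /* 1 */
	printf("vm86 (RPL-0 CS): %d\n", user_mode32(0x00, X86_VM_MASK));  /* 1 */
	return 0;
}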
16203diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
16204index 9c6b890..5305f53 100644
16205--- a/arch/x86/include/asm/realmode.h
16206+++ b/arch/x86/include/asm/realmode.h
16207@@ -22,16 +22,14 @@ struct real_mode_header {
16208 #endif
16209 /* APM/BIOS reboot */
16210 u32 machine_real_restart_asm;
16211-#ifdef CONFIG_X86_64
16212 u32 machine_real_restart_seg;
16213-#endif
16214 };
16215
16216 /* This must match data at trampoline_32/64.S */
16217 struct trampoline_header {
16218 #ifdef CONFIG_X86_32
16219 u32 start;
16220- u16 gdt_pad;
16221+ u16 boot_cs;
16222 u16 gdt_limit;
16223 u32 gdt_base;
16224 #else
16225diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
16226index a82c4f1..ac45053 100644
16227--- a/arch/x86/include/asm/reboot.h
16228+++ b/arch/x86/include/asm/reboot.h
16229@@ -6,13 +6,13 @@
16230 struct pt_regs;
16231
16232 struct machine_ops {
16233- void (*restart)(char *cmd);
16234- void (*halt)(void);
16235- void (*power_off)(void);
16236+ void (* __noreturn restart)(char *cmd);
16237+ void (* __noreturn halt)(void);
16238+ void (* __noreturn power_off)(void);
16239 void (*shutdown)(void);
16240 void (*crash_shutdown)(struct pt_regs *);
16241- void (*emergency_restart)(void);
16242-};
16243+ void (* __noreturn emergency_restart)(void);
16244+} __no_const;
16245
16246 extern struct machine_ops machine_ops;
16247
16248diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
16249index cad82c9..2e5c5c1 100644
16250--- a/arch/x86/include/asm/rwsem.h
16251+++ b/arch/x86/include/asm/rwsem.h
16252@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
16253 {
16254 asm volatile("# beginning down_read\n\t"
16255 LOCK_PREFIX _ASM_INC "(%1)\n\t"
16256+
16257+#ifdef CONFIG_PAX_REFCOUNT
16258+ "jno 0f\n"
16259+ LOCK_PREFIX _ASM_DEC "(%1)\n"
16260+ "int $4\n0:\n"
16261+ _ASM_EXTABLE(0b, 0b)
16262+#endif
16263+
16264 /* adds 0x00000001 */
16265 " jns 1f\n"
16266 " call call_rwsem_down_read_failed\n"
16267@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
16268 "1:\n\t"
16269 " mov %1,%2\n\t"
16270 " add %3,%2\n\t"
16271+
16272+#ifdef CONFIG_PAX_REFCOUNT
16273+ "jno 0f\n"
16274+ "sub %3,%2\n"
16275+ "int $4\n0:\n"
16276+ _ASM_EXTABLE(0b, 0b)
16277+#endif
16278+
16279 " jle 2f\n\t"
16280 LOCK_PREFIX " cmpxchg %2,%0\n\t"
16281 " jnz 1b\n\t"
16282@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
16283 long tmp;
16284 asm volatile("# beginning down_write\n\t"
16285 LOCK_PREFIX " xadd %1,(%2)\n\t"
16286+
16287+#ifdef CONFIG_PAX_REFCOUNT
16288+ "jno 0f\n"
16289+ "mov %1,(%2)\n"
16290+ "int $4\n0:\n"
16291+ _ASM_EXTABLE(0b, 0b)
16292+#endif
16293+
16294 /* adds 0xffff0001, returns the old value */
16295 " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
16296 /* was the active mask 0 before? */
16297@@ -155,6 +179,14 @@ static inline void __up_read(struct rw_semaphore *sem)
16298 long tmp;
16299 asm volatile("# beginning __up_read\n\t"
16300 LOCK_PREFIX " xadd %1,(%2)\n\t"
16301+
16302+#ifdef CONFIG_PAX_REFCOUNT
16303+ "jno 0f\n"
16304+ "mov %1,(%2)\n"
16305+ "int $4\n0:\n"
16306+ _ASM_EXTABLE(0b, 0b)
16307+#endif
16308+
16309 /* subtracts 1, returns the old value */
16310 " jns 1f\n\t"
16311 " call call_rwsem_wake\n" /* expects old value in %edx */
16312@@ -173,6 +205,14 @@ static inline void __up_write(struct rw_semaphore *sem)
16313 long tmp;
16314 asm volatile("# beginning __up_write\n\t"
16315 LOCK_PREFIX " xadd %1,(%2)\n\t"
16316+
16317+#ifdef CONFIG_PAX_REFCOUNT
16318+ "jno 0f\n"
16319+ "mov %1,(%2)\n"
16320+ "int $4\n0:\n"
16321+ _ASM_EXTABLE(0b, 0b)
16322+#endif
16323+
16324 /* subtracts 0xffff0001, returns the old value */
16325 " jns 1f\n\t"
16326 " call call_rwsem_wake\n" /* expects old value in %edx */
16327@@ -190,6 +230,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
16328 {
16329 asm volatile("# beginning __downgrade_write\n\t"
16330 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
16331+
16332+#ifdef CONFIG_PAX_REFCOUNT
16333+ "jno 0f\n"
16334+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
16335+ "int $4\n0:\n"
16336+ _ASM_EXTABLE(0b, 0b)
16337+#endif
16338+
16339 /*
16340 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
16341 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
16342@@ -208,7 +256,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
16343 */
16344 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
16345 {
16346- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
16347+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
16348+
16349+#ifdef CONFIG_PAX_REFCOUNT
16350+ "jno 0f\n"
16351+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
16352+ "int $4\n0:\n"
16353+ _ASM_EXTABLE(0b, 0b)
16354+#endif
16355+
16356 : "+m" (sem->count)
16357 : "er" (delta));
16358 }
16359@@ -218,7 +274,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
16360 */
16361 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
16362 {
16363- return delta + xadd(&sem->count, delta);
16364+ return delta + xadd_check_overflow(&sem->count, delta);
16365 }
16366
16367 #endif /* __KERNEL__ */
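Every PAX_REFCOUNT hunk above follows one shape: do the locked arithmetic, then jno over a recovery sequence that undoes the operation and executes int $4 (the #OF overflow trap, which PaX's trap handler elsewhere turns into a task kill). A distilled sketch of that pattern for a bare increment (x86 GCC inline asm; the _ASM_EXTABLE entry that resumes execution after the trap is elided):

typedef struct { int counter; } atomic_model_t;

static inline void atomic_inc_checked(atomic_model_t *v)
{
	asm volatile("lock incl %0\n"
		     "jno 0f\n"          /* no signed overflow: skip recovery */
		     "lock decl %0\n"    /* undo the increment */
		     "int $4\n"          /* raise #OF; the handler kills the task */
		     "0:\n"
		     : "+m" (v->counter));
}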
16368diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
16369index c48a950..bc40804 100644
16370--- a/arch/x86/include/asm/segment.h
16371+++ b/arch/x86/include/asm/segment.h
16372@@ -64,10 +64,15 @@
16373 * 26 - ESPFIX small SS
16374 * 27 - per-cpu [ offset to per-cpu data area ]
16375 * 28 - stack_canary-20 [ for stack protector ]
16376- * 29 - unused
16377- * 30 - unused
16378+ * 29 - PCI BIOS CS
16379+ * 30 - PCI BIOS DS
16380 * 31 - TSS for double fault handler
16381 */
16382+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
16383+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
16384+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
16385+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
16386+
16387 #define GDT_ENTRY_TLS_MIN 6
16388 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
16389
16390@@ -79,6 +84,8 @@
16391
16392 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
16393
16394+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
16395+
16396 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
16397
16398 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
16399@@ -104,6 +111,12 @@
16400 #define __KERNEL_STACK_CANARY 0
16401 #endif
16402
16403+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
16404+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
16405+
16406+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
16407+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
16408+
16409 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
16410
16411 /*
16412@@ -141,7 +154,7 @@
16413 */
16414
16415 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
16416-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
16417+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
16418
16419
16420 #else
16421@@ -165,6 +178,8 @@
16422 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
16423 #define __USER32_DS __USER_DS
16424
16425+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
16426+
16427 #define GDT_ENTRY_TSS 8 /* needs two entries */
16428 #define GDT_ENTRY_LDT 10 /* needs two entries */
16429 #define GDT_ENTRY_TLS_MIN 12
16430@@ -173,6 +188,8 @@
16431 #define GDT_ENTRY_PER_CPU 15 /* Abused to load per CPU data from limit */
16432 #define __PER_CPU_SEG (GDT_ENTRY_PER_CPU * 8 + 3)
16433
16434+#define GDT_ENTRY_UDEREF_KERNEL_DS 16
16435+
16436 /* TLS indexes for 64bit - hardcoded in arch_prctl */
16437 #define FS_TLS 0
16438 #define GS_TLS 1
16439@@ -180,12 +197,14 @@
16440 #define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3)
16441 #define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3)
16442
16443-#define GDT_ENTRIES 16
16444+#define GDT_ENTRIES 17
16445
16446 #endif
16447
16448 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
16449+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
16450 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
16451+#define __UDEREF_KERNEL_DS (GDT_ENTRY_UDEREF_KERNEL_DS*8)
16452 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
16453 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
16454 #ifndef CONFIG_PARAVIRT
16455@@ -265,7 +284,7 @@ static inline unsigned long get_limit(unsigned long segment)
16456 {
16457 unsigned long __limit;
16458 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
16459- return __limit + 1;
16460+ return __limit;
16461 }
16462
16463 #endif /* !__ASSEMBLY__ */
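Each selector above is its GDT index shifted left by 3, OR-ed with the requested privilege level, which is why the new KERNEXEC/UDEREF entries slot in simply by claiming unused indices (and why GDT_ENTRIES grows to 17 on x86-64). The arithmetic:

#include <stdio.h>

static unsigned int selector(unsigned int index, unsigned int rpl)
{
	return (index << 3) | rpl;      /* TI (bit 2) = 0: GDT */
}

int main(void)
{
	printf("__KERNEXEC_KERNEL_CS (i386) = %#x\n", selector(4, 0));   /* 0x20 */
	printf("__UDEREF_KERNEL_DS (x86-64) = %#x\n", selector(16, 0));  /* 0x80 */
	printf("__USER_CS (x86-64)          = %#x\n", selector(6, 3));   /* 0x33 */
	return 0;
}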
16464diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
16465index 8d3120f..352b440 100644
16466--- a/arch/x86/include/asm/smap.h
16467+++ b/arch/x86/include/asm/smap.h
16468@@ -25,11 +25,40 @@
16469
16470 #include <asm/alternative-asm.h>
16471
16472+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
16473+#define ASM_PAX_OPEN_USERLAND \
16474+ 661: jmp 663f; \
16475+ .pushsection .altinstr_replacement, "a" ; \
16476+ 662: pushq %rax; nop; \
16477+ .popsection ; \
16478+ .pushsection .altinstructions, "a" ; \
16479+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
16480+ .popsection ; \
16481+ call __pax_open_userland; \
16482+ popq %rax; \
16483+ 663:
16484+
16485+#define ASM_PAX_CLOSE_USERLAND \
16486+ 661: jmp 663f; \
16487+ .pushsection .altinstr_replacement, "a" ; \
16488+ 662: pushq %rax; nop; \
16489+ .popsection; \
16490+ .pushsection .altinstructions, "a" ; \
16491+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
16492+ .popsection; \
16493+ call __pax_close_userland; \
16494+ popq %rax; \
16495+ 663:
16496+#else
16497+#define ASM_PAX_OPEN_USERLAND
16498+#define ASM_PAX_CLOSE_USERLAND
16499+#endif
16500+
16501 #ifdef CONFIG_X86_SMAP
16502
16503 #define ASM_CLAC \
16504 661: ASM_NOP3 ; \
16505- .pushsection .altinstr_replacement, "ax" ; \
16506+ .pushsection .altinstr_replacement, "a" ; \
16507 662: __ASM_CLAC ; \
16508 .popsection ; \
16509 .pushsection .altinstructions, "a" ; \
16510@@ -38,7 +67,7 @@
16511
16512 #define ASM_STAC \
16513 661: ASM_NOP3 ; \
16514- .pushsection .altinstr_replacement, "ax" ; \
16515+ .pushsection .altinstr_replacement, "a" ; \
16516 662: __ASM_STAC ; \
16517 .popsection ; \
16518 .pushsection .altinstructions, "a" ; \
16519@@ -56,6 +85,37 @@
16520
16521 #include <asm/alternative.h>
16522
16523+#define __HAVE_ARCH_PAX_OPEN_USERLAND
16524+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
16525+
16526+extern void __pax_open_userland(void);
16527+static __always_inline unsigned long pax_open_userland(void)
16528+{
16529+
16530+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
16531+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[open]", X86_FEATURE_STRONGUDEREF)
16532+ :
16533+ : [open] "i" (__pax_open_userland)
16534+ : "memory", "rax");
16535+#endif
16536+
16537+ return 0;
16538+}
16539+
16540+extern void __pax_close_userland(void);
16541+static __always_inline unsigned long pax_close_userland(void)
16542+{
16543+
16544+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
16545+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[close]", X86_FEATURE_STRONGUDEREF)
16546+ :
16547+ : [close] "i" (__pax_close_userland)
16548+ : "memory", "rax");
16549+#endif
16550+
16551+ return 0;
16552+}
16553+
16554 #ifdef CONFIG_X86_SMAP
16555
16556 static __always_inline void clac(void)
16557diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
16558index b073aae..39f9bdd 100644
16559--- a/arch/x86/include/asm/smp.h
16560+++ b/arch/x86/include/asm/smp.h
16561@@ -36,7 +36,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
16562 /* cpus sharing the last level cache: */
16563 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
16564 DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
16565-DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
16566+DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
16567
16568 static inline struct cpumask *cpu_sibling_mask(int cpu)
16569 {
16570@@ -79,7 +79,7 @@ struct smp_ops {
16571
16572 void (*send_call_func_ipi)(const struct cpumask *mask);
16573 void (*send_call_func_single_ipi)(int cpu);
16574-};
16575+} __no_const;
16576
16577 /* Globals due to paravirt */
16578 extern void set_cpu_sibling_map(int cpu);
16579@@ -191,14 +191,8 @@ extern unsigned disabled_cpus __cpuinitdata;
16580 extern int safe_smp_processor_id(void);
16581
16582 #elif defined(CONFIG_X86_64_SMP)
16583-#define raw_smp_processor_id() (this_cpu_read(cpu_number))
16584-
16585-#define stack_smp_processor_id() \
16586-({ \
16587- struct thread_info *ti; \
16588- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
16589- ti->cpu; \
16590-})
16591+#define raw_smp_processor_id() (this_cpu_read(cpu_number))
16592+#define stack_smp_processor_id() raw_smp_processor_id()
16593 #define safe_smp_processor_id() smp_processor_id()
16594
16595 #endif
16596diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
16597index 33692ea..350a534 100644
16598--- a/arch/x86/include/asm/spinlock.h
16599+++ b/arch/x86/include/asm/spinlock.h
16600@@ -172,6 +172,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
16601 static inline void arch_read_lock(arch_rwlock_t *rw)
16602 {
16603 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
16604+
16605+#ifdef CONFIG_PAX_REFCOUNT
16606+ "jno 0f\n"
16607+ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
16608+ "int $4\n0:\n"
16609+ _ASM_EXTABLE(0b, 0b)
16610+#endif
16611+
16612 "jns 1f\n"
16613 "call __read_lock_failed\n\t"
16614 "1:\n"
16615@@ -181,6 +189,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
16616 static inline void arch_write_lock(arch_rwlock_t *rw)
16617 {
16618 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
16619+
16620+#ifdef CONFIG_PAX_REFCOUNT
16621+ "jno 0f\n"
16622+ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
16623+ "int $4\n0:\n"
16624+ _ASM_EXTABLE(0b, 0b)
16625+#endif
16626+
16627 "jz 1f\n"
16628 "call __write_lock_failed\n\t"
16629 "1:\n"
16630@@ -210,13 +226,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
16631
16632 static inline void arch_read_unlock(arch_rwlock_t *rw)
16633 {
16634- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
16635+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
16636+
16637+#ifdef CONFIG_PAX_REFCOUNT
16638+ "jno 0f\n"
16639+ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
16640+ "int $4\n0:\n"
16641+ _ASM_EXTABLE(0b, 0b)
16642+#endif
16643+
16644 :"+m" (rw->lock) : : "memory");
16645 }
16646
16647 static inline void arch_write_unlock(arch_rwlock_t *rw)
16648 {
16649- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
16650+ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
16651+
16652+#ifdef CONFIG_PAX_REFCOUNT
16653+ "jno 0f\n"
16654+ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
16655+ "int $4\n0:\n"
16656+ _ASM_EXTABLE(0b, 0b)
16657+#endif
16658+
16659 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
16660 }
16661
16662diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
16663index 6a99859..03cb807 100644
16664--- a/arch/x86/include/asm/stackprotector.h
16665+++ b/arch/x86/include/asm/stackprotector.h
16666@@ -47,7 +47,7 @@
16667 * head_32 for boot CPU and setup_per_cpu_areas() for others.
16668 */
16669 #define GDT_STACK_CANARY_INIT \
16670- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
16671+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
16672
16673 /*
16674 * Initialize the stackprotector canary value.
16675@@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
16676
16677 static inline void load_stack_canary_segment(void)
16678 {
16679-#ifdef CONFIG_X86_32
16680+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
16681 asm volatile ("mov %0, %%gs" : : "r" (0));
16682 #endif
16683 }
16684diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
16685index 70bbe39..4ae2bd4 100644
16686--- a/arch/x86/include/asm/stacktrace.h
16687+++ b/arch/x86/include/asm/stacktrace.h
16688@@ -11,28 +11,20 @@
16689
16690 extern int kstack_depth_to_print;
16691
16692-struct thread_info;
16693+struct task_struct;
16694 struct stacktrace_ops;
16695
16696-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
16697- unsigned long *stack,
16698- unsigned long bp,
16699- const struct stacktrace_ops *ops,
16700- void *data,
16701- unsigned long *end,
16702- int *graph);
16703+typedef unsigned long walk_stack_t(struct task_struct *task,
16704+ void *stack_start,
16705+ unsigned long *stack,
16706+ unsigned long bp,
16707+ const struct stacktrace_ops *ops,
16708+ void *data,
16709+ unsigned long *end,
16710+ int *graph);
16711
16712-extern unsigned long
16713-print_context_stack(struct thread_info *tinfo,
16714- unsigned long *stack, unsigned long bp,
16715- const struct stacktrace_ops *ops, void *data,
16716- unsigned long *end, int *graph);
16717-
16718-extern unsigned long
16719-print_context_stack_bp(struct thread_info *tinfo,
16720- unsigned long *stack, unsigned long bp,
16721- const struct stacktrace_ops *ops, void *data,
16722- unsigned long *end, int *graph);
16723+extern walk_stack_t print_context_stack;
16724+extern walk_stack_t print_context_stack_bp;
16725
16726 /* Generic stack tracer with callbacks */
16727
16728@@ -40,7 +32,7 @@ struct stacktrace_ops {
16729 void (*address)(void *data, unsigned long address, int reliable);
16730 /* On negative return stop dumping */
16731 int (*stack)(void *data, char *name);
16732- walk_stack_t walk_stack;
16733+ walk_stack_t *walk_stack;
16734 };
16735
16736 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
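walk_stack_t above is changed from a pointer typedef to a bare function type: extern declarations of print_context_stack then carry a full, checkable prototype, while struct members must spell the pointer out as walk_stack_t *walk_stack. A minimal standalone illustration of that C idiom:

#include <stdio.h>

typedef unsigned long walk_stack_t(void *data);   /* function type, not pointer */

/* declares a function with that exact prototype... */
walk_stack_t print_context_stack_model;

unsigned long print_context_stack_model(void *data)
{
	printf("walking: %s\n", (const char *)data);
	return 0;
}

/* ...while users of the type hold an explicit pointer */
struct stacktrace_ops_model {
	walk_stack_t *walk_stack;
};

int main(void)
{
	struct stacktrace_ops_model ops = { .walk_stack = print_context_stack_model };
	return (int)ops.walk_stack("frame 0");
}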
16737diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
16738index 4ec45b3..a4f0a8a 100644
16739--- a/arch/x86/include/asm/switch_to.h
16740+++ b/arch/x86/include/asm/switch_to.h
16741@@ -108,7 +108,7 @@ do { \
16742 "call __switch_to\n\t" \
16743 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
16744 __switch_canary \
16745- "movq %P[thread_info](%%rsi),%%r8\n\t" \
16746+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
16747 "movq %%rax,%%rdi\n\t" \
16748 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
16749 "jnz ret_from_fork\n\t" \
16750@@ -119,7 +119,7 @@ do { \
16751 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
16752 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
16753 [_tif_fork] "i" (_TIF_FORK), \
16754- [thread_info] "i" (offsetof(struct task_struct, stack)), \
16755+ [thread_info] "m" (current_tinfo), \
16756 [current_task] "m" (current_task) \
16757 __switch_canary_iparam \
16758 : "memory", "cc" __EXTRA_CLOBBER)
16759diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
16760index a1df6e8..e002940 100644
16761--- a/arch/x86/include/asm/thread_info.h
16762+++ b/arch/x86/include/asm/thread_info.h
16763@@ -10,6 +10,7 @@
16764 #include <linux/compiler.h>
16765 #include <asm/page.h>
16766 #include <asm/types.h>
16767+#include <asm/percpu.h>
16768
16769 /*
16770 * low level task data that entry.S needs immediate access to
16771@@ -23,7 +24,6 @@ struct exec_domain;
16772 #include <linux/atomic.h>
16773
16774 struct thread_info {
16775- struct task_struct *task; /* main task structure */
16776 struct exec_domain *exec_domain; /* execution domain */
16777 __u32 flags; /* low level flags */
16778 __u32 status; /* thread synchronous flags */
16779@@ -33,19 +33,13 @@ struct thread_info {
16780 mm_segment_t addr_limit;
16781 struct restart_block restart_block;
16782 void __user *sysenter_return;
16783-#ifdef CONFIG_X86_32
16784- unsigned long previous_esp; /* ESP of the previous stack in
16785- case of nested (IRQ) stacks
16786- */
16787- __u8 supervisor_stack[0];
16788-#endif
16789+ unsigned long lowest_stack;
16790 unsigned int sig_on_uaccess_error:1;
16791 unsigned int uaccess_err:1; /* uaccess failed */
16792 };
16793
16794-#define INIT_THREAD_INFO(tsk) \
16795+#define INIT_THREAD_INFO \
16796 { \
16797- .task = &tsk, \
16798 .exec_domain = &default_exec_domain, \
16799 .flags = 0, \
16800 .cpu = 0, \
16801@@ -56,7 +50,7 @@ struct thread_info {
16802 }, \
16803 }
16804
16805-#define init_thread_info (init_thread_union.thread_info)
16806+#define init_thread_info (init_thread_union.stack)
16807 #define init_stack (init_thread_union.stack)
16808
16809 #else /* !__ASSEMBLY__ */
16810@@ -97,6 +91,7 @@ struct thread_info {
16811 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
16812 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
16813 #define TIF_X32 30 /* 32-bit native x86-64 binary */
16814+#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
16815
16816 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
16817 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
16818@@ -121,17 +116,18 @@ struct thread_info {
16819 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
16820 #define _TIF_ADDR32 (1 << TIF_ADDR32)
16821 #define _TIF_X32 (1 << TIF_X32)
16822+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
16823
16824 /* work to do in syscall_trace_enter() */
16825 #define _TIF_WORK_SYSCALL_ENTRY \
16826 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
16827 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
16828- _TIF_NOHZ)
16829+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
16830
16831 /* work to do in syscall_trace_leave() */
16832 #define _TIF_WORK_SYSCALL_EXIT \
16833 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
16834- _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
16835+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
16836
16837 /* work to do on interrupt/exception return */
16838 #define _TIF_WORK_MASK \
16839@@ -142,7 +138,7 @@ struct thread_info {
16840 /* work to do on any return to user space */
16841 #define _TIF_ALLWORK_MASK \
16842 ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
16843- _TIF_NOHZ)
16844+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
16845
16846 /* Only used for 64 bit */
16847 #define _TIF_DO_NOTIFY_MASK \
16848@@ -158,45 +154,40 @@ struct thread_info {
16849
16850 #define PREEMPT_ACTIVE 0x10000000
16851
16852-#ifdef CONFIG_X86_32
16853-
16854-#define STACK_WARN (THREAD_SIZE/8)
16855-/*
16856- * macros/functions for gaining access to the thread information structure
16857- *
16858- * preempt_count needs to be 1 initially, until the scheduler is functional.
16859- */
16860-#ifndef __ASSEMBLY__
16861-
16862-
16863-/* how to get the current stack pointer from C */
16864-register unsigned long current_stack_pointer asm("esp") __used;
16865-
16866-/* how to get the thread information struct from C */
16867-static inline struct thread_info *current_thread_info(void)
16868-{
16869- return (struct thread_info *)
16870- (current_stack_pointer & ~(THREAD_SIZE - 1));
16871-}
16872-
16873-#else /* !__ASSEMBLY__ */
16874-
16875+#ifdef __ASSEMBLY__
16876 /* how to get the thread information struct from ASM */
16877 #define GET_THREAD_INFO(reg) \
16878- movl $-THREAD_SIZE, reg; \
16879- andl %esp, reg
16880+ mov PER_CPU_VAR(current_tinfo), reg
16881
16882 /* use this one if reg already contains %esp */
16883-#define GET_THREAD_INFO_WITH_ESP(reg) \
16884- andl $-THREAD_SIZE, reg
16885+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
16886+#else
16887+/* how to get the thread information struct from C */
16888+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
16889+
16890+static __always_inline struct thread_info *current_thread_info(void)
16891+{
16892+ return this_cpu_read_stable(current_tinfo);
16893+}
16894+#endif
16895+
16896+#ifdef CONFIG_X86_32
16897+
16898+#define STACK_WARN (THREAD_SIZE/8)
16899+/*
16900+ * macros/functions for gaining access to the thread information structure
16901+ *
16902+ * preempt_count needs to be 1 initially, until the scheduler is functional.
16903+ */
16904+#ifndef __ASSEMBLY__
16905+
16906+/* how to get the current stack pointer from C */
16907+register unsigned long current_stack_pointer asm("esp") __used;
16908
16909 #endif
16910
16911 #else /* X86_32 */
16912
16913-#include <asm/percpu.h>
16914-#define KERNEL_STACK_OFFSET (5*8)
16915-
16916 /*
16917 * macros/functions for gaining access to the thread information structure
16918 * preempt_count needs to be 1 initially, until the scheduler is functional.
16919@@ -204,27 +195,8 @@ static inline struct thread_info *current_thread_info(void)
16920 #ifndef __ASSEMBLY__
16921 DECLARE_PER_CPU(unsigned long, kernel_stack);
16922
16923-static inline struct thread_info *current_thread_info(void)
16924-{
16925- struct thread_info *ti;
16926- ti = (void *)(this_cpu_read_stable(kernel_stack) +
16927- KERNEL_STACK_OFFSET - THREAD_SIZE);
16928- return ti;
16929-}
16930-
16931-#else /* !__ASSEMBLY__ */
16932-
16933-/* how to get the thread information struct from ASM */
16934-#define GET_THREAD_INFO(reg) \
16935- movq PER_CPU_VAR(kernel_stack),reg ; \
16936- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
16937-
16938-/*
16939- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
16940- * a certain register (to be used in assembler memory operands).
16941- */
16942-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
16943-
16944+/* how to get the current stack pointer from C */
16945+register unsigned long current_stack_pointer asm("rsp") __used;
16946 #endif
16947
16948 #endif /* !X86_32 */
16949@@ -283,5 +255,12 @@ static inline bool is_ia32_task(void)
16950 extern void arch_task_cache_init(void);
16951 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
16952 extern void arch_release_task_struct(struct task_struct *tsk);
16953+
16954+#define __HAVE_THREAD_FUNCTIONS
16955+#define task_thread_info(task) (&(task)->tinfo)
16956+#define task_stack_page(task) ((task)->stack)
16957+#define setup_thread_stack(p, org) do {} while (0)
16958+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
16959+
16960 #endif
16961 #endif /* _ASM_X86_THREAD_INFO_H */
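
The thread_info rework above stops deriving thread_info from the stack pointer and instead reads a dedicated per-CPU pointer (current_tinfo). A loose userspace analogue, substituting thread-local storage for per-CPU data (everything here is illustrative, not the kernel's implementation):

#include <stdio.h>

struct thread_info { int cpu; };

/* stands in for DECLARE_PER_CPU(struct thread_info *, current_tinfo) */
static _Thread_local struct thread_info *current_tinfo;

static struct thread_info *current_thread_info(void)
{
        /* one stable load, no stack-pointer masking involved */
        return current_tinfo;
}

int main(void)
{
        struct thread_info ti = { .cpu = 0 };
        current_tinfo = &ti;
        printf("cpu=%d\n", current_thread_info()->cpu);
        return 0;
}
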
16962diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
16963index 50a7fc0..d00c622 100644
16964--- a/arch/x86/include/asm/tlbflush.h
16965+++ b/arch/x86/include/asm/tlbflush.h
16966@@ -17,18 +17,39 @@
16967
16968 static inline void __native_flush_tlb(void)
16969 {
16970- native_write_cr3(native_read_cr3());
16971+
16972+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
16973+ if (static_cpu_has(X86_FEATURE_PCID)) {
16974+ unsigned int cpu = raw_get_cpu();
16975+
16976+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
16977+ unsigned long descriptor[2];
16978+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_MONGLOBAL) : "memory");
16979+ } else {
16980+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
16981+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
16982+ }
16983+ raw_put_cpu_no_resched();
16984+ } else
16985+#endif
16986+
16987+ native_write_cr3(native_read_cr3());
16988 }
16989
16990 static inline void __native_flush_tlb_global_irq_disabled(void)
16991 {
16992- unsigned long cr4;
16993+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
16994+ unsigned long descriptor[2];
16995+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_GLOBAL) : "memory");
16996+ } else {
16997+ unsigned long cr4;
16998
16999- cr4 = native_read_cr4();
17000- /* clear PGE */
17001- native_write_cr4(cr4 & ~X86_CR4_PGE);
17002- /* write old PGE again and flush TLBs */
17003- native_write_cr4(cr4);
17004+ cr4 = native_read_cr4();
17005+ /* clear PGE */
17006+ native_write_cr4(cr4 & ~X86_CR4_PGE);
17007+ /* write old PGE again and flush TLBs */
17008+ native_write_cr4(cr4);
17009+ }
17010 }
17011
17012 static inline void __native_flush_tlb_global(void)
17013@@ -49,7 +70,33 @@ static inline void __native_flush_tlb_global(void)
17014
17015 static inline void __native_flush_tlb_single(unsigned long addr)
17016 {
17017- asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
17018+
17019+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17020+ if (static_cpu_has(X86_FEATURE_PCID) && addr < TASK_SIZE_MAX) {
17021+ unsigned int cpu = raw_get_cpu();
17022+
17023+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
17024+ unsigned long descriptor[2];
17025+ descriptor[0] = PCID_USER;
17026+ descriptor[1] = addr;
17027+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
17028+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
17029+ descriptor[0] = PCID_KERNEL;
17030+ descriptor[1] = addr + pax_user_shadow_base;
17031+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
17032+ }
17033+ } else {
17034+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
17035+ asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
17036+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
17037+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF))
17038+ asm volatile("invlpg (%0)" ::"r" (addr + pax_user_shadow_base) : "memory");
17039+ }
17040+ raw_put_cpu_no_resched();
17041+ } else
17042+#endif
17043+
17044+ asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
17045 }
17046
17047 static inline void __flush_tlb_all(void)
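
The INVPCID operand the PCID hunks above build (descriptor[0]/descriptor[1]) follows the hardware layout: a 128-bit memory operand with the PCID in the low quadword and the linear address in the high one. Sketched as a struct for clarity (the struct name is assumed, not from the patch):

#include <stdint.h>
#include <stdio.h>

struct invpcid_desc {
        uint64_t pcid;  /* only bits 0-11 are meaningful */
        uint64_t addr;  /* linear address, used by the single-address type */
};

int main(void)
{
        struct invpcid_desc d = { .pcid = 1, .addr = 0x7fff00000000ULL };
        printf("pcid=%llu addr=%#llx\n",
               (unsigned long long)d.pcid, (unsigned long long)d.addr);
        return 0;
}
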
17048diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
17049index 5ee2687..74590b9 100644
17050--- a/arch/x86/include/asm/uaccess.h
17051+++ b/arch/x86/include/asm/uaccess.h
17052@@ -7,6 +7,7 @@
17053 #include <linux/compiler.h>
17054 #include <linux/thread_info.h>
17055 #include <linux/string.h>
17056+#include <linux/sched.h>
17057 #include <asm/asm.h>
17058 #include <asm/page.h>
17059 #include <asm/smap.h>
17060@@ -29,7 +30,12 @@
17061
17062 #define get_ds() (KERNEL_DS)
17063 #define get_fs() (current_thread_info()->addr_limit)
17064+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17065+void __set_fs(mm_segment_t x);
17066+void set_fs(mm_segment_t x);
17067+#else
17068 #define set_fs(x) (current_thread_info()->addr_limit = (x))
17069+#endif
17070
17071 #define segment_eq(a, b) ((a).seg == (b).seg)
17072
17073@@ -77,8 +83,33 @@
17074 * checks that the pointer is in the user space range - after calling
17075 * this function, memory access functions may still return -EFAULT.
17076 */
17077-#define access_ok(type, addr, size) \
17078- (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
17079+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
17080+#define access_ok(type, addr, size) \
17081+({ \
17082+ long __size = size; \
17083+ unsigned long __addr = (unsigned long)addr; \
17084+ unsigned long __addr_ao = __addr & PAGE_MASK; \
17085+ unsigned long __end_ao = __addr + __size - 1; \
17086+ bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
17087+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
17088+ while(__addr_ao <= __end_ao) { \
17089+ char __c_ao; \
17090+ __addr_ao += PAGE_SIZE; \
17091+ if (__size > PAGE_SIZE) \
17092+ cond_resched(); \
17093+ if (__get_user(__c_ao, (char __user *)__addr)) \
17094+ break; \
17095+ if (type != VERIFY_WRITE) { \
17096+ __addr = __addr_ao; \
17097+ continue; \
17098+ } \
17099+ if (__put_user(__c_ao, (char __user *)__addr)) \
17100+ break; \
17101+ __addr = __addr_ao; \
17102+ } \
17103+ } \
17104+ __ret_ao; \
17105+})
17106
17107 /*
17108 * The exception table consists of pairs of addresses relative to the
17109@@ -165,10 +196,12 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
17110 register __inttype(*(ptr)) __val_gu asm("%edx"); \
17111 __chk_user_ptr(ptr); \
17112 might_fault(); \
17113+ pax_open_userland(); \
17114 asm volatile("call __get_user_%P3" \
17115 : "=a" (__ret_gu), "=r" (__val_gu) \
17116 : "0" (ptr), "i" (sizeof(*(ptr)))); \
17117 (x) = (__typeof__(*(ptr))) __val_gu; \
17118+ pax_close_userland(); \
17119 __ret_gu; \
17120 })
17121
17122@@ -176,13 +209,21 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
17123 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
17124 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
17125
17126-
17127+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17128+#define __copyuser_seg "gs;"
17129+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
17130+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
17131+#else
17132+#define __copyuser_seg
17133+#define __COPYUSER_SET_ES
17134+#define __COPYUSER_RESTORE_ES
17135+#endif
17136
17137 #ifdef CONFIG_X86_32
17138 #define __put_user_asm_u64(x, addr, err, errret) \
17139 asm volatile(ASM_STAC "\n" \
17140- "1: movl %%eax,0(%2)\n" \
17141- "2: movl %%edx,4(%2)\n" \
17142+ "1: "__copyuser_seg"movl %%eax,0(%2)\n" \
17143+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
17144 "3: " ASM_CLAC "\n" \
17145 ".section .fixup,\"ax\"\n" \
17146 "4: movl %3,%0\n" \
17147@@ -195,8 +236,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
17148
17149 #define __put_user_asm_ex_u64(x, addr) \
17150 asm volatile(ASM_STAC "\n" \
17151- "1: movl %%eax,0(%1)\n" \
17152- "2: movl %%edx,4(%1)\n" \
17153+ "1: "__copyuser_seg"movl %%eax,0(%1)\n" \
17154+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
17155 "3: " ASM_CLAC "\n" \
17156 _ASM_EXTABLE_EX(1b, 2b) \
17157 _ASM_EXTABLE_EX(2b, 3b) \
17158@@ -246,7 +287,8 @@ extern void __put_user_8(void);
17159 __typeof__(*(ptr)) __pu_val; \
17160 __chk_user_ptr(ptr); \
17161 might_fault(); \
17162- __pu_val = x; \
17163+ __pu_val = (x); \
17164+ pax_open_userland(); \
17165 switch (sizeof(*(ptr))) { \
17166 case 1: \
17167 __put_user_x(1, __pu_val, ptr, __ret_pu); \
17168@@ -264,6 +306,7 @@ extern void __put_user_8(void);
17169 __put_user_x(X, __pu_val, ptr, __ret_pu); \
17170 break; \
17171 } \
17172+ pax_close_userland(); \
17173 __ret_pu; \
17174 })
17175
17176@@ -344,8 +387,10 @@ do { \
17177 } while (0)
17178
17179 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
17180+do { \
17181+ pax_open_userland(); \
17182 asm volatile(ASM_STAC "\n" \
17183- "1: mov"itype" %2,%"rtype"1\n" \
17184+ "1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
17185 "2: " ASM_CLAC "\n" \
17186 ".section .fixup,\"ax\"\n" \
17187 "3: mov %3,%0\n" \
17188@@ -353,8 +398,10 @@ do { \
17189 " jmp 2b\n" \
17190 ".previous\n" \
17191 _ASM_EXTABLE(1b, 3b) \
17192- : "=r" (err), ltype(x) \
17193- : "m" (__m(addr)), "i" (errret), "0" (err))
17194+ : "=r" (err), ltype (x) \
17195+ : "m" (__m(addr)), "i" (errret), "0" (err)); \
17196+ pax_close_userland(); \
17197+} while (0)
17198
17199 #define __get_user_size_ex(x, ptr, size) \
17200 do { \
17201@@ -378,7 +425,7 @@ do { \
17202 } while (0)
17203
17204 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
17205- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
17206+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
17207 "2:\n" \
17208 _ASM_EXTABLE_EX(1b, 2b) \
17209 : ltype(x) : "m" (__m(addr)))
17210@@ -395,13 +442,24 @@ do { \
17211 int __gu_err; \
17212 unsigned long __gu_val; \
17213 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
17214- (x) = (__force __typeof__(*(ptr)))__gu_val; \
17215+ (x) = (__typeof__(*(ptr)))__gu_val; \
17216 __gu_err; \
17217 })
17218
17219 /* FIXME: this hack is definitely wrong -AK */
17220 struct __large_struct { unsigned long buf[100]; };
17221-#define __m(x) (*(struct __large_struct __user *)(x))
17222+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17223+#define ____m(x) \
17224+({ \
17225+ unsigned long ____x = (unsigned long)(x); \
17226+ if (____x < pax_user_shadow_base) \
17227+ ____x += pax_user_shadow_base; \
17228+ (typeof(x))____x; \
17229+})
17230+#else
17231+#define ____m(x) (x)
17232+#endif
17233+#define __m(x) (*(struct __large_struct __user *)____m(x))
17234
17235 /*
17236 * Tell gcc we read from memory instead of writing: this is because
17237@@ -409,8 +467,10 @@ struct __large_struct { unsigned long buf[100]; };
17238 * aliasing issues.
17239 */
17240 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
17241+do { \
17242+ pax_open_userland(); \
17243 asm volatile(ASM_STAC "\n" \
17244- "1: mov"itype" %"rtype"1,%2\n" \
17245+ "1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
17246 "2: " ASM_CLAC "\n" \
17247 ".section .fixup,\"ax\"\n" \
17248 "3: mov %3,%0\n" \
17249@@ -418,10 +478,12 @@ struct __large_struct { unsigned long buf[100]; };
17250 ".previous\n" \
17251 _ASM_EXTABLE(1b, 3b) \
17252 : "=r"(err) \
17253- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
17254+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err));\
17255+ pax_close_userland(); \
17256+} while (0)
17257
17258 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
17259- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
17260+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
17261 "2:\n" \
17262 _ASM_EXTABLE_EX(1b, 2b) \
17263 : : ltype(x), "m" (__m(addr)))
17264@@ -431,11 +493,13 @@ struct __large_struct { unsigned long buf[100]; };
17265 */
17266 #define uaccess_try do { \
17267 current_thread_info()->uaccess_err = 0; \
17268+ pax_open_userland(); \
17269 stac(); \
17270 barrier();
17271
17272 #define uaccess_catch(err) \
17273 clac(); \
17274+ pax_close_userland(); \
17275 (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \
17276 } while (0)
17277
17278@@ -460,8 +524,12 @@ struct __large_struct { unsigned long buf[100]; };
17279 * On error, the variable @x is set to zero.
17280 */
17281
17282+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17283+#define __get_user(x, ptr) get_user((x), (ptr))
17284+#else
17285 #define __get_user(x, ptr) \
17286 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
17287+#endif
17288
17289 /**
17290 * __put_user: - Write a simple value into user space, with less checking.
17291@@ -483,8 +551,12 @@ struct __large_struct { unsigned long buf[100]; };
17292 * Returns zero on success, or -EFAULT on error.
17293 */
17294
17295+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17296+#define __put_user(x, ptr) put_user((x), (ptr))
17297+#else
17298 #define __put_user(x, ptr) \
17299 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
17300+#endif
17301
17302 #define __get_user_unaligned __get_user
17303 #define __put_user_unaligned __put_user
17304@@ -502,7 +574,7 @@ struct __large_struct { unsigned long buf[100]; };
17305 #define get_user_ex(x, ptr) do { \
17306 unsigned long __gue_val; \
17307 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
17308- (x) = (__force __typeof__(*(ptr)))__gue_val; \
17309+ (x) = (__typeof__(*(ptr)))__gue_val; \
17310 } while (0)
17311
17312 #define put_user_try uaccess_try
17313@@ -519,8 +591,8 @@ strncpy_from_user(char *dst, const char __user *src, long count);
17314 extern __must_check long strlen_user(const char __user *str);
17315 extern __must_check long strnlen_user(const char __user *str, long n);
17316
17317-unsigned long __must_check clear_user(void __user *mem, unsigned long len);
17318-unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
17319+unsigned long __must_check clear_user(void __user *mem, unsigned long len) __size_overflow(2);
17320+unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
17321
17322 /*
17323 * movsl can be slow when source and dest are not both 8-byte aligned
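
The access_ok() rewrite above does more than a range check: for a request that crosses page boundaries it touches one byte per page (reading it, and for VERIFY_WRITE rewriting it) so any fault surfaces before the bulk copy, with a cond_resched() thrown in for large ranges. A hedged userspace analogue (the function name and the 4 KB page size are assumptions here):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE_AO 4096UL
#define PAGE_MASK_AO (~(PAGE_SIZE_AO - 1))

static void probe_range(char *addr, size_t size, int for_write)
{
        unsigned long a = (unsigned long)addr;
        unsigned long page = a & PAGE_MASK_AO;
        unsigned long end = a + size - 1;

        /* touch one byte per page so faults happen before the real copy */
        while (page <= end) {
                char *p = (char *)(page < a ? a : page);
                volatile char c = *p;
                if (for_write)
                        *p = c;
                page += PAGE_SIZE_AO;
        }
}

int main(void)
{
        size_t len = 3 * PAGE_SIZE_AO;
        char *buf = malloc(len);
        memset(buf, 0, len);
        probe_range(buf, len, 1);
        puts("probed 3 pages");
        free(buf);
        return 0;
}
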
17324diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
17325index 7f760a9..04b1c65 100644
17326--- a/arch/x86/include/asm/uaccess_32.h
17327+++ b/arch/x86/include/asm/uaccess_32.h
17328@@ -11,15 +11,15 @@
17329 #include <asm/page.h>
17330
17331 unsigned long __must_check __copy_to_user_ll
17332- (void __user *to, const void *from, unsigned long n);
17333+ (void __user *to, const void *from, unsigned long n) __size_overflow(3);
17334 unsigned long __must_check __copy_from_user_ll
17335- (void *to, const void __user *from, unsigned long n);
17336+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
17337 unsigned long __must_check __copy_from_user_ll_nozero
17338- (void *to, const void __user *from, unsigned long n);
17339+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
17340 unsigned long __must_check __copy_from_user_ll_nocache
17341- (void *to, const void __user *from, unsigned long n);
17342+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
17343 unsigned long __must_check __copy_from_user_ll_nocache_nozero
17344- (void *to, const void __user *from, unsigned long n);
17345+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
17346
17347 /**
17348 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
17349@@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
17350 static __always_inline unsigned long __must_check
17351 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
17352 {
17353+ if ((long)n < 0)
17354+ return n;
17355+
17356+ check_object_size(from, n, true);
17357+
17358 if (__builtin_constant_p(n)) {
17359 unsigned long ret;
17360
17361@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
17362 __copy_to_user(void __user *to, const void *from, unsigned long n)
17363 {
17364 might_fault();
17365+
17366 return __copy_to_user_inatomic(to, from, n);
17367 }
17368
17369 static __always_inline unsigned long
17370 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
17371 {
17372+ if ((long)n < 0)
17373+ return n;
17374+
17375 /* Avoid zeroing the tail if the copy fails..
17376 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
17377 * but as the zeroing behaviour is only significant when n is not
17378@@ -137,6 +146,12 @@ static __always_inline unsigned long
17379 __copy_from_user(void *to, const void __user *from, unsigned long n)
17380 {
17381 might_fault();
17382+
17383+ if ((long)n < 0)
17384+ return n;
17385+
17386+ check_object_size(to, n, false);
17387+
17388 if (__builtin_constant_p(n)) {
17389 unsigned long ret;
17390
17391@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
17392 const void __user *from, unsigned long n)
17393 {
17394 might_fault();
17395+
17396+ if ((long)n < 0)
17397+ return n;
17398+
17399 if (__builtin_constant_p(n)) {
17400 unsigned long ret;
17401
17402@@ -181,15 +200,19 @@ static __always_inline unsigned long
17403 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
17404 unsigned long n)
17405 {
17406- return __copy_from_user_ll_nocache_nozero(to, from, n);
17407+ if ((long)n < 0)
17408+ return n;
17409+
17410+ return __copy_from_user_ll_nocache_nozero(to, from, n);
17411 }
17412
17413-unsigned long __must_check copy_to_user(void __user *to,
17414- const void *from, unsigned long n);
17415-unsigned long __must_check _copy_from_user(void *to,
17416- const void __user *from,
17417- unsigned long n);
17418-
17419+extern void copy_to_user_overflow(void)
17420+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
17421+ __compiletime_error("copy_to_user() buffer size is not provably correct")
17422+#else
17423+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
17424+#endif
17425+;
17426
17427 extern void copy_from_user_overflow(void)
17428 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
17429@@ -199,17 +222,60 @@ extern void copy_from_user_overflow(void)
17430 #endif
17431 ;
17432
17433-static inline unsigned long __must_check copy_from_user(void *to,
17434- const void __user *from,
17435- unsigned long n)
17436+/**
17437+ * copy_to_user: - Copy a block of data into user space.
17438+ * @to: Destination address, in user space.
17439+ * @from: Source address, in kernel space.
17440+ * @n: Number of bytes to copy.
17441+ *
17442+ * Context: User context only. This function may sleep.
17443+ *
17444+ * Copy data from kernel space to user space.
17445+ *
17446+ * Returns number of bytes that could not be copied.
17447+ * On success, this will be zero.
17448+ */
17449+static inline unsigned long __must_check
17450+copy_to_user(void __user *to, const void *from, unsigned long n)
17451 {
17452- int sz = __compiletime_object_size(to);
17453+ size_t sz = __compiletime_object_size(from);
17454
17455- if (likely(sz == -1 || sz >= n))
17456- n = _copy_from_user(to, from, n);
17457- else
17458+ if (unlikely(sz != (size_t)-1 && sz < n))
17459+ copy_to_user_overflow();
17460+ else if (access_ok(VERIFY_WRITE, to, n))
17461+ n = __copy_to_user(to, from, n);
17462+ return n;
17463+}
17464+
17465+/**
17466+ * copy_from_user: - Copy a block of data from user space.
17467+ * @to: Destination address, in kernel space.
17468+ * @from: Source address, in user space.
17469+ * @n: Number of bytes to copy.
17470+ *
17471+ * Context: User context only. This function may sleep.
17472+ *
17473+ * Copy data from user space to kernel space.
17474+ *
17475+ * Returns number of bytes that could not be copied.
17476+ * On success, this will be zero.
17477+ *
17478+ * If some data could not be copied, this function will pad the copied
17479+ * data to the requested size using zero bytes.
17480+ */
17481+static inline unsigned long __must_check
17482+copy_from_user(void *to, const void __user *from, unsigned long n)
17483+{
17484+ size_t sz = __compiletime_object_size(to);
17485+
17486+ check_object_size(to, n, false);
17487+
17488+ if (unlikely(sz != (size_t)-1 && sz < n))
17489 copy_from_user_overflow();
17490-
17491+ else if (access_ok(VERIFY_READ, from, n))
17492+ n = __copy_from_user(to, from, n);
17493+ else if ((long)n > 0)
17494+ memset(to, 0, n);
17495 return n;
17496 }
17497
17498diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
17499index 142810c..1f2a0a7 100644
17500--- a/arch/x86/include/asm/uaccess_64.h
17501+++ b/arch/x86/include/asm/uaccess_64.h
17502@@ -10,6 +10,9 @@
17503 #include <asm/alternative.h>
17504 #include <asm/cpufeature.h>
17505 #include <asm/page.h>
17506+#include <asm/pgtable.h>
17507+
17508+#define set_fs(x) (current_thread_info()->addr_limit = (x))
17509
17510 /*
17511 * Copy To/From Userspace
17512@@ -17,13 +20,13 @@
17513
17514 /* Handles exceptions in both to and from, but doesn't do access_ok */
17515 __must_check unsigned long
17516-copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
17517+copy_user_enhanced_fast_string(void *to, const void *from, unsigned len) __size_overflow(3);
17518 __must_check unsigned long
17519-copy_user_generic_string(void *to, const void *from, unsigned len);
17520+copy_user_generic_string(void *to, const void *from, unsigned len) __size_overflow(3);
17521 __must_check unsigned long
17522-copy_user_generic_unrolled(void *to, const void *from, unsigned len);
17523+copy_user_generic_unrolled(void *to, const void *from, unsigned len) __size_overflow(3);
17524
17525-static __always_inline __must_check unsigned long
17526+static __always_inline __must_check __size_overflow(3) unsigned long
17527 copy_user_generic(void *to, const void *from, unsigned len)
17528 {
17529 unsigned ret;
17530@@ -41,142 +44,204 @@ copy_user_generic(void *to, const void *from, unsigned len)
17531 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
17532 "=d" (len)),
17533 "1" (to), "2" (from), "3" (len)
17534- : "memory", "rcx", "r8", "r9", "r10", "r11");
17535+ : "memory", "rcx", "r8", "r9", "r11");
17536 return ret;
17537 }
17538
17539+static __always_inline __must_check unsigned long
17540+__copy_to_user(void __user *to, const void *from, unsigned long len);
17541+static __always_inline __must_check unsigned long
17542+__copy_from_user(void *to, const void __user *from, unsigned long len);
17543 __must_check unsigned long
17544-_copy_to_user(void __user *to, const void *from, unsigned len);
17545-__must_check unsigned long
17546-_copy_from_user(void *to, const void __user *from, unsigned len);
17547-__must_check unsigned long
17548-copy_in_user(void __user *to, const void __user *from, unsigned len);
17549+copy_in_user(void __user *to, const void __user *from, unsigned long len);
17550+
17551+extern void copy_to_user_overflow(void)
17552+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
17553+ __compiletime_error("copy_to_user() buffer size is not provably correct")
17554+#else
17555+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
17556+#endif
17557+;
17558+
17559+extern void copy_from_user_overflow(void)
17560+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
17561+ __compiletime_error("copy_from_user() buffer size is not provably correct")
17562+#else
17563+ __compiletime_warning("copy_from_user() buffer size is not provably correct")
17564+#endif
17565+;
17566
17567 static inline unsigned long __must_check copy_from_user(void *to,
17568 const void __user *from,
17569 unsigned long n)
17570 {
17571- int sz = __compiletime_object_size(to);
17572-
17573 might_fault();
17574- if (likely(sz == -1 || sz >= n))
17575- n = _copy_from_user(to, from, n);
17576-#ifdef CONFIG_DEBUG_VM
17577- else
17578- WARN(1, "Buffer overflow detected!\n");
17579-#endif
17580+
17581+ check_object_size(to, n, false);
17582+
17583+ if (access_ok(VERIFY_READ, from, n))
17584+ n = __copy_from_user(to, from, n);
17585+ else if (n < INT_MAX)
17586+ memset(to, 0, n);
17587 return n;
17588 }
17589
17590 static __always_inline __must_check
17591-int copy_to_user(void __user *dst, const void *src, unsigned size)
17592+int copy_to_user(void __user *dst, const void *src, unsigned long size)
17593 {
17594 might_fault();
17595
17596- return _copy_to_user(dst, src, size);
17597+ if (access_ok(VERIFY_WRITE, dst, size))
17598+ size = __copy_to_user(dst, src, size);
17599+ return size;
17600 }
17601
17602 static __always_inline __must_check
17603-int __copy_from_user(void *dst, const void __user *src, unsigned size)
17604+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
17605 {
17606- int ret = 0;
17607+ size_t sz = __compiletime_object_size(dst);
17608+ unsigned ret = 0;
17609
17610 might_fault();
17611+
17612+ if (size > INT_MAX)
17613+ return size;
17614+
17615+ check_object_size(dst, size, false);
17616+
17617+#ifdef CONFIG_PAX_MEMORY_UDEREF
17618+ if (!__access_ok(VERIFY_READ, src, size))
17619+ return size;
17620+#endif
17621+
17622+ if (unlikely(sz != (size_t)-1 && sz < size)) {
17623+ copy_from_user_overflow();
17624+ return size;
17625+ }
17626+
17627 if (!__builtin_constant_p(size))
17628- return copy_user_generic(dst, (__force void *)src, size);
17629+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
17630 switch (size) {
17631- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
17632+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
17633 ret, "b", "b", "=q", 1);
17634 return ret;
17635- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
17636+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
17637 ret, "w", "w", "=r", 2);
17638 return ret;
17639- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
17640+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
17641 ret, "l", "k", "=r", 4);
17642 return ret;
17643- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
17644+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
17645 ret, "q", "", "=r", 8);
17646 return ret;
17647 case 10:
17648- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
17649+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
17650 ret, "q", "", "=r", 10);
17651 if (unlikely(ret))
17652 return ret;
17653 __get_user_asm(*(u16 *)(8 + (char *)dst),
17654- (u16 __user *)(8 + (char __user *)src),
17655+ (const u16 __user *)(8 + (const char __user *)src),
17656 ret, "w", "w", "=r", 2);
17657 return ret;
17658 case 16:
17659- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
17660+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
17661 ret, "q", "", "=r", 16);
17662 if (unlikely(ret))
17663 return ret;
17664 __get_user_asm(*(u64 *)(8 + (char *)dst),
17665- (u64 __user *)(8 + (char __user *)src),
17666+ (const u64 __user *)(8 + (const char __user *)src),
17667 ret, "q", "", "=r", 8);
17668 return ret;
17669 default:
17670- return copy_user_generic(dst, (__force void *)src, size);
17671+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
17672 }
17673 }
17674
17675 static __always_inline __must_check
17676-int __copy_to_user(void __user *dst, const void *src, unsigned size)
17677+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
17678 {
17679- int ret = 0;
17680+ size_t sz = __compiletime_object_size(src);
17681+ unsigned ret = 0;
17682
17683 might_fault();
17684+
17685+ if (size > INT_MAX)
17686+ return size;
17687+
17688+ check_object_size(src, size, true);
17689+
17690+#ifdef CONFIG_PAX_MEMORY_UDEREF
17691+ if (!__access_ok(VERIFY_WRITE, dst, size))
17692+ return size;
17693+#endif
17694+
17695+ if (unlikely(sz != (size_t)-1 && sz < size)) {
17696+ copy_to_user_overflow();
17697+ return size;
17698+ }
17699+
17700 if (!__builtin_constant_p(size))
17701- return copy_user_generic((__force void *)dst, src, size);
17702+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
17703 switch (size) {
17704- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
17705+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
17706 ret, "b", "b", "iq", 1);
17707 return ret;
17708- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
17709+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
17710 ret, "w", "w", "ir", 2);
17711 return ret;
17712- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
17713+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
17714 ret, "l", "k", "ir", 4);
17715 return ret;
17716- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
17717+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
17718 ret, "q", "", "er", 8);
17719 return ret;
17720 case 10:
17721- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
17722+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
17723 ret, "q", "", "er", 10);
17724 if (unlikely(ret))
17725 return ret;
17726 asm("":::"memory");
17727- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
17728+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
17729 ret, "w", "w", "ir", 2);
17730 return ret;
17731 case 16:
17732- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
17733+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
17734 ret, "q", "", "er", 16);
17735 if (unlikely(ret))
17736 return ret;
17737 asm("":::"memory");
17738- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
17739+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
17740 ret, "q", "", "er", 8);
17741 return ret;
17742 default:
17743- return copy_user_generic((__force void *)dst, src, size);
17744+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
17745 }
17746 }
17747
17748 static __always_inline __must_check
17749-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
17750+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
17751 {
17752- int ret = 0;
17753+ unsigned ret = 0;
17754
17755 might_fault();
17756+
17757+ if (size > INT_MAX)
17758+ return size;
17759+
17760+#ifdef CONFIG_PAX_MEMORY_UDEREF
17761+ if (!__access_ok(VERIFY_READ, src, size))
17762+ return size;
17763+ if (!__access_ok(VERIFY_WRITE, dst, size))
17764+ return size;
17765+#endif
17766+
17767 if (!__builtin_constant_p(size))
17768- return copy_user_generic((__force void *)dst,
17769- (__force void *)src, size);
17770+ return copy_user_generic((__force_kernel void *)____m(dst),
17771+ (__force_kernel const void *)____m(src), size);
17772 switch (size) {
17773 case 1: {
17774 u8 tmp;
17775- __get_user_asm(tmp, (u8 __user *)src,
17776+ __get_user_asm(tmp, (const u8 __user *)src,
17777 ret, "b", "b", "=q", 1);
17778 if (likely(!ret))
17779 __put_user_asm(tmp, (u8 __user *)dst,
17780@@ -185,7 +250,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
17781 }
17782 case 2: {
17783 u16 tmp;
17784- __get_user_asm(tmp, (u16 __user *)src,
17785+ __get_user_asm(tmp, (const u16 __user *)src,
17786 ret, "w", "w", "=r", 2);
17787 if (likely(!ret))
17788 __put_user_asm(tmp, (u16 __user *)dst,
17789@@ -195,7 +260,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
17790
17791 case 4: {
17792 u32 tmp;
17793- __get_user_asm(tmp, (u32 __user *)src,
17794+ __get_user_asm(tmp, (const u32 __user *)src,
17795 ret, "l", "k", "=r", 4);
17796 if (likely(!ret))
17797 __put_user_asm(tmp, (u32 __user *)dst,
17798@@ -204,7 +269,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
17799 }
17800 case 8: {
17801 u64 tmp;
17802- __get_user_asm(tmp, (u64 __user *)src,
17803+ __get_user_asm(tmp, (const u64 __user *)src,
17804 ret, "q", "", "=r", 8);
17805 if (likely(!ret))
17806 __put_user_asm(tmp, (u64 __user *)dst,
17807@@ -212,41 +277,72 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
17808 return ret;
17809 }
17810 default:
17811- return copy_user_generic((__force void *)dst,
17812- (__force void *)src, size);
17813+ return copy_user_generic((__force_kernel void *)____m(dst),
17814+ (__force_kernel const void *)____m(src), size);
17815 }
17816 }
17817
17818 static __must_check __always_inline int
17819-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
17820+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
17821 {
17822- return copy_user_generic(dst, (__force const void *)src, size);
17823+ if (size > INT_MAX)
17824+ return size;
17825+
17826+#ifdef CONFIG_PAX_MEMORY_UDEREF
17827+ if (!__access_ok(VERIFY_READ, src, size))
17828+ return size;
17829+#endif
17830+
17831+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
17832 }
17833
17834-static __must_check __always_inline int
17835-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
17836+static __must_check __always_inline unsigned long
17837+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
17838 {
17839- return copy_user_generic((__force void *)dst, src, size);
17840+ if (size > INT_MAX)
17841+ return size;
17842+
17843+#ifdef CONFIG_PAX_MEMORY_UDEREF
17844+ if (!__access_ok(VERIFY_WRITE, dst, size))
17845+ return size;
17846+#endif
17847+
17848+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
17849 }
17850
17851-extern long __copy_user_nocache(void *dst, const void __user *src,
17852- unsigned size, int zerorest);
17853+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
17854+ unsigned long size, int zerorest) __size_overflow(3);
17855
17856-static inline int
17857-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
17858+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
17859 {
17860 might_sleep();
17861+
17862+ if (size > INT_MAX)
17863+ return size;
17864+
17865+#ifdef CONFIG_PAX_MEMORY_UDEREF
17866+ if (!__access_ok(VERIFY_READ, src, size))
17867+ return size;
17868+#endif
17869+
17870 return __copy_user_nocache(dst, src, size, 1);
17871 }
17872
17873-static inline int
17874-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
17875- unsigned size)
17876+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
17877+ unsigned long size)
17878 {
17879+ if (size > INT_MAX)
17880+ return size;
17881+
17882+#ifdef CONFIG_PAX_MEMORY_UDEREF
17883+ if (!__access_ok(VERIFY_READ, src, size))
17884+ return size;
17885+#endif
17886+
17887 return __copy_user_nocache(dst, src, size, 0);
17888 }
17889
17890-unsigned long
17891-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
17892+extern unsigned long
17893+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
17894
17895 #endif /* _ASM_X86_UACCESS_64_H */
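
A pattern repeated through the 64-bit helpers above: lengths are widened to unsigned long and anything over INT_MAX is bounced immediately, so a negative length that wrapped to a huge unsigned value never reaches the low-level copy. Shown in isolation (the function name is assumed):

#include <limits.h>
#include <stdio.h>
#include <string.h>

static unsigned long guarded_copy(void *dst, const void *src, unsigned long size)
{
        if (size > INT_MAX)     /* a negative int cast to unsigned lands here */
                return size;    /* nothing copied; caller sees full residue */
        memcpy(dst, src, size);
        return 0;
}

int main(void)
{
        char dst[4], src[4] = "abc";
        int bad_len = -1;

        printf("%lu\n", guarded_copy(dst, src, 4));                      /* 0 */
        printf("%lu\n", guarded_copy(dst, src, (unsigned long)bad_len)); /* huge */
        return 0;
}
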
17896diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
17897index 5b238981..77fdd78 100644
17898--- a/arch/x86/include/asm/word-at-a-time.h
17899+++ b/arch/x86/include/asm/word-at-a-time.h
17900@@ -11,7 +11,7 @@
17901 * and shift, for example.
17902 */
17903 struct word_at_a_time {
17904- const unsigned long one_bits, high_bits;
17905+ unsigned long one_bits, high_bits;
17906 };
17907
17908 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
17909diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
17910index d8d9922..bf6cecb 100644
17911--- a/arch/x86/include/asm/x86_init.h
17912+++ b/arch/x86/include/asm/x86_init.h
17913@@ -129,7 +129,7 @@ struct x86_init_ops {
17914 struct x86_init_timers timers;
17915 struct x86_init_iommu iommu;
17916 struct x86_init_pci pci;
17917-};
17918+} __no_const;
17919
17920 /**
17921 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
17922@@ -140,7 +140,7 @@ struct x86_cpuinit_ops {
17923 void (*setup_percpu_clockev)(void);
17924 void (*early_percpu_clock_init)(void);
17925 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
17926-};
17927+} __no_const;
17928
17929 /**
17930 * struct x86_platform_ops - platform specific runtime functions
17931@@ -166,7 +166,7 @@ struct x86_platform_ops {
17932 void (*save_sched_clock_state)(void);
17933 void (*restore_sched_clock_state)(void);
17934 void (*apic_post_init)(void);
17935-};
17936+} __no_const;
17937
17938 struct pci_dev;
17939 struct msi_msg;
17940@@ -180,7 +180,7 @@ struct x86_msi_ops {
17941 void (*teardown_msi_irqs)(struct pci_dev *dev);
17942 void (*restore_msi_irqs)(struct pci_dev *dev, int irq);
17943 int (*setup_hpet_msi)(unsigned int irq, unsigned int id);
17944-};
17945+} __no_const;
17946
17947 struct IO_APIC_route_entry;
17948 struct io_apic_irq_attr;
17949@@ -201,7 +201,7 @@ struct x86_io_apic_ops {
17950 unsigned int destination, int vector,
17951 struct io_apic_irq_attr *attr);
17952 void (*eoi_ioapic_pin)(int apic, int pin, int vector);
17953-};
17954+} __no_const;
17955
17956 extern struct x86_init_ops x86_init;
17957 extern struct x86_cpuinit_ops x86_cpuinit;
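
The __no_const annotations above support grsecurity's constification of function-pointer tables: ops structs that never change after boot end up read-only (see the apic templates marked __read_only further down), while __no_const opts out the few, like x86_init, that are legitimately rewritten during setup. Plain const shows the payoff in isolation (demo names assumed):

#include <stdio.h>

struct msi_ops {
        int (*setup)(int irq);
};

static int real_setup(int irq) { return irq; }

/* const + static initializer: the pointers live in .rodata, so a
 * stray (or hostile) write can't redirect them at runtime.        */
static const struct msi_ops msi_ops = {
        .setup = real_setup,
};

int main(void)
{
        /* msi_ops.setup = evil_setup;  <- compile error, and a
         * runtime fault even if forced through a cast            */
        printf("%d\n", msi_ops.setup(42));
        return 0;
}
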
17958diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
17959index 0415cda..3b22adc 100644
17960--- a/arch/x86/include/asm/xsave.h
17961+++ b/arch/x86/include/asm/xsave.h
17962@@ -70,8 +70,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
17963 if (unlikely(err))
17964 return -EFAULT;
17965
17966+ pax_open_userland();
17967 __asm__ __volatile__(ASM_STAC "\n"
17968- "1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
17969+ "1:"
17970+ __copyuser_seg
17971+ ".byte " REX_PREFIX "0x0f,0xae,0x27\n"
17972 "2: " ASM_CLAC "\n"
17973 ".section .fixup,\"ax\"\n"
17974 "3: movl $-1,%[err]\n"
17975@@ -81,18 +84,22 @@ static inline int xsave_user(struct xsave_struct __user *buf)
17976 : [err] "=r" (err)
17977 : "D" (buf), "a" (-1), "d" (-1), "0" (0)
17978 : "memory");
17979+ pax_close_userland();
17980 return err;
17981 }
17982
17983 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
17984 {
17985 int err;
17986- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
17987+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
17988 u32 lmask = mask;
17989 u32 hmask = mask >> 32;
17990
17991+ pax_open_userland();
17992 __asm__ __volatile__(ASM_STAC "\n"
17993- "1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
17994+ "1:"
17995+ __copyuser_seg
17996+ ".byte " REX_PREFIX "0x0f,0xae,0x2f\n"
17997 "2: " ASM_CLAC "\n"
17998 ".section .fixup,\"ax\"\n"
17999 "3: movl $-1,%[err]\n"
18000@@ -102,6 +109,7 @@ static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
18001 : [err] "=r" (err)
18002 : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
18003 : "memory"); /* memory required? */
18004+ pax_close_userland();
18005 return err;
18006 }
18007
18008diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
18009index bbae024..e1528f9 100644
18010--- a/arch/x86/include/uapi/asm/e820.h
18011+++ b/arch/x86/include/uapi/asm/e820.h
18012@@ -63,7 +63,7 @@ struct e820map {
18013 #define ISA_START_ADDRESS 0xa0000
18014 #define ISA_END_ADDRESS 0x100000
18015
18016-#define BIOS_BEGIN 0x000a0000
18017+#define BIOS_BEGIN 0x000c0000
18018 #define BIOS_END 0x00100000
18019
18020 #define BIOS_ROM_BASE 0xffe00000
18021diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
18022index 7bd3bd3..5dac791 100644
18023--- a/arch/x86/kernel/Makefile
18024+++ b/arch/x86/kernel/Makefile
18025@@ -22,7 +22,7 @@ obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
18026 obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
18027 obj-$(CONFIG_IRQ_WORK) += irq_work.o
18028 obj-y += probe_roms.o
18029-obj-$(CONFIG_X86_32) += i386_ksyms_32.o
18030+obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
18031 obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
18032 obj-y += syscall_$(BITS).o
18033 obj-$(CONFIG_X86_64) += vsyscall_64.o
18034diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
18035index 230c8ea..f915130 100644
18036--- a/arch/x86/kernel/acpi/boot.c
18037+++ b/arch/x86/kernel/acpi/boot.c
18038@@ -1361,7 +1361,7 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
18039 * If your system is blacklisted here, but you find that acpi=force
18040 * works for you, please contact linux-acpi@vger.kernel.org
18041 */
18042-static struct dmi_system_id __initdata acpi_dmi_table[] = {
18043+static const struct dmi_system_id __initconst acpi_dmi_table[] = {
18044 /*
18045 * Boxes that need ACPI disabled
18046 */
18047@@ -1436,7 +1436,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
18048 };
18049
18050 /* second table for DMI checks that should run after early-quirks */
18051-static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
18052+static const struct dmi_system_id __initconst acpi_dmi_table_late[] = {
18053 /*
18054 * HP laptops which use a DSDT reporting as HP/SB400/10000,
18055 * which includes some code which overrides all temperature
18056diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
18057index ec94e11..7fbbec0 100644
18058--- a/arch/x86/kernel/acpi/sleep.c
18059+++ b/arch/x86/kernel/acpi/sleep.c
18060@@ -88,8 +88,12 @@ int acpi_suspend_lowlevel(void)
18061 #else /* CONFIG_64BIT */
18062 #ifdef CONFIG_SMP
18063 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
18064+
18065+ pax_open_kernel();
18066 early_gdt_descr.address =
18067 (unsigned long)get_cpu_gdt_table(smp_processor_id());
18068+ pax_close_kernel();
18069+
18070 initial_gs = per_cpu_offset(smp_processor_id());
18071 #endif
18072 initial_code = (unsigned long)wakeup_long64;
18073diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
18074index d1daa66..59fecba 100644
18075--- a/arch/x86/kernel/acpi/wakeup_32.S
18076+++ b/arch/x86/kernel/acpi/wakeup_32.S
18077@@ -29,13 +29,11 @@ wakeup_pmode_return:
18078 # and restore the stack ... but you need gdt for this to work
18079 movl saved_context_esp, %esp
18080
18081- movl %cs:saved_magic, %eax
18082- cmpl $0x12345678, %eax
18083+ cmpl $0x12345678, saved_magic
18084 jne bogus_magic
18085
18086 # jump to place where we left off
18087- movl saved_eip, %eax
18088- jmp *%eax
18089+ jmp *(saved_eip)
18090
18091 bogus_magic:
18092 jmp bogus_magic
18093diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
18094index c15cf9a..0e63558 100644
18095--- a/arch/x86/kernel/alternative.c
18096+++ b/arch/x86/kernel/alternative.c
18097@@ -268,6 +268,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
18098 */
18099 for (a = start; a < end; a++) {
18100 instr = (u8 *)&a->instr_offset + a->instr_offset;
18101+
18102+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18103+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
18104+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
18105+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
18106+#endif
18107+
18108 replacement = (u8 *)&a->repl_offset + a->repl_offset;
18109 BUG_ON(a->replacementlen > a->instrlen);
18110 BUG_ON(a->instrlen > sizeof(insnbuf));
18111@@ -299,10 +306,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
18112 for (poff = start; poff < end; poff++) {
18113 u8 *ptr = (u8 *)poff + *poff;
18114
18115+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18116+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
18117+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
18118+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
18119+#endif
18120+
18121 if (!*poff || ptr < text || ptr >= text_end)
18122 continue;
18123 /* turn DS segment override prefix into lock prefix */
18124- if (*ptr == 0x3e)
18125+ if (*ktla_ktva(ptr) == 0x3e)
18126 text_poke(ptr, ((unsigned char []){0xf0}), 1);
18127 }
18128 mutex_unlock(&text_mutex);
18129@@ -317,10 +330,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
18130 for (poff = start; poff < end; poff++) {
18131 u8 *ptr = (u8 *)poff + *poff;
18132
18133+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18134+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
18135+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
18136+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
18137+#endif
18138+
18139 if (!*poff || ptr < text || ptr >= text_end)
18140 continue;
18141 /* turn lock prefix into DS segment override prefix */
18142- if (*ptr == 0xf0)
18143+ if (*ktla_ktva(ptr) == 0xf0)
18144 text_poke(ptr, ((unsigned char []){0x3E}), 1);
18145 }
18146 mutex_unlock(&text_mutex);
18147@@ -468,7 +487,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
18148
18149 BUG_ON(p->len > MAX_PATCH_LEN);
18150 /* prep the buffer with the original instructions */
18151- memcpy(insnbuf, p->instr, p->len);
18152+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
18153 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
18154 (unsigned long)p->instr, p->len);
18155
18156@@ -515,7 +534,7 @@ void __init alternative_instructions(void)
18157 if (!uniproc_patched || num_possible_cpus() == 1)
18158 free_init_pages("SMP alternatives",
18159 (unsigned long)__smp_locks,
18160- (unsigned long)__smp_locks_end);
18161+ PAGE_ALIGN((unsigned long)__smp_locks_end));
18162 #endif
18163
18164 apply_paravirt(__parainstructions, __parainstructions_end);
18165@@ -535,13 +554,17 @@ void __init alternative_instructions(void)
18166 * instructions. And on the local CPU you need to be protected against NMI or MCE
18167 * handlers seeing an inconsistent instruction while you patch.
18168 */
18169-void *__init_or_module text_poke_early(void *addr, const void *opcode,
18170+void *__kprobes text_poke_early(void *addr, const void *opcode,
18171 size_t len)
18172 {
18173 unsigned long flags;
18174 local_irq_save(flags);
18175- memcpy(addr, opcode, len);
18176+
18177+ pax_open_kernel();
18178+ memcpy(ktla_ktva(addr), opcode, len);
18179 sync_core();
18180+ pax_close_kernel();
18181+
18182 local_irq_restore(flags);
18183 /* Could also do a CLFLUSH here to speed up CPU recovery; but
18184 that causes hangs on some VIA CPUs. */
18185@@ -563,36 +586,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
18186 */
18187 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
18188 {
18189- unsigned long flags;
18190- char *vaddr;
18191+ unsigned char *vaddr = ktla_ktva(addr);
18192 struct page *pages[2];
18193- int i;
18194+ size_t i;
18195
18196 if (!core_kernel_text((unsigned long)addr)) {
18197- pages[0] = vmalloc_to_page(addr);
18198- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
18199+ pages[0] = vmalloc_to_page(vaddr);
18200+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
18201 } else {
18202- pages[0] = virt_to_page(addr);
18203+ pages[0] = virt_to_page(vaddr);
18204 WARN_ON(!PageReserved(pages[0]));
18205- pages[1] = virt_to_page(addr + PAGE_SIZE);
18206+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
18207 }
18208 BUG_ON(!pages[0]);
18209- local_irq_save(flags);
18210- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
18211- if (pages[1])
18212- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
18213- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
18214- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
18215- clear_fixmap(FIX_TEXT_POKE0);
18216- if (pages[1])
18217- clear_fixmap(FIX_TEXT_POKE1);
18218- local_flush_tlb();
18219- sync_core();
18220- /* Could also do a CLFLUSH here to speed up CPU recovery; but
18221- that causes hangs on some VIA CPUs. */
18222+ text_poke_early(addr, opcode, len);
18223 for (i = 0; i < len; i++)
18224- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
18225- local_irq_restore(flags);
18226+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
18227 return addr;
18228 }
18229
18230diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
18231index 904611b..004dde6 100644
18232--- a/arch/x86/kernel/apic/apic.c
18233+++ b/arch/x86/kernel/apic/apic.c
18234@@ -189,7 +189,7 @@ int first_system_vector = 0xfe;
18235 /*
18236 * Debug level, exported for io_apic.c
18237 */
18238-unsigned int apic_verbosity;
18239+int apic_verbosity;
18240
18241 int pic_mode;
18242
18243@@ -1955,7 +1955,7 @@ void smp_error_interrupt(struct pt_regs *regs)
18244 apic_write(APIC_ESR, 0);
18245 v1 = apic_read(APIC_ESR);
18246 ack_APIC_irq();
18247- atomic_inc(&irq_err_count);
18248+ atomic_inc_unchecked(&irq_err_count);
18249
18250 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
18251 smp_processor_id(), v0 , v1);
18252diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
18253index 00c77cf..2dc6a2d 100644
18254--- a/arch/x86/kernel/apic/apic_flat_64.c
18255+++ b/arch/x86/kernel/apic/apic_flat_64.c
18256@@ -157,7 +157,7 @@ static int flat_probe(void)
18257 return 1;
18258 }
18259
18260-static struct apic apic_flat = {
18261+static struct apic apic_flat __read_only = {
18262 .name = "flat",
18263 .probe = flat_probe,
18264 .acpi_madt_oem_check = flat_acpi_madt_oem_check,
18265@@ -271,7 +271,7 @@ static int physflat_probe(void)
18266 return 0;
18267 }
18268
18269-static struct apic apic_physflat = {
18270+static struct apic apic_physflat __read_only = {
18271
18272 .name = "physical flat",
18273 .probe = physflat_probe,
18274diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
18275index e145f28..2752888 100644
18276--- a/arch/x86/kernel/apic/apic_noop.c
18277+++ b/arch/x86/kernel/apic/apic_noop.c
18278@@ -119,7 +119,7 @@ static void noop_apic_write(u32 reg, u32 v)
18279 WARN_ON_ONCE(cpu_has_apic && !disable_apic);
18280 }
18281
18282-struct apic apic_noop = {
18283+struct apic apic_noop __read_only = {
18284 .name = "noop",
18285 .probe = noop_probe,
18286 .acpi_madt_oem_check = NULL,
18287diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
18288index d50e364..543bee3 100644
18289--- a/arch/x86/kernel/apic/bigsmp_32.c
18290+++ b/arch/x86/kernel/apic/bigsmp_32.c
18291@@ -152,7 +152,7 @@ static int probe_bigsmp(void)
18292 return dmi_bigsmp;
18293 }
18294
18295-static struct apic apic_bigsmp = {
18296+static struct apic apic_bigsmp __read_only = {
18297
18298 .name = "bigsmp",
18299 .probe = probe_bigsmp,
18300diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
18301index 0874799..a7a7892 100644
18302--- a/arch/x86/kernel/apic/es7000_32.c
18303+++ b/arch/x86/kernel/apic/es7000_32.c
18304@@ -608,8 +608,7 @@ static int es7000_mps_oem_check_cluster(struct mpc_table *mpc, char *oem,
18305 return ret && es7000_apic_is_cluster();
18306 }
18307
18308-/* We've been warned by a false positive warning.Use __refdata to keep calm. */
18309-static struct apic __refdata apic_es7000_cluster = {
18310+static struct apic apic_es7000_cluster __read_only = {
18311
18312 .name = "es7000",
18313 .probe = probe_es7000,
18314@@ -675,7 +674,7 @@ static struct apic __refdata apic_es7000_cluster = {
18315 .x86_32_early_logical_apicid = es7000_early_logical_apicid,
18316 };
18317
18318-static struct apic __refdata apic_es7000 = {
18319+static struct apic apic_es7000 __read_only = {
18320
18321 .name = "es7000",
18322 .probe = probe_es7000,
18323diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
18324index 9ed796c..e930fe4 100644
18325--- a/arch/x86/kernel/apic/io_apic.c
18326+++ b/arch/x86/kernel/apic/io_apic.c
18327@@ -1060,7 +1060,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
18328 }
18329 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
18330
18331-void lock_vector_lock(void)
18332+void lock_vector_lock(void) __acquires(vector_lock)
18333 {
18334 /* Used to the online set of cpus does not change
18335 * during assign_irq_vector.
18336@@ -1068,7 +1068,7 @@ void lock_vector_lock(void)
18337 raw_spin_lock(&vector_lock);
18338 }
18339
18340-void unlock_vector_lock(void)
18341+void unlock_vector_lock(void) __releases(vector_lock)
18342 {
18343 raw_spin_unlock(&vector_lock);
18344 }
18345@@ -2362,7 +2362,7 @@ static void ack_apic_edge(struct irq_data *data)
18346 ack_APIC_irq();
18347 }
18348
18349-atomic_t irq_mis_count;
18350+atomic_unchecked_t irq_mis_count;
18351
18352 #ifdef CONFIG_GENERIC_PENDING_IRQ
18353 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
18354@@ -2503,7 +2503,7 @@ static void ack_apic_level(struct irq_data *data)
18355 * at the cpu.
18356 */
18357 if (!(v & (1 << (i & 0x1f)))) {
18358- atomic_inc(&irq_mis_count);
18359+ atomic_inc_unchecked(&irq_mis_count);
18360
18361 eoi_ioapic_irq(irq, cfg);
18362 }
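irq_err_count and irq_mis_count switch from atomic_t to atomic_unchecked_t here: under the PaX REFCOUNT hardening, plain atomic increments trap on signed overflow, and the _unchecked variants are the opt-out for pure statistics counters where wraparound is harmless. The idea, modelled with C11 atomics (the real check is inline asm on the CPU overflow flag, not a comparison):

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { atomic_int counter; } atomic_checked_t;
typedef struct { atomic_int counter; } atomic_unchecked_t;

static void atomic_inc(atomic_checked_t *v)
{
	/* REFCOUNT-style semantics: dying on wraparound beats exploitation */
	if (atomic_fetch_add(&v->counter, 1) == INT_MAX) {
		fprintf(stderr, "refcount overflow detected\n");
		abort();
	}
}

static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	atomic_fetch_add(&v->counter, 1);	/* statistics: wrap is harmless */
}

int main(void)
{
	atomic_unchecked_t irq_err_count = { 0 };

	atomic_inc_unchecked(&irq_err_count);
	printf("irq errors: %d\n", atomic_load(&irq_err_count.counter));

	atomic_checked_t refs = { INT_MAX };
	atomic_inc(&refs);			/* aborts: wrapped past INT_MAX */
	return 0;
}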
18363diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
18364index d661ee9..791fd33 100644
18365--- a/arch/x86/kernel/apic/numaq_32.c
18366+++ b/arch/x86/kernel/apic/numaq_32.c
18367@@ -455,8 +455,7 @@ static void numaq_setup_portio_remap(void)
18368 (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD);
18369 }
18370
18371-/* Use __refdata to keep false positive warning calm. */
18372-static struct apic __refdata apic_numaq = {
18373+static struct apic apic_numaq __read_only = {
18374
18375 .name = "NUMAQ",
18376 .probe = probe_numaq,
18377diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
18378index eb35ef9..f184a21 100644
18379--- a/arch/x86/kernel/apic/probe_32.c
18380+++ b/arch/x86/kernel/apic/probe_32.c
18381@@ -72,7 +72,7 @@ static int probe_default(void)
18382 return 1;
18383 }
18384
18385-static struct apic apic_default = {
18386+static struct apic apic_default __read_only = {
18387
18388 .name = "default",
18389 .probe = probe_default,
18390diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c
18391index 77c95c0..434f8a4 100644
18392--- a/arch/x86/kernel/apic/summit_32.c
18393+++ b/arch/x86/kernel/apic/summit_32.c
18394@@ -486,7 +486,7 @@ void setup_summit(void)
18395 }
18396 #endif
18397
18398-static struct apic apic_summit = {
18399+static struct apic apic_summit __read_only = {
18400
18401 .name = "summit",
18402 .probe = probe_summit,
18403diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
18404index c88baa4..757aee1 100644
18405--- a/arch/x86/kernel/apic/x2apic_cluster.c
18406+++ b/arch/x86/kernel/apic/x2apic_cluster.c
18407@@ -183,7 +183,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
18408 return notifier_from_errno(err);
18409 }
18410
18411-static struct notifier_block __refdata x2apic_cpu_notifier = {
18412+static struct notifier_block x2apic_cpu_notifier = {
18413 .notifier_call = update_clusterinfo,
18414 };
18415
18416@@ -235,7 +235,7 @@ static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
18417 cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
18418 }
18419
18420-static struct apic apic_x2apic_cluster = {
18421+static struct apic apic_x2apic_cluster __read_only = {
18422
18423 .name = "cluster x2apic",
18424 .probe = x2apic_cluster_probe,
18425diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
18426index 562a76d..a003c0f 100644
18427--- a/arch/x86/kernel/apic/x2apic_phys.c
18428+++ b/arch/x86/kernel/apic/x2apic_phys.c
18429@@ -89,7 +89,7 @@ static int x2apic_phys_probe(void)
18430 return apic == &apic_x2apic_phys;
18431 }
18432
18433-static struct apic apic_x2apic_phys = {
18434+static struct apic apic_x2apic_phys __read_only = {
18435
18436 .name = "physical x2apic",
18437 .probe = x2apic_phys_probe,
18438diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
18439index 794f6eb..67e1db2 100644
18440--- a/arch/x86/kernel/apic/x2apic_uv_x.c
18441+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
18442@@ -342,7 +342,7 @@ static int uv_probe(void)
18443 return apic == &apic_x2apic_uv_x;
18444 }
18445
18446-static struct apic __refdata apic_x2apic_uv_x = {
18447+static struct apic apic_x2apic_uv_x __read_only = {
18448
18449 .name = "UV large system",
18450 .probe = uv_probe,
18451diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
18452index 53a4e27..038760a 100644
18453--- a/arch/x86/kernel/apm_32.c
18454+++ b/arch/x86/kernel/apm_32.c
18455@@ -433,7 +433,7 @@ static DEFINE_MUTEX(apm_mutex);
18456 * This is for buggy BIOS's that refer to (real mode) segment 0x40
18457 * even though they are called in protected mode.
18458 */
18459-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
18460+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
18461 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
18462
18463 static const char driver_version[] = "1.16ac"; /* no spaces */
18464@@ -611,7 +611,10 @@ static long __apm_bios_call(void *_call)
18465 BUG_ON(cpu != 0);
18466 gdt = get_cpu_gdt_table(cpu);
18467 save_desc_40 = gdt[0x40 / 8];
18468+
18469+ pax_open_kernel();
18470 gdt[0x40 / 8] = bad_bios_desc;
18471+ pax_close_kernel();
18472
18473 apm_irq_save(flags);
18474 APM_DO_SAVE_SEGS;
18475@@ -620,7 +623,11 @@ static long __apm_bios_call(void *_call)
18476 &call->esi);
18477 APM_DO_RESTORE_SEGS;
18478 apm_irq_restore(flags);
18479+
18480+ pax_open_kernel();
18481 gdt[0x40 / 8] = save_desc_40;
18482+ pax_close_kernel();
18483+
18484 put_cpu();
18485
18486 return call->eax & 0xff;
18487@@ -687,7 +694,10 @@ static long __apm_bios_call_simple(void *_call)
18488 BUG_ON(cpu != 0);
18489 gdt = get_cpu_gdt_table(cpu);
18490 save_desc_40 = gdt[0x40 / 8];
18491+
18492+ pax_open_kernel();
18493 gdt[0x40 / 8] = bad_bios_desc;
18494+ pax_close_kernel();
18495
18496 apm_irq_save(flags);
18497 APM_DO_SAVE_SEGS;
18498@@ -695,7 +705,11 @@ static long __apm_bios_call_simple(void *_call)
18499 &call->eax);
18500 APM_DO_RESTORE_SEGS;
18501 apm_irq_restore(flags);
18502+
18503+ pax_open_kernel();
18504 gdt[0x40 / 8] = save_desc_40;
18505+ pax_close_kernel();
18506+
18507 put_cpu();
18508 return error;
18509 }
18510@@ -2362,12 +2376,15 @@ static int __init apm_init(void)
18511 * code to that CPU.
18512 */
18513 gdt = get_cpu_gdt_table(0);
18514+
18515+ pax_open_kernel();
18516 set_desc_base(&gdt[APM_CS >> 3],
18517 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
18518 set_desc_base(&gdt[APM_CS_16 >> 3],
18519 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
18520 set_desc_base(&gdt[APM_DS >> 3],
18521 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
18522+ pax_close_kernel();
18523
18524 proc_create("apm", 0, NULL, &apm_file_ops);
18525
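Every write to the live GDT in apm_32.c is now bracketed by pax_open_kernel()/pax_close_kernel(), which briefly lift KERNEXEC's write protection around a legitimate modification of otherwise read-only kernel data. Modelled in user space with mprotect() on a page-aligned object; in the kernel the mechanism is CR0/paging tricks, not mprotect:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

static char *gdt_page;		/* stands in for the per-CPU GDT */

static void pax_open_kernel(void)
{
	mprotect(gdt_page, 4096, PROT_READ | PROT_WRITE); /* lift protection */
}

static void pax_close_kernel(void)
{
	mprotect(gdt_page, 4096, PROT_READ);		  /* restore it */
}

int main(void)
{
	gdt_page = mmap(NULL, 4096, PROT_READ,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (gdt_page == MAP_FAILED)
		return 1;

	pax_open_kernel();
	strcpy(gdt_page, "patched descriptor");	/* the bracketed write */
	pax_close_kernel();

	printf("%s\n", gdt_page);		/* reads stay allowed */
	return 0;
}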
18526diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
18527index 2861082..6d4718e 100644
18528--- a/arch/x86/kernel/asm-offsets.c
18529+++ b/arch/x86/kernel/asm-offsets.c
18530@@ -33,6 +33,8 @@ void common(void) {
18531 OFFSET(TI_status, thread_info, status);
18532 OFFSET(TI_addr_limit, thread_info, addr_limit);
18533 OFFSET(TI_preempt_count, thread_info, preempt_count);
18534+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
18535+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
18536
18537 BLANK();
18538 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
18539@@ -53,8 +55,26 @@ void common(void) {
18540 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
18541 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
18542 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
18543+
18544+#ifdef CONFIG_PAX_KERNEXEC
18545+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
18546 #endif
18547
18548+#ifdef CONFIG_PAX_MEMORY_UDEREF
18549+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
18550+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
18551+#ifdef CONFIG_X86_64
18552+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
18553+#endif
18554+#endif
18555+
18556+#endif
18557+
18558+ BLANK();
18559+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
18560+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
18561+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
18562+
18563 #ifdef CONFIG_XEN
18564 BLANK();
18565 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
18566diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
18567index e7c798b..2b2019b 100644
18568--- a/arch/x86/kernel/asm-offsets_64.c
18569+++ b/arch/x86/kernel/asm-offsets_64.c
18570@@ -77,6 +77,7 @@ int main(void)
18571 BLANK();
18572 #undef ENTRY
18573
18574+ DEFINE(TSS_size, sizeof(struct tss_struct));
18575 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
18576 BLANK();
18577
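asm-offsets.c and asm-offsets_64.c are never linked into the kernel: each OFFSET()/DEFINE() emits a marker into the "cc -S" output that kbuild scrapes into a generated header of #define lines, which is how entry_32.S can later write TI_lowest_stack(%ebp). A runnable model of that pipeline that prints the would-be header instead of embedding asm markers; the struct layout and constants are illustrative:

#include <stddef.h>
#include <stdio.h>

struct thread_info {
	unsigned long flags;
	unsigned long lowest_stack;		/* the field the hunk adds */
};

/* the kernel's macros emit "->NAME value" markers into assembly output;
 * this model prints the header that kbuild would generate from them */
#define DEFINE(sym, val) \
	printf("#define %-24s %zu\n", #sym, (size_t)(val))
#define OFFSET(sym, str, mem)	DEFINE(sym, offsetof(struct str, mem))

int main(void)
{
	OFFSET(TI_lowest_stack, thread_info, lowest_stack); /* byte offset */
	DEFINE(PAGE_SIZE_asm, 4096);	/* plain constants usable from .S */
	DEFINE(THREAD_SIZE_asm, 8192);
	return 0;
}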
18578diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
18579index b0684e4..22ccfd7 100644
18580--- a/arch/x86/kernel/cpu/Makefile
18581+++ b/arch/x86/kernel/cpu/Makefile
18582@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
18583 CFLAGS_REMOVE_perf_event.o = -pg
18584 endif
18585
18586-# Make sure load_percpu_segment has no stackprotector
18587-nostackp := $(call cc-option, -fno-stack-protector)
18588-CFLAGS_common.o := $(nostackp)
18589-
18590 obj-y := intel_cacheinfo.o scattered.o topology.o
18591 obj-y += proc.o capflags.o powerflags.o common.o
18592 obj-y += rdrand.o
18593diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
18594index 5013a48..0782c53 100644
18595--- a/arch/x86/kernel/cpu/amd.c
18596+++ b/arch/x86/kernel/cpu/amd.c
18597@@ -744,7 +744,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
18598 unsigned int size)
18599 {
18600 /* AMD errata T13 (order #21922) */
18601- if ((c->x86 == 6)) {
18602+ if (c->x86 == 6) {
18603 /* Duron Rev A0 */
18604 if (c->x86_model == 3 && c->x86_mask == 0)
18605 size = 64;
18606diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
18607index 22018f7..a5883af 100644
18608--- a/arch/x86/kernel/cpu/common.c
18609+++ b/arch/x86/kernel/cpu/common.c
18610@@ -88,60 +88,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
18611
18612 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
18613
18614-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
18615-#ifdef CONFIG_X86_64
18616- /*
18617- * We need valid kernel segments for data and code in long mode too
18618- * IRET will check the segment types kkeil 2000/10/28
18619- * Also sysret mandates a special GDT layout
18620- *
18621- * TLS descriptors are currently at a different place compared to i386.
18622- * Hopefully nobody expects them at a fixed place (Wine?)
18623- */
18624- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
18625- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
18626- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
18627- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
18628- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
18629- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
18630-#else
18631- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
18632- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
18633- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
18634- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
18635- /*
18636- * Segments used for calling PnP BIOS have byte granularity.
18637- * They code segments and data segments have fixed 64k limits,
18638- * the transfer segment sizes are set at run time.
18639- */
18640- /* 32-bit code */
18641- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
18642- /* 16-bit code */
18643- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
18644- /* 16-bit data */
18645- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
18646- /* 16-bit data */
18647- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
18648- /* 16-bit data */
18649- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
18650- /*
18651- * The APM segments have byte granularity and their bases
18652- * are set at run time. All have 64k limits.
18653- */
18654- /* 32-bit code */
18655- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
18656- /* 16-bit code */
18657- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
18658- /* data */
18659- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
18660-
18661- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
18662- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
18663- GDT_STACK_CANARY_INIT
18664-#endif
18665-} };
18666-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
18667-
18668 static int __init x86_xsave_setup(char *s)
18669 {
18670 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
18671@@ -288,6 +234,40 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
18672 set_in_cr4(X86_CR4_SMAP);
18673 }
18674
18675+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18676+static __init int setup_disable_pcid(char *arg)
18677+{
18678+ setup_clear_cpu_cap(X86_FEATURE_PCID);
18679+ if (clone_pgd_mask != ~(pgdval_t)0UL)
18680+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
18681+ return 1;
18682+}
18683+__setup("nopcid", setup_disable_pcid);
18684+
18685+static void setup_pcid(struct cpuinfo_x86 *c)
18686+{
18687+ if (cpu_has(c, X86_FEATURE_PCID))
18688+ printk("PAX: PCID detected\n");
18689+
18690+ if (cpu_has(c, X86_FEATURE_INVPCID))
18691+ printk("PAX: INVPCID detected\n");
18692+
18693+ if (cpu_has(c, X86_FEATURE_PCID)) {
18694+ set_in_cr4(X86_CR4_PCIDE);
18695+ clone_pgd_mask = ~(pgdval_t)0UL;
18696+ if (pax_user_shadow_base)
18697+ printk("PAX: weak UDEREF enabled\n");
18698+ else {
18699+ set_cpu_cap(c, X86_FEATURE_STRONGUDEREF);
18700+ printk("PAX: strong UDEREF enabled\n");
18701+ }
18702+ } else if (pax_user_shadow_base)
18703+ printk("PAX: slow and weak UDEREF enabled\n");
18704+ else
18705+ printk("PAX: UDEREF disabled\n");
18706+}
18707+#endif
18708+
18709 /*
18710 * Some CPU features depend on higher CPUID levels, which may not always
18711 * be available due to CPUID level capping or broken virtualization
18712@@ -386,7 +366,7 @@ void switch_to_new_gdt(int cpu)
18713 {
18714 struct desc_ptr gdt_descr;
18715
18716- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
18717+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
18718 gdt_descr.size = GDT_SIZE - 1;
18719 load_gdt(&gdt_descr);
18720 /* Reload the per-cpu base */
18721@@ -874,6 +854,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
18722 setup_smep(c);
18723 setup_smap(c);
18724
18725+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18726+ setup_pcid(c);
18727+#endif
18728+
18729 /*
18730 * The vendor-specific functions might have changed features.
18731 * Now we do "generic changes."
18732@@ -882,6 +866,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
18733 /* Filter out anything that depends on CPUID levels we don't have */
18734 filter_cpuid_features(c, true);
18735
18736+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
18737+ setup_clear_cpu_cap(X86_FEATURE_SEP);
18738+#endif
18739+
18740 /* If the model name is still unset, do table lookup. */
18741 if (!c->x86_model_id[0]) {
18742 const char *p;
18743@@ -1069,10 +1057,12 @@ static __init int setup_disablecpuid(char *arg)
18744 }
18745 __setup("clearcpuid=", setup_disablecpuid);
18746
18747+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
18748+EXPORT_PER_CPU_SYMBOL(current_tinfo);
18749+
18750 #ifdef CONFIG_X86_64
18751 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
18752-struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1,
18753- (unsigned long) nmi_idt_table };
18754+struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) nmi_idt_table };
18755
18756 DEFINE_PER_CPU_FIRST(union irq_stack_union,
18757 irq_stack_union) __aligned(PAGE_SIZE);
18758@@ -1086,7 +1076,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
18759 EXPORT_PER_CPU_SYMBOL(current_task);
18760
18761 DEFINE_PER_CPU(unsigned long, kernel_stack) =
18762- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
18763+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
18764 EXPORT_PER_CPU_SYMBOL(kernel_stack);
18765
18766 DEFINE_PER_CPU(char *, irq_stack_ptr) =
18767@@ -1231,7 +1221,7 @@ void __cpuinit cpu_init(void)
18768 load_ucode_ap();
18769
18770 cpu = stack_smp_processor_id();
18771- t = &per_cpu(init_tss, cpu);
18772+ t = init_tss + cpu;
18773 oist = &per_cpu(orig_ist, cpu);
18774
18775 #ifdef CONFIG_NUMA
18776@@ -1257,7 +1247,7 @@ void __cpuinit cpu_init(void)
18777 switch_to_new_gdt(cpu);
18778 loadsegment(fs, 0);
18779
18780- load_idt((const struct desc_ptr *)&idt_descr);
18781+ load_idt(&idt_descr);
18782
18783 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
18784 syscall_init();
18785@@ -1266,7 +1256,6 @@ void __cpuinit cpu_init(void)
18786 wrmsrl(MSR_KERNEL_GS_BASE, 0);
18787 barrier();
18788
18789- x86_configure_nx();
18790 enable_x2apic();
18791
18792 /*
18793@@ -1318,7 +1307,7 @@ void __cpuinit cpu_init(void)
18794 {
18795 int cpu = smp_processor_id();
18796 struct task_struct *curr = current;
18797- struct tss_struct *t = &per_cpu(init_tss, cpu);
18798+ struct tss_struct *t = init_tss + cpu;
18799 struct thread_struct *thread = &curr->thread;
18800
18801 show_ucode_info_early();
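setup_pcid() above picks the UDEREF flavour from the CPU features: PCID with no user shadow gives strong UDEREF, PCID plus a shadow base gives weak UDEREF, a shadow base alone is the slow-and-weak fallback, and otherwise UDEREF is off. The decision ladder, reduced to a pure function whose strings match the printk()s in the hunk; has_pcid and shadow_base stand in for X86_FEATURE_PCID and pax_user_shadow_base:

#include <stdbool.h>
#include <stdio.h>

static const char *uderef_mode(bool has_pcid, unsigned long shadow_base)
{
	if (has_pcid)
		return shadow_base ? "weak UDEREF" : "strong UDEREF";
	if (shadow_base)
		return "slow and weak UDEREF";
	return "UDEREF disabled";
}

int main(void)
{
	unsigned long shadow = 1UL << 31;	/* any nonzero base will do */

	printf("PCID, no shadow : %s\n", uderef_mode(true, 0));
	printf("PCID + shadow   : %s\n", uderef_mode(true, shadow));
	printf("shadow, no PCID : %s\n", uderef_mode(false, shadow));
	printf("neither         : %s\n", uderef_mode(false, 0));
	return 0;
}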
18802diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
18803index 7c6f7d5..8cac382 100644
18804--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
18805+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
18806@@ -1017,6 +1017,22 @@ static struct attribute *default_attrs[] = {
18807 };
18808
18809 #ifdef CONFIG_AMD_NB
18810+static struct attribute *default_attrs_amd_nb[] = {
18811+ &type.attr,
18812+ &level.attr,
18813+ &coherency_line_size.attr,
18814+ &physical_line_partition.attr,
18815+ &ways_of_associativity.attr,
18816+ &number_of_sets.attr,
18817+ &size.attr,
18818+ &shared_cpu_map.attr,
18819+ &shared_cpu_list.attr,
18820+ NULL,
18821+ NULL,
18822+ NULL,
18823+ NULL
18824+};
18825+
18826 static struct attribute ** __cpuinit amd_l3_attrs(void)
18827 {
18828 static struct attribute **attrs;
18829@@ -1027,18 +1043,7 @@ static struct attribute ** __cpuinit amd_l3_attrs(void)
18830
18831 n = ARRAY_SIZE(default_attrs);
18832
18833- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
18834- n += 2;
18835-
18836- if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
18837- n += 1;
18838-
18839- attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
18840- if (attrs == NULL)
18841- return attrs = default_attrs;
18842-
18843- for (n = 0; default_attrs[n]; n++)
18844- attrs[n] = default_attrs[n];
18845+ attrs = default_attrs_amd_nb;
18846
18847 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
18848 attrs[n++] = &cache_disable_0.attr;
18849@@ -1089,6 +1094,13 @@ static struct kobj_type ktype_cache = {
18850 .default_attrs = default_attrs,
18851 };
18852
18853+#ifdef CONFIG_AMD_NB
18854+static struct kobj_type ktype_cache_amd_nb = {
18855+ .sysfs_ops = &sysfs_ops,
18856+ .default_attrs = default_attrs_amd_nb,
18857+};
18858+#endif
18859+
18860 static struct kobj_type ktype_percpu_entry = {
18861 .sysfs_ops = &sysfs_ops,
18862 };
18863@@ -1154,20 +1166,26 @@ static int __cpuinit cache_add_dev(struct device *dev)
18864 return retval;
18865 }
18866
18867+#ifdef CONFIG_AMD_NB
18868+ amd_l3_attrs();
18869+#endif
18870+
18871 for (i = 0; i < num_cache_leaves; i++) {
18872+ struct kobj_type *ktype;
18873+
18874 this_object = INDEX_KOBJECT_PTR(cpu, i);
18875 this_object->cpu = cpu;
18876 this_object->index = i;
18877
18878 this_leaf = CPUID4_INFO_IDX(cpu, i);
18879
18880- ktype_cache.default_attrs = default_attrs;
18881+ ktype = &ktype_cache;
18882 #ifdef CONFIG_AMD_NB
18883 if (this_leaf->base.nb)
18884- ktype_cache.default_attrs = amd_l3_attrs();
18885+ ktype = &ktype_cache_amd_nb;
18886 #endif
18887 retval = kobject_init_and_add(&(this_object->kobj),
18888- &ktype_cache,
18889+ ktype,
18890 per_cpu(ici_cache_kobject, cpu),
18891 "index%1lu", i);
18892 if (unlikely(retval)) {
18893@@ -1222,7 +1240,7 @@ static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
18894 return NOTIFY_OK;
18895 }
18896
18897-static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
18898+static struct notifier_block cacheinfo_cpu_notifier = {
18899 .notifier_call = cacheinfo_cpu_callback,
18900 };
18901
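amd_l3_attrs() used to kzalloc() an attribute array and copy default_attrs into it at run time; the hunks above replace that with a static default_attrs_amd_nb[] carrying NULL slack slots for the optional L3 entries, so the array (and the new ktype_cache_amd_nb) is built once instead of per heap allocation. The pattern, with illustrative strings standing in for the sysfs attributes:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* nine fixed entries in the kernel; three here to keep the sketch short */
#define N_FIXED 3

/* fixed slots filled at build time + slack for optional entries + NULL */
static const char *attrs_amd_nb[N_FIXED + 3 + 1] = {
	"type", "level", "size",
};

static void amd_l3_attrs(bool index_disable, bool partitioning)
{
	size_t n = N_FIXED;

	if (index_disable) {		/* AMD_NB_L3_INDEX_DISABLE */
		attrs_amd_nb[n++] = "cache_disable_0";
		attrs_amd_nb[n++] = "cache_disable_1";
	}
	if (partitioning)		/* AMD_NB_L3_PARTITIONING */
		attrs_amd_nb[n++] = "subcaches";
	/* unused slots stay NULL and terminate the list */
}

int main(void)
{
	amd_l3_attrs(true, false);
	for (const char **a = attrs_amd_nb; *a; a++)
		puts(*a);
	return 0;
}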
18902diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
18903index 9239504..b2471ce 100644
18904--- a/arch/x86/kernel/cpu/mcheck/mce.c
18905+++ b/arch/x86/kernel/cpu/mcheck/mce.c
18906@@ -45,6 +45,7 @@
18907 #include <asm/processor.h>
18908 #include <asm/mce.h>
18909 #include <asm/msr.h>
18910+#include <asm/local.h>
18911
18912 #include "mce-internal.h"
18913
18914@@ -246,7 +247,7 @@ static void print_mce(struct mce *m)
18915 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
18916 m->cs, m->ip);
18917
18918- if (m->cs == __KERNEL_CS)
18919+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
18920 print_symbol("{%s}", m->ip);
18921 pr_cont("\n");
18922 }
18923@@ -279,10 +280,10 @@ static void print_mce(struct mce *m)
18924
18925 #define PANIC_TIMEOUT 5 /* 5 seconds */
18926
18927-static atomic_t mce_paniced;
18928+static atomic_unchecked_t mce_paniced;
18929
18930 static int fake_panic;
18931-static atomic_t mce_fake_paniced;
18932+static atomic_unchecked_t mce_fake_paniced;
18933
18934 /* Panic in progress. Enable interrupts and wait for final IPI */
18935 static void wait_for_panic(void)
18936@@ -306,7 +307,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
18937 /*
18938 * Make sure only one CPU runs in machine check panic
18939 */
18940- if (atomic_inc_return(&mce_paniced) > 1)
18941+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
18942 wait_for_panic();
18943 barrier();
18944
18945@@ -314,7 +315,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
18946 console_verbose();
18947 } else {
18948 /* Don't log too much for fake panic */
18949- if (atomic_inc_return(&mce_fake_paniced) > 1)
18950+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
18951 return;
18952 }
18953 /* First print corrected ones that are still unlogged */
18954@@ -353,7 +354,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
18955 if (!fake_panic) {
18956 if (panic_timeout == 0)
18957 panic_timeout = mca_cfg.panic_timeout;
18958- panic(msg);
18959+ panic("%s", msg);
18960 } else
18961 pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
18962 }
18963@@ -683,7 +684,7 @@ static int mce_timed_out(u64 *t)
18964 * might have been modified by someone else.
18965 */
18966 rmb();
18967- if (atomic_read(&mce_paniced))
18968+ if (atomic_read_unchecked(&mce_paniced))
18969 wait_for_panic();
18970 if (!mca_cfg.monarch_timeout)
18971 goto out;
18972@@ -1654,7 +1655,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
18973 }
18974
18975 /* Call the installed machine check handler for this CPU setup. */
18976-void (*machine_check_vector)(struct pt_regs *, long error_code) =
18977+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
18978 unexpected_machine_check;
18979
18980 /*
18981@@ -1677,7 +1678,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
18982 return;
18983 }
18984
18985+ pax_open_kernel();
18986 machine_check_vector = do_machine_check;
18987+ pax_close_kernel();
18988
18989 __mcheck_cpu_init_generic();
18990 __mcheck_cpu_init_vendor(c);
18991@@ -1691,7 +1694,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
18992 */
18993
18994 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
18995-static int mce_chrdev_open_count; /* #times opened */
18996+static local_t mce_chrdev_open_count; /* #times opened */
18997 static int mce_chrdev_open_exclu; /* already open exclusive? */
18998
18999 static int mce_chrdev_open(struct inode *inode, struct file *file)
19000@@ -1699,7 +1702,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
19001 spin_lock(&mce_chrdev_state_lock);
19002
19003 if (mce_chrdev_open_exclu ||
19004- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
19005+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
19006 spin_unlock(&mce_chrdev_state_lock);
19007
19008 return -EBUSY;
19009@@ -1707,7 +1710,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
19010
19011 if (file->f_flags & O_EXCL)
19012 mce_chrdev_open_exclu = 1;
19013- mce_chrdev_open_count++;
19014+ local_inc(&mce_chrdev_open_count);
19015
19016 spin_unlock(&mce_chrdev_state_lock);
19017
19018@@ -1718,7 +1721,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
19019 {
19020 spin_lock(&mce_chrdev_state_lock);
19021
19022- mce_chrdev_open_count--;
19023+ local_dec(&mce_chrdev_open_count);
19024 mce_chrdev_open_exclu = 0;
19025
19026 spin_unlock(&mce_chrdev_state_lock);
19027@@ -2364,7 +2367,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
19028 return NOTIFY_OK;
19029 }
19030
19031-static struct notifier_block mce_cpu_notifier __cpuinitdata = {
19032+static struct notifier_block mce_cpu_notifier = {
19033 .notifier_call = mce_cpu_callback,
19034 };
19035
19036@@ -2374,7 +2377,7 @@ static __init void mce_init_banks(void)
19037
19038 for (i = 0; i < mca_cfg.banks; i++) {
19039 struct mce_bank *b = &mce_banks[i];
19040- struct device_attribute *a = &b->attr;
19041+ device_attribute_no_const *a = &b->attr;
19042
19043 sysfs_attr_init(&a->attr);
19044 a->attr.name = b->attrname;
19045@@ -2442,7 +2445,7 @@ struct dentry *mce_get_debugfs_dir(void)
19046 static void mce_reset(void)
19047 {
19048 cpu_missing = 0;
19049- atomic_set(&mce_fake_paniced, 0);
19050+ atomic_set_unchecked(&mce_fake_paniced, 0);
19051 atomic_set(&mce_executing, 0);
19052 atomic_set(&mce_callin, 0);
19053 atomic_set(&global_nwo, 0);
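mce_chrdev_open_count changes from a bare int to a local_t, presumably so the open counter goes through the same instrumented atomic primitives as the rest of the hardened counters rather than naked ++/-- (the surrounding spinlock already serialises it). The O_EXCL bookkeeping itself, modelled with C11 atomics standing in for local_t and with the locking elided:

#include <errno.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int open_count;		/* mce_chrdev_open_count's role */
static bool open_exclu;			/* mce_chrdev_open_exclu's role */

static int chrdev_open(bool o_excl)
{
	if (open_exclu || (atomic_load(&open_count) && o_excl))
		return -EBUSY;		/* exclusivity would be violated */
	if (o_excl)
		open_exclu = true;
	atomic_fetch_add(&open_count, 1);
	return 0;
}

static void chrdev_release(void)
{
	atomic_fetch_sub(&open_count, 1);
	open_exclu = false;
}

int main(void)
{
	printf("open         -> %d\n", chrdev_open(false));	/* 0 */
	printf("open(O_EXCL) -> %d\n", chrdev_open(true));	/* -EBUSY */
	chrdev_release();
	printf("open(O_EXCL) -> %d\n", chrdev_open(true));	/* 0 */
	return 0;
}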
19054diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
19055index 1c044b1..37a2a43 100644
19056--- a/arch/x86/kernel/cpu/mcheck/p5.c
19057+++ b/arch/x86/kernel/cpu/mcheck/p5.c
19058@@ -11,6 +11,7 @@
19059 #include <asm/processor.h>
19060 #include <asm/mce.h>
19061 #include <asm/msr.h>
19062+#include <asm/pgtable.h>
19063
19064 /* By default disabled */
19065 int mce_p5_enabled __read_mostly;
19066@@ -49,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
19067 if (!cpu_has(c, X86_FEATURE_MCE))
19068 return;
19069
19070+ pax_open_kernel();
19071 machine_check_vector = pentium_machine_check;
19072+ pax_close_kernel();
19073 /* Make sure the vector pointer is visible before we enable MCEs: */
19074 wmb();
19075
19076diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
19077index 47a1870..8c019a7 100644
19078--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
19079+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
19080@@ -288,7 +288,7 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb,
19081 return notifier_from_errno(err);
19082 }
19083
19084-static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata =
19085+static struct notifier_block thermal_throttle_cpu_notifier =
19086 {
19087 .notifier_call = thermal_throttle_cpu_callback,
19088 };
19089diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
19090index e9a701a..35317d6 100644
19091--- a/arch/x86/kernel/cpu/mcheck/winchip.c
19092+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
19093@@ -10,6 +10,7 @@
19094 #include <asm/processor.h>
19095 #include <asm/mce.h>
19096 #include <asm/msr.h>
19097+#include <asm/pgtable.h>
19098
19099 /* Machine check handler for WinChip C6: */
19100 static void winchip_machine_check(struct pt_regs *regs, long error_code)
19101@@ -23,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
19102 {
19103 u32 lo, hi;
19104
19105+ pax_open_kernel();
19106 machine_check_vector = winchip_machine_check;
19107+ pax_close_kernel();
19108 /* Make sure the vector pointer is visible before we enable MCEs: */
19109 wmb();
19110
19111diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
19112index ca22b73..9987afe 100644
19113--- a/arch/x86/kernel/cpu/mtrr/main.c
19114+++ b/arch/x86/kernel/cpu/mtrr/main.c
19115@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
19116 u64 size_or_mask, size_and_mask;
19117 static bool mtrr_aps_delayed_init;
19118
19119-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
19120+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
19121
19122 const struct mtrr_ops *mtrr_if;
19123
19124diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
19125index df5e41f..816c719 100644
19126--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
19127+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
19128@@ -25,7 +25,7 @@ struct mtrr_ops {
19129 int (*validate_add_page)(unsigned long base, unsigned long size,
19130 unsigned int type);
19131 int (*have_wrcomb)(void);
19132-};
19133+} __do_const;
19134
19135 extern int generic_get_free_region(unsigned long base, unsigned long size,
19136 int replace_reg);
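Tagging struct mtrr_ops __do_const hands it to the PaX constify plugin, which forces instances of such ops-style structs (essentially all function pointers) into read-only memory. The effect, expressed with plain C const; the real plugin rewrites the type itself, so no source-level const is needed:

#include <stdio.h>

struct mtrr_ops_like {
	int (*have_wrcomb)(void);
};

static int have_wrcomb(void) { return 1; }

/* const here does by hand what constify does for every ops-only struct */
static const struct mtrr_ops_like generic_ops = {
	.have_wrcomb = have_wrcomb,
};

int main(void)
{
	/* generic_ops.have_wrcomb = NULL;   <- rejected at compile time */
	printf("write-combining: %d\n", generic_ops.have_wrcomb());
	return 0;
}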
19137diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
19138index 1025f3c..824f677 100644
19139--- a/arch/x86/kernel/cpu/perf_event.c
19140+++ b/arch/x86/kernel/cpu/perf_event.c
19141@@ -1311,7 +1311,7 @@ static void __init pmu_check_apic(void)
19142 pr_info("no hardware sampling interrupt available.\n");
19143 }
19144
19145-static struct attribute_group x86_pmu_format_group = {
19146+static attribute_group_no_const x86_pmu_format_group = {
19147 .name = "format",
19148 .attrs = NULL,
19149 };
19150@@ -1410,7 +1410,7 @@ static struct attribute *events_attr[] = {
19151 NULL,
19152 };
19153
19154-static struct attribute_group x86_pmu_events_group = {
19155+static attribute_group_no_const x86_pmu_events_group = {
19156 .name = "events",
19157 .attrs = events_attr,
19158 };
19159@@ -1920,7 +1920,7 @@ static unsigned long get_segment_base(unsigned int segment)
19160 if (idx > GDT_ENTRIES)
19161 return 0;
19162
19163- desc = __this_cpu_ptr(&gdt_page.gdt[0]);
19164+ desc = get_cpu_gdt_table(smp_processor_id());
19165 }
19166
19167 return get_desc_base(desc + idx);
19168@@ -2010,7 +2010,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
19169 break;
19170
19171 perf_callchain_store(entry, frame.return_address);
19172- fp = frame.next_frame;
19173+ fp = (const void __force_user *)frame.next_frame;
19174 }
19175 }
19176
19177diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
19178index a9e2207..d70c83a 100644
19179--- a/arch/x86/kernel/cpu/perf_event_intel.c
19180+++ b/arch/x86/kernel/cpu/perf_event_intel.c
19181@@ -2022,10 +2022,10 @@ __init int intel_pmu_init(void)
19182 * v2 and above have a perf capabilities MSR
19183 */
19184 if (version > 1) {
19185- u64 capabilities;
19186+ u64 capabilities = x86_pmu.intel_cap.capabilities;
19187
19188- rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
19189- x86_pmu.intel_cap.capabilities = capabilities;
19190+ if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
19191+ x86_pmu.intel_cap.capabilities = capabilities;
19192 }
19193
19194 intel_ds_init();
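The perf hunk swaps rdmsrl() for rdmsrl_safe() on MSR_IA32_PERF_CAPABILITIES: on hardware or hypervisors that don't implement the MSR, the unsafe read takes a #GP, while the safe variant returns an error and lets the code keep the zero-initialised default. The fallback shape, with read_msr_safe() as a stand-in that fails for unimplemented MSRs:

#include <stdint.h>
#include <stdio.h>

#define MSR_IA32_PERF_CAPABILITIES 0x345

/* stand-in: nonzero for MSRs the "CPU" does not implement, the
 * user-space analogue of rdmsrl_safe() catching a #GP */
static int read_msr_safe(uint32_t msr, uint64_t *val)
{
	if (msr != MSR_IA32_PERF_CAPABILITIES)
		return -1;
	*val = 0x31c3;			/* sample capabilities word */
	return 0;
}

int main(void)
{
	uint64_t cap = 0;		/* x86_pmu.intel_cap.capabilities */
	uint64_t saved = cap;		/* the patch saves the default ... */

	if (read_msr_safe(MSR_IA32_PERF_CAPABILITIES, &cap))
		cap = saved;		/* ... and restores it on failure */
	printf("capabilities = %#llx\n", (unsigned long long)cap);

	uint64_t bogus = 0, saved2 = bogus;
	if (read_msr_safe(0x999, &bogus))	/* unimplemented MSR */
		bogus = saved2;
	printf("bogus msr    = %#llx\n", (unsigned long long)bogus);
	return 0;
}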
19195diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
19196index 52441a2..f94fae8 100644
19197--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
19198+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
19199@@ -3093,7 +3093,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
19200 static int __init uncore_type_init(struct intel_uncore_type *type)
19201 {
19202 struct intel_uncore_pmu *pmus;
19203- struct attribute_group *attr_group;
19204+ attribute_group_no_const *attr_group;
19205 struct attribute **attrs;
19206 int i, j;
19207
19208@@ -3518,7 +3518,7 @@ static int
19209 return NOTIFY_OK;
19210 }
19211
19212-static struct notifier_block uncore_cpu_nb __cpuinitdata = {
19213+static struct notifier_block uncore_cpu_nb = {
19214 .notifier_call = uncore_cpu_notifier,
19215 /*
19216 * to migrate uncore events, our notifier should be executed
19217diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
19218index f952891..4722ad4 100644
19219--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
19220+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
19221@@ -488,7 +488,7 @@ struct intel_uncore_box {
19222 struct uncore_event_desc {
19223 struct kobj_attribute attr;
19224 const char *config;
19225-};
19226+} __do_const;
19227
19228 #define INTEL_UNCORE_EVENT_DESC(_name, _config) \
19229 { \
19230diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
19231index 1e4dbcf..b9a34c2 100644
19232--- a/arch/x86/kernel/cpuid.c
19233+++ b/arch/x86/kernel/cpuid.c
19234@@ -171,7 +171,7 @@ static int __cpuinit cpuid_class_cpu_callback(struct notifier_block *nfb,
19235 return notifier_from_errno(err);
19236 }
19237
19238-static struct notifier_block __refdata cpuid_class_cpu_notifier =
19239+static struct notifier_block cpuid_class_cpu_notifier =
19240 {
19241 .notifier_call = cpuid_class_cpu_callback,
19242 };
19243diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
19244index 74467fe..18793d5 100644
19245--- a/arch/x86/kernel/crash.c
19246+++ b/arch/x86/kernel/crash.c
19247@@ -58,10 +58,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
19248 {
19249 #ifdef CONFIG_X86_32
19250 struct pt_regs fixed_regs;
19251-#endif
19252
19253-#ifdef CONFIG_X86_32
19254- if (!user_mode_vm(regs)) {
19255+ if (!user_mode(regs)) {
19256 crash_fixup_ss_esp(&fixed_regs, regs);
19257 regs = &fixed_regs;
19258 }
19259diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
19260index afa64ad..dce67dd 100644
19261--- a/arch/x86/kernel/crash_dump_64.c
19262+++ b/arch/x86/kernel/crash_dump_64.c
19263@@ -36,7 +36,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
19264 return -ENOMEM;
19265
19266 if (userbuf) {
19267- if (copy_to_user(buf, vaddr + offset, csize)) {
19268+ if (copy_to_user((char __force_user *)buf, vaddr + offset, csize)) {
19269 iounmap(vaddr);
19270 return -EFAULT;
19271 }
19272diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
19273index 155a13f..1672b9b 100644
19274--- a/arch/x86/kernel/doublefault_32.c
19275+++ b/arch/x86/kernel/doublefault_32.c
19276@@ -11,7 +11,7 @@
19277
19278 #define DOUBLEFAULT_STACKSIZE (1024)
19279 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
19280-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
19281+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
19282
19283 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
19284
19285@@ -21,7 +21,7 @@ static void doublefault_fn(void)
19286 unsigned long gdt, tss;
19287
19288 native_store_gdt(&gdt_desc);
19289- gdt = gdt_desc.address;
19290+ gdt = (unsigned long)gdt_desc.address;
19291
19292 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
19293
19294@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
19295 /* 0x2 bit is always set */
19296 .flags = X86_EFLAGS_SF | 0x2,
19297 .sp = STACK_START,
19298- .es = __USER_DS,
19299+ .es = __KERNEL_DS,
19300 .cs = __KERNEL_CS,
19301 .ss = __KERNEL_DS,
19302- .ds = __USER_DS,
19303+ .ds = __KERNEL_DS,
19304 .fs = __KERNEL_PERCPU,
19305
19306 .__cr3 = __pa_nodebug(swapper_pg_dir),
19307diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
19308index deb6421..76bbc12 100644
19309--- a/arch/x86/kernel/dumpstack.c
19310+++ b/arch/x86/kernel/dumpstack.c
19311@@ -2,6 +2,9 @@
19312 * Copyright (C) 1991, 1992 Linus Torvalds
19313 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
19314 */
19315+#ifdef CONFIG_GRKERNSEC_HIDESYM
19316+#define __INCLUDED_BY_HIDESYM 1
19317+#endif
19318 #include <linux/kallsyms.h>
19319 #include <linux/kprobes.h>
19320 #include <linux/uaccess.h>
19321@@ -35,16 +38,14 @@ void printk_address(unsigned long address, int reliable)
19322 static void
19323 print_ftrace_graph_addr(unsigned long addr, void *data,
19324 const struct stacktrace_ops *ops,
19325- struct thread_info *tinfo, int *graph)
19326+ struct task_struct *task, int *graph)
19327 {
19328- struct task_struct *task;
19329 unsigned long ret_addr;
19330 int index;
19331
19332 if (addr != (unsigned long)return_to_handler)
19333 return;
19334
19335- task = tinfo->task;
19336 index = task->curr_ret_stack;
19337
19338 if (!task->ret_stack || index < *graph)
19339@@ -61,7 +62,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
19340 static inline void
19341 print_ftrace_graph_addr(unsigned long addr, void *data,
19342 const struct stacktrace_ops *ops,
19343- struct thread_info *tinfo, int *graph)
19344+ struct task_struct *task, int *graph)
19345 { }
19346 #endif
19347
19348@@ -72,10 +73,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
19349 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
19350 */
19351
19352-static inline int valid_stack_ptr(struct thread_info *tinfo,
19353- void *p, unsigned int size, void *end)
19354+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
19355 {
19356- void *t = tinfo;
19357 if (end) {
19358 if (p < end && p >= (end-THREAD_SIZE))
19359 return 1;
19360@@ -86,14 +85,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
19361 }
19362
19363 unsigned long
19364-print_context_stack(struct thread_info *tinfo,
19365+print_context_stack(struct task_struct *task, void *stack_start,
19366 unsigned long *stack, unsigned long bp,
19367 const struct stacktrace_ops *ops, void *data,
19368 unsigned long *end, int *graph)
19369 {
19370 struct stack_frame *frame = (struct stack_frame *)bp;
19371
19372- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
19373+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
19374 unsigned long addr;
19375
19376 addr = *stack;
19377@@ -105,7 +104,7 @@ print_context_stack(struct thread_info *tinfo,
19378 } else {
19379 ops->address(data, addr, 0);
19380 }
19381- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
19382+ print_ftrace_graph_addr(addr, data, ops, task, graph);
19383 }
19384 stack++;
19385 }
19386@@ -114,7 +113,7 @@ print_context_stack(struct thread_info *tinfo,
19387 EXPORT_SYMBOL_GPL(print_context_stack);
19388
19389 unsigned long
19390-print_context_stack_bp(struct thread_info *tinfo,
19391+print_context_stack_bp(struct task_struct *task, void *stack_start,
19392 unsigned long *stack, unsigned long bp,
19393 const struct stacktrace_ops *ops, void *data,
19394 unsigned long *end, int *graph)
19395@@ -122,7 +121,7 @@ print_context_stack_bp(struct thread_info *tinfo,
19396 struct stack_frame *frame = (struct stack_frame *)bp;
19397 unsigned long *ret_addr = &frame->return_address;
19398
19399- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
19400+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
19401 unsigned long addr = *ret_addr;
19402
19403 if (!__kernel_text_address(addr))
19404@@ -131,7 +130,7 @@ print_context_stack_bp(struct thread_info *tinfo,
19405 ops->address(data, addr, 1);
19406 frame = frame->next_frame;
19407 ret_addr = &frame->return_address;
19408- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
19409+ print_ftrace_graph_addr(addr, data, ops, task, graph);
19410 }
19411
19412 return (unsigned long)frame;
19413@@ -150,7 +149,7 @@ static int print_trace_stack(void *data, char *name)
19414 static void print_trace_address(void *data, unsigned long addr, int reliable)
19415 {
19416 touch_nmi_watchdog();
19417- printk(data);
19418+ printk("%s", (char *)data);
19419 printk_address(addr, reliable);
19420 }
19421
19422@@ -219,6 +218,8 @@ unsigned __kprobes long oops_begin(void)
19423 }
19424 EXPORT_SYMBOL_GPL(oops_begin);
19425
19426+extern void gr_handle_kernel_exploit(void);
19427+
19428 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
19429 {
19430 if (regs && kexec_should_crash(current))
19431@@ -240,7 +241,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
19432 panic("Fatal exception in interrupt");
19433 if (panic_on_oops)
19434 panic("Fatal exception");
19435- do_exit(signr);
19436+
19437+ gr_handle_kernel_exploit();
19438+
19439+ do_group_exit(signr);
19440 }
19441
19442 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
19443@@ -268,7 +272,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
19444 print_modules();
19445 show_regs(regs);
19446 #ifdef CONFIG_X86_32
19447- if (user_mode_vm(regs)) {
19448+ if (user_mode(regs)) {
19449 sp = regs->sp;
19450 ss = regs->ss & 0xffff;
19451 } else {
19452@@ -296,7 +300,7 @@ void die(const char *str, struct pt_regs *regs, long err)
19453 unsigned long flags = oops_begin();
19454 int sig = SIGSEGV;
19455
19456- if (!user_mode_vm(regs))
19457+ if (!user_mode(regs))
19458 report_bug(regs->ip, regs);
19459
19460 if (__die(str, regs, err))
19461diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
19462index f2a1770..540657f 100644
19463--- a/arch/x86/kernel/dumpstack_32.c
19464+++ b/arch/x86/kernel/dumpstack_32.c
19465@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
19466 bp = stack_frame(task, regs);
19467
19468 for (;;) {
19469- struct thread_info *context;
19470+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
19471
19472- context = (struct thread_info *)
19473- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
19474- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
19475+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
19476
19477- stack = (unsigned long *)context->previous_esp;
19478- if (!stack)
19479+ if (stack_start == task_stack_page(task))
19480 break;
19481+ stack = *(unsigned long **)stack_start;
19482 if (ops->stack(data, "IRQ") < 0)
19483 break;
19484 touch_nmi_watchdog();
19485@@ -87,27 +85,28 @@ void show_regs(struct pt_regs *regs)
19486 int i;
19487
19488 show_regs_print_info(KERN_EMERG);
19489- __show_regs(regs, !user_mode_vm(regs));
19490+ __show_regs(regs, !user_mode(regs));
19491
19492 /*
19493 * When in-kernel, we also print out the stack and code at the
19494 * time of the fault..
19495 */
19496- if (!user_mode_vm(regs)) {
19497+ if (!user_mode(regs)) {
19498 unsigned int code_prologue = code_bytes * 43 / 64;
19499 unsigned int code_len = code_bytes;
19500 unsigned char c;
19501 u8 *ip;
19502+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
19503
19504 pr_emerg("Stack:\n");
19505 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
19506
19507 pr_emerg("Code:");
19508
19509- ip = (u8 *)regs->ip - code_prologue;
19510+ ip = (u8 *)regs->ip - code_prologue + cs_base;
19511 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
19512 /* try starting at IP */
19513- ip = (u8 *)regs->ip;
19514+ ip = (u8 *)regs->ip + cs_base;
19515 code_len = code_len - code_prologue + 1;
19516 }
19517 for (i = 0; i < code_len; i++, ip++) {
19518@@ -116,7 +115,7 @@ void show_regs(struct pt_regs *regs)
19519 pr_cont(" Bad EIP value.");
19520 break;
19521 }
19522- if (ip == (u8 *)regs->ip)
19523+ if (ip == (u8 *)regs->ip + cs_base)
19524 pr_cont(" <%02x>", c);
19525 else
19526 pr_cont(" %02x", c);
19527@@ -129,6 +128,7 @@ int is_valid_bugaddr(unsigned long ip)
19528 {
19529 unsigned short ud2;
19530
19531+ ip = ktla_ktva(ip);
19532 if (ip < PAGE_OFFSET)
19533 return 0;
19534 if (probe_kernel_address((unsigned short *)ip, ud2))
19535@@ -136,3 +136,15 @@ int is_valid_bugaddr(unsigned long ip)
19536
19537 return ud2 == 0x0b0f;
19538 }
19539+
19540+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
19541+void pax_check_alloca(unsigned long size)
19542+{
19543+ unsigned long sp = (unsigned long)&sp, stack_left;
19544+
19545+ /* all kernel stacks are of the same size */
19546+ stack_left = sp & (THREAD_SIZE - 1);
19547+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
19548+}
19549+EXPORT_SYMBOL(pax_check_alloca);
19550+#endif
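pax_check_alloca() on 32-bit leans on every kernel stack being THREAD_SIZE-sized and THREAD_SIZE-aligned: the low bits of the stack pointer are exactly the bytes still free, and any allocation reaching into the last 256 bytes trips BUG(). With THREAD_SIZE = 8192 and sp = 0xc12345a0, stack_left = 0x5a0 = 1440 bytes, so anything of 1184 bytes (1440 - 256) or more is rejected. A runnable model with a fabricated sp:

#include <assert.h>
#include <stdio.h>

#define THREAD_SIZE 8192UL

static void check_alloca(unsigned long sp, unsigned long size)
{
	unsigned long stack_left = sp & (THREAD_SIZE - 1);

	/* BUG_ON() in the kernel; assert() here */
	assert(stack_left >= 256 && size < stack_left - 256);
}

int main(void)
{
	unsigned long sp = 0xc12345a0UL;	/* 0x5a0 = 1440 bytes left */

	check_alloca(sp, 1000);			/* fits: 1000 < 1440 - 256 */
	printf("1000-byte alloca accepted\n");
	check_alloca(sp, 1200);			/* trips the assert */
	return 0;
}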
19551diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
19552index addb207..99635fa 100644
19553--- a/arch/x86/kernel/dumpstack_64.c
19554+++ b/arch/x86/kernel/dumpstack_64.c
19555@@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
19556 unsigned long *irq_stack_end =
19557 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
19558 unsigned used = 0;
19559- struct thread_info *tinfo;
19560 int graph = 0;
19561 unsigned long dummy;
19562+ void *stack_start;
19563
19564 if (!task)
19565 task = current;
19566@@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
19567 * current stack address. If the stacks consist of nested
19568 * exceptions
19569 */
19570- tinfo = task_thread_info(task);
19571 for (;;) {
19572 char *id;
19573 unsigned long *estack_end;
19574+
19575 estack_end = in_exception_stack(cpu, (unsigned long)stack,
19576 &used, &id);
19577
19578@@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
19579 if (ops->stack(data, id) < 0)
19580 break;
19581
19582- bp = ops->walk_stack(tinfo, stack, bp, ops,
19583+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
19584 data, estack_end, &graph);
19585 ops->stack(data, "<EOE>");
19586 /*
19587@@ -161,6 +161,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
19588 * second-to-last pointer (index -2 to end) in the
19589 * exception stack:
19590 */
19591+ if ((u16)estack_end[-1] != __KERNEL_DS)
19592+ goto out;
19593 stack = (unsigned long *) estack_end[-2];
19594 continue;
19595 }
19596@@ -172,7 +174,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
19597 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
19598 if (ops->stack(data, "IRQ") < 0)
19599 break;
19600- bp = ops->walk_stack(tinfo, stack, bp,
19601+ bp = ops->walk_stack(task, irq_stack, stack, bp,
19602 ops, data, irq_stack_end, &graph);
19603 /*
19604 * We link to the next stack (which would be
19605@@ -191,7 +193,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
19606 /*
19607 * This handles the process stack:
19608 */
19609- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
19610+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
19611+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
19612+out:
19613 put_cpu();
19614 }
19615 EXPORT_SYMBOL(dump_trace);
19616@@ -300,3 +304,50 @@ int is_valid_bugaddr(unsigned long ip)
19617
19618 return ud2 == 0x0b0f;
19619 }
19620+
19621+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
19622+void pax_check_alloca(unsigned long size)
19623+{
19624+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
19625+ unsigned cpu, used;
19626+ char *id;
19627+
19628+ /* check the process stack first */
19629+ stack_start = (unsigned long)task_stack_page(current);
19630+ stack_end = stack_start + THREAD_SIZE;
19631+ if (likely(stack_start <= sp && sp < stack_end)) {
19632+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
19633+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
19634+ return;
19635+ }
19636+
19637+ cpu = get_cpu();
19638+
19639+ /* check the irq stacks */
19640+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
19641+ stack_start = stack_end - IRQ_STACK_SIZE;
19642+ if (stack_start <= sp && sp < stack_end) {
19643+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
19644+ put_cpu();
19645+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
19646+ return;
19647+ }
19648+
19649+ /* check the exception stacks */
19650+ used = 0;
19651+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
19652+ stack_start = stack_end - EXCEPTION_STKSZ;
19653+ if (stack_end && stack_start <= sp && sp < stack_end) {
19654+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
19655+ put_cpu();
19656+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
19657+ return;
19658+ }
19659+
19660+ put_cpu();
19661+
19662+ /* unknown stack */
19663+ BUG();
19664+}
19665+EXPORT_SYMBOL(pax_check_alloca);
19666+#endif
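The 64-bit version cannot use a single mask, because the current sp may sit on the process stack, a per-CPU IRQ stack, or one of the exception stacks, each with its own size; it classifies sp first and then applies the same stack_left test. The classification order, sketched with illustrative addresses rather than real kernel layout:

#include <stdio.h>

#define THREAD_SIZE	(16 * 1024UL)
#define IRQ_STACK_SIZE	(16 * 1024UL)
#define EXCEPTION_STKSZ	(4 * 1024UL)

static const char *classify(unsigned long sp, unsigned long proc_start,
			    unsigned long irq_end, unsigned long exc_end)
{
	/* same search order as the patch: process, irq, exception */
	if (proc_start <= sp && sp < proc_start + THREAD_SIZE)
		return "process stack";
	if (irq_end - IRQ_STACK_SIZE <= sp && sp < irq_end)
		return "irq stack";
	if (exc_end - EXCEPTION_STKSZ <= sp && sp < exc_end)
		return "exception stack";
	return "unknown stack (BUG() in the kernel)";
}

int main(void)
{
	unsigned long proc = 0x10000, irq_end = 0x40000, exc_end = 0x50000;

	printf("%s\n", classify(proc + 0x800, proc, irq_end, exc_end));
	printf("%s\n", classify(irq_end - 0x100, proc, irq_end, exc_end));
	printf("%s\n", classify(0xdead0000UL, proc, irq_end, exc_end));
	return 0;
}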
19667diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
19668index d32abea..74daf4f 100644
19669--- a/arch/x86/kernel/e820.c
19670+++ b/arch/x86/kernel/e820.c
19671@@ -800,8 +800,8 @@ unsigned long __init e820_end_of_low_ram_pfn(void)
19672
19673 static void early_panic(char *msg)
19674 {
19675- early_printk(msg);
19676- panic(msg);
19677+ early_printk("%s", msg);
19678+ panic("%s", msg);
19679 }
19680
19681 static int userdef __initdata;
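early_panic() is fixed to call early_printk("%s", msg) and panic("%s", msg): a printf-style sink must never take possibly attacker-influenced text as its format string, or stray %-specifiers walk the argument area. A user-space demonstration of why the fixed form is safe; fake_panic() is a stand-in for the kernel's panic():

#include <stdarg.h>
#include <stdio.h>

/* stand-in for panic(): a classic printf-style sink */
static void fake_panic(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vfprintf(stderr, fmt, ap);
	va_end(ap);
	fputc('\n', stderr);
}

int main(void)
{
	/* message text an attacker may influence, e.g. via mem= */
	const char *msg = "e820 map broken by %s%s%s%n";

	fake_panic("%s", msg);	/* safe: msg is plain data */
	/* fake_panic(msg);        unsafe: the %-specifiers would make
	 *                         vfprintf() read (and with %n, write)
	 *                         through garbage varargs */
	return 0;
}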
19682diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
19683index d15f575..d692043 100644
19684--- a/arch/x86/kernel/early_printk.c
19685+++ b/arch/x86/kernel/early_printk.c
19686@@ -7,6 +7,7 @@
19687 #include <linux/pci_regs.h>
19688 #include <linux/pci_ids.h>
19689 #include <linux/errno.h>
19690+#include <linux/sched.h>
19691 #include <asm/io.h>
19692 #include <asm/processor.h>
19693 #include <asm/fcntl.h>
19694diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
19695index 8f3e2de..6b71e39 100644
19696--- a/arch/x86/kernel/entry_32.S
19697+++ b/arch/x86/kernel/entry_32.S
19698@@ -177,13 +177,153 @@
19699 /*CFI_REL_OFFSET gs, PT_GS*/
19700 .endm
19701 .macro SET_KERNEL_GS reg
19702+
19703+#ifdef CONFIG_CC_STACKPROTECTOR
19704 movl $(__KERNEL_STACK_CANARY), \reg
19705+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
19706+ movl $(__USER_DS), \reg
19707+#else
19708+ xorl \reg, \reg
19709+#endif
19710+
19711 movl \reg, %gs
19712 .endm
19713
19714 #endif /* CONFIG_X86_32_LAZY_GS */
19715
19716-.macro SAVE_ALL
19717+.macro pax_enter_kernel
19718+#ifdef CONFIG_PAX_KERNEXEC
19719+ call pax_enter_kernel
19720+#endif
19721+.endm
19722+
19723+.macro pax_exit_kernel
19724+#ifdef CONFIG_PAX_KERNEXEC
19725+ call pax_exit_kernel
19726+#endif
19727+.endm
19728+
19729+#ifdef CONFIG_PAX_KERNEXEC
19730+ENTRY(pax_enter_kernel)
19731+#ifdef CONFIG_PARAVIRT
19732+ pushl %eax
19733+ pushl %ecx
19734+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
19735+ mov %eax, %esi
19736+#else
19737+ mov %cr0, %esi
19738+#endif
19739+ bts $16, %esi
19740+ jnc 1f
19741+ mov %cs, %esi
19742+ cmp $__KERNEL_CS, %esi
19743+ jz 3f
19744+ ljmp $__KERNEL_CS, $3f
19745+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
19746+2:
19747+#ifdef CONFIG_PARAVIRT
19748+ mov %esi, %eax
19749+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
19750+#else
19751+ mov %esi, %cr0
19752+#endif
19753+3:
19754+#ifdef CONFIG_PARAVIRT
19755+ popl %ecx
19756+ popl %eax
19757+#endif
19758+ ret
19759+ENDPROC(pax_enter_kernel)
19760+
19761+ENTRY(pax_exit_kernel)
19762+#ifdef CONFIG_PARAVIRT
19763+ pushl %eax
19764+ pushl %ecx
19765+#endif
19766+ mov %cs, %esi
19767+ cmp $__KERNEXEC_KERNEL_CS, %esi
19768+ jnz 2f
19769+#ifdef CONFIG_PARAVIRT
19770+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
19771+ mov %eax, %esi
19772+#else
19773+ mov %cr0, %esi
19774+#endif
19775+ btr $16, %esi
19776+ ljmp $__KERNEL_CS, $1f
19777+1:
19778+#ifdef CONFIG_PARAVIRT
19779+ mov %esi, %eax
19780+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
19781+#else
19782+ mov %esi, %cr0
19783+#endif
19784+2:
19785+#ifdef CONFIG_PARAVIRT
19786+ popl %ecx
19787+ popl %eax
19788+#endif
19789+ ret
19790+ENDPROC(pax_exit_kernel)
19791+#endif
19792+
19793+ .macro pax_erase_kstack
19794+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
19795+ call pax_erase_kstack
19796+#endif
19797+ .endm
19798+
19799+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
19800+/*
19801+ * ebp: thread_info
19802+ */
19803+ENTRY(pax_erase_kstack)
19804+ pushl %edi
19805+ pushl %ecx
19806+ pushl %eax
19807+
19808+ mov TI_lowest_stack(%ebp), %edi
19809+ mov $-0xBEEF, %eax
19810+ std
19811+
19812+1: mov %edi, %ecx
19813+ and $THREAD_SIZE_asm - 1, %ecx
19814+ shr $2, %ecx
19815+ repne scasl
19816+ jecxz 2f
19817+
19818+ cmp $2*16, %ecx
19819+ jc 2f
19820+
19821+ mov $2*16, %ecx
19822+ repe scasl
19823+ jecxz 2f
19824+ jne 1b
19825+
19826+2: cld
19827+ mov %esp, %ecx
19828+ sub %edi, %ecx
19829+
19830+ cmp $THREAD_SIZE_asm, %ecx
19831+ jb 3f
19832+ ud2
19833+3:
19834+
19835+ shr $2, %ecx
19836+ rep stosl
19837+
19838+ mov TI_task_thread_sp0(%ebp), %edi
19839+ sub $128, %edi
19840+ mov %edi, TI_lowest_stack(%ebp)
19841+
19842+ popl %eax
19843+ popl %ecx
19844+ popl %edi
19845+ ret
19846+ENDPROC(pax_erase_kstack)
19847+#endif
19848+
19849+.macro __SAVE_ALL _DS
19850 cld
19851 PUSH_GS
19852 pushl_cfi %fs
19853@@ -206,7 +346,7 @@
19854 CFI_REL_OFFSET ecx, 0
19855 pushl_cfi %ebx
19856 CFI_REL_OFFSET ebx, 0
19857- movl $(__USER_DS), %edx
19858+ movl $\_DS, %edx
19859 movl %edx, %ds
19860 movl %edx, %es
19861 movl $(__KERNEL_PERCPU), %edx
19862@@ -214,6 +354,15 @@
19863 SET_KERNEL_GS %edx
19864 .endm
19865
19866+.macro SAVE_ALL
19867+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
19868+ __SAVE_ALL __KERNEL_DS
19869+ pax_enter_kernel
19870+#else
19871+ __SAVE_ALL __USER_DS
19872+#endif
19873+.endm
19874+
19875 .macro RESTORE_INT_REGS
19876 popl_cfi %ebx
19877 CFI_RESTORE ebx
19878@@ -297,7 +446,7 @@ ENTRY(ret_from_fork)
19879 popfl_cfi
19880 jmp syscall_exit
19881 CFI_ENDPROC
19882-END(ret_from_fork)
19883+ENDPROC(ret_from_fork)
19884
19885 ENTRY(ret_from_kernel_thread)
19886 CFI_STARTPROC
19887@@ -344,7 +493,15 @@ ret_from_intr:
19888 andl $SEGMENT_RPL_MASK, %eax
19889 #endif
19890 cmpl $USER_RPL, %eax
19891+
19892+#ifdef CONFIG_PAX_KERNEXEC
19893+ jae resume_userspace
19894+
19895+ pax_exit_kernel
19896+ jmp resume_kernel
19897+#else
19898 jb resume_kernel # not returning to v8086 or userspace
19899+#endif
19900
19901 ENTRY(resume_userspace)
19902 LOCKDEP_SYS_EXIT
19903@@ -356,8 +513,8 @@ ENTRY(resume_userspace)
19904 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
19905 # int/exception return?
19906 jne work_pending
19907- jmp restore_all
19908-END(ret_from_exception)
19909+ jmp restore_all_pax
19910+ENDPROC(ret_from_exception)
19911
19912 #ifdef CONFIG_PREEMPT
19913 ENTRY(resume_kernel)
19914@@ -372,7 +529,7 @@ need_resched:
19915 jz restore_all
19916 call preempt_schedule_irq
19917 jmp need_resched
19918-END(resume_kernel)
19919+ENDPROC(resume_kernel)
19920 #endif
19921 CFI_ENDPROC
19922 /*
19923@@ -406,30 +563,45 @@ sysenter_past_esp:
19924 /*CFI_REL_OFFSET cs, 0*/
19925 /*
19926 * Push current_thread_info()->sysenter_return to the stack.
19927- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
19928- * pushed above; +8 corresponds to copy_thread's esp0 setting.
19929 */
19930- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
19931+ pushl_cfi $0
19932 CFI_REL_OFFSET eip, 0
19933
19934 pushl_cfi %eax
19935 SAVE_ALL
19936+ GET_THREAD_INFO(%ebp)
19937+ movl TI_sysenter_return(%ebp),%ebp
19938+ movl %ebp,PT_EIP(%esp)
19939 ENABLE_INTERRUPTS(CLBR_NONE)
19940
19941 /*
19942 * Load the potential sixth argument from user stack.
19943 * Careful about security.
19944 */
19945+ movl PT_OLDESP(%esp),%ebp
19946+
19947+#ifdef CONFIG_PAX_MEMORY_UDEREF
19948+ mov PT_OLDSS(%esp),%ds
19949+1: movl %ds:(%ebp),%ebp
19950+ push %ss
19951+ pop %ds
19952+#else
19953 cmpl $__PAGE_OFFSET-3,%ebp
19954 jae syscall_fault
19955 ASM_STAC
19956 1: movl (%ebp),%ebp
19957 ASM_CLAC
19958+#endif
19959+
19960 movl %ebp,PT_EBP(%esp)
19961 _ASM_EXTABLE(1b,syscall_fault)
19962
19963 GET_THREAD_INFO(%ebp)
19964
19965+#ifdef CONFIG_PAX_RANDKSTACK
19966+ pax_erase_kstack
19967+#endif
19968+
19969 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
19970 jnz sysenter_audit
19971 sysenter_do_call:
19972@@ -444,12 +616,24 @@ sysenter_do_call:
19973 testl $_TIF_ALLWORK_MASK, %ecx
19974 jne sysexit_audit
19975 sysenter_exit:
19976+
19977+#ifdef CONFIG_PAX_RANDKSTACK
19978+ pushl_cfi %eax
19979+ movl %esp, %eax
19980+ call pax_randomize_kstack
19981+ popl_cfi %eax
19982+#endif
19983+
19984+ pax_erase_kstack
19985+
19986 /* if something modifies registers it must also disable sysexit */
19987 movl PT_EIP(%esp), %edx
19988 movl PT_OLDESP(%esp), %ecx
19989 xorl %ebp,%ebp
19990 TRACE_IRQS_ON
19991 1: mov PT_FS(%esp), %fs
19992+2: mov PT_DS(%esp), %ds
19993+3: mov PT_ES(%esp), %es
19994 PTGS_TO_GS
19995 ENABLE_INTERRUPTS_SYSEXIT
19996
19997@@ -466,6 +650,9 @@ sysenter_audit:
19998 movl %eax,%edx /* 2nd arg: syscall number */
19999 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
20000 call __audit_syscall_entry
20001+
20002+ pax_erase_kstack
20003+
20004 pushl_cfi %ebx
20005 movl PT_EAX(%esp),%eax /* reload syscall number */
20006 jmp sysenter_do_call
20007@@ -491,10 +678,16 @@ sysexit_audit:
20008
20009 CFI_ENDPROC
20010 .pushsection .fixup,"ax"
20011-2: movl $0,PT_FS(%esp)
20012+4: movl $0,PT_FS(%esp)
20013+ jmp 1b
20014+5: movl $0,PT_DS(%esp)
20015+ jmp 1b
20016+6: movl $0,PT_ES(%esp)
20017 jmp 1b
20018 .popsection
20019- _ASM_EXTABLE(1b,2b)
20020+ _ASM_EXTABLE(1b,4b)
20021+ _ASM_EXTABLE(2b,5b)
20022+ _ASM_EXTABLE(3b,6b)
20023 PTGS_TO_GS_EX
20024 ENDPROC(ia32_sysenter_target)
20025
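The sysexit path now restores %ds and %es from the saved frame as well as %fs, so the fixup section grows matching recovery stubs (labels 4, 5 and 6) and the exception table gains one pair per possibly-faulting segment load. Each _ASM_EXTABLE(from, to) pair records "if the instruction at from faults, resume at to"; on 3.10-era kernels the entries are relative 32-bit offsets, roughly:

    /* shape of an x86 exception-table pair on this kernel generation */
    struct exception_table_entry {
    	int insn;	/* offset to the instruction that may fault */
    	int fixup;	/* offset to the recovery code to resume at */
    };
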
20026@@ -509,6 +702,11 @@ ENTRY(system_call)
20027 pushl_cfi %eax # save orig_eax
20028 SAVE_ALL
20029 GET_THREAD_INFO(%ebp)
20030+
20031+#ifdef CONFIG_PAX_RANDKSTACK
20032+ pax_erase_kstack
20033+#endif
20034+
20035 # system call tracing in operation / emulation
20036 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
20037 jnz syscall_trace_entry
20038@@ -527,6 +725,15 @@ syscall_exit:
20039 testl $_TIF_ALLWORK_MASK, %ecx # current->work
20040 jne syscall_exit_work
20041
20042+restore_all_pax:
20043+
20044+#ifdef CONFIG_PAX_RANDKSTACK
20045+ movl %esp, %eax
20046+ call pax_randomize_kstack
20047+#endif
20048+
20049+ pax_erase_kstack
20050+
20051 restore_all:
20052 TRACE_IRQS_IRET
20053 restore_all_notrace:
20054@@ -583,14 +790,34 @@ ldt_ss:
20055 * compensating for the offset by changing to the ESPFIX segment with
20056 * a base address that matches for the difference.
20057 */
20058-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
20059+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
20060 mov %esp, %edx /* load kernel esp */
20061 mov PT_OLDESP(%esp), %eax /* load userspace esp */
20062 mov %dx, %ax /* eax: new kernel esp */
20063 sub %eax, %edx /* offset (low word is 0) */
20064+#ifdef CONFIG_SMP
20065+ movl PER_CPU_VAR(cpu_number), %ebx
20066+ shll $PAGE_SHIFT_asm, %ebx
20067+ addl $cpu_gdt_table, %ebx
20068+#else
20069+ movl $cpu_gdt_table, %ebx
20070+#endif
20071 shr $16, %edx
20072- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
20073- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
20074+
20075+#ifdef CONFIG_PAX_KERNEXEC
20076+ mov %cr0, %esi
20077+ btr $16, %esi
20078+ mov %esi, %cr0
20079+#endif
20080+
20081+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
20082+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
20083+
20084+#ifdef CONFIG_PAX_KERNEXEC
20085+ bts $16, %esi
20086+ mov %esi, %cr0
20087+#endif
20088+
20089 pushl_cfi $__ESPFIX_SS
20090 pushl_cfi %eax /* new kernel esp */
20091 /* Disable interrupts, but do not irqtrace this section: we
20092@@ -619,20 +846,18 @@ work_resched:
20093 movl TI_flags(%ebp), %ecx
20094 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
20095 # than syscall tracing?
20096- jz restore_all
20097+ jz restore_all_pax
20098 testb $_TIF_NEED_RESCHED, %cl
20099 jnz work_resched
20100
20101 work_notifysig: # deal with pending signals and
20102 # notify-resume requests
20103+ movl %esp, %eax
20104 #ifdef CONFIG_VM86
20105 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
20106- movl %esp, %eax
20107 jne work_notifysig_v86 # returning to kernel-space or
20108 # vm86-space
20109 1:
20110-#else
20111- movl %esp, %eax
20112 #endif
20113 TRACE_IRQS_ON
20114 ENABLE_INTERRUPTS(CLBR_NONE)
20115@@ -653,7 +878,7 @@ work_notifysig_v86:
20116 movl %eax, %esp
20117 jmp 1b
20118 #endif
20119-END(work_pending)
20120+ENDPROC(work_pending)
20121
20122 # perform syscall exit tracing
20123 ALIGN
20124@@ -661,11 +886,14 @@ syscall_trace_entry:
20125 movl $-ENOSYS,PT_EAX(%esp)
20126 movl %esp, %eax
20127 call syscall_trace_enter
20128+
20129+ pax_erase_kstack
20130+
20131 /* What it returned is what we'll actually use. */
20132 cmpl $(NR_syscalls), %eax
20133 jnae syscall_call
20134 jmp syscall_exit
20135-END(syscall_trace_entry)
20136+ENDPROC(syscall_trace_entry)
20137
20138 # perform syscall exit tracing
20139 ALIGN
20140@@ -678,21 +906,25 @@ syscall_exit_work:
20141 movl %esp, %eax
20142 call syscall_trace_leave
20143 jmp resume_userspace
20144-END(syscall_exit_work)
20145+ENDPROC(syscall_exit_work)
20146 CFI_ENDPROC
20147
20148 RING0_INT_FRAME # can't unwind into user space anyway
20149 syscall_fault:
20150+#ifdef CONFIG_PAX_MEMORY_UDEREF
20151+ push %ss
20152+ pop %ds
20153+#endif
20154 ASM_CLAC
20155 GET_THREAD_INFO(%ebp)
20156 movl $-EFAULT,PT_EAX(%esp)
20157 jmp resume_userspace
20158-END(syscall_fault)
20159+ENDPROC(syscall_fault)
20160
20161 syscall_badsys:
20162 movl $-ENOSYS,PT_EAX(%esp)
20163 jmp resume_userspace
20164-END(syscall_badsys)
20165+ENDPROC(syscall_badsys)
20166 CFI_ENDPROC
20167 /*
20168 * End of kprobes section
20169@@ -708,8 +940,15 @@ END(syscall_badsys)
20170 * normal stack and adjusts ESP with the matching offset.
20171 */
20172 /* fixup the stack */
20173- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
20174- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
20175+#ifdef CONFIG_SMP
20176+ movl PER_CPU_VAR(cpu_number), %ebx
20177+ shll $PAGE_SHIFT_asm, %ebx
20178+ addl $cpu_gdt_table, %ebx
20179+#else
20180+ movl $cpu_gdt_table, %ebx
20181+#endif
20182+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
20183+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
20184 shl $16, %eax
20185 addl %esp, %eax /* the adjusted stack pointer */
20186 pushl_cfi $__KERNEL_DS
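Both ESPFIX sites, the descriptor write in ldt_ss above and the descriptor read in this stack fixup, now address the GDT through cpu_gdt_table rather than PER_CPU_VAR(gdt_page), since the patch replaces the per-CPU gdt_page with an NR_CPUS array of page-sized GDTs; the KERNEXEC ifdef additionally drops CR0.WP around the write because those GDT pages are mapped read-only. A hedged C sketch of the lookup the added code performs, assuming that layout:

    /* per-CPU ESPFIX descriptor, assuming cpu_gdt_table is an NR_CPUS
       array of page-sized GDTs as introduced elsewhere in this patch */
    static struct desc_struct *espfix_desc(unsigned int cpu)
    {
    	char *gdt = (char *)cpu_gdt_table + ((unsigned long)cpu << PAGE_SHIFT);

    	return (struct desc_struct *)gdt + GDT_ENTRY_ESPFIX_SS;
    }
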
20187@@ -762,7 +1001,7 @@ vector=vector+1
20188 .endr
20189 2: jmp common_interrupt
20190 .endr
20191-END(irq_entries_start)
20192+ENDPROC(irq_entries_start)
20193
20194 .previous
20195 END(interrupt)
20196@@ -813,7 +1052,7 @@ ENTRY(coprocessor_error)
20197 pushl_cfi $do_coprocessor_error
20198 jmp error_code
20199 CFI_ENDPROC
20200-END(coprocessor_error)
20201+ENDPROC(coprocessor_error)
20202
20203 ENTRY(simd_coprocessor_error)
20204 RING0_INT_FRAME
20205@@ -826,7 +1065,7 @@ ENTRY(simd_coprocessor_error)
20206 .section .altinstructions,"a"
20207 altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f
20208 .previous
20209-.section .altinstr_replacement,"ax"
20210+.section .altinstr_replacement,"a"
20211 663: pushl $do_simd_coprocessor_error
20212 664:
20213 .previous
20214@@ -835,7 +1074,7 @@ ENTRY(simd_coprocessor_error)
20215 #endif
20216 jmp error_code
20217 CFI_ENDPROC
20218-END(simd_coprocessor_error)
20219+ENDPROC(simd_coprocessor_error)
20220
20221 ENTRY(device_not_available)
20222 RING0_INT_FRAME
20223@@ -844,18 +1083,18 @@ ENTRY(device_not_available)
20224 pushl_cfi $do_device_not_available
20225 jmp error_code
20226 CFI_ENDPROC
20227-END(device_not_available)
20228+ENDPROC(device_not_available)
20229
20230 #ifdef CONFIG_PARAVIRT
20231 ENTRY(native_iret)
20232 iret
20233 _ASM_EXTABLE(native_iret, iret_exc)
20234-END(native_iret)
20235+ENDPROC(native_iret)
20236
20237 ENTRY(native_irq_enable_sysexit)
20238 sti
20239 sysexit
20240-END(native_irq_enable_sysexit)
20241+ENDPROC(native_irq_enable_sysexit)
20242 #endif
20243
20244 ENTRY(overflow)
20245@@ -865,7 +1104,7 @@ ENTRY(overflow)
20246 pushl_cfi $do_overflow
20247 jmp error_code
20248 CFI_ENDPROC
20249-END(overflow)
20250+ENDPROC(overflow)
20251
20252 ENTRY(bounds)
20253 RING0_INT_FRAME
20254@@ -874,7 +1113,7 @@ ENTRY(bounds)
20255 pushl_cfi $do_bounds
20256 jmp error_code
20257 CFI_ENDPROC
20258-END(bounds)
20259+ENDPROC(bounds)
20260
20261 ENTRY(invalid_op)
20262 RING0_INT_FRAME
20263@@ -883,7 +1122,7 @@ ENTRY(invalid_op)
20264 pushl_cfi $do_invalid_op
20265 jmp error_code
20266 CFI_ENDPROC
20267-END(invalid_op)
20268+ENDPROC(invalid_op)
20269
20270 ENTRY(coprocessor_segment_overrun)
20271 RING0_INT_FRAME
20272@@ -892,7 +1131,7 @@ ENTRY(coprocessor_segment_overrun)
20273 pushl_cfi $do_coprocessor_segment_overrun
20274 jmp error_code
20275 CFI_ENDPROC
20276-END(coprocessor_segment_overrun)
20277+ENDPROC(coprocessor_segment_overrun)
20278
20279 ENTRY(invalid_TSS)
20280 RING0_EC_FRAME
20281@@ -900,7 +1139,7 @@ ENTRY(invalid_TSS)
20282 pushl_cfi $do_invalid_TSS
20283 jmp error_code
20284 CFI_ENDPROC
20285-END(invalid_TSS)
20286+ENDPROC(invalid_TSS)
20287
20288 ENTRY(segment_not_present)
20289 RING0_EC_FRAME
20290@@ -908,7 +1147,7 @@ ENTRY(segment_not_present)
20291 pushl_cfi $do_segment_not_present
20292 jmp error_code
20293 CFI_ENDPROC
20294-END(segment_not_present)
20295+ENDPROC(segment_not_present)
20296
20297 ENTRY(stack_segment)
20298 RING0_EC_FRAME
20299@@ -916,7 +1155,7 @@ ENTRY(stack_segment)
20300 pushl_cfi $do_stack_segment
20301 jmp error_code
20302 CFI_ENDPROC
20303-END(stack_segment)
20304+ENDPROC(stack_segment)
20305
20306 ENTRY(alignment_check)
20307 RING0_EC_FRAME
20308@@ -924,7 +1163,7 @@ ENTRY(alignment_check)
20309 pushl_cfi $do_alignment_check
20310 jmp error_code
20311 CFI_ENDPROC
20312-END(alignment_check)
20313+ENDPROC(alignment_check)
20314
20315 ENTRY(divide_error)
20316 RING0_INT_FRAME
20317@@ -933,7 +1172,7 @@ ENTRY(divide_error)
20318 pushl_cfi $do_divide_error
20319 jmp error_code
20320 CFI_ENDPROC
20321-END(divide_error)
20322+ENDPROC(divide_error)
20323
20324 #ifdef CONFIG_X86_MCE
20325 ENTRY(machine_check)
20326@@ -943,7 +1182,7 @@ ENTRY(machine_check)
20327 pushl_cfi machine_check_vector
20328 jmp error_code
20329 CFI_ENDPROC
20330-END(machine_check)
20331+ENDPROC(machine_check)
20332 #endif
20333
20334 ENTRY(spurious_interrupt_bug)
20335@@ -953,7 +1192,7 @@ ENTRY(spurious_interrupt_bug)
20336 pushl_cfi $do_spurious_interrupt_bug
20337 jmp error_code
20338 CFI_ENDPROC
20339-END(spurious_interrupt_bug)
20340+ENDPROC(spurious_interrupt_bug)
20341 /*
20342 * End of kprobes section
20343 */
20344@@ -1063,7 +1302,7 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
20345
20346 ENTRY(mcount)
20347 ret
20348-END(mcount)
20349+ENDPROC(mcount)
20350
20351 ENTRY(ftrace_caller)
20352 cmpl $0, function_trace_stop
20353@@ -1096,7 +1335,7 @@ ftrace_graph_call:
20354 .globl ftrace_stub
20355 ftrace_stub:
20356 ret
20357-END(ftrace_caller)
20358+ENDPROC(ftrace_caller)
20359
20360 ENTRY(ftrace_regs_caller)
20361 pushf /* push flags before compare (in cs location) */
20362@@ -1197,7 +1436,7 @@ trace:
20363 popl %ecx
20364 popl %eax
20365 jmp ftrace_stub
20366-END(mcount)
20367+ENDPROC(mcount)
20368 #endif /* CONFIG_DYNAMIC_FTRACE */
20369 #endif /* CONFIG_FUNCTION_TRACER */
20370
20371@@ -1215,7 +1454,7 @@ ENTRY(ftrace_graph_caller)
20372 popl %ecx
20373 popl %eax
20374 ret
20375-END(ftrace_graph_caller)
20376+ENDPROC(ftrace_graph_caller)
20377
20378 .globl return_to_handler
20379 return_to_handler:
20380@@ -1271,15 +1510,18 @@ error_code:
20381 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
20382 REG_TO_PTGS %ecx
20383 SET_KERNEL_GS %ecx
20384- movl $(__USER_DS), %ecx
20385+ movl $(__KERNEL_DS), %ecx
20386 movl %ecx, %ds
20387 movl %ecx, %es
20388+
20389+ pax_enter_kernel
20390+
20391 TRACE_IRQS_OFF
20392 movl %esp,%eax # pt_regs pointer
20393 call *%edi
20394 jmp ret_from_exception
20395 CFI_ENDPROC
20396-END(page_fault)
20397+ENDPROC(page_fault)
20398
20399 /*
20400 * Debug traps and NMI can happen at the one SYSENTER instruction
20401@@ -1322,7 +1564,7 @@ debug_stack_correct:
20402 call do_debug
20403 jmp ret_from_exception
20404 CFI_ENDPROC
20405-END(debug)
20406+ENDPROC(debug)
20407
20408 /*
20409 * NMI is doubly nasty. It can happen _while_ we're handling
20410@@ -1360,6 +1602,9 @@ nmi_stack_correct:
20411 xorl %edx,%edx # zero error code
20412 movl %esp,%eax # pt_regs pointer
20413 call do_nmi
20414+
20415+ pax_exit_kernel
20416+
20417 jmp restore_all_notrace
20418 CFI_ENDPROC
20419
20420@@ -1396,12 +1641,15 @@ nmi_espfix_stack:
20421 FIXUP_ESPFIX_STACK # %eax == %esp
20422 xorl %edx,%edx # zero error code
20423 call do_nmi
20424+
20425+ pax_exit_kernel
20426+
20427 RESTORE_REGS
20428 lss 12+4(%esp), %esp # back to espfix stack
20429 CFI_ADJUST_CFA_OFFSET -24
20430 jmp irq_return
20431 CFI_ENDPROC
20432-END(nmi)
20433+ENDPROC(nmi)
20434
20435 ENTRY(int3)
20436 RING0_INT_FRAME
20437@@ -1414,14 +1662,14 @@ ENTRY(int3)
20438 call do_int3
20439 jmp ret_from_exception
20440 CFI_ENDPROC
20441-END(int3)
20442+ENDPROC(int3)
20443
20444 ENTRY(general_protection)
20445 RING0_EC_FRAME
20446 pushl_cfi $do_general_protection
20447 jmp error_code
20448 CFI_ENDPROC
20449-END(general_protection)
20450+ENDPROC(general_protection)
20451
20452 #ifdef CONFIG_KVM_GUEST
20453 ENTRY(async_page_fault)
20454@@ -1430,7 +1678,7 @@ ENTRY(async_page_fault)
20455 pushl_cfi $do_async_page_fault
20456 jmp error_code
20457 CFI_ENDPROC
20458-END(async_page_fault)
20459+ENDPROC(async_page_fault)
20460 #endif
20461
20462 /*
20463diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
20464index 7272089..833fdf8 100644
20465--- a/arch/x86/kernel/entry_64.S
20466+++ b/arch/x86/kernel/entry_64.S
20467@@ -59,6 +59,8 @@
20468 #include <asm/context_tracking.h>
20469 #include <asm/smap.h>
20470 #include <linux/err.h>
20471+#include <asm/pgtable.h>
20472+#include <asm/alternative-asm.h>
20473
20474 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
20475 #include <linux/elf-em.h>
20476@@ -80,8 +82,9 @@
20477 #ifdef CONFIG_DYNAMIC_FTRACE
20478
20479 ENTRY(function_hook)
20480+ pax_force_retaddr
20481 retq
20482-END(function_hook)
20483+ENDPROC(function_hook)
20484
20485 /* skip is set if stack has been adjusted */
20486 .macro ftrace_caller_setup skip=0
20487@@ -122,8 +125,9 @@ GLOBAL(ftrace_graph_call)
20488 #endif
20489
20490 GLOBAL(ftrace_stub)
20491+ pax_force_retaddr
20492 retq
20493-END(ftrace_caller)
20494+ENDPROC(ftrace_caller)
20495
20496 ENTRY(ftrace_regs_caller)
20497 /* Save the current flags before compare (in SS location)*/
20498@@ -191,7 +195,7 @@ ftrace_restore_flags:
20499 popfq
20500 jmp ftrace_stub
20501
20502-END(ftrace_regs_caller)
20503+ENDPROC(ftrace_regs_caller)
20504
20505
20506 #else /* ! CONFIG_DYNAMIC_FTRACE */
20507@@ -212,6 +216,7 @@ ENTRY(function_hook)
20508 #endif
20509
20510 GLOBAL(ftrace_stub)
20511+ pax_force_retaddr
20512 retq
20513
20514 trace:
20515@@ -225,12 +230,13 @@ trace:
20516 #endif
20517 subq $MCOUNT_INSN_SIZE, %rdi
20518
20519+ pax_force_fptr ftrace_trace_function
20520 call *ftrace_trace_function
20521
20522 MCOUNT_RESTORE_FRAME
20523
20524 jmp ftrace_stub
20525-END(function_hook)
20526+ENDPROC(function_hook)
20527 #endif /* CONFIG_DYNAMIC_FTRACE */
20528 #endif /* CONFIG_FUNCTION_TRACER */
20529
20530@@ -252,8 +258,9 @@ ENTRY(ftrace_graph_caller)
20531
20532 MCOUNT_RESTORE_FRAME
20533
20534+ pax_force_retaddr
20535 retq
20536-END(ftrace_graph_caller)
20537+ENDPROC(ftrace_graph_caller)
20538
20539 GLOBAL(return_to_handler)
20540 subq $24, %rsp
20541@@ -269,7 +276,9 @@ GLOBAL(return_to_handler)
20542 movq 8(%rsp), %rdx
20543 movq (%rsp), %rax
20544 addq $24, %rsp
20545+ pax_force_fptr %rdi
20546 jmp *%rdi
20547+ENDPROC(return_to_handler)
20548 #endif
20549
20550
20551@@ -284,6 +293,427 @@ ENTRY(native_usergs_sysret64)
20552 ENDPROC(native_usergs_sysret64)
20553 #endif /* CONFIG_PARAVIRT */
20554
20555+ .macro ljmpq sel, off
20556+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
20557+ .byte 0x48; ljmp *1234f(%rip)
20558+ .pushsection .rodata
20559+ .align 16
20560+ 1234: .quad \off; .word \sel
20561+ .popsection
20562+#else
20563+ pushq $\sel
20564+ pushq $\off
20565+ lretq
20566+#endif
20567+ .endm
20568+
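ljmpq is a far-jump helper that reloads %cs in 64-bit mode: on the listed CPU families it emits a REX.W-prefixed indirect ljmp (the raw 0x48 byte) through a 10-byte {offset, selector} literal placed in .rodata, and on everything else it synthesizes the same transfer by pushing the selector and offset and executing lretq. Either form lands at \sel:\off; the ifdef only picks the encoding per CPU family.
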
20569+ .macro pax_enter_kernel
20570+ pax_set_fptr_mask
20571+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
20572+ call pax_enter_kernel
20573+#endif
20574+ .endm
20575+
20576+ .macro pax_exit_kernel
20577+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
20578+ call pax_exit_kernel
20579+#endif
20580+
20581+ .endm
20582+
20583+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
20584+ENTRY(pax_enter_kernel)
20585+ pushq %rdi
20586+
20587+#ifdef CONFIG_PARAVIRT
20588+ PV_SAVE_REGS(CLBR_RDI)
20589+#endif
20590+
20591+#ifdef CONFIG_PAX_KERNEXEC
20592+ GET_CR0_INTO_RDI
20593+ bts $16,%rdi
20594+ jnc 3f
20595+ mov %cs,%edi
20596+ cmp $__KERNEL_CS,%edi
20597+ jnz 2f
20598+1:
20599+#endif
20600+
20601+#ifdef CONFIG_PAX_MEMORY_UDEREF
20602+ 661: jmp 111f
20603+ .pushsection .altinstr_replacement, "a"
20604+ 662: ASM_NOP2
20605+ .popsection
20606+ .pushsection .altinstructions, "a"
20607+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
20608+ .popsection
20609+ GET_CR3_INTO_RDI
20610+ cmp $0,%dil
20611+ jnz 112f
20612+ mov $__KERNEL_DS,%edi
20613+ mov %edi,%ss
20614+ jmp 111f
20615+112: cmp $1,%dil
20616+ jz 113f
20617+ ud2
20618+113: sub $4097,%rdi
20619+ bts $63,%rdi
20620+ SET_RDI_INTO_CR3
20621+ mov $__UDEREF_KERNEL_DS,%edi
20622+ mov %edi,%ss
20623+111:
20624+#endif
20625+
20626+#ifdef CONFIG_PARAVIRT
20627+ PV_RESTORE_REGS(CLBR_RDI)
20628+#endif
20629+
20630+ popq %rdi
20631+ pax_force_retaddr
20632+ retq
20633+
20634+#ifdef CONFIG_PAX_KERNEXEC
20635+2: ljmpq __KERNEL_CS,1b
20636+3: ljmpq __KERNEXEC_KERNEL_CS,4f
20637+4: SET_RDI_INTO_CR0
20638+ jmp 1b
20639+#endif
20640+ENDPROC(pax_enter_kernel)
20641+
20642+ENTRY(pax_exit_kernel)
20643+ pushq %rdi
20644+
20645+#ifdef CONFIG_PARAVIRT
20646+ PV_SAVE_REGS(CLBR_RDI)
20647+#endif
20648+
20649+#ifdef CONFIG_PAX_KERNEXEC
20650+ mov %cs,%rdi
20651+ cmp $__KERNEXEC_KERNEL_CS,%edi
20652+ jz 2f
20653+ GET_CR0_INTO_RDI
20654+ bts $16,%rdi
20655+ jnc 4f
20656+1:
20657+#endif
20658+
20659+#ifdef CONFIG_PAX_MEMORY_UDEREF
20660+ 661: jmp 111f
20661+ .pushsection .altinstr_replacement, "a"
20662+ 662: ASM_NOP2
20663+ .popsection
20664+ .pushsection .altinstructions, "a"
20665+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
20666+ .popsection
20667+ mov %ss,%edi
20668+ cmp $__UDEREF_KERNEL_DS,%edi
20669+ jnz 111f
20670+ GET_CR3_INTO_RDI
20671+ cmp $0,%dil
20672+ jz 112f
20673+ ud2
20674+112: add $4097,%rdi
20675+ bts $63,%rdi
20676+ SET_RDI_INTO_CR3
20677+ mov $__KERNEL_DS,%edi
20678+ mov %edi,%ss
20679+111:
20680+#endif
20681+
20682+#ifdef CONFIG_PARAVIRT
20683+ PV_RESTORE_REGS(CLBR_RDI);
20684+#endif
20685+
20686+ popq %rdi
20687+ pax_force_retaddr
20688+ retq
20689+
20690+#ifdef CONFIG_PAX_KERNEXEC
20691+2: GET_CR0_INTO_RDI
20692+ btr $16,%rdi
20693+ jnc 4f
20694+ ljmpq __KERNEL_CS,3f
20695+3: SET_RDI_INTO_CR0
20696+ jmp 1b
20697+4: ud2
20698+ jmp 4b
20699+#endif
20700+ENDPROC(pax_exit_kernel)
20701+#endif
20702+
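pax_enter_kernel and pax_exit_kernel above combine two independent mechanisms. KERNEXEC flips CR0.WP (bit 16) and bounces through __KERNEXEC_KERNEL_CS so kernel text stays read-only outside these brackets. UDEREF, when the CPU has PCID (the alternatives-patched jmp is nopped out), switches between adjacent user and kernel PGDs by plain CR3 arithmetic and reloads %ss to match. The constants are compact: 4097 is PAGE_SIZE + 1, covering both the one-page distance between the two PGDs and the PCID change between 1 (user) and 0 (kernel), while bit 63 of CR3 requests a no-flush write. A hedged C rendering of that arithmetic, names illustrative:

    #define CR3_NOFLUSH	(1UL << 63)	/* don't flush TLB on the CR3 write */

    /* user PGD sits one page above the kernel PGD; PCID 1 = user, 0 = kernel */
    static inline unsigned long to_kernel_cr3(unsigned long cr3)
    {
    	return (cr3 - PAGE_SIZE - 1) | CR3_NOFLUSH;
    }

    static inline unsigned long to_user_cr3(unsigned long cr3)
    {
    	return (cr3 + PAGE_SIZE + 1) | CR3_NOFLUSH;
    }
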
20703+ .macro pax_enter_kernel_user
20704+ pax_set_fptr_mask
20705+#ifdef CONFIG_PAX_MEMORY_UDEREF
20706+ call pax_enter_kernel_user
20707+#endif
20708+ .endm
20709+
20710+ .macro pax_exit_kernel_user
20711+#ifdef CONFIG_PAX_MEMORY_UDEREF
20712+ call pax_exit_kernel_user
20713+#endif
20714+#ifdef CONFIG_PAX_RANDKSTACK
20715+ pushq %rax
20716+ pushq %r11
20717+ call pax_randomize_kstack
20718+ popq %r11
20719+ popq %rax
20720+#endif
20721+ .endm
20722+
20723+#ifdef CONFIG_PAX_MEMORY_UDEREF
20724+ENTRY(pax_enter_kernel_user)
20725+ pushq %rdi
20726+ pushq %rbx
20727+
20728+#ifdef CONFIG_PARAVIRT
20729+ PV_SAVE_REGS(CLBR_RDI)
20730+#endif
20731+
20732+ 661: jmp 111f
20733+ .pushsection .altinstr_replacement, "a"
20734+ 662: ASM_NOP2
20735+ .popsection
20736+ .pushsection .altinstructions, "a"
20737+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
20738+ .popsection
20739+ GET_CR3_INTO_RDI
20740+ cmp $1,%dil
20741+ jnz 3f
20742+ sub $4097,%rdi
20743+ bts $63,%rdi
20744+ jmp 2f
20745+111:
20746+
20747+ GET_CR3_INTO_RDI
20748+ mov %rdi,%rbx
20749+ add $__START_KERNEL_map,%rbx
20750+ sub phys_base(%rip),%rbx
20751+
20752+#ifdef CONFIG_PARAVIRT
20753+ cmpl $0, pv_info+PARAVIRT_enabled
20754+ jz 1f
20755+ pushq %rdi
20756+ i = 0
20757+ .rept USER_PGD_PTRS
20758+ mov i*8(%rbx),%rsi
20759+ mov $0,%sil
20760+ lea i*8(%rbx),%rdi
20761+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
20762+ i = i + 1
20763+ .endr
20764+ popq %rdi
20765+ jmp 2f
20766+1:
20767+#endif
20768+
20769+ i = 0
20770+ .rept USER_PGD_PTRS
20771+ movb $0,i*8(%rbx)
20772+ i = i + 1
20773+ .endr
20774+
20775+#ifdef CONFIG_PAX_KERNEXEC
20776+ GET_CR0_INTO_RDI
20777+ bts $16,%rdi
20778+ SET_RDI_INTO_CR0
20779+#endif
20780+
20781+2: SET_RDI_INTO_CR3
20782+
20783+#ifdef CONFIG_PARAVIRT
20784+ PV_RESTORE_REGS(CLBR_RDI)
20785+#endif
20786+
20787+ popq %rbx
20788+ popq %rdi
20789+ pax_force_retaddr
20790+ retq
20791+3: ud2
20792+ENDPROC(pax_enter_kernel_user)
20793+
20794+ENTRY(pax_exit_kernel_user)
20795+ pushq %rdi
20796+ pushq %rbx
20797+
20798+#ifdef CONFIG_PARAVIRT
20799+ PV_SAVE_REGS(CLBR_RDI)
20800+#endif
20801+
20802+ GET_CR3_INTO_RDI
20803+ 661: jmp 1f
20804+ .pushsection .altinstr_replacement, "a"
20805+ 662: ASM_NOP2
20806+ .popsection
20807+ .pushsection .altinstructions, "a"
20808+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
20809+ .popsection
20810+ cmp $0,%dil
20811+ jnz 3f
20812+ add $4097,%rdi
20813+ bts $63,%rdi
20814+ SET_RDI_INTO_CR3
20815+ jmp 2f
20816+1:
20817+ mov %rdi,%rbx
20818+ add $__START_KERNEL_map,%rbx
20819+ sub phys_base(%rip),%rbx
20820+
20821+#ifdef CONFIG_PARAVIRT
20822+ cmpl $0, pv_info+PARAVIRT_enabled
20823+ jz 1f
20824+ pushq %rdi
20825+ i = 0
20826+ .rept USER_PGD_PTRS
20827+ mov i*8(%rbx),%rsi
20828+ mov $0x67,%sil
20829+ lea i*8(%rbx),%rdi
20830+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
20831+ i = i + 1
20832+ .endr
20833+ popq %rdi
20834+ jmp 2f
20835+1:
20836+#endif
20837+
20838+#ifdef CONFIG_PAX_KERNEXEC
20839+ GET_CR0_INTO_RDI
20840+ btr $16,%rdi
20841+ jnc 3f
20842+ SET_RDI_INTO_CR0
20843+#endif
20844+
20845+ i = 0
20846+ .rept USER_PGD_PTRS
20847+ movb $0x67,i*8(%rbx)
20848+ i = i + 1
20849+ .endr
20850+2:
20851+
20852+#ifdef CONFIG_PARAVIRT
20853+ PV_RESTORE_REGS(CLBR_RDI)
20854+#endif
20855+
20856+ popq %rbx
20857+ popq %rdi
20858+ pax_force_retaddr
20859+ retq
20860+3: ud2
20861+ENDPROC(pax_exit_kernel_user)
20862+#endif
20863+
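Without PCID, pax_enter_kernel_user and pax_exit_kernel_user fall back to editing the shared PGD in place: on entry the user half of the page-global directory is unmapped by zeroing the low flags byte of each of the USER_PGD_PTRS entries, and on exit that byte is restored to 0x67 (present | rw | user | accessed | dirty); under paravirt the same writes are routed through pv_mmu_ops.set_pgd_batched instead of plain stores. A direct C transliteration of the two loops (a sketch, using byte addressing exactly as the movb instructions do):

    static void uderef_enter_sketch(unsigned char *pgd)	/* virt PGD base */
    {
    	unsigned int i;

    	for (i = 0; i < USER_PGD_PTRS; i++)
    		pgd[i * 8] = 0;		/* clear P/RW/US/A/D of user entry i */
    }

    static void uderef_exit_sketch(unsigned char *pgd)
    {
    	unsigned int i;

    	for (i = 0; i < USER_PGD_PTRS; i++)
    		pgd[i * 8] = 0x67;	/* re-mark user entry i present */
    }
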
20864+ .macro pax_enter_kernel_nmi
20865+ pax_set_fptr_mask
20866+
20867+#ifdef CONFIG_PAX_KERNEXEC
20868+ GET_CR0_INTO_RDI
20869+ bts $16,%rdi
20870+ jc 110f
20871+ SET_RDI_INTO_CR0
20872+ or $2,%ebx
20873+110:
20874+#endif
20875+
20876+#ifdef CONFIG_PAX_MEMORY_UDEREF
20877+ 661: jmp 111f
20878+ .pushsection .altinstr_replacement, "a"
20879+ 662: ASM_NOP2
20880+ .popsection
20881+ .pushsection .altinstructions, "a"
20882+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
20883+ .popsection
20884+ GET_CR3_INTO_RDI
20885+ cmp $0,%dil
20886+ jz 111f
20887+ sub $4097,%rdi
20888+ or $4,%ebx
20889+ bts $63,%rdi
20890+ SET_RDI_INTO_CR3
20891+ mov $__UDEREF_KERNEL_DS,%edi
20892+ mov %edi,%ss
20893+111:
20894+#endif
20895+ .endm
20896+
20897+ .macro pax_exit_kernel_nmi
20898+#ifdef CONFIG_PAX_KERNEXEC
20899+ btr $1,%ebx
20900+ jnc 110f
20901+ GET_CR0_INTO_RDI
20902+ btr $16,%rdi
20903+ SET_RDI_INTO_CR0
20904+110:
20905+#endif
20906+
20907+#ifdef CONFIG_PAX_MEMORY_UDEREF
20908+ btr $2,%ebx
20909+ jnc 111f
20910+ GET_CR3_INTO_RDI
20911+ add $4097,%rdi
20912+ bts $63,%rdi
20913+ SET_RDI_INTO_CR3
20914+ mov $__KERNEL_DS,%edi
20915+ mov %edi,%ss
20916+111:
20917+#endif
20918+ .endm
20919+
20920+ .macro pax_erase_kstack
20921+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
20922+ call pax_erase_kstack
20923+#endif
20924+ .endm
20925+
20926+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
20927+ENTRY(pax_erase_kstack)
20928+ pushq %rdi
20929+ pushq %rcx
20930+ pushq %rax
20931+ pushq %r11
20932+
20933+ GET_THREAD_INFO(%r11)
20934+ mov TI_lowest_stack(%r11), %rdi
20935+ mov $-0xBEEF, %rax
20936+ std
20937+
20938+1: mov %edi, %ecx
20939+ and $THREAD_SIZE_asm - 1, %ecx
20940+ shr $3, %ecx
20941+ repne scasq
20942+ jecxz 2f
20943+
20944+ cmp $2*8, %ecx
20945+ jc 2f
20946+
20947+ mov $2*8, %ecx
20948+ repe scasq
20949+ jecxz 2f
20950+ jne 1b
20951+
20952+2: cld
20953+ mov %esp, %ecx
20954+ sub %edi, %ecx
20955+
20956+ cmp $THREAD_SIZE_asm, %rcx
20957+ jb 3f
20958+ ud2
20959+3:
20960+
20961+ shr $3, %ecx
20962+ rep stosq
20963+
20964+ mov TI_task_thread_sp0(%r11), %rdi
20965+ sub $256, %rdi
20966+ mov %rdi, TI_lowest_stack(%r11)
20967+
20968+ popq %r11
20969+ popq %rax
20970+ popq %rcx
20971+ popq %rdi
20972+ pax_force_retaddr
20973+ ret
20974+ENDPROC(pax_erase_kstack)
20975+#endif
20976
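The 64-bit pax_erase_kstack is the same scrub as the 32-bit one earlier in this patch, scaled to quadwords: scasq/stosq with shr $3 instead of shr $2, a boundary run of 2*8 qwords, thread_info reached through %r11 rather than %ebp, and 256 bytes (not 128) reserved below sp0 when TI_lowest_stack is reset. Only the poison constant changes shape, since $-0xBEEF sign-extends:

    #define POISON64	0xFFFFFFFFFFFF4111UL	/* 64-bit form of $-0xBEEF */
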
20977 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
20978 #ifdef CONFIG_TRACE_IRQFLAGS
20979@@ -375,8 +805,8 @@ ENDPROC(native_usergs_sysret64)
20980 .endm
20981
20982 .macro UNFAKE_STACK_FRAME
20983- addq $8*6, %rsp
20984- CFI_ADJUST_CFA_OFFSET -(6*8)
20985+ addq $8*6 + ARG_SKIP, %rsp
20986+ CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
20987 .endm
20988
20989 /*
20990@@ -463,7 +893,7 @@ ENDPROC(native_usergs_sysret64)
20991 movq %rsp, %rsi
20992
20993 leaq -RBP(%rsp),%rdi /* arg1 for handler */
20994- testl $3, CS-RBP(%rsi)
20995+ testb $3, CS-RBP(%rsi)
20996 je 1f
20997 SWAPGS
20998 /*
20999@@ -498,9 +928,10 @@ ENTRY(save_rest)
21000 movq_cfi r15, R15+16
21001 movq %r11, 8(%rsp) /* return address */
21002 FIXUP_TOP_OF_STACK %r11, 16
21003+ pax_force_retaddr
21004 ret
21005 CFI_ENDPROC
21006-END(save_rest)
21007+ENDPROC(save_rest)
21008
21009 /* save complete stack frame */
21010 .pushsection .kprobes.text, "ax"
21011@@ -529,9 +960,10 @@ ENTRY(save_paranoid)
21012 js 1f /* negative -> in kernel */
21013 SWAPGS
21014 xorl %ebx,%ebx
21015-1: ret
21016+1: pax_force_retaddr_bts
21017+ ret
21018 CFI_ENDPROC
21019-END(save_paranoid)
21020+ENDPROC(save_paranoid)
21021 .popsection
21022
21023 /*
21024@@ -553,7 +985,7 @@ ENTRY(ret_from_fork)
21025
21026 RESTORE_REST
21027
21028- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
21029+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
21030 jz 1f
21031
21032 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
21033@@ -571,7 +1003,7 @@ ENTRY(ret_from_fork)
21034 RESTORE_REST
21035 jmp int_ret_from_sys_call
21036 CFI_ENDPROC
21037-END(ret_from_fork)
21038+ENDPROC(ret_from_fork)
21039
21040 /*
21041 * System call entry. Up to 6 arguments in registers are supported.
21042@@ -608,7 +1040,7 @@ END(ret_from_fork)
21043 ENTRY(system_call)
21044 CFI_STARTPROC simple
21045 CFI_SIGNAL_FRAME
21046- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
21047+ CFI_DEF_CFA rsp,0
21048 CFI_REGISTER rip,rcx
21049 /*CFI_REGISTER rflags,r11*/
21050 SWAPGS_UNSAFE_STACK
21051@@ -621,16 +1053,23 @@ GLOBAL(system_call_after_swapgs)
21052
21053 movq %rsp,PER_CPU_VAR(old_rsp)
21054 movq PER_CPU_VAR(kernel_stack),%rsp
21055+ SAVE_ARGS 8*6,0
21056+ pax_enter_kernel_user
21057+
21058+#ifdef CONFIG_PAX_RANDKSTACK
21059+ pax_erase_kstack
21060+#endif
21061+
21062 /*
21063 * No need to follow this irqs off/on section - it's straight
21064 * and short:
21065 */
21066 ENABLE_INTERRUPTS(CLBR_NONE)
21067- SAVE_ARGS 8,0
21068 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
21069 movq %rcx,RIP-ARGOFFSET(%rsp)
21070 CFI_REL_OFFSET rip,RIP-ARGOFFSET
21071- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
21072+ GET_THREAD_INFO(%rcx)
21073+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
21074 jnz tracesys
21075 system_call_fastpath:
21076 #if __SYSCALL_MASK == ~0
21077@@ -640,7 +1079,7 @@ system_call_fastpath:
21078 cmpl $__NR_syscall_max,%eax
21079 #endif
21080 ja badsys
21081- movq %r10,%rcx
21082+ movq R10-ARGOFFSET(%rsp),%rcx
21083 call *sys_call_table(,%rax,8) # XXX: rip relative
21084 movq %rax,RAX-ARGOFFSET(%rsp)
21085 /*
21086@@ -654,10 +1093,13 @@ sysret_check:
21087 LOCKDEP_SYS_EXIT
21088 DISABLE_INTERRUPTS(CLBR_NONE)
21089 TRACE_IRQS_OFF
21090- movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
21091+ GET_THREAD_INFO(%rcx)
21092+ movl TI_flags(%rcx),%edx
21093 andl %edi,%edx
21094 jnz sysret_careful
21095 CFI_REMEMBER_STATE
21096+ pax_exit_kernel_user
21097+ pax_erase_kstack
21098 /*
21099 * sysretq will re-enable interrupts:
21100 */
21101@@ -709,14 +1151,18 @@ badsys:
21102 * jump back to the normal fast path.
21103 */
21104 auditsys:
21105- movq %r10,%r9 /* 6th arg: 4th syscall arg */
21106+ movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
21107 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
21108 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
21109 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
21110 movq %rax,%rsi /* 2nd arg: syscall number */
21111 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
21112 call __audit_syscall_entry
21113+
21114+ pax_erase_kstack
21115+
21116 LOAD_ARGS 0 /* reload call-clobbered registers */
21117+ pax_set_fptr_mask
21118 jmp system_call_fastpath
21119
21120 /*
21121@@ -737,7 +1183,7 @@ sysret_audit:
21122 /* Do syscall tracing */
21123 tracesys:
21124 #ifdef CONFIG_AUDITSYSCALL
21125- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
21126+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
21127 jz auditsys
21128 #endif
21129 SAVE_REST
21130@@ -745,12 +1191,16 @@ tracesys:
21131 FIXUP_TOP_OF_STACK %rdi
21132 movq %rsp,%rdi
21133 call syscall_trace_enter
21134+
21135+ pax_erase_kstack
21136+
21137 /*
21138 * Reload arg registers from stack in case ptrace changed them.
21139 * We don't reload %rax because syscall_trace_enter() returned
21140 * the value it wants us to use in the table lookup.
21141 */
21142 LOAD_ARGS ARGOFFSET, 1
21143+ pax_set_fptr_mask
21144 RESTORE_REST
21145 #if __SYSCALL_MASK == ~0
21146 cmpq $__NR_syscall_max,%rax
21147@@ -759,7 +1209,7 @@ tracesys:
21148 cmpl $__NR_syscall_max,%eax
21149 #endif
21150 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
21151- movq %r10,%rcx /* fixup for C */
21152+ movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
21153 call *sys_call_table(,%rax,8)
21154 movq %rax,RAX-ARGOFFSET(%rsp)
21155 /* Use IRET because user could have changed frame */
21156@@ -780,7 +1230,9 @@ GLOBAL(int_with_check)
21157 andl %edi,%edx
21158 jnz int_careful
21159 andl $~TS_COMPAT,TI_status(%rcx)
21160- jmp retint_swapgs
21161+ pax_exit_kernel_user
21162+ pax_erase_kstack
21163+ jmp retint_swapgs_pax
21164
21165 /* Either reschedule or signal or syscall exit tracking needed. */
21166 /* First do a reschedule test. */
21167@@ -826,7 +1278,7 @@ int_restore_rest:
21168 TRACE_IRQS_OFF
21169 jmp int_with_check
21170 CFI_ENDPROC
21171-END(system_call)
21172+ENDPROC(system_call)
21173
21174 .macro FORK_LIKE func
21175 ENTRY(stub_\func)
21176@@ -839,9 +1291,10 @@ ENTRY(stub_\func)
21177 DEFAULT_FRAME 0 8 /* offset 8: return address */
21178 call sys_\func
21179 RESTORE_TOP_OF_STACK %r11, 8
21180+ pax_force_retaddr
21181 ret $REST_SKIP /* pop extended registers */
21182 CFI_ENDPROC
21183-END(stub_\func)
21184+ENDPROC(stub_\func)
21185 .endm
21186
21187 .macro FIXED_FRAME label,func
21188@@ -851,9 +1304,10 @@ ENTRY(\label)
21189 FIXUP_TOP_OF_STACK %r11, 8-ARGOFFSET
21190 call \func
21191 RESTORE_TOP_OF_STACK %r11, 8-ARGOFFSET
21192+ pax_force_retaddr
21193 ret
21194 CFI_ENDPROC
21195-END(\label)
21196+ENDPROC(\label)
21197 .endm
21198
21199 FORK_LIKE clone
21200@@ -870,9 +1324,10 @@ ENTRY(ptregscall_common)
21201 movq_cfi_restore R12+8, r12
21202 movq_cfi_restore RBP+8, rbp
21203 movq_cfi_restore RBX+8, rbx
21204+ pax_force_retaddr
21205 ret $REST_SKIP /* pop extended registers */
21206 CFI_ENDPROC
21207-END(ptregscall_common)
21208+ENDPROC(ptregscall_common)
21209
21210 ENTRY(stub_execve)
21211 CFI_STARTPROC
21212@@ -885,7 +1340,7 @@ ENTRY(stub_execve)
21213 RESTORE_REST
21214 jmp int_ret_from_sys_call
21215 CFI_ENDPROC
21216-END(stub_execve)
21217+ENDPROC(stub_execve)
21218
21219 /*
21220 * sigreturn is special because it needs to restore all registers on return.
21221@@ -902,7 +1357,7 @@ ENTRY(stub_rt_sigreturn)
21222 RESTORE_REST
21223 jmp int_ret_from_sys_call
21224 CFI_ENDPROC
21225-END(stub_rt_sigreturn)
21226+ENDPROC(stub_rt_sigreturn)
21227
21228 #ifdef CONFIG_X86_X32_ABI
21229 ENTRY(stub_x32_rt_sigreturn)
21230@@ -916,7 +1371,7 @@ ENTRY(stub_x32_rt_sigreturn)
21231 RESTORE_REST
21232 jmp int_ret_from_sys_call
21233 CFI_ENDPROC
21234-END(stub_x32_rt_sigreturn)
21235+ENDPROC(stub_x32_rt_sigreturn)
21236
21237 ENTRY(stub_x32_execve)
21238 CFI_STARTPROC
21239@@ -930,7 +1385,7 @@ ENTRY(stub_x32_execve)
21240 RESTORE_REST
21241 jmp int_ret_from_sys_call
21242 CFI_ENDPROC
21243-END(stub_x32_execve)
21244+ENDPROC(stub_x32_execve)
21245
21246 #endif
21247
21248@@ -967,7 +1422,7 @@ vector=vector+1
21249 2: jmp common_interrupt
21250 .endr
21251 CFI_ENDPROC
21252-END(irq_entries_start)
21253+ENDPROC(irq_entries_start)
21254
21255 .previous
21256 END(interrupt)
21257@@ -987,6 +1442,16 @@ END(interrupt)
21258 subq $ORIG_RAX-RBP, %rsp
21259 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
21260 SAVE_ARGS_IRQ
21261+#ifdef CONFIG_PAX_MEMORY_UDEREF
21262+ testb $3, CS(%rdi)
21263+ jnz 1f
21264+ pax_enter_kernel
21265+ jmp 2f
21266+1: pax_enter_kernel_user
21267+2:
21268+#else
21269+ pax_enter_kernel
21270+#endif
21271 call \func
21272 .endm
21273
21274@@ -1019,7 +1484,7 @@ ret_from_intr:
21275
21276 exit_intr:
21277 GET_THREAD_INFO(%rcx)
21278- testl $3,CS-ARGOFFSET(%rsp)
21279+ testb $3,CS-ARGOFFSET(%rsp)
21280 je retint_kernel
21281
21282 /* Interrupt came from user space */
21283@@ -1041,12 +1506,16 @@ retint_swapgs: /* return to user-space */
21284 * The iretq could re-enable interrupts:
21285 */
21286 DISABLE_INTERRUPTS(CLBR_ANY)
21287+ pax_exit_kernel_user
21288+retint_swapgs_pax:
21289 TRACE_IRQS_IRETQ
21290 SWAPGS
21291 jmp restore_args
21292
21293 retint_restore_args: /* return to kernel space */
21294 DISABLE_INTERRUPTS(CLBR_ANY)
21295+ pax_exit_kernel
21296+ pax_force_retaddr (RIP-ARGOFFSET)
21297 /*
21298 * The iretq could re-enable interrupts:
21299 */
21300@@ -1129,7 +1598,7 @@ ENTRY(retint_kernel)
21301 #endif
21302
21303 CFI_ENDPROC
21304-END(common_interrupt)
21305+ENDPROC(common_interrupt)
21306 /*
21307 * End of kprobes section
21308 */
21309@@ -1147,7 +1616,7 @@ ENTRY(\sym)
21310 interrupt \do_sym
21311 jmp ret_from_intr
21312 CFI_ENDPROC
21313-END(\sym)
21314+ENDPROC(\sym)
21315 .endm
21316
21317 #ifdef CONFIG_SMP
21318@@ -1208,12 +1677,22 @@ ENTRY(\sym)
21319 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
21320 call error_entry
21321 DEFAULT_FRAME 0
21322+#ifdef CONFIG_PAX_MEMORY_UDEREF
21323+ testb $3, CS(%rsp)
21324+ jnz 1f
21325+ pax_enter_kernel
21326+ jmp 2f
21327+1: pax_enter_kernel_user
21328+2:
21329+#else
21330+ pax_enter_kernel
21331+#endif
21332 movq %rsp,%rdi /* pt_regs pointer */
21333 xorl %esi,%esi /* no error code */
21334 call \do_sym
21335 jmp error_exit /* %ebx: no swapgs flag */
21336 CFI_ENDPROC
21337-END(\sym)
21338+ENDPROC(\sym)
21339 .endm
21340
21341 .macro paranoidzeroentry sym do_sym
21342@@ -1226,15 +1705,25 @@ ENTRY(\sym)
21343 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
21344 call save_paranoid
21345 TRACE_IRQS_OFF
21346+#ifdef CONFIG_PAX_MEMORY_UDEREF
21347+ testb $3, CS(%rsp)
21348+ jnz 1f
21349+ pax_enter_kernel
21350+ jmp 2f
21351+1: pax_enter_kernel_user
21352+2:
21353+#else
21354+ pax_enter_kernel
21355+#endif
21356 movq %rsp,%rdi /* pt_regs pointer */
21357 xorl %esi,%esi /* no error code */
21358 call \do_sym
21359 jmp paranoid_exit /* %ebx: no swapgs flag */
21360 CFI_ENDPROC
21361-END(\sym)
21362+ENDPROC(\sym)
21363 .endm
21364
21365-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
21366+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
21367 .macro paranoidzeroentry_ist sym do_sym ist
21368 ENTRY(\sym)
21369 INTR_FRAME
21370@@ -1245,14 +1734,30 @@ ENTRY(\sym)
21371 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
21372 call save_paranoid
21373 TRACE_IRQS_OFF_DEBUG
21374+#ifdef CONFIG_PAX_MEMORY_UDEREF
21375+ testb $3, CS(%rsp)
21376+ jnz 1f
21377+ pax_enter_kernel
21378+ jmp 2f
21379+1: pax_enter_kernel_user
21380+2:
21381+#else
21382+ pax_enter_kernel
21383+#endif
21384 movq %rsp,%rdi /* pt_regs pointer */
21385 xorl %esi,%esi /* no error code */
21386+#ifdef CONFIG_SMP
21387+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
21388+ lea init_tss(%r12), %r12
21389+#else
21390+ lea init_tss(%rip), %r12
21391+#endif
21392 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
21393 call \do_sym
21394 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
21395 jmp paranoid_exit /* %ebx: no swapgs flag */
21396 CFI_ENDPROC
21397-END(\sym)
21398+ENDPROC(\sym)
21399 .endm
21400
21401 .macro errorentry sym do_sym
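INIT_TSS_IST is rebased onto %r12 so the macro can address the current CPU's TSS, computed either from cpu_number times TSS_size or as init_tss(%rip) on UP. Around the handler the chosen IST slot is moved down one exception stack and then restored, so a same-vector fault nested inside the handler gets a fresh stack instead of overwriting the live one. In C the added sequence amounts to (a sketch; init_tss and the field names follow the 3.10 layout):

    struct tss_struct *tss = &init_tss[smp_processor_id()];

    tss->x86_tss.ist[ist - 1] -= EXCEPTION_STKSZ;	/* before \do_sym */
    do_sym(regs, 0);					/* xorl %esi,%esi: no error code */
    tss->x86_tss.ist[ist - 1] += EXCEPTION_STKSZ;	/* after \do_sym */
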
21402@@ -1264,13 +1769,23 @@ ENTRY(\sym)
21403 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
21404 call error_entry
21405 DEFAULT_FRAME 0
21406+#ifdef CONFIG_PAX_MEMORY_UDEREF
21407+ testb $3, CS(%rsp)
21408+ jnz 1f
21409+ pax_enter_kernel
21410+ jmp 2f
21411+1: pax_enter_kernel_user
21412+2:
21413+#else
21414+ pax_enter_kernel
21415+#endif
21416 movq %rsp,%rdi /* pt_regs pointer */
21417 movq ORIG_RAX(%rsp),%rsi /* get error code */
21418 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
21419 call \do_sym
21420 jmp error_exit /* %ebx: no swapgs flag */
21421 CFI_ENDPROC
21422-END(\sym)
21423+ENDPROC(\sym)
21424 .endm
21425
21426 /* error code is on the stack already */
21427@@ -1284,13 +1799,23 @@ ENTRY(\sym)
21428 call save_paranoid
21429 DEFAULT_FRAME 0
21430 TRACE_IRQS_OFF
21431+#ifdef CONFIG_PAX_MEMORY_UDEREF
21432+ testb $3, CS(%rsp)
21433+ jnz 1f
21434+ pax_enter_kernel
21435+ jmp 2f
21436+1: pax_enter_kernel_user
21437+2:
21438+#else
21439+ pax_enter_kernel
21440+#endif
21441 movq %rsp,%rdi /* pt_regs pointer */
21442 movq ORIG_RAX(%rsp),%rsi /* get error code */
21443 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
21444 call \do_sym
21445 jmp paranoid_exit /* %ebx: no swapgs flag */
21446 CFI_ENDPROC
21447-END(\sym)
21448+ENDPROC(\sym)
21449 .endm
21450
21451 zeroentry divide_error do_divide_error
21452@@ -1320,9 +1845,10 @@ gs_change:
21453 2: mfence /* workaround */
21454 SWAPGS
21455 popfq_cfi
21456+ pax_force_retaddr
21457 ret
21458 CFI_ENDPROC
21459-END(native_load_gs_index)
21460+ENDPROC(native_load_gs_index)
21461
21462 _ASM_EXTABLE(gs_change,bad_gs)
21463 .section .fixup,"ax"
21464@@ -1350,9 +1876,10 @@ ENTRY(call_softirq)
21465 CFI_DEF_CFA_REGISTER rsp
21466 CFI_ADJUST_CFA_OFFSET -8
21467 decl PER_CPU_VAR(irq_count)
21468+ pax_force_retaddr
21469 ret
21470 CFI_ENDPROC
21471-END(call_softirq)
21472+ENDPROC(call_softirq)
21473
21474 #ifdef CONFIG_XEN
21475 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
21476@@ -1390,7 +1917,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
21477 decl PER_CPU_VAR(irq_count)
21478 jmp error_exit
21479 CFI_ENDPROC
21480-END(xen_do_hypervisor_callback)
21481+ENDPROC(xen_do_hypervisor_callback)
21482
21483 /*
21484 * Hypervisor uses this for application faults while it executes.
21485@@ -1449,7 +1976,7 @@ ENTRY(xen_failsafe_callback)
21486 SAVE_ALL
21487 jmp error_exit
21488 CFI_ENDPROC
21489-END(xen_failsafe_callback)
21490+ENDPROC(xen_failsafe_callback)
21491
21492 apicinterrupt HYPERVISOR_CALLBACK_VECTOR \
21493 xen_hvm_callback_vector xen_evtchn_do_upcall
21494@@ -1501,18 +2028,33 @@ ENTRY(paranoid_exit)
21495 DEFAULT_FRAME
21496 DISABLE_INTERRUPTS(CLBR_NONE)
21497 TRACE_IRQS_OFF_DEBUG
21498- testl %ebx,%ebx /* swapgs needed? */
21499+ testl $1,%ebx /* swapgs needed? */
21500 jnz paranoid_restore
21501- testl $3,CS(%rsp)
21502+ testb $3,CS(%rsp)
21503 jnz paranoid_userspace
21504+#ifdef CONFIG_PAX_MEMORY_UDEREF
21505+ pax_exit_kernel
21506+ TRACE_IRQS_IRETQ 0
21507+ SWAPGS_UNSAFE_STACK
21508+ RESTORE_ALL 8
21509+ pax_force_retaddr_bts
21510+ jmp irq_return
21511+#endif
21512 paranoid_swapgs:
21513+#ifdef CONFIG_PAX_MEMORY_UDEREF
21514+ pax_exit_kernel_user
21515+#else
21516+ pax_exit_kernel
21517+#endif
21518 TRACE_IRQS_IRETQ 0
21519 SWAPGS_UNSAFE_STACK
21520 RESTORE_ALL 8
21521 jmp irq_return
21522 paranoid_restore:
21523+ pax_exit_kernel
21524 TRACE_IRQS_IRETQ_DEBUG 0
21525 RESTORE_ALL 8
21526+ pax_force_retaddr_bts
21527 jmp irq_return
21528 paranoid_userspace:
21529 GET_THREAD_INFO(%rcx)
21530@@ -1541,7 +2083,7 @@ paranoid_schedule:
21531 TRACE_IRQS_OFF
21532 jmp paranoid_userspace
21533 CFI_ENDPROC
21534-END(paranoid_exit)
21535+ENDPROC(paranoid_exit)
21536
21537 /*
21538 * Exception entry point. This expects an error code/orig_rax on the stack.
21539@@ -1568,12 +2110,13 @@ ENTRY(error_entry)
21540 movq_cfi r14, R14+8
21541 movq_cfi r15, R15+8
21542 xorl %ebx,%ebx
21543- testl $3,CS+8(%rsp)
21544+ testb $3,CS+8(%rsp)
21545 je error_kernelspace
21546 error_swapgs:
21547 SWAPGS
21548 error_sti:
21549 TRACE_IRQS_OFF
21550+ pax_force_retaddr_bts
21551 ret
21552
21553 /*
21554@@ -1600,7 +2143,7 @@ bstep_iret:
21555 movq %rcx,RIP+8(%rsp)
21556 jmp error_swapgs
21557 CFI_ENDPROC
21558-END(error_entry)
21559+ENDPROC(error_entry)
21560
21561
21562 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
21563@@ -1611,7 +2154,7 @@ ENTRY(error_exit)
21564 DISABLE_INTERRUPTS(CLBR_NONE)
21565 TRACE_IRQS_OFF
21566 GET_THREAD_INFO(%rcx)
21567- testl %eax,%eax
21568+ testl $1,%eax
21569 jne retint_kernel
21570 LOCKDEP_SYS_EXIT_IRQ
21571 movl TI_flags(%rcx),%edx
21572@@ -1620,7 +2163,7 @@ ENTRY(error_exit)
21573 jnz retint_careful
21574 jmp retint_swapgs
21575 CFI_ENDPROC
21576-END(error_exit)
21577+ENDPROC(error_exit)
21578
21579 /*
21580 * Test if a given stack is an NMI stack or not.
21581@@ -1678,9 +2221,11 @@ ENTRY(nmi)
21582 * If %cs was not the kernel segment, then the NMI triggered in user
21583 * space, which means it is definitely not nested.
21584 */
21585+ cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
21586+ je 1f
21587 cmpl $__KERNEL_CS, 16(%rsp)
21588 jne first_nmi
21589-
21590+1:
21591 /*
21592 * Check the special variable on the stack to see if NMIs are
21593 * executing.
21594@@ -1714,8 +2259,7 @@ nested_nmi:
21595
21596 1:
21597 /* Set up the interrupted NMIs stack to jump to repeat_nmi */
21598- leaq -1*8(%rsp), %rdx
21599- movq %rdx, %rsp
21600+ subq $8, %rsp
21601 CFI_ADJUST_CFA_OFFSET 1*8
21602 leaq -10*8(%rsp), %rdx
21603 pushq_cfi $__KERNEL_DS
21604@@ -1733,6 +2277,7 @@ nested_nmi_out:
21605 CFI_RESTORE rdx
21606
21607 /* No need to check faults here */
21608+# pax_force_retaddr_bts
21609 INTERRUPT_RETURN
21610
21611 CFI_RESTORE_STATE
21612@@ -1849,6 +2394,8 @@ end_repeat_nmi:
21613 */
21614 movq %cr2, %r12
21615
21616+ pax_enter_kernel_nmi
21617+
21618 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
21619 movq %rsp,%rdi
21620 movq $-1,%rsi
21621@@ -1861,26 +2408,31 @@ end_repeat_nmi:
21622 movq %r12, %cr2
21623 1:
21624
21625- testl %ebx,%ebx /* swapgs needed? */
21626+ testl $1,%ebx /* swapgs needed? */
21627 jnz nmi_restore
21628 nmi_swapgs:
21629 SWAPGS_UNSAFE_STACK
21630 nmi_restore:
21631+ pax_exit_kernel_nmi
21632 /* Pop the extra iret frame at once */
21633 RESTORE_ALL 6*8
21634+ testb $3, 8(%rsp)
21635+ jnz 1f
21636+ pax_force_retaddr_bts
21637+1:
21638
21639 /* Clear the NMI executing stack variable */
21640 movq $0, 5*8(%rsp)
21641 jmp irq_return
21642 CFI_ENDPROC
21643-END(nmi)
21644+ENDPROC(nmi)
21645
21646 ENTRY(ignore_sysret)
21647 CFI_STARTPROC
21648 mov $-ENOSYS,%eax
21649 sysret
21650 CFI_ENDPROC
21651-END(ignore_sysret)
21652+ENDPROC(ignore_sysret)
21653
21654 /*
21655 * End of kprobes section
21656diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
21657index 42a392a..fbbd930 100644
21658--- a/arch/x86/kernel/ftrace.c
21659+++ b/arch/x86/kernel/ftrace.c
21660@@ -105,6 +105,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
21661 {
21662 unsigned char replaced[MCOUNT_INSN_SIZE];
21663
21664+ ip = ktla_ktva(ip);
21665+
21666 /*
21667 * Note: Due to modules and __init, code can
21668 * disappear and change, we need to protect against faulting
21669@@ -227,7 +229,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
21670 unsigned char old[MCOUNT_INSN_SIZE], *new;
21671 int ret;
21672
21673- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
21674+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
21675 new = ftrace_call_replace(ip, (unsigned long)func);
21676
21677 /* See comment above by declaration of modifying_ftrace_code */
21678@@ -238,7 +240,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
21679 /* Also update the regs callback function */
21680 if (!ret) {
21681 ip = (unsigned long)(&ftrace_regs_call);
21682- memcpy(old, &ftrace_regs_call, MCOUNT_INSN_SIZE);
21683+ memcpy(old, ktla_ktva((void *)&ftrace_regs_call), MCOUNT_INSN_SIZE);
21684 new = ftrace_call_replace(ip, (unsigned long)func);
21685 ret = ftrace_modify_code(ip, old, new);
21686 }
21687@@ -279,7 +281,7 @@ static int ftrace_write(unsigned long ip, const char *val, int size)
21688 * kernel identity mapping to modify code.
21689 */
21690 if (within(ip, (unsigned long)_text, (unsigned long)_etext))
21691- ip = (unsigned long)__va(__pa_symbol(ip));
21692+ ip = (unsigned long)__va(__pa_symbol(ktla_ktva(ip)));
21693
21694 return probe_kernel_write((void *)ip, val, size);
21695 }
21696@@ -289,7 +291,7 @@ static int add_break(unsigned long ip, const char *old)
21697 unsigned char replaced[MCOUNT_INSN_SIZE];
21698 unsigned char brk = BREAKPOINT_INSTRUCTION;
21699
21700- if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
21701+ if (probe_kernel_read(replaced, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE))
21702 return -EFAULT;
21703
21704 /* Make sure it is what we expect it to be */
21705@@ -637,7 +639,7 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
21706 return ret;
21707
21708 fail_update:
21709- probe_kernel_write((void *)ip, &old_code[0], 1);
21710+ probe_kernel_write((void *)ktla_ktva(ip), &old_code[0], 1);
21711 goto out;
21712 }
21713
21714@@ -670,6 +672,8 @@ static int ftrace_mod_jmp(unsigned long ip,
21715 {
21716 unsigned char code[MCOUNT_INSN_SIZE];
21717
21718+ ip = ktla_ktva(ip);
21719+
21720 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
21721 return -EFAULT;
21722
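The ftrace changes funnel every read or write of kernel text through ktla_ktva first. Under KERNEXEC on i386 the kernel's code segment is rebased, so the address a symbol evaluates to (a linear address in the shifted code segment) differs from the virtual address where the bytes actually live; ktla_ktva undoes that shift before probe_kernel_read/probe_kernel_write touch the bytes. Its definition lives elsewhere in the patch; the shape is roughly the following, with ktla_ktva_delta a hypothetical stand-in for the actual rebase constant:

    #ifdef CONFIG_PAX_KERNEXEC
    #define ktla_ktva(addr)	((addr) + ktla_ktva_delta)	/* hypothetical */
    #else
    #define ktla_ktva(addr)	(addr)				/* identity */
    #endif
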
21723diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
21724index 55b6761..a6456fc 100644
21725--- a/arch/x86/kernel/head64.c
21726+++ b/arch/x86/kernel/head64.c
21727@@ -67,12 +67,12 @@ again:
21728 pgd = *pgd_p;
21729
21730 /*
21731- * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
21732- * critical -- __PAGE_OFFSET would point us back into the dynamic
21733+ * The use of __early_va rather than __va here is critical:
21734+ * __va would point us back into the dynamic
21735 * range and we might end up looping forever...
21736 */
21737 if (pgd)
21738- pud_p = (pudval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
21739+ pud_p = (pudval_t *)(__early_va(pgd & PTE_PFN_MASK));
21740 else {
21741 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
21742 reset_early_page_tables();
21743@@ -82,13 +82,13 @@ again:
21744 pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
21745 for (i = 0; i < PTRS_PER_PUD; i++)
21746 pud_p[i] = 0;
21747- *pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
21748+ *pgd_p = (pgdval_t)__pa(pud_p) + _KERNPG_TABLE;
21749 }
21750 pud_p += pud_index(address);
21751 pud = *pud_p;
21752
21753 if (pud)
21754- pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
21755+ pmd_p = (pmdval_t *)(__early_va(pud & PTE_PFN_MASK));
21756 else {
21757 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
21758 reset_early_page_tables();
21759@@ -98,7 +98,7 @@ again:
21760 pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
21761 for (i = 0; i < PTRS_PER_PMD; i++)
21762 pmd_p[i] = 0;
21763- *pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
21764+ *pud_p = (pudval_t)__pa(pmd_p) + _KERNPG_TABLE;
21765 }
21766 pmd = (physaddr & PMD_MASK) + early_pmd_flags;
21767 pmd_p[pmd_index(address)] = pmd;
21768@@ -175,7 +175,6 @@ void __init x86_64_start_kernel(char * real_mode_data)
21769 if (console_loglevel == 10)
21770 early_printk("Kernel alive\n");
21771
21772- clear_page(init_level4_pgt);
21773 /* set init_level4_pgt kernel high mapping*/
21774 init_level4_pgt[511] = early_level4_pgt[511];
21775
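The head64.c hunks replace open-coded physical-to-virtual arithmetic with __early_va and the reverse direction with __pa. The removed lines show exactly what the helper must expand to, so a sketch inferred from them (not quoted from the patch) is:

    /* virtual address of early-boot physical memory through the kernel
       high mapping, valid before the final page tables are built */
    #define __early_va(x) \
    	((void *)((unsigned long)(x) + __START_KERNEL_map - phys_base))
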
21776diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
21777index 73afd11..0ef46f2 100644
21778--- a/arch/x86/kernel/head_32.S
21779+++ b/arch/x86/kernel/head_32.S
21780@@ -26,6 +26,12 @@
21781 /* Physical address */
21782 #define pa(X) ((X) - __PAGE_OFFSET)
21783
21784+#ifdef CONFIG_PAX_KERNEXEC
21785+#define ta(X) (X)
21786+#else
21787+#define ta(X) ((X) - __PAGE_OFFSET)
21788+#endif
21789+
21790 /*
21791 * References to members of the new_cpu_data structure.
21792 */
21793@@ -55,11 +61,7 @@
21794 * and small than max_low_pfn, otherwise will waste some page table entries
21795 */
21796
21797-#if PTRS_PER_PMD > 1
21798-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
21799-#else
21800-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
21801-#endif
21802+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
21803
21804 /* Number of possible pages in the lowmem region */
21805 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
21806@@ -78,6 +80,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
21807 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
21808
21809 /*
21810+ * Real beginning of normal "text" segment
21811+ */
21812+ENTRY(stext)
21813+ENTRY(_stext)
21814+
21815+/*
21816 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
21817 * %esi points to the real-mode code as a 32-bit pointer.
21818 * CS and DS must be 4 GB flat segments, but we don't depend on
21819@@ -85,6 +93,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
21820 * can.
21821 */
21822 __HEAD
21823+
21824+#ifdef CONFIG_PAX_KERNEXEC
21825+ jmp startup_32
21826+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
21827+.fill PAGE_SIZE-5,1,0xcc
21828+#endif
21829+
21830 ENTRY(startup_32)
21831 movl pa(stack_start),%ecx
21832
21833@@ -106,6 +121,59 @@ ENTRY(startup_32)
21834 2:
21835 leal -__PAGE_OFFSET(%ecx),%esp
21836
21837+#ifdef CONFIG_SMP
21838+ movl $pa(cpu_gdt_table),%edi
21839+ movl $__per_cpu_load,%eax
21840+ movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi)
21841+ rorl $16,%eax
21842+ movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi)
21843+ movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
21844+ movl $__per_cpu_end - 1,%eax
21845+ subl $__per_cpu_start,%eax
21846+ movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
21847+#endif
21848+
21849+#ifdef CONFIG_PAX_MEMORY_UDEREF
21850+ movl $NR_CPUS,%ecx
21851+ movl $pa(cpu_gdt_table),%edi
21852+1:
21853+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
21854+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
21855+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
21856+ addl $PAGE_SIZE_asm,%edi
21857+ loop 1b
21858+#endif
21859+
21860+#ifdef CONFIG_PAX_KERNEXEC
21861+ movl $pa(boot_gdt),%edi
21862+ movl $__LOAD_PHYSICAL_ADDR,%eax
21863+ movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi)
21864+ rorl $16,%eax
21865+ movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi)
21866+ movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi)
21867+ rorl $16,%eax
21868+
21869+ ljmp $(__BOOT_CS),$1f
21870+1:
21871+
21872+ movl $NR_CPUS,%ecx
21873+ movl $pa(cpu_gdt_table),%edi
21874+ addl $__PAGE_OFFSET,%eax
21875+1:
21876+ movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi)
21877+ movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi)
21878+ movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi)
21879+ movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi)
21880+ rorl $16,%eax
21881+ movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi)
21882+ movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi)
21883+ movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi)
21884+ movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi)
21885+ rorl $16,%eax
21886+ addl $PAGE_SIZE_asm,%edi
21887+ loop 1b
21888+#endif
21889+
21890 /*
21891 * Clear BSS first so that there are no surprises...
21892 */
21893@@ -201,8 +269,11 @@ ENTRY(startup_32)
21894 movl %eax, pa(max_pfn_mapped)
21895
21896 /* Do early initialization of the fixmap area */
21897- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
21898- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
21899+#ifdef CONFIG_COMPAT_VDSO
21900+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
21901+#else
21902+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
21903+#endif
21904 #else /* Not PAE */
21905
21906 page_pde_offset = (__PAGE_OFFSET >> 20);
21907@@ -232,8 +303,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
21908 movl %eax, pa(max_pfn_mapped)
21909
21910 /* Do early initialization of the fixmap area */
21911- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
21912- movl %eax,pa(initial_page_table+0xffc)
21913+#ifdef CONFIG_COMPAT_VDSO
21914+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
21915+#else
21916+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
21917+#endif
21918 #endif
21919
21920 #ifdef CONFIG_PARAVIRT
21921@@ -247,9 +321,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
21922 cmpl $num_subarch_entries, %eax
21923 jae bad_subarch
21924
21925- movl pa(subarch_entries)(,%eax,4), %eax
21926- subl $__PAGE_OFFSET, %eax
21927- jmp *%eax
21928+ jmp *pa(subarch_entries)(,%eax,4)
21929
21930 bad_subarch:
21931 WEAK(lguest_entry)
21932@@ -261,10 +333,10 @@ WEAK(xen_entry)
21933 __INITDATA
21934
21935 subarch_entries:
21936- .long default_entry /* normal x86/PC */
21937- .long lguest_entry /* lguest hypervisor */
21938- .long xen_entry /* Xen hypervisor */
21939- .long default_entry /* Moorestown MID */
21940+ .long ta(default_entry) /* normal x86/PC */
21941+ .long ta(lguest_entry) /* lguest hypervisor */
21942+ .long ta(xen_entry) /* Xen hypervisor */
21943+ .long ta(default_entry) /* Moorestown MID */
21944 num_subarch_entries = (. - subarch_entries) / 4
21945 .previous
21946 #else
21947@@ -355,6 +427,7 @@ default_entry:
21948 movl pa(mmu_cr4_features),%eax
21949 movl %eax,%cr4
21950
21951+#ifdef CONFIG_X86_PAE
21952 testb $X86_CR4_PAE, %al # check if PAE is enabled
21953 jz enable_paging
21954
21955@@ -383,6 +456,9 @@ default_entry:
21956 /* Make changes effective */
21957 wrmsr
21958
21959+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
21960+#endif
21961+
21962 enable_paging:
21963
21964 /*
21965@@ -451,14 +527,20 @@ is486:
21966 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
21967 movl %eax,%ss # after changing gdt.
21968
21969- movl $(__USER_DS),%eax # DS/ES contains default USER segment
21970+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
21971 movl %eax,%ds
21972 movl %eax,%es
21973
21974 movl $(__KERNEL_PERCPU), %eax
21975 movl %eax,%fs # set this cpu's percpu
21976
21977+#ifdef CONFIG_CC_STACKPROTECTOR
21978 movl $(__KERNEL_STACK_CANARY),%eax
21979+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
21980+ movl $(__USER_DS),%eax
21981+#else
21982+ xorl %eax,%eax
21983+#endif
21984 movl %eax,%gs
21985
21986 xorl %eax,%eax # Clear LDT
21987@@ -534,8 +616,11 @@ setup_once:
21988 * relocation. Manually set base address in stack canary
21989 * segment descriptor.
21990 */
21991- movl $gdt_page,%eax
21992+ movl $cpu_gdt_table,%eax
21993 movl $stack_canary,%ecx
21994+#ifdef CONFIG_SMP
21995+ addl $__per_cpu_load,%ecx
21996+#endif
21997 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
21998 shrl $16, %ecx
21999 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
22000@@ -566,7 +651,7 @@ ENDPROC(early_idt_handlers)
22001 /* This is global to keep gas from relaxing the jumps */
22002 ENTRY(early_idt_handler)
22003 cld
22004- cmpl $2,%ss:early_recursion_flag
22005+ cmpl $1,%ss:early_recursion_flag
22006 je hlt_loop
22007 incl %ss:early_recursion_flag
22008
22009@@ -604,8 +689,8 @@ ENTRY(early_idt_handler)
22010 pushl (20+6*4)(%esp) /* trapno */
22011 pushl $fault_msg
22012 call printk
22013-#endif
22014 call dump_stack
22015+#endif
22016 hlt_loop:
22017 hlt
22018 jmp hlt_loop
22019@@ -624,8 +709,11 @@ ENDPROC(early_idt_handler)
22020 /* This is the default interrupt "handler" :-) */
22021 ALIGN
22022 ignore_int:
22023- cld
22024 #ifdef CONFIG_PRINTK
22025+ cmpl $2,%ss:early_recursion_flag
22026+ je hlt_loop
22027+ incl %ss:early_recursion_flag
22028+ cld
22029 pushl %eax
22030 pushl %ecx
22031 pushl %edx
22032@@ -634,9 +722,6 @@ ignore_int:
22033 movl $(__KERNEL_DS),%eax
22034 movl %eax,%ds
22035 movl %eax,%es
22036- cmpl $2,early_recursion_flag
22037- je hlt_loop
22038- incl early_recursion_flag
22039 pushl 16(%esp)
22040 pushl 24(%esp)
22041 pushl 32(%esp)
22042@@ -670,29 +755,43 @@ ENTRY(setup_once_ref)
22043 /*
22044 * BSS section
22045 */
22046-__PAGE_ALIGNED_BSS
22047- .align PAGE_SIZE
22048 #ifdef CONFIG_X86_PAE
22049+.section .initial_pg_pmd,"a",@progbits
22050 initial_pg_pmd:
22051 .fill 1024*KPMDS,4,0
22052 #else
22053+.section .initial_page_table,"a",@progbits
22054 ENTRY(initial_page_table)
22055 .fill 1024,4,0
22056 #endif
22057+.section .initial_pg_fixmap,"a",@progbits
22058 initial_pg_fixmap:
22059 .fill 1024,4,0
22060+.section .empty_zero_page,"a",@progbits
22061 ENTRY(empty_zero_page)
22062 .fill 4096,1,0
22063+.section .swapper_pg_dir,"a",@progbits
22064 ENTRY(swapper_pg_dir)
22065+#ifdef CONFIG_X86_PAE
22066+ .fill 4,8,0
22067+#else
22068 .fill 1024,4,0
22069+#endif
22070+
22071+/*
22072+ * The IDT has to be page-aligned to simplify the Pentium
22073+ * F0 0F bug workaround.. We have a special link segment
22074+ * for this.
22075+ */
22076+.section .idt,"a",@progbits
22077+ENTRY(idt_table)
22078+ .fill 256,8,0
22079
22080 /*
22081 * This starts the data section.
22082 */
22083 #ifdef CONFIG_X86_PAE
22084-__PAGE_ALIGNED_DATA
22085- /* Page-aligned for the benefit of paravirt? */
22086- .align PAGE_SIZE
22087+.section .initial_page_table,"a",@progbits
22088 ENTRY(initial_page_table)
22089 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
22090 # if KPMDS == 3
22091@@ -711,12 +810,20 @@ ENTRY(initial_page_table)
22092 # error "Kernel PMDs should be 1, 2 or 3"
22093 # endif
22094 .align PAGE_SIZE /* needs to be page-sized too */
22095+
22096+#ifdef CONFIG_PAX_PER_CPU_PGD
22097+ENTRY(cpu_pgd)
22098+ .rept 2*NR_CPUS
22099+ .fill 4,8,0
22100+ .endr
22101+#endif
22102+
22103 #endif
22104
22105 .data
22106 .balign 4
22107 ENTRY(stack_start)
22108- .long init_thread_union+THREAD_SIZE
22109+ .long init_thread_union+THREAD_SIZE-8
22110
22111 __INITRODATA
22112 int_msg:
22113@@ -744,7 +851,7 @@ fault_msg:
22114 * segment size, and 32-bit linear address value:
22115 */
22116
22117- .data
22118+.section .rodata,"a",@progbits
22119 .globl boot_gdt_descr
22120 .globl idt_descr
22121
22122@@ -753,7 +860,7 @@ fault_msg:
22123 .word 0 # 32 bit align gdt_desc.address
22124 boot_gdt_descr:
22125 .word __BOOT_DS+7
22126- .long boot_gdt - __PAGE_OFFSET
22127+ .long pa(boot_gdt)
22128
22129 .word 0 # 32-bit align idt_desc.address
22130 idt_descr:
22131@@ -764,7 +871,7 @@ idt_descr:
22132 .word 0 # 32 bit align gdt_desc.address
22133 ENTRY(early_gdt_descr)
22134 .word GDT_ENTRIES*8-1
22135- .long gdt_page /* Overwritten for secondary CPUs */
22136+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
22137
22138 /*
22139 * The boot_gdt must mirror the equivalent in setup.S and is
22140@@ -773,5 +880,65 @@ ENTRY(early_gdt_descr)
22141 .align L1_CACHE_BYTES
22142 ENTRY(boot_gdt)
22143 .fill GDT_ENTRY_BOOT_CS,8,0
22144- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
22145- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
22146+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
22147+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
22148+
22149+ .align PAGE_SIZE_asm
22150+ENTRY(cpu_gdt_table)
22151+ .rept NR_CPUS
22152+ .quad 0x0000000000000000 /* NULL descriptor */
22153+ .quad 0x0000000000000000 /* 0x0b reserved */
22154+ .quad 0x0000000000000000 /* 0x13 reserved */
22155+ .quad 0x0000000000000000 /* 0x1b reserved */
22156+
22157+#ifdef CONFIG_PAX_KERNEXEC
22158+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
22159+#else
22160+ .quad 0x0000000000000000 /* 0x20 unused */
22161+#endif
22162+
22163+ .quad 0x0000000000000000 /* 0x28 unused */
22164+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
22165+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
22166+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
22167+ .quad 0x0000000000000000 /* 0x4b reserved */
22168+ .quad 0x0000000000000000 /* 0x53 reserved */
22169+ .quad 0x0000000000000000 /* 0x5b reserved */
22170+
22171+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
22172+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
22173+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
22174+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
22175+
22176+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
22177+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
22178+
22179+ /*
22180+ * Segments used for calling PnP BIOS have byte granularity.
22181+ * The code segments and data segments have fixed 64k limits,
22182+ * the transfer segment sizes are set at run time.
22183+ */
22184+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
22185+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
22186+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
22187+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
22188+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
22189+
22190+ /*
22191+ * The APM segments have byte granularity and their bases
22192+ * are set at run time. All have 64k limits.
22193+ */
22194+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
22195+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
22196+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
22197+
22198+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
22199+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
22200+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
22201+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
22202+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
22203+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
22204+
22205+ /* Be sure this is zeroed to avoid false validations in Xen */
22206+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
22207+ .endr
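
The descriptor quads in cpu_gdt_table above pack base, limit, access byte, and flags into single 64-bit values. As a reading aid, here is a small standalone C sketch (mine, not part of the patch) that decodes those constants using the standard x86 descriptor layout; it confirms, for example, that 0x00cf9b000000ffff is a present ring-0 code segment spanning the full 4GB, with the accessed bit pre-set (the 9a -> 9b change in boot_gdt):

    /* Decode a 64-bit x86 segment descriptor (illustrative only). */
    #include <stdint.h>
    #include <stdio.h>

    struct seg {
        uint32_t base;
        uint32_t limit;  /* in bytes, after granularity scaling */
        uint8_t  access; /* e.g. 0x9b = present, ring 0, code, accessed */
        uint8_t  dpl;
    };

    static struct seg decode(uint64_t d)
    {
        struct seg s;
        uint32_t lim = (uint32_t)((d & 0xffff) | (((d >> 48) & 0xf) << 16));
        int g4k = (int)((d >> 55) & 1);  /* granularity: limit in 4KiB units */
        s.base   = (uint32_t)(((d >> 16) & 0xffffff) | (((d >> 56) & 0xff) << 24));
        s.limit  = g4k ? (lim << 12) | 0xfff : lim;
        s.access = (uint8_t)((d >> 40) & 0xff);
        s.dpl    = (uint8_t)((d >> 45) & 3);
        return s;
    }

    int main(void)
    {
        struct seg k = decode(0x00cf9b000000ffffULL); /* 0x60 kernel code */
        struct seg u = decode(0x00cffb000000ffffULL); /* 0x73 user code   */
        printf("kernel code: base=%#x limit=%#x dpl=%u\n", k.base, k.limit, (unsigned)k.dpl);
        printf("user   code: base=%#x limit=%#x dpl=%u\n", u.base, u.limit, (unsigned)u.dpl);
        return 0;
    }
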
22208diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
22209index a836860..1b5c665 100644
22210--- a/arch/x86/kernel/head_64.S
22211+++ b/arch/x86/kernel/head_64.S
22212@@ -20,6 +20,8 @@
22213 #include <asm/processor-flags.h>
22214 #include <asm/percpu.h>
22215 #include <asm/nops.h>
22216+#include <asm/cpufeature.h>
22217+#include <asm/alternative-asm.h>
22218
22219 #ifdef CONFIG_PARAVIRT
22220 #include <asm/asm-offsets.h>
22221@@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
22222 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
22223 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
22224 L3_START_KERNEL = pud_index(__START_KERNEL_map)
22225+L4_VMALLOC_START = pgd_index(VMALLOC_START)
22226+L3_VMALLOC_START = pud_index(VMALLOC_START)
22227+L4_VMALLOC_END = pgd_index(VMALLOC_END)
22228+L3_VMALLOC_END = pud_index(VMALLOC_END)
22229+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
22230+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
22231
22232 .text
22233 __HEAD
22234@@ -89,11 +97,23 @@ startup_64:
22235 * Fixup the physical addresses in the page table
22236 */
22237 addq %rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)
22238+ addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
22239+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
22240+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
22241+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
22242+ addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
22243
22244- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
22245- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
22246+ addq %rbp, level3_ident_pgt + (0*8)(%rip)
22247+#ifndef CONFIG_XEN
22248+ addq %rbp, level3_ident_pgt + (1*8)(%rip)
22249+#endif
22250
22251- addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
22252+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
22253+
22254+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
22255+ addq %rbp, level3_kernel_pgt + ((L3_START_KERNEL+1)*8)(%rip)
22256+
22257+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
22258
22259 /*
22260 * Set up the identity mapping for the switchover. These
22261@@ -177,8 +197,8 @@ ENTRY(secondary_startup_64)
22262 movq $(init_level4_pgt - __START_KERNEL_map), %rax
22263 1:
22264
22265- /* Enable PAE mode and PGE */
22266- movl $(X86_CR4_PAE | X86_CR4_PGE), %ecx
22267+ /* Enable PAE mode and PSE/PGE */
22268+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %ecx
22269 movq %rcx, %cr4
22270
22271 /* Setup early boot stage 4 level pagetables. */
22272@@ -199,10 +219,18 @@ ENTRY(secondary_startup_64)
22273 movl $MSR_EFER, %ecx
22274 rdmsr
22275 btsl $_EFER_SCE, %eax /* Enable System Call */
22276- btl $20,%edi /* No Execute supported? */
22277+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
22278 jnc 1f
22279 btsl $_EFER_NX, %eax
22280 btsq $_PAGE_BIT_NX,early_pmd_flags(%rip)
22281+ leaq init_level4_pgt(%rip), %rdi
22282+#ifndef CONFIG_EFI
22283+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
22284+#endif
22285+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
22286+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
22287+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
22288+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
22289 1: wrmsr /* Make changes effective */
22290
22291 /* Setup cr0 */
22292@@ -282,6 +310,7 @@ ENTRY(secondary_startup_64)
22293 * REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
22294 * address given in m16:64.
22295 */
22296+ pax_set_fptr_mask
22297 movq initial_code(%rip),%rax
22298 pushq $0 # fake return address to stop unwinder
22299 pushq $__KERNEL_CS # set correct cs
22300@@ -388,7 +417,7 @@ ENTRY(early_idt_handler)
22301 call dump_stack
22302 #ifdef CONFIG_KALLSYMS
22303 leaq early_idt_ripmsg(%rip),%rdi
22304- movq 40(%rsp),%rsi # %rip again
22305+ movq 88(%rsp),%rsi # %rip again
22306 call __print_symbol
22307 #endif
22308 #endif /* EARLY_PRINTK */
22309@@ -416,6 +445,7 @@ ENDPROC(early_idt_handler)
22310 early_recursion_flag:
22311 .long 0
22312
22313+ .section .rodata,"a",@progbits
22314 #ifdef CONFIG_EARLY_PRINTK
22315 early_idt_msg:
22316 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
22317@@ -443,29 +473,52 @@ NEXT_PAGE(early_level4_pgt)
22318 NEXT_PAGE(early_dynamic_pgts)
22319 .fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0
22320
22321- .data
22322+ .section .rodata,"a",@progbits
22323
22324-#ifndef CONFIG_XEN
22325 NEXT_PAGE(init_level4_pgt)
22326- .fill 512,8,0
22327-#else
22328-NEXT_PAGE(init_level4_pgt)
22329- .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
22330 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
22331 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
22332+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
22333+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
22334+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
22335+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
22336+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
22337+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
22338 .org init_level4_pgt + L4_START_KERNEL*8, 0
22339 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
22340 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
22341
22342+#ifdef CONFIG_PAX_PER_CPU_PGD
22343+NEXT_PAGE(cpu_pgd)
22344+ .rept 2*NR_CPUS
22345+ .fill 512,8,0
22346+ .endr
22347+#endif
22348+
22349 NEXT_PAGE(level3_ident_pgt)
22350 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
22351+#ifdef CONFIG_XEN
22352 .fill 511, 8, 0
22353+#else
22354+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
22355+ .fill 510,8,0
22356+#endif
22357+
22358+NEXT_PAGE(level3_vmalloc_start_pgt)
22359+ .fill 512,8,0
22360+
22361+NEXT_PAGE(level3_vmalloc_end_pgt)
22362+ .fill 512,8,0
22363+
22364+NEXT_PAGE(level3_vmemmap_pgt)
22365+ .fill L3_VMEMMAP_START,8,0
22366+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
22367+
22368 NEXT_PAGE(level2_ident_pgt)
22369- /* Since I easily can, map the first 1G.
22370+ /* Since I easily can, map the first 2G.
22371 * Don't set NX because code runs from these pages.
22372 */
22373- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
22374-#endif
22375+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
22376
22377 NEXT_PAGE(level3_kernel_pgt)
22378 .fill L3_START_KERNEL,8,0
22379@@ -473,6 +526,9 @@ NEXT_PAGE(level3_kernel_pgt)
22380 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
22381 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
22382
22383+NEXT_PAGE(level2_vmemmap_pgt)
22384+ .fill 512,8,0
22385+
22386 NEXT_PAGE(level2_kernel_pgt)
22387 /*
22388 * 512 MB kernel mapping. We spend a full page on this pagetable
22389@@ -488,39 +544,70 @@ NEXT_PAGE(level2_kernel_pgt)
22390 KERNEL_IMAGE_SIZE/PMD_SIZE)
22391
22392 NEXT_PAGE(level2_fixmap_pgt)
22393- .fill 506,8,0
22394- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
22395- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
22396- .fill 5,8,0
22397+ .fill 507,8,0
22398+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
22399+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
22400+ .fill 4,8,0
22401
22402-NEXT_PAGE(level1_fixmap_pgt)
22403+NEXT_PAGE(level1_vsyscall_pgt)
22404 .fill 512,8,0
22405
22406 #undef PMDS
22407
22408- .data
22409+ .align PAGE_SIZE
22410+ENTRY(cpu_gdt_table)
22411+ .rept NR_CPUS
22412+ .quad 0x0000000000000000 /* NULL descriptor */
22413+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
22414+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
22415+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
22416+ .quad 0x00cffb000000ffff /* __USER32_CS */
22417+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
22418+ .quad 0x00affb000000ffff /* __USER_CS */
22419+
22420+#ifdef CONFIG_PAX_KERNEXEC
22421+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
22422+#else
22423+ .quad 0x0 /* unused */
22424+#endif
22425+
22426+ .quad 0,0 /* TSS */
22427+ .quad 0,0 /* LDT */
22428+ .quad 0,0,0 /* three TLS descriptors */
22429+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
22430+ /* asm/segment.h:GDT_ENTRIES must match this */
22431+
22432+#ifdef CONFIG_PAX_MEMORY_UDEREF
22433+ .quad 0x00cf93000000ffff /* __UDEREF_KERNEL_DS */
22434+#else
22435+ .quad 0x0 /* unused */
22436+#endif
22437+
22438+ /* zero the remaining page */
22439+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
22440+ .endr
22441+
22442 .align 16
22443 .globl early_gdt_descr
22444 early_gdt_descr:
22445 .word GDT_ENTRIES*8-1
22446 early_gdt_descr_base:
22447- .quad INIT_PER_CPU_VAR(gdt_page)
22448+ .quad cpu_gdt_table
22449
22450 ENTRY(phys_base)
22451 /* This must match the first entry in level2_kernel_pgt */
22452 .quad 0x0000000000000000
22453
22454 #include "../../x86/xen/xen-head.S"
22455-
22456- .section .bss, "aw", @nobits
22457+
22458+ .section .rodata,"a",@progbits
22459+NEXT_PAGE(empty_zero_page)
22460+ .skip PAGE_SIZE
22461+
22462 .align PAGE_SIZE
22463 ENTRY(idt_table)
22464- .skip IDT_ENTRIES * 16
22465+ .fill 512,8,0
22466
22467 .align L1_CACHE_BYTES
22468 ENTRY(nmi_idt_table)
22469- .skip IDT_ENTRIES * 16
22470-
22471- __PAGE_ALIGNED_BSS
22472-NEXT_PAGE(empty_zero_page)
22473- .skip PAGE_SIZE
22474+ .fill 512,8,0
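
The L4_*/L3_* symbols introduced at the top of head_64.S are ordinary pgd/pud slot indices for well-known regions of the virtual address space. A minimal sketch of the arithmetic, assuming the classic x86_64 layout of this kernel era (the address constants below are my assumption, not taken from the patch; the 511 result matches the patch's own "(2^48-(2*1024*1024*1024))/(2^39) = 511" comment):

    #include <stdint.h>
    #include <stdio.h>

    #define PGDIR_SHIFT 39               /* each pgd entry covers 512GB */
    #define PUD_SHIFT   30               /* each pud entry covers 1GB   */

    static unsigned pgd_index(uint64_t va) { return (va >> PGDIR_SHIFT) & 511; }
    static unsigned pud_index(uint64_t va) { return (va >> PUD_SHIFT)   & 511; }

    int main(void)
    {
        uint64_t page_offset   = 0xffff880000000000ULL; /* __PAGE_OFFSET      */
        uint64_t vmalloc_start = 0xffffc90000000000ULL; /* VMALLOC_START      */
        uint64_t start_kernel  = 0xffffffff80000000ULL; /* __START_KERNEL_map */

        printf("L4_PAGE_OFFSET   = %u\n", pgd_index(page_offset));   /* 272 */
        printf("L4_VMALLOC_START = %u\n", pgd_index(vmalloc_start));
        printf("L4_START_KERNEL  = %u\n", pgd_index(start_kernel));  /* 511 */
        printf("L3_START_KERNEL  = %u\n", pud_index(start_kernel));  /* 510 */
        return 0;
    }
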
22475diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
22476index 0fa6912..37fce70 100644
22477--- a/arch/x86/kernel/i386_ksyms_32.c
22478+++ b/arch/x86/kernel/i386_ksyms_32.c
22479@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
22480 EXPORT_SYMBOL(cmpxchg8b_emu);
22481 #endif
22482
22483+EXPORT_SYMBOL_GPL(cpu_gdt_table);
22484+
22485 /* Networking helper routines. */
22486 EXPORT_SYMBOL(csum_partial_copy_generic);
22487+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
22488+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
22489
22490 EXPORT_SYMBOL(__get_user_1);
22491 EXPORT_SYMBOL(__get_user_2);
22492@@ -37,3 +41,7 @@ EXPORT_SYMBOL(strstr);
22493
22494 EXPORT_SYMBOL(csum_partial);
22495 EXPORT_SYMBOL(empty_zero_page);
22496+
22497+#ifdef CONFIG_PAX_KERNEXEC
22498+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
22499+#endif
22500diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
22501index f7ea30d..6318acc 100644
22502--- a/arch/x86/kernel/i387.c
22503+++ b/arch/x86/kernel/i387.c
22504@@ -51,7 +51,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
22505 static inline bool interrupted_user_mode(void)
22506 {
22507 struct pt_regs *regs = get_irq_regs();
22508- return regs && user_mode_vm(regs);
22509+ return regs && user_mode(regs);
22510 }
22511
22512 /*
22513diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
22514index 9a5c460..84868423 100644
22515--- a/arch/x86/kernel/i8259.c
22516+++ b/arch/x86/kernel/i8259.c
22517@@ -110,7 +110,7 @@ static int i8259A_irq_pending(unsigned int irq)
22518 static void make_8259A_irq(unsigned int irq)
22519 {
22520 disable_irq_nosync(irq);
22521- io_apic_irqs &= ~(1<<irq);
22522+ io_apic_irqs &= ~(1UL<<irq);
22523 irq_set_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq,
22524 i8259A_chip.name);
22525 enable_irq(irq);
22526@@ -209,7 +209,7 @@ spurious_8259A_irq:
22527 "spurious 8259A interrupt: IRQ%d.\n", irq);
22528 spurious_irq_mask |= irqmask;
22529 }
22530- atomic_inc(&irq_err_count);
22531+ atomic_inc_unchecked(&irq_err_count);
22532 /*
22533 * Theoretically we do not have to handle this IRQ,
22534 * but in Linux this does not cause problems and is
22535@@ -333,14 +333,16 @@ static void init_8259A(int auto_eoi)
22536 /* (slave's support for AEOI in flat mode is to be investigated) */
22537 outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
22538
22539+ pax_open_kernel();
22540 if (auto_eoi)
22541 /*
22542 * In AEOI mode we just have to mask the interrupt
22543 * when acking.
22544 */
22545- i8259A_chip.irq_mask_ack = disable_8259A_irq;
22546+ *(void **)&i8259A_chip.irq_mask_ack = disable_8259A_irq;
22547 else
22548- i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
22549+ *(void **)&i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
22550+ pax_close_kernel();
22551
22552 udelay(100); /* wait for 8259A to initialize */
22553
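
The init_8259A() change above is the canonical pax_open_kernel()/pax_close_kernel() pattern: under KERNEXEC the irq_chip structure sits in read-only memory, so the one legitimate writer briefly lifts write protection around the assignment. A userspace model of the idea, with mprotect standing in for the real mechanism (the kernel toggles CR0.WP or a writable alias, not mprotect; all names here besides the libc calls are mine):

    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    static void mask_and_ack(void) { puts("mask_and_ack"); }

    int main(void)
    {
        long ps = sysconf(_SC_PAGESIZE);
        /* model: an ops table whose page is normally read-only */
        void *page = mmap(NULL, ps, PROT_READ,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (page == MAP_FAILED)
            return 1;
        void (**irq_mask_ack)(void) = page;

        mprotect(page, ps, PROT_READ | PROT_WRITE); /* ~pax_open_kernel()  */
        *irq_mask_ack = mask_and_ack;
        mprotect(page, ps, PROT_READ);              /* ~pax_close_kernel() */

        (*irq_mask_ack)();  /* still callable, no longer silently writable */
        return 0;
    }
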
22554diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
22555index a979b5b..1d6db75 100644
22556--- a/arch/x86/kernel/io_delay.c
22557+++ b/arch/x86/kernel/io_delay.c
22558@@ -58,7 +58,7 @@ static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id)
22559 * Quirk table for systems that misbehave (lock up, etc.) if port
22560 * 0x80 is used:
22561 */
22562-static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
22563+static const struct dmi_system_id __initconst io_delay_0xed_port_dmi_table[] = {
22564 {
22565 .callback = dmi_io_delay_0xed_port,
22566 .ident = "Compaq Presario V6000",
22567diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
22568index 4ddaf66..6292f4e 100644
22569--- a/arch/x86/kernel/ioport.c
22570+++ b/arch/x86/kernel/ioport.c
22571@@ -6,6 +6,7 @@
22572 #include <linux/sched.h>
22573 #include <linux/kernel.h>
22574 #include <linux/capability.h>
22575+#include <linux/security.h>
22576 #include <linux/errno.h>
22577 #include <linux/types.h>
22578 #include <linux/ioport.h>
22579@@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
22580
22581 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
22582 return -EINVAL;
22583+#ifdef CONFIG_GRKERNSEC_IO
22584+ if (turn_on && grsec_disable_privio) {
22585+ gr_handle_ioperm();
22586+ return -EPERM;
22587+ }
22588+#endif
22589 if (turn_on && !capable(CAP_SYS_RAWIO))
22590 return -EPERM;
22591
22592@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
22593 * because the ->io_bitmap_max value must match the bitmap
22594 * contents:
22595 */
22596- tss = &per_cpu(init_tss, get_cpu());
22597+ tss = init_tss + get_cpu();
22598
22599 if (turn_on)
22600 bitmap_clear(t->io_bitmap_ptr, from, num);
22601@@ -103,6 +110,12 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
22602 return -EINVAL;
22603 /* Trying to gain more privileges? */
22604 if (level > old) {
22605+#ifdef CONFIG_GRKERNSEC_IO
22606+ if (grsec_disable_privio) {
22607+ gr_handle_iopl();
22608+ return -EPERM;
22609+ }
22610+#endif
22611 if (!capable(CAP_SYS_RAWIO))
22612 return -EPERM;
22613 }
22614diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
22615index ac0631d..ff7cb62 100644
22616--- a/arch/x86/kernel/irq.c
22617+++ b/arch/x86/kernel/irq.c
22618@@ -18,7 +18,7 @@
22619 #include <asm/mce.h>
22620 #include <asm/hw_irq.h>
22621
22622-atomic_t irq_err_count;
22623+atomic_unchecked_t irq_err_count;
22624
22625 /* Function pointer for generic interrupt vector handling */
22626 void (*x86_platform_ipi_callback)(void) = NULL;
22627@@ -122,9 +122,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
22628 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
22629 seq_printf(p, " Machine check polls\n");
22630 #endif
22631- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
22632+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
22633 #if defined(CONFIG_X86_IO_APIC)
22634- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
22635+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
22636 #endif
22637 return 0;
22638 }
22639@@ -164,7 +164,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
22640
22641 u64 arch_irq_stat(void)
22642 {
22643- u64 sum = atomic_read(&irq_err_count);
22644+ u64 sum = atomic_read_unchecked(&irq_err_count);
22645 return sum;
22646 }
22647
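
The switch to atomic_unchecked_t above reflects PaX's REFCOUNT hardening: ordinary atomic_t arithmetic is instrumented to trap on overflow, so counters that are allowed to wrap (pure statistics like irq_err_count) must opt out explicitly. A compact userspace illustration of the distinction; these helper names are hypothetical, not the kernel API:

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    static int checked_inc(int v)             /* models instrumented atomic_inc() */
    {
        int r;
        if (__builtin_add_overflow(v, 1, &r)) {
            fprintf(stderr, "refcount overflow caught\n");
            abort();                          /* REFCOUNT would kill the task */
        }
        return r;
    }

    static unsigned unchecked_inc(unsigned v) /* models atomic_inc_unchecked() */
    {
        return v + 1;                         /* wrapping is harmless for a stat */
    }

    int main(void)
    {
        printf("%u\n", unchecked_inc(UINT_MAX)); /* wraps to 0, by design */
        checked_inc(INT_MAX);                    /* aborts: a real refcount bug */
        return 0;
    }
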
22648diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
22649index 344faf8..355f60d 100644
22650--- a/arch/x86/kernel/irq_32.c
22651+++ b/arch/x86/kernel/irq_32.c
22652@@ -39,7 +39,7 @@ static int check_stack_overflow(void)
22653 __asm__ __volatile__("andl %%esp,%0" :
22654 "=r" (sp) : "0" (THREAD_SIZE - 1));
22655
22656- return sp < (sizeof(struct thread_info) + STACK_WARN);
22657+ return sp < STACK_WARN;
22658 }
22659
22660 static void print_stack_overflow(void)
22661@@ -59,8 +59,8 @@ static inline void print_stack_overflow(void) { }
22662 * per-CPU IRQ handling contexts (thread information and stack)
22663 */
22664 union irq_ctx {
22665- struct thread_info tinfo;
22666- u32 stack[THREAD_SIZE/sizeof(u32)];
22667+ unsigned long previous_esp;
22668+ u32 stack[THREAD_SIZE/sizeof(u32)];
22669 } __attribute__((aligned(THREAD_SIZE)));
22670
22671 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
22672@@ -80,10 +80,9 @@ static void call_on_stack(void *func, void *stack)
22673 static inline int
22674 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
22675 {
22676- union irq_ctx *curctx, *irqctx;
22677+ union irq_ctx *irqctx;
22678 u32 *isp, arg1, arg2;
22679
22680- curctx = (union irq_ctx *) current_thread_info();
22681 irqctx = __this_cpu_read(hardirq_ctx);
22682
22683 /*
22684@@ -92,16 +91,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
22685 * handler) we can't do that and just have to keep using the
22686 * current stack (which is the irq stack already after all)
22687 */
22688- if (unlikely(curctx == irqctx))
22689+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
22690 return 0;
22691
22692 /* build the stack frame on the IRQ stack */
22693- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
22694- irqctx->tinfo.task = curctx->tinfo.task;
22695- irqctx->tinfo.previous_esp = current_stack_pointer;
22696+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
22697+ irqctx->previous_esp = current_stack_pointer;
22698
22699- /* Copy the preempt_count so that the [soft]irq checks work. */
22700- irqctx->tinfo.preempt_count = curctx->tinfo.preempt_count;
22701+#ifdef CONFIG_PAX_MEMORY_UDEREF
22702+ __set_fs(MAKE_MM_SEG(0));
22703+#endif
22704
22705 if (unlikely(overflow))
22706 call_on_stack(print_stack_overflow, isp);
22707@@ -113,6 +112,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
22708 : "0" (irq), "1" (desc), "2" (isp),
22709 "D" (desc->handle_irq)
22710 : "memory", "cc", "ecx");
22711+
22712+#ifdef CONFIG_PAX_MEMORY_UDEREF
22713+ __set_fs(current_thread_info()->addr_limit);
22714+#endif
22715+
22716 return 1;
22717 }
22718
22719@@ -121,29 +125,14 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
22720 */
22721 void __cpuinit irq_ctx_init(int cpu)
22722 {
22723- union irq_ctx *irqctx;
22724-
22725 if (per_cpu(hardirq_ctx, cpu))
22726 return;
22727
22728- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
22729- THREADINFO_GFP,
22730- THREAD_SIZE_ORDER));
22731- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
22732- irqctx->tinfo.cpu = cpu;
22733- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
22734- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
22735-
22736- per_cpu(hardirq_ctx, cpu) = irqctx;
22737-
22738- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
22739- THREADINFO_GFP,
22740- THREAD_SIZE_ORDER));
22741- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
22742- irqctx->tinfo.cpu = cpu;
22743- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
22744-
22745- per_cpu(softirq_ctx, cpu) = irqctx;
22746+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
22747+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
22748+
22749+ printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
22750+ cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
22751
22752 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
22753 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
22754@@ -152,7 +141,6 @@ void __cpuinit irq_ctx_init(int cpu)
22755 asmlinkage void do_softirq(void)
22756 {
22757 unsigned long flags;
22758- struct thread_info *curctx;
22759 union irq_ctx *irqctx;
22760 u32 *isp;
22761
22762@@ -162,15 +150,22 @@ asmlinkage void do_softirq(void)
22763 local_irq_save(flags);
22764
22765 if (local_softirq_pending()) {
22766- curctx = current_thread_info();
22767 irqctx = __this_cpu_read(softirq_ctx);
22768- irqctx->tinfo.task = curctx->task;
22769- irqctx->tinfo.previous_esp = current_stack_pointer;
22770+ irqctx->previous_esp = current_stack_pointer;
22771
22772 /* build the stack frame on the softirq stack */
22773- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
22774+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
22775+
22776+#ifdef CONFIG_PAX_MEMORY_UDEREF
22777+ __set_fs(MAKE_MM_SEG(0));
22778+#endif
22779
22780 call_on_stack(__do_softirq, isp);
22781+
22782+#ifdef CONFIG_PAX_MEMORY_UDEREF
22783+ __set_fs(current_thread_info()->addr_limit);
22784+#endif
22785+
22786 /*
22787 * Shouldn't happen, we returned above if in_interrupt():
22788 */
22789@@ -191,7 +186,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
22790 if (unlikely(!desc))
22791 return false;
22792
22793- if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
22794+ if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
22795 if (unlikely(overflow))
22796 print_stack_overflow();
22797 desc->handle_irq(irq, desc);
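
With thread_info no longer duplicated onto the IRQ stack, the reworked union irq_ctx above only needs to remember the interrupted stack pointer, and the frame is built 8 bytes below the stack top (matching the "- 8" in both execute_on_irq_stack() and do_softirq()). A small sketch of the resulting layout, assuming an 8KB THREAD_SIZE:

    #include <stdint.h>
    #include <stdio.h>

    #define THREAD_SIZE 8192

    union irq_ctx {
        unsigned long previous_esp;               /* saved at the stack base */
        uint32_t stack[THREAD_SIZE / sizeof(uint32_t)];
    } __attribute__((aligned(THREAD_SIZE)));

    int main(void)
    {
        static union irq_ctx ctx;
        /* frame pointer handed to call_on_stack(): top of stack minus 8 */
        uint32_t *isp = (uint32_t *)((char *)&ctx + sizeof(ctx) - 8);
        ctx.previous_esp = 0xc1234567UL;          /* interrupted ESP (made up) */
        printf("stack %p..%p, first frame at %p\n",
               (void *)&ctx, (void *)((char *)&ctx + sizeof(ctx)), (void *)isp);
        return 0;
    }
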
22798diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
22799index d04d3ec..ea4b374 100644
22800--- a/arch/x86/kernel/irq_64.c
22801+++ b/arch/x86/kernel/irq_64.c
22802@@ -44,7 +44,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
22803 u64 estack_top, estack_bottom;
22804 u64 curbase = (u64)task_stack_page(current);
22805
22806- if (user_mode_vm(regs))
22807+ if (user_mode(regs))
22808 return;
22809
22810 if (regs->sp >= curbase + sizeof(struct thread_info) +
22811diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
22812index dc1404b..bbc43e7 100644
22813--- a/arch/x86/kernel/kdebugfs.c
22814+++ b/arch/x86/kernel/kdebugfs.c
22815@@ -27,7 +27,7 @@ struct setup_data_node {
22816 u32 len;
22817 };
22818
22819-static ssize_t setup_data_read(struct file *file, char __user *user_buf,
22820+static ssize_t __size_overflow(3) setup_data_read(struct file *file, char __user *user_buf,
22821 size_t count, loff_t *ppos)
22822 {
22823 struct setup_data_node *node = file->private_data;
22824diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
22825index 836f832..a8bda67 100644
22826--- a/arch/x86/kernel/kgdb.c
22827+++ b/arch/x86/kernel/kgdb.c
22828@@ -127,11 +127,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
22829 #ifdef CONFIG_X86_32
22830 switch (regno) {
22831 case GDB_SS:
22832- if (!user_mode_vm(regs))
22833+ if (!user_mode(regs))
22834 *(unsigned long *)mem = __KERNEL_DS;
22835 break;
22836 case GDB_SP:
22837- if (!user_mode_vm(regs))
22838+ if (!user_mode(regs))
22839 *(unsigned long *)mem = kernel_stack_pointer(regs);
22840 break;
22841 case GDB_GS:
22842@@ -229,7 +229,10 @@ static void kgdb_correct_hw_break(void)
22843 bp->attr.bp_addr = breakinfo[breakno].addr;
22844 bp->attr.bp_len = breakinfo[breakno].len;
22845 bp->attr.bp_type = breakinfo[breakno].type;
22846- info->address = breakinfo[breakno].addr;
22847+ if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE)
22848+ info->address = ktla_ktva(breakinfo[breakno].addr);
22849+ else
22850+ info->address = breakinfo[breakno].addr;
22851 info->len = breakinfo[breakno].len;
22852 info->type = breakinfo[breakno].type;
22853 val = arch_install_hw_breakpoint(bp);
22854@@ -476,12 +479,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
22855 case 'k':
22856 /* clear the trace bit */
22857 linux_regs->flags &= ~X86_EFLAGS_TF;
22858- atomic_set(&kgdb_cpu_doing_single_step, -1);
22859+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
22860
22861 /* set the trace bit if we're stepping */
22862 if (remcomInBuffer[0] == 's') {
22863 linux_regs->flags |= X86_EFLAGS_TF;
22864- atomic_set(&kgdb_cpu_doing_single_step,
22865+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
22866 raw_smp_processor_id());
22867 }
22868
22869@@ -546,7 +549,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
22870
22871 switch (cmd) {
22872 case DIE_DEBUG:
22873- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
22874+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
22875 if (user_mode(regs))
22876 return single_step_cont(regs, args);
22877 break;
22878@@ -751,11 +754,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
22879 #endif /* CONFIG_DEBUG_RODATA */
22880
22881 bpt->type = BP_BREAKPOINT;
22882- err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
22883+ err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr),
22884 BREAK_INSTR_SIZE);
22885 if (err)
22886 return err;
22887- err = probe_kernel_write((char *)bpt->bpt_addr,
22888+ err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
22889 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
22890 #ifdef CONFIG_DEBUG_RODATA
22891 if (!err)
22892@@ -768,7 +771,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
22893 return -EBUSY;
22894 text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
22895 BREAK_INSTR_SIZE);
22896- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
22897+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
22898 if (err)
22899 return err;
22900 if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
22901@@ -793,13 +796,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
22902 if (mutex_is_locked(&text_mutex))
22903 goto knl_write;
22904 text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
22905- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
22906+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
22907 if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
22908 goto knl_write;
22909 return err;
22910 knl_write:
22911 #endif /* CONFIG_DEBUG_RODATA */
22912- return probe_kernel_write((char *)bpt->bpt_addr,
22913+ return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
22914 (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
22915 }
22916
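
The ktla_ktva()/ktva_ktla() calls sprinkled through kgdb above (and the kprobes code below) convert between the two addresses at which 32-bit KERNEXEC maps the kernel text: one alias is the one executed, the other is the one software reads, patches, and reports. Since the two differ by a constant, the helpers reduce to an add/subtract; a toy model, where the delta value and the direction chosen are illustrative assumptions only:

    #include <stdint.h>
    #include <stdio.h>

    #define KERNEXEC_DELTA 0x00400000UL   /* hypothetical text shift */

    static uintptr_t ktla_ktva(uintptr_t la) { return la + KERNEXEC_DELTA; }
    static uintptr_t ktva_ktla(uintptr_t va) { return va - KERNEXEC_DELTA; }

    int main(void)
    {
        uintptr_t bpt = 0xc1000000UL;         /* linear text address (made up) */
        uintptr_t alias = ktla_ktva(bpt);     /* the alias to read/patch       */
        printf("patch via %#lx, round-trips to %#lx\n",
               (unsigned long)alias, (unsigned long)ktva_ktla(alias));
        return 0;
    }
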
22917diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
22918index 211bce4..6e2580a 100644
22919--- a/arch/x86/kernel/kprobes/core.c
22920+++ b/arch/x86/kernel/kprobes/core.c
22921@@ -119,9 +119,12 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
22922 s32 raddr;
22923 } __packed *insn;
22924
22925- insn = (struct __arch_relative_insn *)from;
22926+ insn = (struct __arch_relative_insn *)ktla_ktva(from);
22927+
22928+ pax_open_kernel();
22929 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
22930 insn->op = op;
22931+ pax_close_kernel();
22932 }
22933
22934 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
22935@@ -164,7 +167,7 @@ int __kprobes can_boost(kprobe_opcode_t *opcodes)
22936 kprobe_opcode_t opcode;
22937 kprobe_opcode_t *orig_opcodes = opcodes;
22938
22939- if (search_exception_tables((unsigned long)opcodes))
22940+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
22941 return 0; /* Page fault may occur on this address. */
22942
22943 retry:
22944@@ -238,9 +241,9 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
22945 * for the first byte, we can recover the original instruction
22946 * from it and kp->opcode.
22947 */
22948- memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
22949+ memcpy(buf, ktla_ktva(kp->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
22950 buf[0] = kp->opcode;
22951- return (unsigned long)buf;
22952+ return ktva_ktla((unsigned long)buf);
22953 }
22954
22955 /*
22956@@ -332,7 +335,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
22957 /* Another subsystem puts a breakpoint, failed to recover */
22958 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
22959 return 0;
22960+ pax_open_kernel();
22961 memcpy(dest, insn.kaddr, insn.length);
22962+ pax_close_kernel();
22963
22964 #ifdef CONFIG_X86_64
22965 if (insn_rip_relative(&insn)) {
22966@@ -359,7 +364,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
22967 return 0;
22968 }
22969 disp = (u8 *) dest + insn_offset_displacement(&insn);
22970+ pax_open_kernel();
22971 *(s32 *) disp = (s32) newdisp;
22972+ pax_close_kernel();
22973 }
22974 #endif
22975 return insn.length;
22976@@ -498,7 +505,7 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
22977 * nor set current_kprobe, because it doesn't use single
22978 * stepping.
22979 */
22980- regs->ip = (unsigned long)p->ainsn.insn;
22981+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
22982 preempt_enable_no_resched();
22983 return;
22984 }
22985@@ -515,9 +522,9 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
22986 regs->flags &= ~X86_EFLAGS_IF;
22987 /* single step inline if the instruction is an int3 */
22988 if (p->opcode == BREAKPOINT_INSTRUCTION)
22989- regs->ip = (unsigned long)p->addr;
22990+ regs->ip = ktla_ktva((unsigned long)p->addr);
22991 else
22992- regs->ip = (unsigned long)p->ainsn.insn;
22993+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
22994 }
22995
22996 /*
22997@@ -596,7 +603,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
22998 setup_singlestep(p, regs, kcb, 0);
22999 return 1;
23000 }
23001- } else if (*addr != BREAKPOINT_INSTRUCTION) {
23002+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
23003 /*
23004 * The breakpoint instruction was removed right
23005 * after we hit it. Another cpu has removed
23006@@ -642,6 +649,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
23007 " movq %rax, 152(%rsp)\n"
23008 RESTORE_REGS_STRING
23009 " popfq\n"
23010+#ifdef KERNEXEC_PLUGIN
23011+ " btsq $63,(%rsp)\n"
23012+#endif
23013 #else
23014 " pushf\n"
23015 SAVE_REGS_STRING
23016@@ -779,7 +789,7 @@ static void __kprobes
23017 resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
23018 {
23019 unsigned long *tos = stack_addr(regs);
23020- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
23021+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
23022 unsigned long orig_ip = (unsigned long)p->addr;
23023 kprobe_opcode_t *insn = p->ainsn.insn;
23024
23025@@ -961,7 +971,7 @@ kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *d
23026 struct die_args *args = data;
23027 int ret = NOTIFY_DONE;
23028
23029- if (args->regs && user_mode_vm(args->regs))
23030+ if (args->regs && user_mode(args->regs))
23031 return ret;
23032
23033 switch (val) {
23034diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
23035index 76dc6f0..66bdfc3 100644
23036--- a/arch/x86/kernel/kprobes/opt.c
23037+++ b/arch/x86/kernel/kprobes/opt.c
23038@@ -79,6 +79,7 @@ found:
23039 /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
23040 static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
23041 {
23042+ pax_open_kernel();
23043 #ifdef CONFIG_X86_64
23044 *addr++ = 0x48;
23045 *addr++ = 0xbf;
23046@@ -86,6 +87,7 @@ static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long v
23047 *addr++ = 0xb8;
23048 #endif
23049 *(unsigned long *)addr = val;
23050+ pax_close_kernel();
23051 }
23052
23053 static void __used __kprobes kprobes_optinsn_template_holder(void)
23054@@ -338,7 +340,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
23055 * Verify if the address gap is in 2GB range, because this uses
23056 * a relative jump.
23057 */
23058- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
23059+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
23060 if (abs(rel) > 0x7fffffff)
23061 return -ERANGE;
23062
23063@@ -353,16 +355,18 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
23064 op->optinsn.size = ret;
23065
23066 /* Copy arch-dep-instance from template */
23067- memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
23068+ pax_open_kernel();
23069+ memcpy(buf, ktla_ktva(&optprobe_template_entry), TMPL_END_IDX);
23070+ pax_close_kernel();
23071
23072 /* Set probe information */
23073 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
23074
23075 /* Set probe function call */
23076- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
23077+ synthesize_relcall(ktva_ktla(buf) + TMPL_CALL_IDX, optimized_callback);
23078
23079 /* Set returning jmp instruction at the tail of out-of-line buffer */
23080- synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
23081+ synthesize_reljump(ktva_ktla(buf) + TMPL_END_IDX + op->optinsn.size,
23082 (u8 *)op->kp.addr + op->optinsn.size);
23083
23084 flush_icache_range((unsigned long) buf,
23085@@ -385,7 +389,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
23086 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
23087
23088 /* Backup instructions which will be replaced by jump address */
23089- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
23090+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
23091 RELATIVE_ADDR_SIZE);
23092
23093 insn_buf[0] = RELATIVEJUMP_OPCODE;
23094@@ -483,7 +487,7 @@ setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
23095 /* This kprobe is really able to run optimized path. */
23096 op = container_of(p, struct optimized_kprobe, kp);
23097 /* Detour through copied instructions */
23098- regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
23099+ regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX;
23100 if (!reenter)
23101 reset_current_kprobe();
23102 preempt_enable_no_resched();
23103diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
23104index cd6d9a5..16245a4 100644
23105--- a/arch/x86/kernel/kvm.c
23106+++ b/arch/x86/kernel/kvm.c
23107@@ -455,7 +455,7 @@ static int __cpuinit kvm_cpu_notify(struct notifier_block *self,
23108 return NOTIFY_OK;
23109 }
23110
23111-static struct notifier_block __cpuinitdata kvm_cpu_notifier = {
23112+static struct notifier_block kvm_cpu_notifier = {
23113 .notifier_call = kvm_cpu_notify,
23114 };
23115 #endif
23116diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
23117index ebc9873..1b9724b 100644
23118--- a/arch/x86/kernel/ldt.c
23119+++ b/arch/x86/kernel/ldt.c
23120@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
23121 if (reload) {
23122 #ifdef CONFIG_SMP
23123 preempt_disable();
23124- load_LDT(pc);
23125+ load_LDT_nolock(pc);
23126 if (!cpumask_equal(mm_cpumask(current->mm),
23127 cpumask_of(smp_processor_id())))
23128 smp_call_function(flush_ldt, current->mm, 1);
23129 preempt_enable();
23130 #else
23131- load_LDT(pc);
23132+ load_LDT_nolock(pc);
23133 #endif
23134 }
23135 if (oldsize) {
23136@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
23137 return err;
23138
23139 for (i = 0; i < old->size; i++)
23140- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
23141+ write_ldt_entry(new->ldt, i, old->ldt + i);
23142 return 0;
23143 }
23144
23145@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
23146 retval = copy_ldt(&mm->context, &old_mm->context);
23147 mutex_unlock(&old_mm->context.lock);
23148 }
23149+
23150+ if (tsk == current) {
23151+ mm->context.vdso = 0;
23152+
23153+#ifdef CONFIG_X86_32
23154+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23155+ mm->context.user_cs_base = 0UL;
23156+ mm->context.user_cs_limit = ~0UL;
23157+
23158+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
23159+ cpus_clear(mm->context.cpu_user_cs_mask);
23160+#endif
23161+
23162+#endif
23163+#endif
23164+
23165+ }
23166+
23167 return retval;
23168 }
23169
23170@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
23171 }
23172 }
23173
23174+#ifdef CONFIG_PAX_SEGMEXEC
23175+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
23176+ error = -EINVAL;
23177+ goto out_unlock;
23178+ }
23179+#endif
23180+
23181 fill_ldt(&ldt, &ldt_info);
23182 if (oldmode)
23183 ldt.avl = 0;
23184diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
23185index 5b19e4d..6476a76 100644
23186--- a/arch/x86/kernel/machine_kexec_32.c
23187+++ b/arch/x86/kernel/machine_kexec_32.c
23188@@ -26,7 +26,7 @@
23189 #include <asm/cacheflush.h>
23190 #include <asm/debugreg.h>
23191
23192-static void set_idt(void *newidt, __u16 limit)
23193+static void set_idt(struct desc_struct *newidt, __u16 limit)
23194 {
23195 struct desc_ptr curidt;
23196
23197@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
23198 }
23199
23200
23201-static void set_gdt(void *newgdt, __u16 limit)
23202+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
23203 {
23204 struct desc_ptr curgdt;
23205
23206@@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
23207 }
23208
23209 control_page = page_address(image->control_code_page);
23210- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
23211+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
23212
23213 relocate_kernel_ptr = control_page;
23214 page_list[PA_CONTROL_PAGE] = __pa(control_page);
23215diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
23216index 22db92b..d546bec 100644
23217--- a/arch/x86/kernel/microcode_core.c
23218+++ b/arch/x86/kernel/microcode_core.c
23219@@ -513,7 +513,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
23220 return NOTIFY_OK;
23221 }
23222
23223-static struct notifier_block __refdata mc_cpu_notifier = {
23224+static struct notifier_block mc_cpu_notifier = {
23225 .notifier_call = mc_cpu_callback,
23226 };
23227
23228diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
23229index 5fb2ceb..3ae90bb 100644
23230--- a/arch/x86/kernel/microcode_intel.c
23231+++ b/arch/x86/kernel/microcode_intel.c
23232@@ -293,13 +293,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
23233
23234 static int get_ucode_user(void *to, const void *from, size_t n)
23235 {
23236- return copy_from_user(to, from, n);
23237+ return copy_from_user(to, (const void __force_user *)from, n);
23238 }
23239
23240 static enum ucode_state
23241 request_microcode_user(int cpu, const void __user *buf, size_t size)
23242 {
23243- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
23244+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
23245 }
23246
23247 static void microcode_fini_cpu(int cpu)
23248diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
23249index 216a4d7..228255a 100644
23250--- a/arch/x86/kernel/module.c
23251+++ b/arch/x86/kernel/module.c
23252@@ -43,15 +43,60 @@ do { \
23253 } while (0)
23254 #endif
23255
23256-void *module_alloc(unsigned long size)
23257+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
23258 {
23259- if (PAGE_ALIGN(size) > MODULES_LEN)
23260+ if (!size || PAGE_ALIGN(size) > MODULES_LEN)
23261 return NULL;
23262 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
23263- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
23264+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
23265 -1, __builtin_return_address(0));
23266 }
23267
23268+void *module_alloc(unsigned long size)
23269+{
23270+
23271+#ifdef CONFIG_PAX_KERNEXEC
23272+ return __module_alloc(size, PAGE_KERNEL);
23273+#else
23274+ return __module_alloc(size, PAGE_KERNEL_EXEC);
23275+#endif
23276+
23277+}
23278+
23279+#ifdef CONFIG_PAX_KERNEXEC
23280+#ifdef CONFIG_X86_32
23281+void *module_alloc_exec(unsigned long size)
23282+{
23283+ struct vm_struct *area;
23284+
23285+ if (size == 0)
23286+ return NULL;
23287+
23288+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
23289+ return area ? area->addr : NULL;
23290+}
23291+EXPORT_SYMBOL(module_alloc_exec);
23292+
23293+void module_free_exec(struct module *mod, void *module_region)
23294+{
23295+ vunmap(module_region);
23296+}
23297+EXPORT_SYMBOL(module_free_exec);
23298+#else
23299+void module_free_exec(struct module *mod, void *module_region)
23300+{
23301+ module_free(mod, module_region);
23302+}
23303+EXPORT_SYMBOL(module_free_exec);
23304+
23305+void *module_alloc_exec(unsigned long size)
23306+{
23307+ return __module_alloc(size, PAGE_KERNEL_RX);
23308+}
23309+EXPORT_SYMBOL(module_alloc_exec);
23310+#endif
23311+#endif
23312+
23313 #ifdef CONFIG_X86_32
23314 int apply_relocate(Elf32_Shdr *sechdrs,
23315 const char *strtab,
23316@@ -62,14 +107,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
23317 unsigned int i;
23318 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
23319 Elf32_Sym *sym;
23320- uint32_t *location;
23321+ uint32_t *plocation, location;
23322
23323 DEBUGP("Applying relocate section %u to %u\n",
23324 relsec, sechdrs[relsec].sh_info);
23325 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
23326 /* This is where to make the change */
23327- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
23328- + rel[i].r_offset;
23329+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
23330+ location = (uint32_t)plocation;
23331+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
23332+ plocation = ktla_ktva((void *)plocation);
23333 /* This is the symbol it is referring to. Note that all
23334 undefined symbols have been resolved. */
23335 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
23336@@ -78,11 +125,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
23337 switch (ELF32_R_TYPE(rel[i].r_info)) {
23338 case R_386_32:
23339 /* We add the value into the location given */
23340- *location += sym->st_value;
23341+ pax_open_kernel();
23342+ *plocation += sym->st_value;
23343+ pax_close_kernel();
23344 break;
23345 case R_386_PC32:
23346 /* Add the value, subtract its position */
23347- *location += sym->st_value - (uint32_t)location;
23348+ pax_open_kernel();
23349+ *plocation += sym->st_value - location;
23350+ pax_close_kernel();
23351 break;
23352 default:
23353 pr_err("%s: Unknown relocation: %u\n",
23354@@ -127,21 +178,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
23355 case R_X86_64_NONE:
23356 break;
23357 case R_X86_64_64:
23358+ pax_open_kernel();
23359 *(u64 *)loc = val;
23360+ pax_close_kernel();
23361 break;
23362 case R_X86_64_32:
23363+ pax_open_kernel();
23364 *(u32 *)loc = val;
23365+ pax_close_kernel();
23366 if (val != *(u32 *)loc)
23367 goto overflow;
23368 break;
23369 case R_X86_64_32S:
23370+ pax_open_kernel();
23371 *(s32 *)loc = val;
23372+ pax_close_kernel();
23373 if ((s64)val != *(s32 *)loc)
23374 goto overflow;
23375 break;
23376 case R_X86_64_PC32:
23377 val -= (u64)loc;
23378+ pax_open_kernel();
23379 *(u32 *)loc = val;
23380+ pax_close_kernel();
23381+
23382 #if 0
23383 if ((s64)val != *(s32 *)loc)
23384 goto overflow;
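
The module.c rework above enforces W^X for modules: under KERNEXEC, module_alloc() hands out non-executable PAGE_KERNEL memory for data, while the new module_alloc_exec()/module_free_exec() pair manages a separate executable region (a dedicated MODULES_EXEC_VADDR window on 32-bit, PAGE_KERNEL_RX on 64-bit). A userspace sketch of the same split, x86-only and with invented names (mmap/mprotect stand in for the kernel allocators):

    #include <stddef.h>
    #include <string.h>
    #include <sys/mman.h>

    static void *alloc_data(size_t n)        /* ~module_alloc() under KERNEXEC */
    {
        void *p = mmap(NULL, n, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        return p == MAP_FAILED ? NULL : p;
    }

    static void *alloc_exec(const void *code, size_t n) /* ~module_alloc_exec() */
    {
        void *p = alloc_data(n);
        if (!p)
            return NULL;
        memcpy(p, code, n);                  /* copy/relocate while still RW */
        if (mprotect(p, n, PROT_READ | PROT_EXEC))
            return NULL;                     /* then seal RX: never W and X  */
        return p;
    }

    int main(void)
    {
        unsigned char ret_insn[] = { 0xc3 }; /* x86 'ret' */
        void (*fn)(void) = (void (*)(void))alloc_exec(ret_insn, sizeof(ret_insn));
        if (fn)
            fn();                            /* executes from the sealed RX page */
        return 0;
    }
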
23385diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
23386index ce13049..e2e9c3c 100644
23387--- a/arch/x86/kernel/msr.c
23388+++ b/arch/x86/kernel/msr.c
23389@@ -233,7 +233,7 @@ static int __cpuinit msr_class_cpu_callback(struct notifier_block *nfb,
23390 return notifier_from_errno(err);
23391 }
23392
23393-static struct notifier_block __refdata msr_class_cpu_notifier = {
23394+static struct notifier_block msr_class_cpu_notifier = {
23395 .notifier_call = msr_class_cpu_callback,
23396 };
23397
23398diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
23399index 6030805..2d33f21 100644
23400--- a/arch/x86/kernel/nmi.c
23401+++ b/arch/x86/kernel/nmi.c
23402@@ -105,7 +105,7 @@ static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2
23403 return handled;
23404 }
23405
23406-int __register_nmi_handler(unsigned int type, struct nmiaction *action)
23407+int __register_nmi_handler(unsigned int type, const struct nmiaction *action)
23408 {
23409 struct nmi_desc *desc = nmi_to_desc(type);
23410 unsigned long flags;
23411@@ -129,9 +129,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
23412 * event confuses some handlers (kdump uses this flag)
23413 */
23414 if (action->flags & NMI_FLAG_FIRST)
23415- list_add_rcu(&action->list, &desc->head);
23416+ pax_list_add_rcu((struct list_head *)&action->list, &desc->head);
23417 else
23418- list_add_tail_rcu(&action->list, &desc->head);
23419+ pax_list_add_tail_rcu((struct list_head *)&action->list, &desc->head);
23420
23421 spin_unlock_irqrestore(&desc->lock, flags);
23422 return 0;
23423@@ -154,7 +154,7 @@ void unregister_nmi_handler(unsigned int type, const char *name)
23424 if (!strcmp(n->name, name)) {
23425 WARN(in_nmi(),
23426 "Trying to free NMI (%s) from NMI context!\n", n->name);
23427- list_del_rcu(&n->list);
23428+ pax_list_del_rcu((struct list_head *)&n->list);
23429 break;
23430 }
23431 }
23432@@ -479,6 +479,17 @@ static inline void nmi_nesting_postprocess(void)
23433 dotraplinkage notrace __kprobes void
23434 do_nmi(struct pt_regs *regs, long error_code)
23435 {
23436+
23437+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
23438+ if (!user_mode(regs)) {
23439+ unsigned long cs = regs->cs & 0xFFFF;
23440+ unsigned long ip = ktva_ktla(regs->ip);
23441+
23442+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
23443+ regs->ip = ip;
23444+ }
23445+#endif
23446+
23447 nmi_nesting_preprocess(regs);
23448
23449 nmi_enter();
23450diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
23451index 6d9582e..f746287 100644
23452--- a/arch/x86/kernel/nmi_selftest.c
23453+++ b/arch/x86/kernel/nmi_selftest.c
23454@@ -43,7 +43,7 @@ static void __init init_nmi_testsuite(void)
23455 {
23456 /* trap all the unknown NMIs we may generate */
23457 register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk",
23458- __initdata);
23459+ __initconst);
23460 }
23461
23462 static void __init cleanup_nmi_testsuite(void)
23463@@ -66,7 +66,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
23464 unsigned long timeout;
23465
23466 if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
23467- NMI_FLAG_FIRST, "nmi_selftest", __initdata)) {
23468+ NMI_FLAG_FIRST, "nmi_selftest", __initconst)) {
23469 nmi_fail = FAILURE;
23470 return;
23471 }
23472diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
23473index 676b8c7..870ba04 100644
23474--- a/arch/x86/kernel/paravirt-spinlocks.c
23475+++ b/arch/x86/kernel/paravirt-spinlocks.c
23476@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
23477 arch_spin_lock(lock);
23478 }
23479
23480-struct pv_lock_ops pv_lock_ops = {
23481+struct pv_lock_ops pv_lock_ops __read_only = {
23482 #ifdef CONFIG_SMP
23483 .spin_is_locked = __ticket_spin_is_locked,
23484 .spin_is_contended = __ticket_spin_is_contended,
23485diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
23486index cd6de64..27c6af0 100644
23487--- a/arch/x86/kernel/paravirt.c
23488+++ b/arch/x86/kernel/paravirt.c
23489@@ -55,6 +55,9 @@ u64 _paravirt_ident_64(u64 x)
23490 {
23491 return x;
23492 }
23493+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
23494+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
23495+#endif
23496
23497 void __init default_banner(void)
23498 {
23499@@ -147,15 +150,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
23500 if (opfunc == NULL)
23501 /* If there's no function, patch it with a ud2a (BUG) */
23502 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
23503- else if (opfunc == _paravirt_nop)
23504+ else if (opfunc == (void *)_paravirt_nop)
23505 /* If the operation is a nop, then nop the callsite */
23506 ret = paravirt_patch_nop();
23507
23508 /* identity functions just return their single argument */
23509- else if (opfunc == _paravirt_ident_32)
23510+ else if (opfunc == (void *)_paravirt_ident_32)
23511 ret = paravirt_patch_ident_32(insnbuf, len);
23512- else if (opfunc == _paravirt_ident_64)
23513+ else if (opfunc == (void *)_paravirt_ident_64)
23514 ret = paravirt_patch_ident_64(insnbuf, len);
23515+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
23516+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
23517+ ret = paravirt_patch_ident_64(insnbuf, len);
23518+#endif
23519
23520 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
23521 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
23522@@ -180,7 +187,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
23523 if (insn_len > len || start == NULL)
23524 insn_len = len;
23525 else
23526- memcpy(insnbuf, start, insn_len);
23527+ memcpy(insnbuf, ktla_ktva(start), insn_len);
23528
23529 return insn_len;
23530 }
23531@@ -304,7 +311,7 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
23532 return this_cpu_read(paravirt_lazy_mode);
23533 }
23534
23535-struct pv_info pv_info = {
23536+struct pv_info pv_info __read_only = {
23537 .name = "bare hardware",
23538 .paravirt_enabled = 0,
23539 .kernel_rpl = 0,
23540@@ -315,16 +322,16 @@ struct pv_info pv_info = {
23541 #endif
23542 };
23543
23544-struct pv_init_ops pv_init_ops = {
23545+struct pv_init_ops pv_init_ops __read_only = {
23546 .patch = native_patch,
23547 };
23548
23549-struct pv_time_ops pv_time_ops = {
23550+struct pv_time_ops pv_time_ops __read_only = {
23551 .sched_clock = native_sched_clock,
23552 .steal_clock = native_steal_clock,
23553 };
23554
23555-struct pv_irq_ops pv_irq_ops = {
23556+struct pv_irq_ops pv_irq_ops __read_only = {
23557 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
23558 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
23559 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
23560@@ -336,7 +343,7 @@ struct pv_irq_ops pv_irq_ops = {
23561 #endif
23562 };
23563
23564-struct pv_cpu_ops pv_cpu_ops = {
23565+struct pv_cpu_ops pv_cpu_ops __read_only = {
23566 .cpuid = native_cpuid,
23567 .get_debugreg = native_get_debugreg,
23568 .set_debugreg = native_set_debugreg,
23569@@ -394,21 +401,26 @@ struct pv_cpu_ops pv_cpu_ops = {
23570 .end_context_switch = paravirt_nop,
23571 };
23572
23573-struct pv_apic_ops pv_apic_ops = {
23574+struct pv_apic_ops pv_apic_ops __read_only = {
23575 #ifdef CONFIG_X86_LOCAL_APIC
23576 .startup_ipi_hook = paravirt_nop,
23577 #endif
23578 };
23579
23580-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
23581+#ifdef CONFIG_X86_32
23582+#ifdef CONFIG_X86_PAE
23583+/* 64-bit pagetable entries */
23584+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
23585+#else
23586 /* 32-bit pagetable entries */
23587 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
23588+#endif
23589 #else
23590 /* 64-bit pagetable entries */
23591 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
23592 #endif
23593
23594-struct pv_mmu_ops pv_mmu_ops = {
23595+struct pv_mmu_ops pv_mmu_ops __read_only = {
23596
23597 .read_cr2 = native_read_cr2,
23598 .write_cr2 = native_write_cr2,
23599@@ -458,6 +470,7 @@ struct pv_mmu_ops pv_mmu_ops = {
23600 .make_pud = PTE_IDENT,
23601
23602 .set_pgd = native_set_pgd,
23603+ .set_pgd_batched = native_set_pgd_batched,
23604 #endif
23605 #endif /* PAGETABLE_LEVELS >= 3 */
23606
23607@@ -478,6 +491,12 @@ struct pv_mmu_ops pv_mmu_ops = {
23608 },
23609
23610 .set_fixmap = native_set_fixmap,
23611+
23612+#ifdef CONFIG_PAX_KERNEXEC
23613+ .pax_open_kernel = native_pax_open_kernel,
23614+ .pax_close_kernel = native_pax_close_kernel,
23615+#endif
23616+
23617 };
23618
23619 EXPORT_SYMBOL_GPL(pv_time_ops);
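
The hunks above move each paravirt dispatch table (pv_info through pv_mmu_ops) into read-only memory. Tables of function pointers are prime targets for kernel-write exploits: redirect one slot and the next indirect call runs attacker code. __read_only is a PaX/grsecurity attribute, with pax_open_kernel()/pax_close_kernel() bracketing the few legitimate updates; the plain-C sketch below only approximates the effect with const placement in .rodata, so treat it as the idea rather than the mechanism:

#include <stdio.h>

struct ops {
        int (*op)(int);    /* a function-pointer slot, the classic overwrite target */
};

static int real_op(int x) { return x + 1; }

/* writable table: a stray kernel write could repoint ->op anywhere */
static struct ops writable_ops = { .op = real_op };

/* read-only table: the pointer is fixed at link time; a write faults */
static const struct ops readonly_ops = { .op = real_op };

int main(void)
{
        printf("%d %d\n", writable_ops.op(1), readonly_ops.op(1));
        return 0;
}
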
23620diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
23621index 299d493..2ccb0ee 100644
23622--- a/arch/x86/kernel/pci-calgary_64.c
23623+++ b/arch/x86/kernel/pci-calgary_64.c
23624@@ -1339,7 +1339,7 @@ static void __init get_tce_space_from_tar(void)
23625 tce_space = be64_to_cpu(readq(target));
23626 tce_space = tce_space & TAR_SW_BITS;
23627
23628- tce_space = tce_space & (~specified_table_size);
23629+ tce_space = tce_space & (~(unsigned long)specified_table_size);
23630 info->tce_space = (u64 *)__va(tce_space);
23631 }
23632 }
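
The one-line cast above fixes an integer-promotion pitfall: specified_table_size is a 32-bit unsigned int, so ~specified_table_size is computed in 32 bits and only then zero-extended for the 64-bit AND, silently clearing the upper half of tce_space. Complementing after the cast builds the mask at full width. A user-space demonstration on an LP64 system (the values are invented):

#include <stdio.h>

int main(void)
{
        unsigned int  specified_table_size = 0x7;        /* 32-bit, as in the driver */
        unsigned long tce_space = 0x12345678abcdef07UL;  /* 64-bit address-like value */

        /* ~0x7u is 0xfffffff8; zero-extended, the mask's top 32 bits are 0 */
        unsigned long bad  = tce_space & (~specified_table_size);
        /* ~0x7ul is 0xfffffffffffffff8; the top bits survive */
        unsigned long good = tce_space & (~(unsigned long)specified_table_size);

        printf("bad:  %#lx\n", bad);   /* 0xabcdef00: high 32 bits lost */
        printf("good: %#lx\n", good);  /* 0x12345678abcdef00 */
        return 0;
}

The same pattern is worth auditing anywhere a narrower size or mask is complemented against a wider address.
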
23633diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
23634index 35ccf75..7a15747 100644
23635--- a/arch/x86/kernel/pci-iommu_table.c
23636+++ b/arch/x86/kernel/pci-iommu_table.c
23637@@ -2,7 +2,7 @@
23638 #include <asm/iommu_table.h>
23639 #include <linux/string.h>
23640 #include <linux/kallsyms.h>
23641-
23642+#include <linux/sched.h>
23643
23644 #define DEBUG 1
23645
23646diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
23647index 6c483ba..d10ce2f 100644
23648--- a/arch/x86/kernel/pci-swiotlb.c
23649+++ b/arch/x86/kernel/pci-swiotlb.c
23650@@ -32,7 +32,7 @@ static void x86_swiotlb_free_coherent(struct device *dev, size_t size,
23651 void *vaddr, dma_addr_t dma_addr,
23652 struct dma_attrs *attrs)
23653 {
23654- swiotlb_free_coherent(dev, size, vaddr, dma_addr);
23655+ swiotlb_free_coherent(dev, size, vaddr, dma_addr, attrs);
23656 }
23657
23658 static struct dma_map_ops swiotlb_dma_ops = {
23659diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
23660index 81a5f5e..20f8b58 100644
23661--- a/arch/x86/kernel/process.c
23662+++ b/arch/x86/kernel/process.c
23663@@ -36,7 +36,8 @@
23664 * section. Since TSS's are completely CPU-local, we want them
23665 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
23666 */
23667-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
23668+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
23669+EXPORT_SYMBOL(init_tss);
23670
23671 #ifdef CONFIG_X86_64
23672 static DEFINE_PER_CPU(unsigned char, is_idle);
23673@@ -92,7 +93,7 @@ void arch_task_cache_init(void)
23674 task_xstate_cachep =
23675 kmem_cache_create("task_xstate", xstate_size,
23676 __alignof__(union thread_xstate),
23677- SLAB_PANIC | SLAB_NOTRACK, NULL);
23678+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
23679 }
23680
23681 /*
23682@@ -105,7 +106,7 @@ void exit_thread(void)
23683 unsigned long *bp = t->io_bitmap_ptr;
23684
23685 if (bp) {
23686- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
23687+ struct tss_struct *tss = init_tss + get_cpu();
23688
23689 t->io_bitmap_ptr = NULL;
23690 clear_thread_flag(TIF_IO_BITMAP);
23691@@ -125,6 +126,9 @@ void flush_thread(void)
23692 {
23693 struct task_struct *tsk = current;
23694
23695+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
23696+ loadsegment(gs, 0);
23697+#endif
23698 flush_ptrace_hw_breakpoint(tsk);
23699 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
23700 drop_init_fpu(tsk);
23701@@ -271,7 +275,7 @@ static void __exit_idle(void)
23702 void exit_idle(void)
23703 {
23704 /* idle loop has pid 0 */
23705- if (current->pid)
23706+ if (task_pid_nr(current))
23707 return;
23708 __exit_idle();
23709 }
23710@@ -327,7 +331,7 @@ bool xen_set_default_idle(void)
23711 return ret;
23712 }
23713 #endif
23714-void stop_this_cpu(void *dummy)
23715+__noreturn void stop_this_cpu(void *dummy)
23716 {
23717 local_irq_disable();
23718 /*
23719@@ -456,16 +460,37 @@ static int __init idle_setup(char *str)
23720 }
23721 early_param("idle", idle_setup);
23722
23723-unsigned long arch_align_stack(unsigned long sp)
23724+#ifdef CONFIG_PAX_RANDKSTACK
23725+void pax_randomize_kstack(struct pt_regs *regs)
23726 {
23727- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
23728- sp -= get_random_int() % 8192;
23729- return sp & ~0xf;
23730-}
23731+ struct thread_struct *thread = &current->thread;
23732+ unsigned long time;
23733
23734-unsigned long arch_randomize_brk(struct mm_struct *mm)
23735-{
23736- unsigned long range_end = mm->brk + 0x02000000;
23737- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
23738-}
23739+ if (!randomize_va_space)
23740+ return;
23741+
23742+ if (v8086_mode(regs))
23743+ return;
23744
23745+ rdtscl(time);
23746+
23747+ /* P4 seems to return a 0 LSB, ignore it */
23748+#ifdef CONFIG_MPENTIUM4
23749+ time &= 0x3EUL;
23750+ time <<= 2;
23751+#elif defined(CONFIG_X86_64)
23752+ time &= 0xFUL;
23753+ time <<= 4;
23754+#else
23755+ time &= 0x1FUL;
23756+ time <<= 3;
23757+#endif
23758+
23759+ thread->sp0 ^= time;
23760+ load_sp0(init_tss + smp_processor_id(), thread);
23761+
23762+#ifdef CONFIG_X86_64
23763+ this_cpu_write(kernel_stack, thread->sp0);
23764+#endif
23765+}
23766+#endif
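
pax_randomize_kstack(), added above, perturbs thread->sp0 with a few low TSC bits on kernel entry, so each syscall runs at a slightly different kernel stack offset; the per-CONFIG masks trade entropy against the alignment the entry paths require, and the Pentium 4 case skips TSC bit 0 because, as the comment notes, it reads as 0 there. Enumerating what each variant yields (plain user-space arithmetic, nothing kernel-specific):

#include <stdio.h>

static void report(const char *cfg, unsigned long mask, unsigned shift)
{
        unsigned long min_step = (mask & -mask) << shift;  /* lowest mask bit */
        unsigned long max_off  = mask << shift;
        unsigned bits = __builtin_popcountl(mask);

        printf("%-10s %u bits of entropy, offsets 0..%#lx in steps of %#lx\n",
               cfg, bits, max_off, min_step);
}

int main(void)
{
        report("MPENTIUM4", 0x3EUL, 2);  /* 5 bits, 8-byte steps */
        report("X86_64",    0xFUL,  4);  /* 4 bits, 16-byte steps */
        report("X86_32",    0x1FUL, 3);  /* 5 bits, 8-byte steps */
        return 0;
}
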
23767diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
23768index 7305f7d..22f73d6 100644
23769--- a/arch/x86/kernel/process_32.c
23770+++ b/arch/x86/kernel/process_32.c
23771@@ -65,6 +65,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
23772 unsigned long thread_saved_pc(struct task_struct *tsk)
23773 {
23774 return ((unsigned long *)tsk->thread.sp)[3];
23775+//XXX return tsk->thread.eip;
23776 }
23777
23778 void __show_regs(struct pt_regs *regs, int all)
23779@@ -74,19 +75,18 @@ void __show_regs(struct pt_regs *regs, int all)
23780 unsigned long sp;
23781 unsigned short ss, gs;
23782
23783- if (user_mode_vm(regs)) {
23784+ if (user_mode(regs)) {
23785 sp = regs->sp;
23786 ss = regs->ss & 0xffff;
23787- gs = get_user_gs(regs);
23788 } else {
23789 sp = kernel_stack_pointer(regs);
23790 savesegment(ss, ss);
23791- savesegment(gs, gs);
23792 }
23793+ gs = get_user_gs(regs);
23794
23795 printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
23796 (u16)regs->cs, regs->ip, regs->flags,
23797- smp_processor_id());
23798+ raw_smp_processor_id());
23799 print_symbol("EIP is at %s\n", regs->ip);
23800
23801 printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
23802@@ -128,20 +128,21 @@ void release_thread(struct task_struct *dead_task)
23803 int copy_thread(unsigned long clone_flags, unsigned long sp,
23804 unsigned long arg, struct task_struct *p)
23805 {
23806- struct pt_regs *childregs = task_pt_regs(p);
23807+ struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
23808 struct task_struct *tsk;
23809 int err;
23810
23811 p->thread.sp = (unsigned long) childregs;
23812 p->thread.sp0 = (unsigned long) (childregs+1);
23813+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
23814
23815 if (unlikely(p->flags & PF_KTHREAD)) {
23816 /* kernel thread */
23817 memset(childregs, 0, sizeof(struct pt_regs));
23818 p->thread.ip = (unsigned long) ret_from_kernel_thread;
23819- task_user_gs(p) = __KERNEL_STACK_CANARY;
23820- childregs->ds = __USER_DS;
23821- childregs->es = __USER_DS;
23822+ savesegment(gs, childregs->gs);
23823+ childregs->ds = __KERNEL_DS;
23824+ childregs->es = __KERNEL_DS;
23825 childregs->fs = __KERNEL_PERCPU;
23826 childregs->bx = sp; /* function */
23827 childregs->bp = arg;
23828@@ -248,7 +249,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
23829 struct thread_struct *prev = &prev_p->thread,
23830 *next = &next_p->thread;
23831 int cpu = smp_processor_id();
23832- struct tss_struct *tss = &per_cpu(init_tss, cpu);
23833+ struct tss_struct *tss = init_tss + cpu;
23834 fpu_switch_t fpu;
23835
23836 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
23837@@ -272,6 +273,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
23838 */
23839 lazy_save_gs(prev->gs);
23840
23841+#ifdef CONFIG_PAX_MEMORY_UDEREF
23842+ __set_fs(task_thread_info(next_p)->addr_limit);
23843+#endif
23844+
23845 /*
23846 * Load the per-thread Thread-Local Storage descriptor.
23847 */
23848@@ -302,6 +307,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
23849 */
23850 arch_end_context_switch(next_p);
23851
23852+ this_cpu_write(current_task, next_p);
23853+ this_cpu_write(current_tinfo, &next_p->tinfo);
23854+
23855 /*
23856 * Restore %gs if needed (which is common)
23857 */
23858@@ -310,8 +318,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
23859
23860 switch_fpu_finish(next_p, fpu);
23861
23862- this_cpu_write(current_task, next_p);
23863-
23864 return prev_p;
23865 }
23866
23867@@ -341,4 +347,3 @@ unsigned long get_wchan(struct task_struct *p)
23868 } while (count++ < 16);
23869 return 0;
23870 }
23871-
23872diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
23873index 355ae06..560fbbe 100644
23874--- a/arch/x86/kernel/process_64.c
23875+++ b/arch/x86/kernel/process_64.c
23876@@ -151,10 +151,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
23877 struct pt_regs *childregs;
23878 struct task_struct *me = current;
23879
23880- p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
23881+ p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16;
23882 childregs = task_pt_regs(p);
23883 p->thread.sp = (unsigned long) childregs;
23884 p->thread.usersp = me->thread.usersp;
23885+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
23886 set_tsk_thread_flag(p, TIF_FORK);
23887 p->fpu_counter = 0;
23888 p->thread.io_bitmap_ptr = NULL;
23889@@ -165,6 +166,8 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
23890 p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
23891 savesegment(es, p->thread.es);
23892 savesegment(ds, p->thread.ds);
23893+ savesegment(ss, p->thread.ss);
23894+ BUG_ON(p->thread.ss == __UDEREF_KERNEL_DS);
23895 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
23896
23897 if (unlikely(p->flags & PF_KTHREAD)) {
23898@@ -273,7 +276,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
23899 struct thread_struct *prev = &prev_p->thread;
23900 struct thread_struct *next = &next_p->thread;
23901 int cpu = smp_processor_id();
23902- struct tss_struct *tss = &per_cpu(init_tss, cpu);
23903+ struct tss_struct *tss = init_tss + cpu;
23904 unsigned fsindex, gsindex;
23905 fpu_switch_t fpu;
23906
23907@@ -296,6 +299,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
23908 if (unlikely(next->ds | prev->ds))
23909 loadsegment(ds, next->ds);
23910
23911+ savesegment(ss, prev->ss);
23912+ if (unlikely(next->ss != prev->ss))
23913+ loadsegment(ss, next->ss);
23914
23915 /* We must save %fs and %gs before load_TLS() because
23916 * %fs and %gs may be cleared by load_TLS().
23917@@ -355,10 +361,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
23918 prev->usersp = this_cpu_read(old_rsp);
23919 this_cpu_write(old_rsp, next->usersp);
23920 this_cpu_write(current_task, next_p);
23921+ this_cpu_write(current_tinfo, &next_p->tinfo);
23922
23923- this_cpu_write(kernel_stack,
23924- (unsigned long)task_stack_page(next_p) +
23925- THREAD_SIZE - KERNEL_STACK_OFFSET);
23926+ this_cpu_write(kernel_stack, next->sp0);
23927
23928 /*
23929 * Now maybe reload the debug registers and handle I/O bitmaps
23930@@ -427,12 +432,11 @@ unsigned long get_wchan(struct task_struct *p)
23931 if (!p || p == current || p->state == TASK_RUNNING)
23932 return 0;
23933 stack = (unsigned long)task_stack_page(p);
23934- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
23935+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
23936 return 0;
23937 fp = *(u64 *)(p->thread.sp);
23938 do {
23939- if (fp < (unsigned long)stack ||
23940- fp >= (unsigned long)stack+THREAD_SIZE)
23941+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
23942 return 0;
23943 ip = *(u64 *)(fp+8);
23944 if (!in_sched_functions(ip))
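
The get_wchan() changes shrink the valid window for saved stack and frame pointers to [stack, stack + THREAD_SIZE - 16 - sizeof(u64)], matching the sp0 that now sits 16 bytes below the top of the stack; each step dereferences both fp and fp+8 (the return address), so both loads must land inside the window. A toy frame-pointer walk with the same bounds discipline (the frame layout here is invented for the demo):

#include <stdio.h>
#include <stdint.h>

#define FAKE_THREAD_SIZE 64

int main(void)
{
        uint64_t stack[FAKE_THREAD_SIZE / 8] = {0};
        uintptr_t base = (uintptr_t)stack;

        /* forge two frames as [saved_fp, return_ip] pairs */
        stack[2] = base + 4 * 8;  /* frame A: saved fp points at frame B */
        stack[3] = 0x1111;        /* frame A: return address */
        stack[4] = 0;             /* frame B: NULL fp ends the walk */
        stack[5] = 0x2222;        /* frame B: return address */

        uintptr_t fp = base + 2 * 8;
        while (fp >= base &&
               fp <= base + FAKE_THREAD_SIZE - 16 - sizeof(uint64_t)) {
                uint64_t ip = *(uint64_t *)(fp + 8);
                printf("frame at +%u, return ip %#llx\n",
                       (unsigned)(fp - base), (unsigned long long)ip);
                fp = (uintptr_t)*(uint64_t *)fp;  /* follow the saved fp */
                if (!fp)
                        break;
        }
        return 0;
}
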
23945diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
23946index 29a8120..a50b5ee 100644
23947--- a/arch/x86/kernel/ptrace.c
23948+++ b/arch/x86/kernel/ptrace.c
23949@@ -184,14 +184,13 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
23950 {
23951 unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
23952 unsigned long sp = (unsigned long)&regs->sp;
23953- struct thread_info *tinfo;
23954
23955- if (context == (sp & ~(THREAD_SIZE - 1)))
23956+ if (context == ((sp + 8) & ~(THREAD_SIZE - 1)))
23957 return sp;
23958
23959- tinfo = (struct thread_info *)context;
23960- if (tinfo->previous_esp)
23961- return tinfo->previous_esp;
23962+ sp = *(unsigned long *)context;
23963+ if (sp)
23964+ return sp;
23965
23966 return (unsigned long)regs;
23967 }
23968@@ -588,7 +587,7 @@ static void ptrace_triggered(struct perf_event *bp,
23969 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
23970 {
23971 int i;
23972- int dr7 = 0;
23973+ unsigned long dr7 = 0;
23974 struct arch_hw_breakpoint *info;
23975
23976 for (i = 0; i < HBP_NUM; i++) {
23977@@ -856,7 +855,7 @@ long arch_ptrace(struct task_struct *child, long request,
23978 unsigned long addr, unsigned long data)
23979 {
23980 int ret;
23981- unsigned long __user *datap = (unsigned long __user *)data;
23982+ unsigned long __user *datap = (__force unsigned long __user *)data;
23983
23984 switch (request) {
23985 /* read the word at location addr in the USER area. */
23986@@ -941,14 +940,14 @@ long arch_ptrace(struct task_struct *child, long request,
23987 if ((int) addr < 0)
23988 return -EIO;
23989 ret = do_get_thread_area(child, addr,
23990- (struct user_desc __user *)data);
23991+ (__force struct user_desc __user *) data);
23992 break;
23993
23994 case PTRACE_SET_THREAD_AREA:
23995 if ((int) addr < 0)
23996 return -EIO;
23997 ret = do_set_thread_area(child, addr,
23998- (struct user_desc __user *)data, 0);
23999+ (__force struct user_desc __user *) data, 0);
24000 break;
24001 #endif
24002
24003@@ -1326,7 +1325,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
24004
24005 #ifdef CONFIG_X86_64
24006
24007-static struct user_regset x86_64_regsets[] __read_mostly = {
24008+static user_regset_no_const x86_64_regsets[] __read_only = {
24009 [REGSET_GENERAL] = {
24010 .core_note_type = NT_PRSTATUS,
24011 .n = sizeof(struct user_regs_struct) / sizeof(long),
24012@@ -1367,7 +1366,7 @@ static const struct user_regset_view user_x86_64_view = {
24013 #endif /* CONFIG_X86_64 */
24014
24015 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
24016-static struct user_regset x86_32_regsets[] __read_mostly = {
24017+static user_regset_no_const x86_32_regsets[] __read_only = {
24018 [REGSET_GENERAL] = {
24019 .core_note_type = NT_PRSTATUS,
24020 .n = sizeof(struct user_regs_struct32) / sizeof(u32),
24021@@ -1420,7 +1419,7 @@ static const struct user_regset_view user_x86_32_view = {
24022 */
24023 u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
24024
24025-void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
24026+void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
24027 {
24028 #ifdef CONFIG_X86_64
24029 x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
24030@@ -1455,7 +1454,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
24031 memset(info, 0, sizeof(*info));
24032 info->si_signo = SIGTRAP;
24033 info->si_code = si_code;
24034- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
24035+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
24036 }
24037
24038 void user_single_step_siginfo(struct task_struct *tsk,
24039@@ -1484,6 +1483,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
24040 # define IS_IA32 0
24041 #endif
24042
24043+#ifdef CONFIG_GRKERNSEC_SETXID
24044+extern void gr_delayed_cred_worker(void);
24045+#endif
24046+
24047 /*
24048 * We must return the syscall number to actually look up in the table.
24049 * This can be -1L to skip running any syscall at all.
24050@@ -1494,6 +1497,11 @@ long syscall_trace_enter(struct pt_regs *regs)
24051
24052 user_exit();
24053
24054+#ifdef CONFIG_GRKERNSEC_SETXID
24055+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
24056+ gr_delayed_cred_worker();
24057+#endif
24058+
24059 /*
24060 * If we stepped into a sysenter/syscall insn, it trapped in
24061 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
24062@@ -1549,6 +1557,11 @@ void syscall_trace_leave(struct pt_regs *regs)
24063 */
24064 user_exit();
24065
24066+#ifdef CONFIG_GRKERNSEC_SETXID
24067+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
24068+ gr_delayed_cred_worker();
24069+#endif
24070+
24071 audit_syscall_exit(regs);
24072
24073 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
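
Two recurring themes in the ptrace.c hunk: dr7 is widened to unsigned long so the assembled breakpoint bit image is carried at full register width without sign-extension surprises, and the __force casts tell sparse that crossing the kernel/user address-space boundary is intentional rather than a bug. The widening pitfall in isolation (LP64 assumed; bit 31 is a stand-in here, not a defined DR7 bit):

#include <stdio.h>

int main(void)
{
        int           dr7_int  = 0;
        unsigned long dr7_long = 0;

        dr7_int  |= (int)(1u << 31);  /* implementation-defined; GCC wraps to INT_MIN */
        dr7_long |= (1ul << 31);

        printf("via int:   %#lx\n", (unsigned long)dr7_int);  /* 0xffffffff80000000 */
        printf("via ulong: %#lx\n", dr7_long);                /* 0x80000000 */
        return 0;
}
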
24074diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
24075index 2cb9470..ff1fd80 100644
24076--- a/arch/x86/kernel/pvclock.c
24077+++ b/arch/x86/kernel/pvclock.c
24078@@ -43,11 +43,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
24079 return pv_tsc_khz;
24080 }
24081
24082-static atomic64_t last_value = ATOMIC64_INIT(0);
24083+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
24084
24085 void pvclock_resume(void)
24086 {
24087- atomic64_set(&last_value, 0);
24088+ atomic64_set_unchecked(&last_value, 0);
24089 }
24090
24091 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
24092@@ -92,11 +92,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
24093 * updating at the same time, and one of them could be slightly behind,
24094 * making the assumption that last_value always go forward fail to hold.
24095 */
24096- last = atomic64_read(&last_value);
24097+ last = atomic64_read_unchecked(&last_value);
24098 do {
24099 if (ret < last)
24100 return last;
24101- last = atomic64_cmpxchg(&last_value, last, ret);
24102+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
24103 } while (unlikely(last != ret));
24104
24105 return ret;
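
The loop above is a lock-free advance-to-maximum: each reader proposes its timestamp with cmpxchg and retries until either its value is published or a newer one already is, keeping the paravirt clock monotonic across CPUs whose TSC readings disagree slightly. The patch only swaps in the _unchecked atomics, opting this counter out of PaX's refcount overflow trap; the algorithm is untouched. Its shape, redone as a user-space sketch with C11 atomics (not the kernel API):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long long last_value;

static unsigned long long monotonic(unsigned long long ret)
{
        unsigned long long last = atomic_load(&last_value);

        do {
                if (ret < last)
                        return last;  /* a later time is already published */
        } while (!atomic_compare_exchange_weak(&last_value, &last, ret));
        /* on CAS failure, 'last' is reloaded and the check reruns */

        return ret;
}

int main(void)
{
        printf("%llu\n", monotonic(100));  /* publishes 100 */
        printf("%llu\n", monotonic(90));   /* clamped: returns 100 */
        printf("%llu\n", monotonic(110));  /* publishes 110 */
        return 0;
}
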
24106diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
24107index 76fa1e9..abf09ea 100644
24108--- a/arch/x86/kernel/reboot.c
24109+++ b/arch/x86/kernel/reboot.c
24110@@ -36,7 +36,7 @@ void (*pm_power_off)(void);
24111 EXPORT_SYMBOL(pm_power_off);
24112
24113 static const struct desc_ptr no_idt = {};
24114-static int reboot_mode;
24115+static unsigned short reboot_mode;
24116 enum reboot_type reboot_type = BOOT_ACPI;
24117 int reboot_force;
24118
24119@@ -157,6 +157,11 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
24120
24121 void __noreturn machine_real_restart(unsigned int type)
24122 {
24123+
24124+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
24125+ struct desc_struct *gdt;
24126+#endif
24127+
24128 local_irq_disable();
24129
24130 /*
24131@@ -184,7 +189,29 @@ void __noreturn machine_real_restart(unsigned int type)
24132
24133 /* Jump to the identity-mapped low memory code */
24134 #ifdef CONFIG_X86_32
24135- asm volatile("jmpl *%0" : :
24136+
24137+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
24138+ gdt = get_cpu_gdt_table(smp_processor_id());
24139+ pax_open_kernel();
24140+#ifdef CONFIG_PAX_MEMORY_UDEREF
24141+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
24142+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
24143+ loadsegment(ds, __KERNEL_DS);
24144+ loadsegment(es, __KERNEL_DS);
24145+ loadsegment(ss, __KERNEL_DS);
24146+#endif
24147+#ifdef CONFIG_PAX_KERNEXEC
24148+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
24149+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
24150+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
24151+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
24152+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
24153+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
24154+#endif
24155+ pax_close_kernel();
24156+#endif
24157+
24158+ asm volatile("ljmpl *%0" : :
24159 "rm" (real_mode_header->machine_real_restart_asm),
24160 "a" (type));
24161 #else
24162@@ -531,7 +558,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
24163 * try to force a triple fault and then cycle between hitting the keyboard
24164 * controller and doing that
24165 */
24166-static void native_machine_emergency_restart(void)
24167+static void __noreturn native_machine_emergency_restart(void)
24168 {
24169 int i;
24170 int attempt = 0;
24171@@ -654,13 +681,13 @@ void native_machine_shutdown(void)
24172 #endif
24173 }
24174
24175-static void __machine_emergency_restart(int emergency)
24176+static void __noreturn __machine_emergency_restart(int emergency)
24177 {
24178 reboot_emergency = emergency;
24179 machine_ops.emergency_restart();
24180 }
24181
24182-static void native_machine_restart(char *__unused)
24183+static void __noreturn native_machine_restart(char *__unused)
24184 {
24185 pr_notice("machine restart\n");
24186
24187@@ -669,7 +696,7 @@ static void native_machine_restart(char *__unused)
24188 __machine_emergency_restart(0);
24189 }
24190
24191-static void native_machine_halt(void)
24192+static void __noreturn native_machine_halt(void)
24193 {
24194 /* Stop other cpus and apics */
24195 machine_shutdown();
24196@@ -679,7 +706,7 @@ static void native_machine_halt(void)
24197 stop_this_cpu(NULL);
24198 }
24199
24200-static void native_machine_power_off(void)
24201+static void __noreturn native_machine_power_off(void)
24202 {
24203 if (pm_power_off) {
24204 if (!reboot_force)
24205@@ -688,9 +715,10 @@ static void native_machine_power_off(void)
24206 }
24207 /* A fallback in case there is no PM info available */
24208 tboot_shutdown(TB_SHUTDOWN_HALT);
24209+ unreachable();
24210 }
24211
24212-struct machine_ops machine_ops = {
24213+struct machine_ops machine_ops __read_only = {
24214 .power_off = native_machine_power_off,
24215 .shutdown = native_machine_shutdown,
24216 .emergency_restart = native_machine_emergency_restart,
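
Annotating the machine_ops callbacks __noreturn (and adding unreachable() after the tboot fallback) documents that every path halts, letting the compiler drop dead code after the calls and warn if a supposedly terminal path can fall through; the GDT surgery further up restores flat 4GiB code/data segments before the jump, since under KERNEXEC/UDEREF the kernel descriptors are deliberately non-flat (rebased or limited) and the identity-mapped real-mode stub expects flat ones. The __noreturn contract itself, in miniature (the kernel's macro expands to the same GCC attribute):

#include <stdio.h>
#include <stdlib.h>

#define __noreturn __attribute__((noreturn))

static __noreturn void halt_now(const char *why)
{
        fprintf(stderr, "halting: %s\n", why);
        exit(1);
        /* if exit() could be skipped, the compiler would warn that a
         * noreturn function returns; that is the checking being bought */
}

int main(int argc, char **argv)
{
        if (argc > 1)
                halt_now(argv[1]);
        puts("still running");
        return 0;
}
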
24217diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c
24218index c8e41e9..64049ef 100644
24219--- a/arch/x86/kernel/reboot_fixups_32.c
24220+++ b/arch/x86/kernel/reboot_fixups_32.c
24221@@ -57,7 +57,7 @@ struct device_fixup {
24222 unsigned int vendor;
24223 unsigned int device;
24224 void (*reboot_fixup)(struct pci_dev *);
24225-};
24226+} __do_const;
24227
24228 /*
24229 * PCI ids solely used for fixups_table go here
24230diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
24231index f2bb9c9..bed145d7 100644
24232--- a/arch/x86/kernel/relocate_kernel_64.S
24233+++ b/arch/x86/kernel/relocate_kernel_64.S
24234@@ -11,6 +11,7 @@
24235 #include <asm/kexec.h>
24236 #include <asm/processor-flags.h>
24237 #include <asm/pgtable_types.h>
24238+#include <asm/alternative-asm.h>
24239
24240 /*
24241 * Must be relocatable PIC code callable as a C function
24242@@ -167,6 +168,7 @@ identity_mapped:
24243 xorq %r14, %r14
24244 xorq %r15, %r15
24245
24246+ pax_force_retaddr 0, 1
24247 ret
24248
24249 1:
24250diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
24251index 56f7fcf..3b88ad1 100644
24252--- a/arch/x86/kernel/setup.c
24253+++ b/arch/x86/kernel/setup.c
24254@@ -110,6 +110,7 @@
24255 #include <asm/mce.h>
24256 #include <asm/alternative.h>
24257 #include <asm/prom.h>
24258+#include <asm/boot.h>
24259
24260 /*
24261 * max_low_pfn_mapped: highest direct mapped pfn under 4GB
24262@@ -205,10 +206,12 @@ EXPORT_SYMBOL(boot_cpu_data);
24263 #endif
24264
24265
24266-#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
24267-unsigned long mmu_cr4_features;
24268+#ifdef CONFIG_X86_64
24269+unsigned long mmu_cr4_features __read_only = X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE;
24270+#elif defined(CONFIG_X86_PAE)
24271+unsigned long mmu_cr4_features __read_only = X86_CR4_PAE;
24272 #else
24273-unsigned long mmu_cr4_features = X86_CR4_PAE;
24274+unsigned long mmu_cr4_features __read_only;
24275 #endif
24276
24277 /* Boot loader ID and version as integers, for the benefit of proc_dointvec */
24278@@ -444,7 +447,7 @@ static void __init parse_setup_data(void)
24279
24280 switch (data->type) {
24281 case SETUP_E820_EXT:
24282- parse_e820_ext(data);
24283+ parse_e820_ext((struct setup_data __force_kernel *)data);
24284 break;
24285 case SETUP_DTB:
24286 add_dtb(pa_data);
24287@@ -771,7 +774,7 @@ static void __init trim_bios_range(void)
24288 * area (640->1Mb) as ram even though it is not.
24289 * take them out.
24290 */
24291- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
24292+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
24293
24294 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
24295 }
24296@@ -779,7 +782,7 @@ static void __init trim_bios_range(void)
24297 /* called before trim_bios_range() to spare extra sanitize */
24298 static void __init e820_add_kernel_range(void)
24299 {
24300- u64 start = __pa_symbol(_text);
24301+ u64 start = __pa_symbol(ktla_ktva(_text));
24302 u64 size = __pa_symbol(_end) - start;
24303
24304 /*
24305@@ -841,8 +844,12 @@ static void __init trim_low_memory_range(void)
24306
24307 void __init setup_arch(char **cmdline_p)
24308 {
24309+#ifdef CONFIG_X86_32
24310+ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(__bss_stop) - LOAD_PHYSICAL_ADDR);
24311+#else
24312 memblock_reserve(__pa_symbol(_text),
24313 (unsigned long)__bss_stop - (unsigned long)_text);
24314+#endif
24315
24316 early_reserve_initrd();
24317
24318@@ -934,14 +941,14 @@ void __init setup_arch(char **cmdline_p)
24319
24320 if (!boot_params.hdr.root_flags)
24321 root_mountflags &= ~MS_RDONLY;
24322- init_mm.start_code = (unsigned long) _text;
24323- init_mm.end_code = (unsigned long) _etext;
24324+ init_mm.start_code = ktla_ktva((unsigned long) _text);
24325+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
24326 init_mm.end_data = (unsigned long) _edata;
24327 init_mm.brk = _brk_end;
24328
24329- code_resource.start = __pa_symbol(_text);
24330- code_resource.end = __pa_symbol(_etext)-1;
24331- data_resource.start = __pa_symbol(_etext);
24332+ code_resource.start = __pa_symbol(ktla_ktva(_text));
24333+ code_resource.end = __pa_symbol(ktla_ktva(_etext))-1;
24334+ data_resource.start = __pa_symbol(_sdata);
24335 data_resource.end = __pa_symbol(_edata)-1;
24336 bss_resource.start = __pa_symbol(__bss_start);
24337 bss_resource.end = __pa_symbol(__bss_stop)-1;
24338diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
24339index 5cdff03..80fa283 100644
24340--- a/arch/x86/kernel/setup_percpu.c
24341+++ b/arch/x86/kernel/setup_percpu.c
24342@@ -21,19 +21,17 @@
24343 #include <asm/cpu.h>
24344 #include <asm/stackprotector.h>
24345
24346-DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
24347+#ifdef CONFIG_SMP
24348+DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
24349 EXPORT_PER_CPU_SYMBOL(cpu_number);
24350+#endif
24351
24352-#ifdef CONFIG_X86_64
24353 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
24354-#else
24355-#define BOOT_PERCPU_OFFSET 0
24356-#endif
24357
24358 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
24359 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
24360
24361-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
24362+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
24363 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
24364 };
24365 EXPORT_SYMBOL(__per_cpu_offset);
24366@@ -66,7 +64,7 @@ static bool __init pcpu_need_numa(void)
24367 {
24368 #ifdef CONFIG_NEED_MULTIPLE_NODES
24369 pg_data_t *last = NULL;
24370- unsigned int cpu;
24371+ int cpu;
24372
24373 for_each_possible_cpu(cpu) {
24374 int node = early_cpu_to_node(cpu);
24375@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
24376 {
24377 #ifdef CONFIG_X86_32
24378 struct desc_struct gdt;
24379+ unsigned long base = per_cpu_offset(cpu);
24380
24381- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
24382- 0x2 | DESCTYPE_S, 0x8);
24383- gdt.s = 1;
24384+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
24385+ 0x83 | DESCTYPE_S, 0xC);
24386 write_gdt_entry(get_cpu_gdt_table(cpu),
24387 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
24388 #endif
24389@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
24390 /* alrighty, percpu areas up and running */
24391 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
24392 for_each_possible_cpu(cpu) {
24393+#ifdef CONFIG_CC_STACKPROTECTOR
24394+#ifdef CONFIG_X86_32
24395+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
24396+#endif
24397+#endif
24398 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
24399 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
24400 per_cpu(cpu_number, cpu) = cpu;
24401@@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
24402 */
24403 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
24404 #endif
24405+#ifdef CONFIG_CC_STACKPROTECTOR
24406+#ifdef CONFIG_X86_32
24407+ if (!cpu)
24408+ per_cpu(stack_canary.canary, cpu) = canary;
24409+#endif
24410+#endif
24411 /*
24412 * Up to this point, the boot CPU has been using .init.data
24413 * area. Reload any changed state for the boot CPU.
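
The repacked per-CPU descriptor no longer spans the full 4GiB: its base is the per-CPU offset and its page-granular limit now ends at VMALLOC_END, so a wild %fs-relative access beyond the per-CPU region faults instead of silently aliasing other kernel memory. With the granularity bit set, the hardware reads the limit field in 4KiB units, which is where the (VMALLOC_END - base - 1) >> PAGE_SHIFT expression comes from (addresses below are invented):

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
        unsigned long vmalloc_end = 0xfffff000UL;  /* hypothetical */
        unsigned long base        = 0xc1000000UL;  /* hypothetical per-CPU base */

        unsigned long limit_field = (vmalloc_end - base - 1) >> PAGE_SHIFT;
        unsigned long span = (limit_field + 1) << PAGE_SHIFT;

        /* the segment covers base .. base + span - 1 == vmalloc_end - 1 */
        printf("limit field %#lx -> covers %#lx bytes\n", limit_field, span);
        return 0;
}
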
24414diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
24415index 6956299..18126ec4 100644
24416--- a/arch/x86/kernel/signal.c
24417+++ b/arch/x86/kernel/signal.c
24418@@ -196,7 +196,7 @@ static unsigned long align_sigframe(unsigned long sp)
24419 * Align the stack pointer according to the i386 ABI,
24420 * i.e. so that on function entry ((sp + 4) & 15) == 0.
24421 */
24422- sp = ((sp + 4) & -16ul) - 4;
24423+ sp = ((sp - 12) & -16ul) - 4;
24424 #else /* !CONFIG_X86_32 */
24425 sp = round_down(sp, 16) - 8;
24426 #endif
24427@@ -304,9 +304,9 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
24428 }
24429
24430 if (current->mm->context.vdso)
24431- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
24432+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
24433 else
24434- restorer = &frame->retcode;
24435+ restorer = (void __user *)&frame->retcode;
24436 if (ksig->ka.sa.sa_flags & SA_RESTORER)
24437 restorer = ksig->ka.sa.sa_restorer;
24438
24439@@ -320,7 +320,7 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
24440 * reasons and because gdb uses it as a signature to notice
24441 * signal handler stack frames.
24442 */
24443- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
24444+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
24445
24446 if (err)
24447 return -EFAULT;
24448@@ -364,10 +364,13 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
24449 else
24450 put_user_ex(0, &frame->uc.uc_flags);
24451 put_user_ex(0, &frame->uc.uc_link);
24452- err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
24453+ __save_altstack_ex(&frame->uc.uc_stack, regs->sp);
24454
24455 /* Set up to return from userspace. */
24456- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
24457+ if (current->mm->context.vdso)
24458+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
24459+ else
24460+ restorer = (void __user *)&frame->retcode;
24461 if (ksig->ka.sa.sa_flags & SA_RESTORER)
24462 restorer = ksig->ka.sa.sa_restorer;
24463 put_user_ex(restorer, &frame->pretcode);
24464@@ -379,7 +382,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
24465 * reasons and because gdb uses it as a signature to notice
24466 * signal handler stack frames.
24467 */
24468- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
24469+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
24470 } put_user_catch(err);
24471
24472 err |= copy_siginfo_to_user(&frame->info, &ksig->info);
24473@@ -429,7 +432,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
24474 else
24475 put_user_ex(0, &frame->uc.uc_flags);
24476 put_user_ex(0, &frame->uc.uc_link);
24477- err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
24478+ __save_altstack_ex(&frame->uc.uc_stack, regs->sp);
24479
24480 /* Set up to return from userspace. If provided, use a stub
24481 already in userspace. */
24482@@ -615,7 +618,12 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
24483 {
24484 int usig = signr_convert(ksig->sig);
24485 sigset_t *set = sigmask_to_save();
24486- compat_sigset_t *cset = (compat_sigset_t *) set;
24487+ sigset_t sigcopy;
24488+ compat_sigset_t *cset;
24489+
24490+ sigcopy = *set;
24491+
24492+ cset = (compat_sigset_t *) &sigcopy;
24493
24494 /* Set up the stack frame */
24495 if (is_ia32_frame()) {
24496@@ -626,7 +634,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
24497 } else if (is_x32_frame()) {
24498 return x32_setup_rt_frame(ksig, cset, regs);
24499 } else {
24500- return __setup_rt_frame(ksig->sig, ksig, set, regs);
24501+ return __setup_rt_frame(ksig->sig, ksig, &sigcopy, regs);
24502 }
24503 }
24504
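
Both align_sigframe() formulas satisfy the i386 ABI rule that ((sp + 4) & 15) == 0 on function entry; the difference is that the patched one is guaranteed to land strictly below the incoming sp, by 16 to 31 bytes, so the signal frame can never be built on top of bytes the interrupted context still owns. Checking every residue class directly:

#include <stdio.h>

int main(void)
{
        for (unsigned long sp = 64; sp < 80; sp++) {  /* one full mod-16 cycle */
                unsigned long old = ((sp + 4) & -16ul) - 4;
                unsigned long new = ((sp - 12) & -16ul) - 4;

                if (((old + 4) & 15) || ((new + 4) & 15))
                        puts("ABI alignment broken?!");  /* never fires */

                printf("sp=%2lu old=%2lu (drop %2lu) new=%2lu (drop %2lu)\n",
                       sp, old, sp - old, new, sp - new);
        }
        return 0;
}

The old formula's drop can be 0 (when sp is 12 mod 16), which is exactly the overlap the change rules out.
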
24505diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
24506index 48d2b7d..90d328a 100644
24507--- a/arch/x86/kernel/smp.c
24508+++ b/arch/x86/kernel/smp.c
24509@@ -285,7 +285,7 @@ static int __init nonmi_ipi_setup(char *str)
24510
24511 __setup("nonmi_ipi", nonmi_ipi_setup);
24512
24513-struct smp_ops smp_ops = {
24514+struct smp_ops smp_ops __read_only = {
24515 .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
24516 .smp_prepare_cpus = native_smp_prepare_cpus,
24517 .smp_cpus_done = native_smp_cpus_done,
24518diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
24519index bfd348e..f0c1bf2 100644
24520--- a/arch/x86/kernel/smpboot.c
24521+++ b/arch/x86/kernel/smpboot.c
24522@@ -251,14 +251,18 @@ notrace static void __cpuinit start_secondary(void *unused)
24523
24524 enable_start_cpu0 = 0;
24525
24526-#ifdef CONFIG_X86_32
24527- /* switch away from the initial page table */
24528- load_cr3(swapper_pg_dir);
24529- __flush_tlb_all();
24530-#endif
24531-
24532 /* otherwise gcc will move up smp_processor_id before the cpu_init */
24533 barrier();
24534+
24535+ /* switch away from the initial page table */
24536+#ifdef CONFIG_PAX_PER_CPU_PGD
24537+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
24538+ __flush_tlb_all();
24539+#elif defined(CONFIG_X86_32)
24540+ load_cr3(swapper_pg_dir);
24541+ __flush_tlb_all();
24542+#endif
24543+
24544 /*
24545 * Check TSC synchronization with the BP:
24546 */
24547@@ -748,6 +752,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
24548 idle->thread.sp = (unsigned long) (((struct pt_regs *)
24549 (THREAD_SIZE + task_stack_page(idle))) - 1);
24550 per_cpu(current_task, cpu) = idle;
24551+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
24552
24553 #ifdef CONFIG_X86_32
24554 /* Stack for startup_32 can be just as for start_secondary onwards */
24555@@ -755,11 +760,13 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
24556 #else
24557 clear_tsk_thread_flag(idle, TIF_FORK);
24558 initial_gs = per_cpu_offset(cpu);
24559- per_cpu(kernel_stack, cpu) =
24560- (unsigned long)task_stack_page(idle) -
24561- KERNEL_STACK_OFFSET + THREAD_SIZE;
24562+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
24563 #endif
24564+
24565+ pax_open_kernel();
24566 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
24567+ pax_close_kernel();
24568+
24569 initial_code = (unsigned long)start_secondary;
24570 stack_start = idle->thread.sp;
24571
24572@@ -908,6 +915,15 @@ int __cpuinit native_cpu_up(unsigned int cpu, struct task_struct *tidle)
24573 /* the FPU context is blank, nobody can own it */
24574 __cpu_disable_lazy_restore(cpu);
24575
24576+#ifdef CONFIG_PAX_PER_CPU_PGD
24577+ clone_pgd_range(get_cpu_pgd(cpu, kernel) + KERNEL_PGD_BOUNDARY,
24578+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
24579+ KERNEL_PGD_PTRS);
24580+ clone_pgd_range(get_cpu_pgd(cpu, user) + KERNEL_PGD_BOUNDARY,
24581+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
24582+ KERNEL_PGD_PTRS);
24583+#endif
24584+
24588 err = do_boot_cpu(apicid, cpu, tidle);
24589 if (err) {
24590 pr_debug("do_boot_cpu failed %d\n", err);
24591diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
24592index 9b4d51d..5d28b58 100644
24593--- a/arch/x86/kernel/step.c
24594+++ b/arch/x86/kernel/step.c
24595@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
24596 struct desc_struct *desc;
24597 unsigned long base;
24598
24599- seg &= ~7UL;
24600+ seg >>= 3;
24601
24602 mutex_lock(&child->mm->context.lock);
24603- if (unlikely((seg >> 3) >= child->mm->context.size))
24604+ if (unlikely(seg >= child->mm->context.size))
24605 addr = -1L; /* bogus selector, access would fault */
24606 else {
24607 desc = child->mm->context.ldt + seg;
24608@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
24609 addr += base;
24610 }
24611 mutex_unlock(&child->mm->context.lock);
24612- }
24613+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
24614+ addr = ktla_ktva(addr);
24615
24616 return addr;
24617 }
24618@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
24619 unsigned char opcode[15];
24620 unsigned long addr = convert_ip_to_linear(child, regs);
24621
24622+ if (addr == -EINVAL)
24623+ return 0;
24624+
24625 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
24626 for (i = 0; i < copied; i++) {
24627 switch (opcode[i]) {
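
convert_ip_to_linear() now reduces the selector to a bare table index once (seg >>= 3), so the bounds check against context.size and the ldt + seg arithmetic can no longer disagree about units, and kernel CS values fall through to a ktla_ktva() adjustment instead of being ignored. For reference, the selector anatomy the shift relies on:

#include <stdio.h>

int main(void)
{
        unsigned selector = 0x0007;  /* LDT slot 0, RPL 3; the classic vm86/Wine shape */

        unsigned index = selector >> 3;        /* descriptor slot */
        unsigned ti    = (selector >> 2) & 1;  /* table indicator: 0 GDT, 1 LDT */
        unsigned rpl   = selector & 3;         /* requested privilege level */

        printf("selector %#06x -> index %u, %s, RPL %u\n",
               selector, index, ti ? "LDT" : "GDT", rpl);
        return 0;
}
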
24628diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
24629new file mode 100644
24630index 0000000..5877189
24631--- /dev/null
24632+++ b/arch/x86/kernel/sys_i386_32.c
24633@@ -0,0 +1,189 @@
24634+/*
24635+ * This file contains various random system calls that
24636+ * have a non-standard calling sequence on the Linux/i386
24637+ * platform.
24638+ */
24639+
24640+#include <linux/errno.h>
24641+#include <linux/sched.h>
24642+#include <linux/mm.h>
24643+#include <linux/fs.h>
24644+#include <linux/smp.h>
24645+#include <linux/sem.h>
24646+#include <linux/msg.h>
24647+#include <linux/shm.h>
24648+#include <linux/stat.h>
24649+#include <linux/syscalls.h>
24650+#include <linux/mman.h>
24651+#include <linux/file.h>
24652+#include <linux/utsname.h>
24653+#include <linux/ipc.h>
24654+#include <linux/elf.h>
24655+
24656+#include <linux/uaccess.h>
24657+#include <linux/unistd.h>
24658+
24659+#include <asm/syscalls.h>
24660+
24661+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
24662+{
24663+ unsigned long pax_task_size = TASK_SIZE;
24664+
24665+#ifdef CONFIG_PAX_SEGMEXEC
24666+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
24667+ pax_task_size = SEGMEXEC_TASK_SIZE;
24668+#endif
24669+
24670+ if (flags & MAP_FIXED)
24671+ if (len > pax_task_size || addr > pax_task_size - len)
24672+ return -EINVAL;
24673+
24674+ return 0;
24675+}
24676+
24677+/*
24678+ * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
24679+ */
24680+static unsigned long get_align_mask(void)
24681+{
24682+ if (va_align.flags < 0 || !(va_align.flags & ALIGN_VA_32))
24683+ return 0;
24684+
24685+ if (!(current->flags & PF_RANDOMIZE))
24686+ return 0;
24687+
24688+ return va_align.mask;
24689+}
24690+
24691+unsigned long
24692+arch_get_unmapped_area(struct file *filp, unsigned long addr,
24693+ unsigned long len, unsigned long pgoff, unsigned long flags)
24694+{
24695+ struct mm_struct *mm = current->mm;
24696+ struct vm_area_struct *vma;
24697+ unsigned long pax_task_size = TASK_SIZE;
24698+ struct vm_unmapped_area_info info;
24699+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
24700+
24701+#ifdef CONFIG_PAX_SEGMEXEC
24702+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
24703+ pax_task_size = SEGMEXEC_TASK_SIZE;
24704+#endif
24705+
24706+ pax_task_size -= PAGE_SIZE;
24707+
24708+ if (len > pax_task_size)
24709+ return -ENOMEM;
24710+
24711+ if (flags & MAP_FIXED)
24712+ return addr;
24713+
24714+#ifdef CONFIG_PAX_RANDMMAP
24715+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
24716+#endif
24717+
24718+ if (addr) {
24719+ addr = PAGE_ALIGN(addr);
24720+ if (pax_task_size - len >= addr) {
24721+ vma = find_vma(mm, addr);
24722+ if (check_heap_stack_gap(vma, addr, len, offset))
24723+ return addr;
24724+ }
24725+ }
24726+
24727+ info.flags = 0;
24728+ info.length = len;
24729+ info.align_mask = filp ? get_align_mask() : 0;
24730+ info.align_offset = pgoff << PAGE_SHIFT;
24731+ info.threadstack_offset = offset;
24732+
24733+#ifdef CONFIG_PAX_PAGEEXEC
24734+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE)) {
24735+ info.low_limit = 0x00110000UL;
24736+ info.high_limit = mm->start_code;
24737+
24738+#ifdef CONFIG_PAX_RANDMMAP
24739+ if (mm->pax_flags & MF_PAX_RANDMMAP)
24740+ info.low_limit += mm->delta_mmap & 0x03FFF000UL;
24741+#endif
24742+
24743+ if (info.low_limit < info.high_limit) {
24744+ addr = vm_unmapped_area(&info);
24745+ if (!IS_ERR_VALUE(addr))
24746+ return addr;
24747+ }
24748+ } else
24749+#endif
24750+
24751+ info.low_limit = mm->mmap_base;
24752+ info.high_limit = pax_task_size;
24753+
24754+ return vm_unmapped_area(&info);
24755+}
24756+
24757+unsigned long
24758+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
24759+ const unsigned long len, const unsigned long pgoff,
24760+ const unsigned long flags)
24761+{
24762+ struct vm_area_struct *vma;
24763+ struct mm_struct *mm = current->mm;
24764+ unsigned long addr = addr0, pax_task_size = TASK_SIZE;
24765+ struct vm_unmapped_area_info info;
24766+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
24767+
24768+#ifdef CONFIG_PAX_SEGMEXEC
24769+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
24770+ pax_task_size = SEGMEXEC_TASK_SIZE;
24771+#endif
24772+
24773+ pax_task_size -= PAGE_SIZE;
24774+
24775+ /* requested length too big for entire address space */
24776+ if (len > pax_task_size)
24777+ return -ENOMEM;
24778+
24779+ if (flags & MAP_FIXED)
24780+ return addr;
24781+
24782+#ifdef CONFIG_PAX_PAGEEXEC
24783+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
24784+ goto bottomup;
24785+#endif
24786+
24787+#ifdef CONFIG_PAX_RANDMMAP
24788+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
24789+#endif
24790+
24791+ /* requesting a specific address */
24792+ if (addr) {
24793+ addr = PAGE_ALIGN(addr);
24794+ if (pax_task_size - len >= addr) {
24795+ vma = find_vma(mm, addr);
24796+ if (check_heap_stack_gap(vma, addr, len, offset))
24797+ return addr;
24798+ }
24799+ }
24800+
24801+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
24802+ info.length = len;
24803+ info.low_limit = PAGE_SIZE;
24804+ info.high_limit = mm->mmap_base;
24805+ info.align_mask = filp ? get_align_mask() : 0;
24806+ info.align_offset = pgoff << PAGE_SHIFT;
24807+ info.threadstack_offset = offset;
24808+
24809+ addr = vm_unmapped_area(&info);
24810+ if (!(addr & ~PAGE_MASK))
24811+ return addr;
24812+ VM_BUG_ON(addr != -ENOMEM);
24813+
24814+bottomup:
24815+ /*
24816+ * A failed mmap() very likely causes application failure,
24817+ * so fall back to the bottom-up function here. This scenario
24818+ * can happen with large stack limits and large mmap()
24819+ * allocations.
24820+ */
24821+ return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
24822+}
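
Both allocators in the new file share one skeleton: honor MAP_FIXED, try the caller's hint unless PAX_RANDMMAP vetoes it, then describe the search window to vm_unmapped_area(); the top-down variant falls back to bottom-up when the region below mmap_base is exhausted, as its closing comment explains. check_heap_stack_gap() and gr_rand_threadstack_offset() are grsecurity helpers; a toy model of the hint validation they feed into (flat address space, invented numbers):

#include <stdio.h>
#include <stdbool.h>

struct vma { unsigned long start, end; };

/* accept a hinted address only if [addr, addr+len) plus a guard gap
 * still fits below the next mapping; the real helper also accounts for
 * stack-growth direction and the per-thread random offset */
static bool hint_ok(const struct vma *next, unsigned long addr,
                    unsigned long len, unsigned long gap)
{
        if (!next)
                return true;   /* nothing above the hint */
        return addr + len + gap <= next->start;
}

int main(void)
{
        struct vma next = { 0x40000000UL, 0x40100000UL };

        printf("%d\n", hint_ok(&next, 0x3ff00000UL, 0x80000UL, 0x1000UL)); /* 1 */
        printf("%d\n", hint_ok(&next, 0x3ffff000UL, 0x2000UL, 0x1000UL));  /* 0 */
        return 0;
}
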
24823diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
24824index dbded5a..ace2781 100644
24825--- a/arch/x86/kernel/sys_x86_64.c
24826+++ b/arch/x86/kernel/sys_x86_64.c
24827@@ -81,8 +81,8 @@ out:
24828 return error;
24829 }
24830
24831-static void find_start_end(unsigned long flags, unsigned long *begin,
24832- unsigned long *end)
24833+static void find_start_end(struct mm_struct *mm, unsigned long flags,
24834+ unsigned long *begin, unsigned long *end)
24835 {
24836 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
24837 unsigned long new_begin;
24838@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
24839 *begin = new_begin;
24840 }
24841 } else {
24842- *begin = TASK_UNMAPPED_BASE;
24843+ *begin = mm->mmap_base;
24844 *end = TASK_SIZE;
24845 }
24846 }
24847@@ -114,20 +114,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
24848 struct vm_area_struct *vma;
24849 struct vm_unmapped_area_info info;
24850 unsigned long begin, end;
24851+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
24852
24853 if (flags & MAP_FIXED)
24854 return addr;
24855
24856- find_start_end(flags, &begin, &end);
24857+ find_start_end(mm, flags, &begin, &end);
24858
24859 if (len > end)
24860 return -ENOMEM;
24861
24862+#ifdef CONFIG_PAX_RANDMMAP
24863+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
24864+#endif
24865+
24866 if (addr) {
24867 addr = PAGE_ALIGN(addr);
24868 vma = find_vma(mm, addr);
24869- if (end - len >= addr &&
24870- (!vma || addr + len <= vma->vm_start))
24871+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
24872 return addr;
24873 }
24874
24875@@ -137,6 +141,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
24876 info.high_limit = end;
24877 info.align_mask = filp ? get_align_mask() : 0;
24878 info.align_offset = pgoff << PAGE_SHIFT;
24879+ info.threadstack_offset = offset;
24880 return vm_unmapped_area(&info);
24881 }
24882
24883@@ -149,6 +154,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
24884 struct mm_struct *mm = current->mm;
24885 unsigned long addr = addr0;
24886 struct vm_unmapped_area_info info;
24887+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
24888
24889 /* requested length too big for entire address space */
24890 if (len > TASK_SIZE)
24891@@ -161,12 +167,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
24892 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
24893 goto bottomup;
24894
24895+#ifdef CONFIG_PAX_RANDMMAP
24896+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
24897+#endif
24898+
24899 /* requesting a specific address */
24900 if (addr) {
24901 addr = PAGE_ALIGN(addr);
24902 vma = find_vma(mm, addr);
24903- if (TASK_SIZE - len >= addr &&
24904- (!vma || addr + len <= vma->vm_start))
24905+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
24906 return addr;
24907 }
24908
24909@@ -176,6 +185,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
24910 info.high_limit = mm->mmap_base;
24911 info.align_mask = filp ? get_align_mask() : 0;
24912 info.align_offset = pgoff << PAGE_SHIFT;
24913+ info.threadstack_offset = offset;
24914 addr = vm_unmapped_area(&info);
24915 if (!(addr & ~PAGE_MASK))
24916 return addr;
24917diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
24918index f84fe00..f41d9f1 100644
24919--- a/arch/x86/kernel/tboot.c
24920+++ b/arch/x86/kernel/tboot.c
24921@@ -220,7 +220,7 @@ static int tboot_setup_sleep(void)
24922
24923 void tboot_shutdown(u32 shutdown_type)
24924 {
24925- void (*shutdown)(void);
24926+ void (* __noreturn shutdown)(void);
24927
24928 if (!tboot_enabled())
24929 return;
24930@@ -242,7 +242,7 @@ void tboot_shutdown(u32 shutdown_type)
24931
24932 switch_to_tboot_pt();
24933
24934- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
24935+ shutdown = (void *)tboot->shutdown_entry;
24936 shutdown();
24937
24938 /* should not reach here */
24939@@ -300,7 +300,7 @@ static int tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
24940 return 0;
24941 }
24942
24943-static atomic_t ap_wfs_count;
24944+static atomic_unchecked_t ap_wfs_count;
24945
24946 static int tboot_wait_for_aps(int num_aps)
24947 {
24948@@ -324,16 +324,16 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
24949 {
24950 switch (action) {
24951 case CPU_DYING:
24952- atomic_inc(&ap_wfs_count);
24953+ atomic_inc_unchecked(&ap_wfs_count);
24954 if (num_online_cpus() == 1)
24955- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
24956+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
24957 return NOTIFY_BAD;
24958 break;
24959 }
24960 return NOTIFY_OK;
24961 }
24962
24963-static struct notifier_block tboot_cpu_notifier __cpuinitdata =
24964+static struct notifier_block tboot_cpu_notifier =
24965 {
24966 .notifier_call = tboot_cpu_callback,
24967 };
24968@@ -345,7 +345,7 @@ static __init int tboot_late_init(void)
24969
24970 tboot_create_trampoline();
24971
24972- atomic_set(&ap_wfs_count, 0);
24973+ atomic_set_unchecked(&ap_wfs_count, 0);
24974 register_hotcpu_notifier(&tboot_cpu_notifier);
24975
24976 acpi_os_set_prepare_sleep(&tboot_sleep);
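
Under PAX_REFCOUNT, the plain atomic ops trap on overflow to catch reference-count bugs; ap_wfs_count merely counts APs entering wait-for-SIPI and is compared against a CPU total, so it is converted to the _unchecked variants that keep ordinary wrapping arithmetic. The distinction in miniature (checked_inc is a stand-in; PaX instruments the atomic instructions themselves and reports through pax_report_refcount_overflow()):

#include <stdio.h>
#include <limits.h>

static int checked_inc(int *v)
{
        if (*v == INT_MAX) {
                fprintf(stderr, "refcount overflow detected, value pinned\n");
                return *v;    /* saturate rather than wrap */
        }
        return ++*v;
}

static unsigned unchecked_inc(unsigned *v)
{
        return ++*v;          /* free-running counter: wrap-around is fine */
}

int main(void)
{
        int a = INT_MAX;
        unsigned b = UINT_MAX;

        printf("checked:   %d\n", checked_inc(&a));   /* stays at INT_MAX */
        printf("unchecked: %u\n", unchecked_inc(&b)); /* wraps to 0 */
        return 0;
}
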
24977diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
24978index 24d3c91..d06b473 100644
24979--- a/arch/x86/kernel/time.c
24980+++ b/arch/x86/kernel/time.c
24981@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
24982 {
24983 unsigned long pc = instruction_pointer(regs);
24984
24985- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
24986+ if (!user_mode(regs) && in_lock_functions(pc)) {
24987 #ifdef CONFIG_FRAME_POINTER
24988- return *(unsigned long *)(regs->bp + sizeof(long));
24989+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
24990 #else
24991 unsigned long *sp =
24992 (unsigned long *)kernel_stack_pointer(regs);
24993@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
24994 * or above a saved flags. Eflags has bits 22-31 zero,
24995 * kernel addresses don't.
24996 */
24997+
24998+#ifdef CONFIG_PAX_KERNEXEC
24999+ return ktla_ktva(sp[0]);
25000+#else
25001 if (sp[0] >> 22)
25002 return sp[0];
25003 if (sp[1] >> 22)
25004 return sp[1];
25005 #endif
25006+
25007+#endif
25008 }
25009 return pc;
25010 }
25011diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
25012index f7fec09..9991981 100644
25013--- a/arch/x86/kernel/tls.c
25014+++ b/arch/x86/kernel/tls.c
25015@@ -84,6 +84,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
25016 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
25017 return -EINVAL;
25018
25019+#ifdef CONFIG_PAX_SEGMEXEC
25020+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
25021+ return -EINVAL;
25022+#endif
25023+
25024 set_tls_desc(p, idx, &info, 1);
25025
25026 return 0;
25027@@ -200,7 +205,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
25028
25029 if (kbuf)
25030 info = kbuf;
25031- else if (__copy_from_user(infobuf, ubuf, count))
25032+ else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
25033 return -EFAULT;
25034 else
25035 info = infobuf;
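
regset_tls_set() copies caller-supplied data into a fixed on-stack infobuf, and the added count > sizeof infobuf test is the standard validate-length-before-copy guard: without it, a crafted count overruns the buffer. The pattern, isolated (copy_in stands in for __copy_from_user(); the struct and sizes are illustrative):

#include <stdio.h>
#include <string.h>

struct user_desc_stub { unsigned a, b, c; };

static int copy_in(void *dst, const void *src, size_t count)
{
        memcpy(dst, src, count);  /* the kernel uses __copy_from_user() */
        return 0;
}

static int set_tls(const void *ubuf, size_t count)
{
        struct user_desc_stub infobuf[4];

        if (count > sizeof infobuf)   /* the added guard */
                return -1;            /* -EFAULT in the patch */
        if (copy_in(infobuf, ubuf, count))
                return -1;
        printf("copied %zu bytes safely\n", count);
        return 0;
}

int main(void)
{
        char payload[256] = {0};

        printf("ok:   %d\n", set_tls(payload, sizeof(struct user_desc_stub)));
        printf("huge: %d\n", set_tls(payload, sizeof payload));  /* rejected */
        return 0;
}
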
25036diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
25037index 772e2a8..bad5bf6 100644
25038--- a/arch/x86/kernel/traps.c
25039+++ b/arch/x86/kernel/traps.c
25040@@ -68,12 +68,6 @@
25041 #include <asm/setup.h>
25042
25043 asmlinkage int system_call(void);
25044-
25045-/*
25046- * The IDT has to be page-aligned to simplify the Pentium
25047- * F0 0F bug workaround.
25048- */
25049-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
25050 #endif
25051
25052 DECLARE_BITMAP(used_vectors, NR_VECTORS);
25053@@ -106,11 +100,11 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
25054 }
25055
25056 static int __kprobes
25057-do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
25058+do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
25059 struct pt_regs *regs, long error_code)
25060 {
25061 #ifdef CONFIG_X86_32
25062- if (regs->flags & X86_VM_MASK) {
25063+ if (v8086_mode(regs)) {
25064 /*
25065 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
25066 * On nmi (interrupt 2), do_trap should not be called.
25067@@ -123,12 +117,24 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
25068 return -1;
25069 }
25070 #endif
25071- if (!user_mode(regs)) {
25072+ if (!user_mode_novm(regs)) {
25073 if (!fixup_exception(regs)) {
25074 tsk->thread.error_code = error_code;
25075 tsk->thread.trap_nr = trapnr;
25076+
25077+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
25078+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
25079+ str = "PAX: suspicious stack segment fault";
25080+#endif
25081+
25082 die(str, regs, error_code);
25083 }
25084+
25085+#ifdef CONFIG_PAX_REFCOUNT
25086+ if (trapnr == 4)
25087+ pax_report_refcount_overflow(regs);
25088+#endif
25089+
25090 return 0;
25091 }
25092
25093@@ -136,7 +142,7 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
25094 }
25095
25096 static void __kprobes
25097-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
25098+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
25099 long error_code, siginfo_t *info)
25100 {
25101 struct task_struct *tsk = current;
25102@@ -160,7 +166,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
25103 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
25104 printk_ratelimit()) {
25105 pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
25106- tsk->comm, tsk->pid, str,
25107+ tsk->comm, task_pid_nr(tsk), str,
25108 regs->ip, regs->sp, error_code);
25109 print_vma_addr(" in ", regs->ip);
25110 pr_cont("\n");
25111@@ -273,7 +279,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
25112 conditional_sti(regs);
25113
25114 #ifdef CONFIG_X86_32
25115- if (regs->flags & X86_VM_MASK) {
25116+ if (v8086_mode(regs)) {
25117 local_irq_enable();
25118 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
25119 goto exit;
25120@@ -281,18 +287,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
25121 #endif
25122
25123 tsk = current;
25124- if (!user_mode(regs)) {
25125+ if (!user_mode_novm(regs)) {
25126 if (fixup_exception(regs))
25127 goto exit;
25128
25129 tsk->thread.error_code = error_code;
25130 tsk->thread.trap_nr = X86_TRAP_GP;
25131 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
25132- X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
25133+ X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) {
25134+
25135+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
25136+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
25137+ die("PAX: suspicious general protection fault", regs, error_code);
25138+ else
25139+#endif
25140+
25141 die("general protection fault", regs, error_code);
25142+ }
25143 goto exit;
25144 }
25145
25146+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
25147+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
25148+ struct mm_struct *mm = tsk->mm;
25149+ unsigned long limit;
25150+
25151+ down_write(&mm->mmap_sem);
25152+ limit = mm->context.user_cs_limit;
25153+ if (limit < TASK_SIZE) {
25154+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
25155+ up_write(&mm->mmap_sem);
25156+ return;
25157+ }
25158+ up_write(&mm->mmap_sem);
25159+ }
25160+#endif
25161+
25162 tsk->thread.error_code = error_code;
25163 tsk->thread.trap_nr = X86_TRAP_GP;
25164
25165@@ -450,7 +480,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
25166 /* It's safe to allow irq's after DR6 has been saved */
25167 preempt_conditional_sti(regs);
25168
25169- if (regs->flags & X86_VM_MASK) {
25170+ if (v8086_mode(regs)) {
25171 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
25172 X86_TRAP_DB);
25173 preempt_conditional_cli(regs);
25174@@ -465,7 +495,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
25175 * We already checked v86 mode above, so we can check for kernel mode
25176 * by just checking the CPL of CS.
25177 */
25178- if ((dr6 & DR_STEP) && !user_mode(regs)) {
25179+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
25180 tsk->thread.debugreg6 &= ~DR_STEP;
25181 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
25182 regs->flags &= ~X86_EFLAGS_TF;
25183@@ -497,7 +527,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
25184 return;
25185 conditional_sti(regs);
25186
25187- if (!user_mode_vm(regs))
25188+ if (!user_mode(regs))
25189 {
25190 if (!fixup_exception(regs)) {
25191 task->thread.error_code = error_code;
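
The traps.c hunks above hook two PaX features into the common trap path: with PAX_KERNEXEC on i386, a stack segment fault (trap 12) taken with a kernel code selector is reported as suspicious, and with PAX_REFCOUNT a trap 4 (#OF) is forwarded to pax_report_refcount_overflow() after the exception-table fixup resumes execution. Those overflow traps come from instrumented reference-count operations, shown in assembly in the atomic64 hunks further down; a minimal C sketch of the same idea, with an illustrative name (kernel context, not code from the patch):

	/* Sketch only: an overflow-checked increment in the PAX_REFCOUNT
	 * style.  On signed wrap-around the increment is undone and int $4
	 * raises the overflow trap; the (0b, 0b) exception-table entry lets
	 * fixup_exception() resume right after the int once do_trap() above
	 * has called the PaX reporter. */
	static inline void refcount_inc_checked(atomic_t *v)
	{
		asm volatile(LOCK_PREFIX "incl %0\n\t"
			     "jno 0f\n\t"
			     LOCK_PREFIX "decl %0\n\t"	/* undo the wrap */
			     "int $4\n"
			     "0:\n\t"
			     _ASM_EXTABLE(0b, 0b)
			     : "+m" (v->counter)
			     : : "memory", "cc");
	}

The user_mode()/user_mode_novm() and user_mode_vm()/user_mode() swaps in the same hunks reflect PaX renaming the helpers so that the default check also treats vm86 mode as user mode.
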
25192diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
25193index 2ed8459..7cf329f 100644
25194--- a/arch/x86/kernel/uprobes.c
25195+++ b/arch/x86/kernel/uprobes.c
25196@@ -629,7 +629,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
25197 int ret = NOTIFY_DONE;
25198
25199 /* We are only interested in userspace traps */
25200- if (regs && !user_mode_vm(regs))
25201+ if (regs && !user_mode(regs))
25202 return NOTIFY_DONE;
25203
25204 switch (val) {
25205@@ -719,7 +719,7 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs
25206
25207 if (ncopied != rasize) {
25208 pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, "
25209- "%%ip=%#lx\n", current->pid, regs->sp, regs->ip);
25210+ "%%ip=%#lx\n", task_pid_nr(current), regs->sp, regs->ip);
25211
25212 force_sig_info(SIGSEGV, SEND_SIG_FORCED, current);
25213 }
25214diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
25215index b9242ba..50c5edd 100644
25216--- a/arch/x86/kernel/verify_cpu.S
25217+++ b/arch/x86/kernel/verify_cpu.S
25218@@ -20,6 +20,7 @@
25219 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
25220 * arch/x86/kernel/trampoline_64.S: secondary processor verification
25221 * arch/x86/kernel/head_32.S: processor startup
25222+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
25223 *
25224 * verify_cpu, returns the status of longmode and SSE in register %eax.
25225 * 0: Success 1: Failure
25226diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
25227index e8edcf5..27f9344 100644
25228--- a/arch/x86/kernel/vm86_32.c
25229+++ b/arch/x86/kernel/vm86_32.c
25230@@ -44,6 +44,7 @@
25231 #include <linux/ptrace.h>
25232 #include <linux/audit.h>
25233 #include <linux/stddef.h>
25234+#include <linux/grsecurity.h>
25235
25236 #include <asm/uaccess.h>
25237 #include <asm/io.h>
25238@@ -150,7 +151,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
25239 do_exit(SIGSEGV);
25240 }
25241
25242- tss = &per_cpu(init_tss, get_cpu());
25243+ tss = init_tss + get_cpu();
25244 current->thread.sp0 = current->thread.saved_sp0;
25245 current->thread.sysenter_cs = __KERNEL_CS;
25246 load_sp0(tss, &current->thread);
25247@@ -214,6 +215,14 @@ SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
25248
25249 if (tsk->thread.saved_sp0)
25250 return -EPERM;
25251+
25252+#ifdef CONFIG_GRKERNSEC_VM86
25253+ if (!capable(CAP_SYS_RAWIO)) {
25254+ gr_handle_vm86();
25255+ return -EPERM;
25256+ }
25257+#endif
25258+
25259 tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
25260 offsetof(struct kernel_vm86_struct, vm86plus) -
25261 sizeof(info.regs));
25262@@ -238,6 +247,13 @@ SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
25263 int tmp;
25264 struct vm86plus_struct __user *v86;
25265
25266+#ifdef CONFIG_GRKERNSEC_VM86
25267+ if (!capable(CAP_SYS_RAWIO)) {
25268+ gr_handle_vm86();
25269+ return -EPERM;
25270+ }
25271+#endif
25272+
25273 tsk = current;
25274 switch (cmd) {
25275 case VM86_REQUEST_IRQ:
25276@@ -318,7 +334,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
25277 tsk->thread.saved_fs = info->regs32->fs;
25278 tsk->thread.saved_gs = get_user_gs(info->regs32);
25279
25280- tss = &per_cpu(init_tss, get_cpu());
25281+ tss = init_tss + get_cpu();
25282 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
25283 if (cpu_has_sep)
25284 tsk->thread.sysenter_cs = 0;
25285@@ -525,7 +541,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
25286 goto cannot_handle;
25287 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
25288 goto cannot_handle;
25289- intr_ptr = (unsigned long __user *) (i << 2);
25290+ intr_ptr = (__force unsigned long __user *) (i << 2);
25291 if (get_user(segoffs, intr_ptr))
25292 goto cannot_handle;
25293 if ((segoffs >> 16) == BIOSSEG)
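
The vm86_32.c hunks gate both legacy entry points behind CAP_SYS_RAWIO when CONFIG_GRKERNSEC_VM86 is enabled, with gr_handle_vm86() (the grsecurity audit hook pulled in via linux/grsecurity.h) logging the refused attempt. Reduced to a sketch with placeholder helpers:

	/* Pattern used above: deny a legacy, hard-to-harden interface to
	 * unprivileged callers and log the attempt.  audit_refusal() and
	 * do_legacy_op() are placeholders, not names from the patch. */
	static void audit_refusal(void);	/* cf. gr_handle_vm86() */
	static long do_legacy_op(void);

	static long guarded_legacy_syscall(void)
	{
		if (!capable(CAP_SYS_RAWIO)) {
			audit_refusal();
			return -EPERM;
		}
		return do_legacy_op();
	}

The init_tss accesses also change from per_cpu(init_tss, cpu) to init_tss + cpu because PaX converts init_tss from a per-cpu variable into a plain per-CPU array elsewhere in the patch.
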
25294diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
25295index 10c4f30..57377c2 100644
25296--- a/arch/x86/kernel/vmlinux.lds.S
25297+++ b/arch/x86/kernel/vmlinux.lds.S
25298@@ -26,6 +26,13 @@
25299 #include <asm/page_types.h>
25300 #include <asm/cache.h>
25301 #include <asm/boot.h>
25302+#include <asm/segment.h>
25303+
25304+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
25305+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
25306+#else
25307+#define __KERNEL_TEXT_OFFSET 0
25308+#endif
25309
25310 #undef i386 /* in case the preprocessor is a 32bit one */
25311
25312@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
25313
25314 PHDRS {
25315 text PT_LOAD FLAGS(5); /* R_E */
25316+#ifdef CONFIG_X86_32
25317+ module PT_LOAD FLAGS(5); /* R_E */
25318+#endif
25319+#ifdef CONFIG_XEN
25320+ rodata PT_LOAD FLAGS(5); /* R_E */
25321+#else
25322+ rodata PT_LOAD FLAGS(4); /* R__ */
25323+#endif
25324 data PT_LOAD FLAGS(6); /* RW_ */
25325-#ifdef CONFIG_X86_64
25326+ init.begin PT_LOAD FLAGS(6); /* RW_ */
25327 #ifdef CONFIG_SMP
25328 percpu PT_LOAD FLAGS(6); /* RW_ */
25329 #endif
25330+ text.init PT_LOAD FLAGS(5); /* R_E */
25331+ text.exit PT_LOAD FLAGS(5); /* R_E */
25332 init PT_LOAD FLAGS(7); /* RWE */
25333-#endif
25334 note PT_NOTE FLAGS(0); /* ___ */
25335 }
25336
25337 SECTIONS
25338 {
25339 #ifdef CONFIG_X86_32
25340- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
25341- phys_startup_32 = startup_32 - LOAD_OFFSET;
25342+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
25343 #else
25344- . = __START_KERNEL;
25345- phys_startup_64 = startup_64 - LOAD_OFFSET;
25346+ . = __START_KERNEL;
25347 #endif
25348
25349 /* Text and read-only data */
25350- .text : AT(ADDR(.text) - LOAD_OFFSET) {
25351- _text = .;
25352+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
25353 /* bootstrapping code */
25354+#ifdef CONFIG_X86_32
25355+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
25356+#else
25357+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
25358+#endif
25359+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
25360+ _text = .;
25361 HEAD_TEXT
25362 . = ALIGN(8);
25363 _stext = .;
25364@@ -104,13 +124,48 @@ SECTIONS
25365 IRQENTRY_TEXT
25366 *(.fixup)
25367 *(.gnu.warning)
25368- /* End of text section */
25369- _etext = .;
25370 } :text = 0x9090
25371
25372- NOTES :text :note
25373+ . += __KERNEL_TEXT_OFFSET;
25374
25375- EXCEPTION_TABLE(16) :text = 0x9090
25376+#ifdef CONFIG_X86_32
25377+ . = ALIGN(PAGE_SIZE);
25378+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
25379+
25380+#ifdef CONFIG_PAX_KERNEXEC
25381+ MODULES_EXEC_VADDR = .;
25382+ BYTE(0)
25383+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
25384+ . = ALIGN(HPAGE_SIZE) - 1;
25385+ MODULES_EXEC_END = .;
25386+#endif
25387+
25388+ } :module
25389+#endif
25390+
25391+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
25392+ /* End of text section */
25393+ BYTE(0)
25394+ _etext = . - __KERNEL_TEXT_OFFSET;
25395+ }
25396+
25397+#ifdef CONFIG_X86_32
25398+ . = ALIGN(PAGE_SIZE);
25399+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
25400+ *(.idt)
25401+ . = ALIGN(PAGE_SIZE);
25402+ *(.empty_zero_page)
25403+ *(.initial_pg_fixmap)
25404+ *(.initial_pg_pmd)
25405+ *(.initial_page_table)
25406+ *(.swapper_pg_dir)
25407+ } :rodata
25408+#endif
25409+
25410+ . = ALIGN(PAGE_SIZE);
25411+ NOTES :rodata :note
25412+
25413+ EXCEPTION_TABLE(16) :rodata
25414
25415 #if defined(CONFIG_DEBUG_RODATA)
25416 /* .text should occupy whole number of pages */
25417@@ -122,16 +177,20 @@ SECTIONS
25418
25419 /* Data */
25420 .data : AT(ADDR(.data) - LOAD_OFFSET) {
25421+
25422+#ifdef CONFIG_PAX_KERNEXEC
25423+ . = ALIGN(HPAGE_SIZE);
25424+#else
25425+ . = ALIGN(PAGE_SIZE);
25426+#endif
25427+
25428 /* Start of data section */
25429 _sdata = .;
25430
25431 /* init_task */
25432 INIT_TASK_DATA(THREAD_SIZE)
25433
25434-#ifdef CONFIG_X86_32
25435- /* 32 bit has nosave before _edata */
25436 NOSAVE_DATA
25437-#endif
25438
25439 PAGE_ALIGNED_DATA(PAGE_SIZE)
25440
25441@@ -172,12 +231,19 @@ SECTIONS
25442 #endif /* CONFIG_X86_64 */
25443
25444 /* Init code and data - will be freed after init */
25445- . = ALIGN(PAGE_SIZE);
25446 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
25447+ BYTE(0)
25448+
25449+#ifdef CONFIG_PAX_KERNEXEC
25450+ . = ALIGN(HPAGE_SIZE);
25451+#else
25452+ . = ALIGN(PAGE_SIZE);
25453+#endif
25454+
25455 __init_begin = .; /* paired with __init_end */
25456- }
25457+ } :init.begin
25458
25459-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
25460+#ifdef CONFIG_SMP
25461 /*
25462 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
25463 * output PHDR, so the next output section - .init.text - should
25464@@ -186,12 +252,27 @@ SECTIONS
25465 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
25466 #endif
25467
25468- INIT_TEXT_SECTION(PAGE_SIZE)
25469-#ifdef CONFIG_X86_64
25470- :init
25471-#endif
25472+ . = ALIGN(PAGE_SIZE);
25473+ init_begin = .;
25474+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
25475+ VMLINUX_SYMBOL(_sinittext) = .;
25476+ INIT_TEXT
25477+ VMLINUX_SYMBOL(_einittext) = .;
25478+ . = ALIGN(PAGE_SIZE);
25479+ } :text.init
25480
25481- INIT_DATA_SECTION(16)
25482+ /*
25483+ * .exit.text is discarded at runtime, not link time, to deal with
25484+ * references from .altinstructions and .eh_frame
25485+ */
25486+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
25487+ EXIT_TEXT
25488+ . = ALIGN(16);
25489+ } :text.exit
25490+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
25491+
25492+ . = ALIGN(PAGE_SIZE);
25493+ INIT_DATA_SECTION(16) :init
25494
25495 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
25496 __x86_cpu_dev_start = .;
25497@@ -253,19 +334,12 @@ SECTIONS
25498 }
25499
25500 . = ALIGN(8);
25501- /*
25502- * .exit.text is discard at runtime, not link time, to deal with
25503- * references from .altinstructions and .eh_frame
25504- */
25505- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
25506- EXIT_TEXT
25507- }
25508
25509 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
25510 EXIT_DATA
25511 }
25512
25513-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
25514+#ifndef CONFIG_SMP
25515 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
25516 #endif
25517
25518@@ -284,16 +358,10 @@ SECTIONS
25519 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
25520 __smp_locks = .;
25521 *(.smp_locks)
25522- . = ALIGN(PAGE_SIZE);
25523 __smp_locks_end = .;
25524+ . = ALIGN(PAGE_SIZE);
25525 }
25526
25527-#ifdef CONFIG_X86_64
25528- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
25529- NOSAVE_DATA
25530- }
25531-#endif
25532-
25533 /* BSS */
25534 . = ALIGN(PAGE_SIZE);
25535 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
25536@@ -309,6 +377,7 @@ SECTIONS
25537 __brk_base = .;
25538 . += 64 * 1024; /* 64k alignment slop space */
25539 *(.brk_reservation) /* areas brk users have reserved */
25540+ . = ALIGN(HPAGE_SIZE);
25541 __brk_limit = .;
25542 }
25543
25544@@ -335,13 +404,12 @@ SECTIONS
25545 * for the boot processor.
25546 */
25547 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
25548-INIT_PER_CPU(gdt_page);
25549 INIT_PER_CPU(irq_stack_union);
25550
25551 /*
25552 * Build-time check on the image size:
25553 */
25554-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
25555+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
25556 "kernel image bigger than KERNEL_IMAGE_SIZE");
25557
25558 #ifdef CONFIG_SMP
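
The vmlinux.lds.S rework above is the heart of i386 PAX_KERNEXEC: kernel text, plus a reserved module-text window sized by CONFIG_PAX_KERNEXEC_MODULE_TEXT, is linked __KERNEL_TEXT_OFFSET away from its load address and carried in its own executable program headers, while rodata, notes and the exception table move to a non-executable segment and the data/init boundaries gain HPAGE_SIZE alignment so the protections can be applied with large pages. Addresses seen through the KERNEXEC code segment therefore differ from the normal virtual addresses by that constant offset, which is what the ktla_ktva()/ktva_ktla() helpers used elsewhere in the patch (e.g. ktla_ktva(vmx_return) in the vmx.c hunk below) convert between. A sketch of their likely shape; the real definitions live in the pgtable header hunks, not in this section:

	#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
	#define ktla_ktva(addr)	((addr) + __KERNEL_TEXT_OFFSET)	/* linked -> runtime linear */
	#define ktva_ktla(addr)	((addr) - __KERNEL_TEXT_OFFSET)	/* runtime linear -> linked */
	#else
	#define ktla_ktva(addr)	(addr)
	#define ktva_ktla(addr)	(addr)
	#endif
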
25559diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
25560index 9a907a6..f83f921 100644
25561--- a/arch/x86/kernel/vsyscall_64.c
25562+++ b/arch/x86/kernel/vsyscall_64.c
25563@@ -56,15 +56,13 @@
25564 DEFINE_VVAR(int, vgetcpu_mode);
25565 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
25566
25567-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
25568+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
25569
25570 static int __init vsyscall_setup(char *str)
25571 {
25572 if (str) {
25573 if (!strcmp("emulate", str))
25574 vsyscall_mode = EMULATE;
25575- else if (!strcmp("native", str))
25576- vsyscall_mode = NATIVE;
25577 else if (!strcmp("none", str))
25578 vsyscall_mode = NONE;
25579 else
25580@@ -323,8 +321,7 @@ do_ret:
25581 return true;
25582
25583 sigsegv:
25584- force_sig(SIGSEGV, current);
25585- return true;
25586+ do_group_exit(SIGKILL);
25587 }
25588
25589 /*
25590@@ -377,10 +374,7 @@ void __init map_vsyscall(void)
25591 extern char __vvar_page;
25592 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
25593
25594- __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
25595- vsyscall_mode == NATIVE
25596- ? PAGE_KERNEL_VSYSCALL
25597- : PAGE_KERNEL_VVAR);
25598+ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
25599 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
25600 (unsigned long)VSYSCALL_START);
25601
25602diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
25603index b014d94..6d6ca7b 100644
25604--- a/arch/x86/kernel/x8664_ksyms_64.c
25605+++ b/arch/x86/kernel/x8664_ksyms_64.c
25606@@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
25607 EXPORT_SYMBOL(copy_user_generic_unrolled);
25608 EXPORT_SYMBOL(copy_user_enhanced_fast_string);
25609 EXPORT_SYMBOL(__copy_user_nocache);
25610-EXPORT_SYMBOL(_copy_from_user);
25611-EXPORT_SYMBOL(_copy_to_user);
25612
25613 EXPORT_SYMBOL(copy_page);
25614 EXPORT_SYMBOL(clear_page);
25615diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
25616index 45a14db..075bb9b 100644
25617--- a/arch/x86/kernel/x86_init.c
25618+++ b/arch/x86/kernel/x86_init.c
25619@@ -85,7 +85,7 @@ struct x86_init_ops x86_init __initdata = {
25620 },
25621 };
25622
25623-struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
25624+struct x86_cpuinit_ops x86_cpuinit __cpuinitconst = {
25625 .early_percpu_clock_init = x86_init_noop,
25626 .setup_percpu_clockev = setup_secondary_APIC_clock,
25627 };
25628@@ -93,7 +93,7 @@ struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
25629 static void default_nmi_init(void) { };
25630 static int default_i8042_detect(void) { return 1; };
25631
25632-struct x86_platform_ops x86_platform = {
25633+struct x86_platform_ops x86_platform __read_only = {
25634 .calibrate_tsc = native_calibrate_tsc,
25635 .get_wallclock = mach_get_cmos_time,
25636 .set_wallclock = mach_set_rtc_mmss,
25637@@ -107,7 +107,7 @@ struct x86_platform_ops x86_platform = {
25638 };
25639
25640 EXPORT_SYMBOL_GPL(x86_platform);
25641-struct x86_msi_ops x86_msi = {
25642+struct x86_msi_ops x86_msi __read_only = {
25643 .setup_msi_irqs = native_setup_msi_irqs,
25644 .compose_msi_msg = native_compose_msi_msg,
25645 .teardown_msi_irq = native_teardown_msi_irq,
25646@@ -116,7 +116,7 @@ struct x86_msi_ops x86_msi = {
25647 .setup_hpet_msi = default_setup_hpet_msi,
25648 };
25649
25650-struct x86_io_apic_ops x86_io_apic_ops = {
25651+struct x86_io_apic_ops x86_io_apic_ops __read_only = {
25652 .init = native_io_apic_init_mappings,
25653 .read = native_io_apic_read,
25654 .write = native_io_apic_write,
25655diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
25656index ada87a3..afea76d 100644
25657--- a/arch/x86/kernel/xsave.c
25658+++ b/arch/x86/kernel/xsave.c
25659@@ -199,6 +199,7 @@ static inline int save_user_xstate(struct xsave_struct __user *buf)
25660 {
25661 int err;
25662
25663+ buf = (struct xsave_struct __user *)____m(buf);
25664 if (use_xsave())
25665 err = xsave_user(buf);
25666 else if (use_fxsr())
25667@@ -311,6 +312,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
25668 */
25669 static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
25670 {
25671+ buf = (void __user *)____m(buf);
25672 if (use_xsave()) {
25673 if ((unsigned long)buf % 64 || fx_only) {
25674 u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
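
Both xsave.c call sites above remap the user pointer through ____m() before handing it to the raw xsave/fxsave accessors. Assuming the amd64 PAX_MEMORY_UDEREF layout, where the kernel reaches userland only through a shadow mapping based at pax_user_shadow_base (the same adjustment appears in assembly in the __copy_user_nocache and __get_user_* hunks below), ____m() shifts a raw user address into that window. An illustrative sketch; the real macro is defined in the uaccess header hunks:

	#ifdef CONFIG_PAX_MEMORY_UDEREF
	extern unsigned long pax_user_shadow_base;
	#define ____m(x) ({						\
		unsigned long ____x = (unsigned long)(x);		\
		if (____x < pax_user_shadow_base)			\
			____x += pax_user_shadow_base;			\
		(typeof(x))____x;					\
	})
	#else
	#define ____m(x) (x)
	#endif
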
25675diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
25676index a20ecb5..d0e2194 100644
25677--- a/arch/x86/kvm/cpuid.c
25678+++ b/arch/x86/kvm/cpuid.c
25679@@ -124,15 +124,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
25680 struct kvm_cpuid2 *cpuid,
25681 struct kvm_cpuid_entry2 __user *entries)
25682 {
25683- int r;
25684+ int r, i;
25685
25686 r = -E2BIG;
25687 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
25688 goto out;
25689 r = -EFAULT;
25690- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
25691- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
25692+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
25693 goto out;
25694+ for (i = 0; i < cpuid->nent; ++i) {
25695+ struct kvm_cpuid_entry2 cpuid_entry;
25696+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
25697+ goto out;
25698+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
25699+ }
25700 vcpu->arch.cpuid_nent = cpuid->nent;
25701 kvm_apic_set_version(vcpu);
25702 kvm_x86_ops->cpuid_update(vcpu);
25703@@ -147,15 +152,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
25704 struct kvm_cpuid2 *cpuid,
25705 struct kvm_cpuid_entry2 __user *entries)
25706 {
25707- int r;
25708+ int r, i;
25709
25710 r = -E2BIG;
25711 if (cpuid->nent < vcpu->arch.cpuid_nent)
25712 goto out;
25713 r = -EFAULT;
25714- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
25715- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
25716+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
25717 goto out;
25718+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
25719+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
25720+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
25721+ goto out;
25722+ }
25723 return 0;
25724
25725 out:
25726diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
25727index 5953dce..f11a7d2 100644
25728--- a/arch/x86/kvm/emulate.c
25729+++ b/arch/x86/kvm/emulate.c
25730@@ -329,6 +329,7 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
25731
25732 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
25733 do { \
25734+ unsigned long _tmp; \
25735 __asm__ __volatile__ ( \
25736 _PRE_EFLAGS("0", "4", "2") \
25737 _op _suffix " %"_x"3,%1; " \
25738@@ -343,8 +344,6 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
25739 /* Raw emulation: instruction has two explicit operands. */
25740 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
25741 do { \
25742- unsigned long _tmp; \
25743- \
25744 switch ((ctxt)->dst.bytes) { \
25745 case 2: \
25746 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
25747@@ -360,7 +359,6 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
25748
25749 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
25750 do { \
25751- unsigned long _tmp; \
25752 switch ((ctxt)->dst.bytes) { \
25753 case 1: \
25754 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
25755diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
25756index 0eee2c8..94a32c3 100644
25757--- a/arch/x86/kvm/lapic.c
25758+++ b/arch/x86/kvm/lapic.c
25759@@ -55,7 +55,7 @@
25760 #define APIC_BUS_CYCLE_NS 1
25761
25762 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
25763-#define apic_debug(fmt, arg...)
25764+#define apic_debug(fmt, arg...) do {} while (0)
25765
25766 #define APIC_LVT_NUM 6
25767 /* 14 is the version for Xeon and Pentium 8.4.8*/
25768diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
25769index da20860..d19fdf5 100644
25770--- a/arch/x86/kvm/paging_tmpl.h
25771+++ b/arch/x86/kvm/paging_tmpl.h
25772@@ -208,7 +208,7 @@ retry_walk:
25773 if (unlikely(kvm_is_error_hva(host_addr)))
25774 goto error;
25775
25776- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
25777+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
25778 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
25779 goto error;
25780 walker->ptep_user[walker->level - 1] = ptep_user;
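
paging_tmpl.h swaps a plain __user cast for __force_user. PaX splits sparse's catch-all __force into direction-preserving variants so that intentional kernel/user pointer conversions remain visible to the checker; a sketch of the likely definitions (they live in the compiler.h hunks, not shown in this section, so treat this as an assumption):

	#ifdef __CHECKER__
	# define __force_user	__force __user
	# define __force_kernel	__force __kernel
	#else
	# define __force_user
	# define __force_kernel
	#endif
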
25781diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
25782index a14a6ea..dc86cf0 100644
25783--- a/arch/x86/kvm/svm.c
25784+++ b/arch/x86/kvm/svm.c
25785@@ -3493,7 +3493,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
25786 int cpu = raw_smp_processor_id();
25787
25788 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
25789+
25790+ pax_open_kernel();
25791 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
25792+ pax_close_kernel();
25793+
25794 load_TR_desc();
25795 }
25796
25797@@ -3894,6 +3898,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
25798 #endif
25799 #endif
25800
25801+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
25802+ __set_fs(current_thread_info()->addr_limit);
25803+#endif
25804+
25805 reload_tss(vcpu);
25806
25807 local_irq_disable();
25808diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
25809index 5402c94..c3bdeee 100644
25810--- a/arch/x86/kvm/vmx.c
25811+++ b/arch/x86/kvm/vmx.c
25812@@ -1311,12 +1311,12 @@ static void vmcs_write64(unsigned long field, u64 value)
25813 #endif
25814 }
25815
25816-static void vmcs_clear_bits(unsigned long field, u32 mask)
25817+static void vmcs_clear_bits(unsigned long field, unsigned long mask)
25818 {
25819 vmcs_writel(field, vmcs_readl(field) & ~mask);
25820 }
25821
25822-static void vmcs_set_bits(unsigned long field, u32 mask)
25823+static void vmcs_set_bits(unsigned long field, unsigned long mask)
25824 {
25825 vmcs_writel(field, vmcs_readl(field) | mask);
25826 }
25827@@ -1517,7 +1517,11 @@ static void reload_tss(void)
25828 struct desc_struct *descs;
25829
25830 descs = (void *)gdt->address;
25831+
25832+ pax_open_kernel();
25833 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
25834+ pax_close_kernel();
25835+
25836 load_TR_desc();
25837 }
25838
25839@@ -1741,6 +1745,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
25840 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
25841 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
25842
25843+#ifdef CONFIG_PAX_PER_CPU_PGD
25844+ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
25845+#endif
25846+
25847 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
25848 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
25849 vmx->loaded_vmcs->cpu = cpu;
25850@@ -2935,8 +2943,11 @@ static __init int hardware_setup(void)
25851 if (!cpu_has_vmx_flexpriority())
25852 flexpriority_enabled = 0;
25853
25854- if (!cpu_has_vmx_tpr_shadow())
25855- kvm_x86_ops->update_cr8_intercept = NULL;
25856+ if (!cpu_has_vmx_tpr_shadow()) {
25857+ pax_open_kernel();
25858+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
25859+ pax_close_kernel();
25860+ }
25861
25862 if (enable_ept && !cpu_has_vmx_ept_2m_page())
25863 kvm_disable_largepages();
25864@@ -2947,13 +2958,15 @@ static __init int hardware_setup(void)
25865 if (!cpu_has_vmx_apicv())
25866 enable_apicv = 0;
25867
25868+ pax_open_kernel();
25869 if (enable_apicv)
25870- kvm_x86_ops->update_cr8_intercept = NULL;
25871+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
25872 else {
25873- kvm_x86_ops->hwapic_irr_update = NULL;
25874- kvm_x86_ops->deliver_posted_interrupt = NULL;
25875- kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
25876+ *(void **)&kvm_x86_ops->hwapic_irr_update = NULL;
25877+ *(void **)&kvm_x86_ops->deliver_posted_interrupt = NULL;
25878+ *(void **)&kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
25879 }
25880+ pax_close_kernel();
25881
25882 if (nested)
25883 nested_vmx_setup_ctls_msrs();
25884@@ -4076,7 +4089,10 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
25885
25886 vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
25887 vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
25888+
25889+#ifndef CONFIG_PAX_PER_CPU_PGD
25890 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
25891+#endif
25892
25893 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
25894 #ifdef CONFIG_X86_64
25895@@ -4098,7 +4114,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
25896 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
25897 vmx->host_idt_base = dt.address;
25898
25899- vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
25900+ vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */
25901
25902 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
25903 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
25904@@ -7030,6 +7046,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
25905 "jmp 2f \n\t"
25906 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
25907 "2: "
25908+
25909+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
25910+ "ljmp %[cs],$3f\n\t"
25911+ "3: "
25912+#endif
25913+
25914 /* Save guest registers, load host registers, keep flags */
25915 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
25916 "pop %0 \n\t"
25917@@ -7082,6 +7104,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
25918 #endif
25919 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
25920 [wordsize]"i"(sizeof(ulong))
25921+
25922+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
25923+ ,[cs]"i"(__KERNEL_CS)
25924+#endif
25925+
25926 : "cc", "memory"
25927 #ifdef CONFIG_X86_64
25928 , "rax", "rbx", "rdi", "rsi"
25929@@ -7095,7 +7122,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
25930 if (debugctlmsr)
25931 update_debugctlmsr(debugctlmsr);
25932
25933-#ifndef CONFIG_X86_64
25934+#ifdef CONFIG_X86_32
25935 /*
25936 * The sysexit path does not restore ds/es, so we must set them to
25937 * a reasonable value ourselves.
25938@@ -7104,8 +7131,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
25939 * may be executed in interrupt context, which saves and restore segments
25940 * around it, nullifying its effect.
25941 */
25942- loadsegment(ds, __USER_DS);
25943- loadsegment(es, __USER_DS);
25944+ loadsegment(ds, __KERNEL_DS);
25945+ loadsegment(es, __KERNEL_DS);
25946+ loadsegment(ss, __KERNEL_DS);
25947+
25948+#ifdef CONFIG_PAX_KERNEXEC
25949+ loadsegment(fs, __KERNEL_PERCPU);
25950+#endif
25951+
25952+#ifdef CONFIG_PAX_MEMORY_UDEREF
25953+ __set_fs(current_thread_info()->addr_limit);
25954+#endif
25955+
25956 #endif
25957
25958 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
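
The vmx.c changes (like the svm.c reload_tss() hunk before them) follow one recurring grsecurity pattern: ops tables such as kvm_x86_ops are constified and placed in read-only memory, so one-off boot-time overrides must cast the const away and bracket the store with pax_open_kernel()/pax_close_kernel(), which temporarily lift the KERNEXEC write protection. A reduced sketch of the pattern, with placeholder names (__read_only and the open/close helpers are PaX primitives defined elsewhere in the patch):

	static void default_hook(void)
	{
	}

	static const struct ops {
		void (*hook)(void);
	} some_ops __read_only = {		/* lives in read-only memory */
		.hook = default_hook,
	};

	static void disable_hook_at_boot(void)
	{
		pax_open_kernel();		/* open a write window      */
		*(void **)&some_ops.hook = NULL;/* store through a cast that
						 * strips the const away    */
		pax_close_kernel();		/* seal the window again    */
	}
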
25959diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
25960index e8ba99c..ee9d7d9 100644
25961--- a/arch/x86/kvm/x86.c
25962+++ b/arch/x86/kvm/x86.c
25963@@ -1725,8 +1725,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
25964 {
25965 struct kvm *kvm = vcpu->kvm;
25966 int lm = is_long_mode(vcpu);
25967- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
25968- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
25969+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
25970+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
25971 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
25972 : kvm->arch.xen_hvm_config.blob_size_32;
25973 u32 page_num = data & ~PAGE_MASK;
25974@@ -2609,6 +2609,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
25975 if (n < msr_list.nmsrs)
25976 goto out;
25977 r = -EFAULT;
25978+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
25979+ goto out;
25980 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
25981 num_msrs_to_save * sizeof(u32)))
25982 goto out;
25983@@ -5297,7 +5299,7 @@ static struct notifier_block pvclock_gtod_notifier = {
25984 };
25985 #endif
25986
25987-int kvm_arch_init(void *opaque)
25988+int kvm_arch_init(const void *opaque)
25989 {
25990 int r;
25991 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
25992diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
25993index 7114c63..a1018fc 100644
25994--- a/arch/x86/lguest/boot.c
25995+++ b/arch/x86/lguest/boot.c
25996@@ -1201,9 +1201,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
25997 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
25998 * Launcher to reboot us.
25999 */
26000-static void lguest_restart(char *reason)
26001+static __noreturn void lguest_restart(char *reason)
26002 {
26003 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
26004+ BUG();
26005 }
26006
26007 /*G:050
26008diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
26009index 00933d5..3a64af9 100644
26010--- a/arch/x86/lib/atomic64_386_32.S
26011+++ b/arch/x86/lib/atomic64_386_32.S
26012@@ -48,6 +48,10 @@ BEGIN(read)
26013 movl (v), %eax
26014 movl 4(v), %edx
26015 RET_ENDP
26016+BEGIN(read_unchecked)
26017+ movl (v), %eax
26018+ movl 4(v), %edx
26019+RET_ENDP
26020 #undef v
26021
26022 #define v %esi
26023@@ -55,6 +59,10 @@ BEGIN(set)
26024 movl %ebx, (v)
26025 movl %ecx, 4(v)
26026 RET_ENDP
26027+BEGIN(set_unchecked)
26028+ movl %ebx, (v)
26029+ movl %ecx, 4(v)
26030+RET_ENDP
26031 #undef v
26032
26033 #define v %esi
26034@@ -70,6 +78,20 @@ RET_ENDP
26035 BEGIN(add)
26036 addl %eax, (v)
26037 adcl %edx, 4(v)
26038+
26039+#ifdef CONFIG_PAX_REFCOUNT
26040+ jno 0f
26041+ subl %eax, (v)
26042+ sbbl %edx, 4(v)
26043+ int $4
26044+0:
26045+ _ASM_EXTABLE(0b, 0b)
26046+#endif
26047+
26048+RET_ENDP
26049+BEGIN(add_unchecked)
26050+ addl %eax, (v)
26051+ adcl %edx, 4(v)
26052 RET_ENDP
26053 #undef v
26054
26055@@ -77,6 +99,24 @@ RET_ENDP
26056 BEGIN(add_return)
26057 addl (v), %eax
26058 adcl 4(v), %edx
26059+
26060+#ifdef CONFIG_PAX_REFCOUNT
26061+ into
26062+1234:
26063+ _ASM_EXTABLE(1234b, 2f)
26064+#endif
26065+
26066+ movl %eax, (v)
26067+ movl %edx, 4(v)
26068+
26069+#ifdef CONFIG_PAX_REFCOUNT
26070+2:
26071+#endif
26072+
26073+RET_ENDP
26074+BEGIN(add_return_unchecked)
26075+ addl (v), %eax
26076+ adcl 4(v), %edx
26077 movl %eax, (v)
26078 movl %edx, 4(v)
26079 RET_ENDP
26080@@ -86,6 +126,20 @@ RET_ENDP
26081 BEGIN(sub)
26082 subl %eax, (v)
26083 sbbl %edx, 4(v)
26084+
26085+#ifdef CONFIG_PAX_REFCOUNT
26086+ jno 0f
26087+ addl %eax, (v)
26088+ adcl %edx, 4(v)
26089+ int $4
26090+0:
26091+ _ASM_EXTABLE(0b, 0b)
26092+#endif
26093+
26094+RET_ENDP
26095+BEGIN(sub_unchecked)
26096+ subl %eax, (v)
26097+ sbbl %edx, 4(v)
26098 RET_ENDP
26099 #undef v
26100
26101@@ -96,6 +150,27 @@ BEGIN(sub_return)
26102 sbbl $0, %edx
26103 addl (v), %eax
26104 adcl 4(v), %edx
26105+
26106+#ifdef CONFIG_PAX_REFCOUNT
26107+ into
26108+1234:
26109+ _ASM_EXTABLE(1234b, 2f)
26110+#endif
26111+
26112+ movl %eax, (v)
26113+ movl %edx, 4(v)
26114+
26115+#ifdef CONFIG_PAX_REFCOUNT
26116+2:
26117+#endif
26118+
26119+RET_ENDP
26120+BEGIN(sub_return_unchecked)
26121+ negl %edx
26122+ negl %eax
26123+ sbbl $0, %edx
26124+ addl (v), %eax
26125+ adcl 4(v), %edx
26126 movl %eax, (v)
26127 movl %edx, 4(v)
26128 RET_ENDP
26129@@ -105,6 +180,20 @@ RET_ENDP
26130 BEGIN(inc)
26131 addl $1, (v)
26132 adcl $0, 4(v)
26133+
26134+#ifdef CONFIG_PAX_REFCOUNT
26135+ jno 0f
26136+ subl $1, (v)
26137+ sbbl $0, 4(v)
26138+ int $4
26139+0:
26140+ _ASM_EXTABLE(0b, 0b)
26141+#endif
26142+
26143+RET_ENDP
26144+BEGIN(inc_unchecked)
26145+ addl $1, (v)
26146+ adcl $0, 4(v)
26147 RET_ENDP
26148 #undef v
26149
26150@@ -114,6 +203,26 @@ BEGIN(inc_return)
26151 movl 4(v), %edx
26152 addl $1, %eax
26153 adcl $0, %edx
26154+
26155+#ifdef CONFIG_PAX_REFCOUNT
26156+ into
26157+1234:
26158+ _ASM_EXTABLE(1234b, 2f)
26159+#endif
26160+
26161+ movl %eax, (v)
26162+ movl %edx, 4(v)
26163+
26164+#ifdef CONFIG_PAX_REFCOUNT
26165+2:
26166+#endif
26167+
26168+RET_ENDP
26169+BEGIN(inc_return_unchecked)
26170+ movl (v), %eax
26171+ movl 4(v), %edx
26172+ addl $1, %eax
26173+ adcl $0, %edx
26174 movl %eax, (v)
26175 movl %edx, 4(v)
26176 RET_ENDP
26177@@ -123,6 +232,20 @@ RET_ENDP
26178 BEGIN(dec)
26179 subl $1, (v)
26180 sbbl $0, 4(v)
26181+
26182+#ifdef CONFIG_PAX_REFCOUNT
26183+ jno 0f
26184+ addl $1, (v)
26185+ adcl $0, 4(v)
26186+ int $4
26187+0:
26188+ _ASM_EXTABLE(0b, 0b)
26189+#endif
26190+
26191+RET_ENDP
26192+BEGIN(dec_unchecked)
26193+ subl $1, (v)
26194+ sbbl $0, 4(v)
26195 RET_ENDP
26196 #undef v
26197
26198@@ -132,6 +255,26 @@ BEGIN(dec_return)
26199 movl 4(v), %edx
26200 subl $1, %eax
26201 sbbl $0, %edx
26202+
26203+#ifdef CONFIG_PAX_REFCOUNT
26204+ into
26205+1234:
26206+ _ASM_EXTABLE(1234b, 2f)
26207+#endif
26208+
26209+ movl %eax, (v)
26210+ movl %edx, 4(v)
26211+
26212+#ifdef CONFIG_PAX_REFCOUNT
26213+2:
26214+#endif
26215+
26216+RET_ENDP
26217+BEGIN(dec_return_unchecked)
26218+ movl (v), %eax
26219+ movl 4(v), %edx
26220+ subl $1, %eax
26221+ sbbl $0, %edx
26222 movl %eax, (v)
26223 movl %edx, 4(v)
26224 RET_ENDP
26225@@ -143,6 +286,13 @@ BEGIN(add_unless)
26226 adcl %edx, %edi
26227 addl (v), %eax
26228 adcl 4(v), %edx
26229+
26230+#ifdef CONFIG_PAX_REFCOUNT
26231+ into
26232+1234:
26233+ _ASM_EXTABLE(1234b, 2f)
26234+#endif
26235+
26236 cmpl %eax, %ecx
26237 je 3f
26238 1:
26239@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
26240 1:
26241 addl $1, %eax
26242 adcl $0, %edx
26243+
26244+#ifdef CONFIG_PAX_REFCOUNT
26245+ into
26246+1234:
26247+ _ASM_EXTABLE(1234b, 2f)
26248+#endif
26249+
26250 movl %eax, (v)
26251 movl %edx, 4(v)
26252 movl $1, %eax
26253@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
26254 movl 4(v), %edx
26255 subl $1, %eax
26256 sbbl $0, %edx
26257+
26258+#ifdef CONFIG_PAX_REFCOUNT
26259+ into
26260+1234:
26261+ _ASM_EXTABLE(1234b, 1f)
26262+#endif
26263+
26264 js 1f
26265 movl %eax, (v)
26266 movl %edx, 4(v)
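
Every 386-class atomic64 primitive above now comes in two flavours: the default one detects signed overflow, undoing the operation and raising int $4 for the trap-4 path patched into traps.c earlier (the *_return variants instead take an 'into' plus exception-table detour), while an *_unchecked twin keeps plain wrapping semantics for counters that may legitimately overflow, such as statistics. Callers pick the flavour by type; a usage sketch with the PaX-style API names used throughout this patch:

	/* Sketch of how the split is consumed under PAX_REFCOUNT: */
	static atomic64_t refs;			/* protected reference count */
	static atomic64_unchecked_t stats;	/* allowed to wrap around    */

	static void example(void)
	{
		atomic64_inc(&refs);		/* overflow -> int $4 -> PaX report */
		atomic64_inc_unchecked(&stats);	/* ordinary modular arithmetic      */
	}
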
26267diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
26268index f5cc9eb..51fa319 100644
26269--- a/arch/x86/lib/atomic64_cx8_32.S
26270+++ b/arch/x86/lib/atomic64_cx8_32.S
26271@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
26272 CFI_STARTPROC
26273
26274 read64 %ecx
26275+ pax_force_retaddr
26276 ret
26277 CFI_ENDPROC
26278 ENDPROC(atomic64_read_cx8)
26279
26280+ENTRY(atomic64_read_unchecked_cx8)
26281+ CFI_STARTPROC
26282+
26283+ read64 %ecx
26284+ pax_force_retaddr
26285+ ret
26286+ CFI_ENDPROC
26287+ENDPROC(atomic64_read_unchecked_cx8)
26288+
26289 ENTRY(atomic64_set_cx8)
26290 CFI_STARTPROC
26291
26292@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
26293 cmpxchg8b (%esi)
26294 jne 1b
26295
26296+ pax_force_retaddr
26297 ret
26298 CFI_ENDPROC
26299 ENDPROC(atomic64_set_cx8)
26300
26301+ENTRY(atomic64_set_unchecked_cx8)
26302+ CFI_STARTPROC
26303+
26304+1:
26305+/* we don't need LOCK_PREFIX since aligned 64-bit writes
26306+ * are atomic on 586 and newer */
26307+ cmpxchg8b (%esi)
26308+ jne 1b
26309+
26310+ pax_force_retaddr
26311+ ret
26312+ CFI_ENDPROC
26313+ENDPROC(atomic64_set_unchecked_cx8)
26314+
26315 ENTRY(atomic64_xchg_cx8)
26316 CFI_STARTPROC
26317
26318@@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
26319 cmpxchg8b (%esi)
26320 jne 1b
26321
26322+ pax_force_retaddr
26323 ret
26324 CFI_ENDPROC
26325 ENDPROC(atomic64_xchg_cx8)
26326
26327-.macro addsub_return func ins insc
26328-ENTRY(atomic64_\func\()_return_cx8)
26329+.macro addsub_return func ins insc unchecked=""
26330+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
26331 CFI_STARTPROC
26332 SAVE ebp
26333 SAVE ebx
26334@@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
26335 movl %edx, %ecx
26336 \ins\()l %esi, %ebx
26337 \insc\()l %edi, %ecx
26338+
26339+.ifb \unchecked
26340+#ifdef CONFIG_PAX_REFCOUNT
26341+ into
26342+2:
26343+ _ASM_EXTABLE(2b, 3f)
26344+#endif
26345+.endif
26346+
26347 LOCK_PREFIX
26348 cmpxchg8b (%ebp)
26349 jne 1b
26350-
26351-10:
26352 movl %ebx, %eax
26353 movl %ecx, %edx
26354+
26355+.ifb \unchecked
26356+#ifdef CONFIG_PAX_REFCOUNT
26357+3:
26358+#endif
26359+.endif
26360+
26361 RESTORE edi
26362 RESTORE esi
26363 RESTORE ebx
26364 RESTORE ebp
26365+ pax_force_retaddr
26366 ret
26367 CFI_ENDPROC
26368-ENDPROC(atomic64_\func\()_return_cx8)
26369+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
26370 .endm
26371
26372 addsub_return add add adc
26373 addsub_return sub sub sbb
26374+addsub_return add add adc _unchecked
26375+addsub_return sub sub sbb _unchecked
26376
26377-.macro incdec_return func ins insc
26378-ENTRY(atomic64_\func\()_return_cx8)
26379+.macro incdec_return func ins insc unchecked=""
26380+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
26381 CFI_STARTPROC
26382 SAVE ebx
26383
26384@@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
26385 movl %edx, %ecx
26386 \ins\()l $1, %ebx
26387 \insc\()l $0, %ecx
26388+
26389+.ifb \unchecked
26390+#ifdef CONFIG_PAX_REFCOUNT
26391+ into
26392+2:
26393+ _ASM_EXTABLE(2b, 3f)
26394+#endif
26395+.endif
26396+
26397 LOCK_PREFIX
26398 cmpxchg8b (%esi)
26399 jne 1b
26400
26401-10:
26402 movl %ebx, %eax
26403 movl %ecx, %edx
26404+
26405+.ifb \unchecked
26406+#ifdef CONFIG_PAX_REFCOUNT
26407+3:
26408+#endif
26409+.endif
26410+
26411 RESTORE ebx
26412+ pax_force_retaddr
26413 ret
26414 CFI_ENDPROC
26415-ENDPROC(atomic64_\func\()_return_cx8)
26416+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
26417 .endm
26418
26419 incdec_return inc add adc
26420 incdec_return dec sub sbb
26421+incdec_return inc add adc _unchecked
26422+incdec_return dec sub sbb _unchecked
26423
26424 ENTRY(atomic64_dec_if_positive_cx8)
26425 CFI_STARTPROC
26426@@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
26427 movl %edx, %ecx
26428 subl $1, %ebx
26429 sbb $0, %ecx
26430+
26431+#ifdef CONFIG_PAX_REFCOUNT
26432+ into
26433+1234:
26434+ _ASM_EXTABLE(1234b, 2f)
26435+#endif
26436+
26437 js 2f
26438 LOCK_PREFIX
26439 cmpxchg8b (%esi)
26440@@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
26441 movl %ebx, %eax
26442 movl %ecx, %edx
26443 RESTORE ebx
26444+ pax_force_retaddr
26445 ret
26446 CFI_ENDPROC
26447 ENDPROC(atomic64_dec_if_positive_cx8)
26448@@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
26449 movl %edx, %ecx
26450 addl %ebp, %ebx
26451 adcl %edi, %ecx
26452+
26453+#ifdef CONFIG_PAX_REFCOUNT
26454+ into
26455+1234:
26456+ _ASM_EXTABLE(1234b, 3f)
26457+#endif
26458+
26459 LOCK_PREFIX
26460 cmpxchg8b (%esi)
26461 jne 1b
26462@@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
26463 CFI_ADJUST_CFA_OFFSET -8
26464 RESTORE ebx
26465 RESTORE ebp
26466+ pax_force_retaddr
26467 ret
26468 4:
26469 cmpl %edx, 4(%esp)
26470@@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
26471 xorl %ecx, %ecx
26472 addl $1, %ebx
26473 adcl %edx, %ecx
26474+
26475+#ifdef CONFIG_PAX_REFCOUNT
26476+ into
26477+1234:
26478+ _ASM_EXTABLE(1234b, 3f)
26479+#endif
26480+
26481 LOCK_PREFIX
26482 cmpxchg8b (%esi)
26483 jne 1b
26484@@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
26485 movl $1, %eax
26486 3:
26487 RESTORE ebx
26488+ pax_force_retaddr
26489 ret
26490 CFI_ENDPROC
26491 ENDPROC(atomic64_inc_not_zero_cx8)
26492diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
26493index e78b8ee..7e173a8 100644
26494--- a/arch/x86/lib/checksum_32.S
26495+++ b/arch/x86/lib/checksum_32.S
26496@@ -29,7 +29,8 @@
26497 #include <asm/dwarf2.h>
26498 #include <asm/errno.h>
26499 #include <asm/asm.h>
26500-
26501+#include <asm/segment.h>
26502+
26503 /*
26504 * computes a partial checksum, e.g. for TCP/UDP fragments
26505 */
26506@@ -293,9 +294,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
26507
26508 #define ARGBASE 16
26509 #define FP 12
26510-
26511-ENTRY(csum_partial_copy_generic)
26512+
26513+ENTRY(csum_partial_copy_generic_to_user)
26514 CFI_STARTPROC
26515+
26516+#ifdef CONFIG_PAX_MEMORY_UDEREF
26517+ pushl_cfi %gs
26518+ popl_cfi %es
26519+ jmp csum_partial_copy_generic
26520+#endif
26521+
26522+ENTRY(csum_partial_copy_generic_from_user)
26523+
26524+#ifdef CONFIG_PAX_MEMORY_UDEREF
26525+ pushl_cfi %gs
26526+ popl_cfi %ds
26527+#endif
26528+
26529+ENTRY(csum_partial_copy_generic)
26530 subl $4,%esp
26531 CFI_ADJUST_CFA_OFFSET 4
26532 pushl_cfi %edi
26533@@ -317,7 +333,7 @@ ENTRY(csum_partial_copy_generic)
26534 jmp 4f
26535 SRC(1: movw (%esi), %bx )
26536 addl $2, %esi
26537-DST( movw %bx, (%edi) )
26538+DST( movw %bx, %es:(%edi) )
26539 addl $2, %edi
26540 addw %bx, %ax
26541 adcl $0, %eax
26542@@ -329,30 +345,30 @@ DST( movw %bx, (%edi) )
26543 SRC(1: movl (%esi), %ebx )
26544 SRC( movl 4(%esi), %edx )
26545 adcl %ebx, %eax
26546-DST( movl %ebx, (%edi) )
26547+DST( movl %ebx, %es:(%edi) )
26548 adcl %edx, %eax
26549-DST( movl %edx, 4(%edi) )
26550+DST( movl %edx, %es:4(%edi) )
26551
26552 SRC( movl 8(%esi), %ebx )
26553 SRC( movl 12(%esi), %edx )
26554 adcl %ebx, %eax
26555-DST( movl %ebx, 8(%edi) )
26556+DST( movl %ebx, %es:8(%edi) )
26557 adcl %edx, %eax
26558-DST( movl %edx, 12(%edi) )
26559+DST( movl %edx, %es:12(%edi) )
26560
26561 SRC( movl 16(%esi), %ebx )
26562 SRC( movl 20(%esi), %edx )
26563 adcl %ebx, %eax
26564-DST( movl %ebx, 16(%edi) )
26565+DST( movl %ebx, %es:16(%edi) )
26566 adcl %edx, %eax
26567-DST( movl %edx, 20(%edi) )
26568+DST( movl %edx, %es:20(%edi) )
26569
26570 SRC( movl 24(%esi), %ebx )
26571 SRC( movl 28(%esi), %edx )
26572 adcl %ebx, %eax
26573-DST( movl %ebx, 24(%edi) )
26574+DST( movl %ebx, %es:24(%edi) )
26575 adcl %edx, %eax
26576-DST( movl %edx, 28(%edi) )
26577+DST( movl %edx, %es:28(%edi) )
26578
26579 lea 32(%esi), %esi
26580 lea 32(%edi), %edi
26581@@ -366,7 +382,7 @@ DST( movl %edx, 28(%edi) )
26582 shrl $2, %edx # This clears CF
26583 SRC(3: movl (%esi), %ebx )
26584 adcl %ebx, %eax
26585-DST( movl %ebx, (%edi) )
26586+DST( movl %ebx, %es:(%edi) )
26587 lea 4(%esi), %esi
26588 lea 4(%edi), %edi
26589 dec %edx
26590@@ -378,12 +394,12 @@ DST( movl %ebx, (%edi) )
26591 jb 5f
26592 SRC( movw (%esi), %cx )
26593 leal 2(%esi), %esi
26594-DST( movw %cx, (%edi) )
26595+DST( movw %cx, %es:(%edi) )
26596 leal 2(%edi), %edi
26597 je 6f
26598 shll $16,%ecx
26599 SRC(5: movb (%esi), %cl )
26600-DST( movb %cl, (%edi) )
26601+DST( movb %cl, %es:(%edi) )
26602 6: addl %ecx, %eax
26603 adcl $0, %eax
26604 7:
26605@@ -394,7 +410,7 @@ DST( movb %cl, (%edi) )
26606
26607 6001:
26608 movl ARGBASE+20(%esp), %ebx # src_err_ptr
26609- movl $-EFAULT, (%ebx)
26610+ movl $-EFAULT, %ss:(%ebx)
26611
26612 # zero the complete destination - computing the rest
26613 # is too much work
26614@@ -407,11 +423,15 @@ DST( movb %cl, (%edi) )
26615
26616 6002:
26617 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
26618- movl $-EFAULT,(%ebx)
26619+ movl $-EFAULT,%ss:(%ebx)
26620 jmp 5000b
26621
26622 .previous
26623
26624+ pushl_cfi %ss
26625+ popl_cfi %ds
26626+ pushl_cfi %ss
26627+ popl_cfi %es
26628 popl_cfi %ebx
26629 CFI_RESTORE ebx
26630 popl_cfi %esi
26631@@ -421,26 +441,43 @@ DST( movb %cl, (%edi) )
26632 popl_cfi %ecx # equivalent to addl $4,%esp
26633 ret
26634 CFI_ENDPROC
26635-ENDPROC(csum_partial_copy_generic)
26636+ENDPROC(csum_partial_copy_generic_to_user)
26637
26638 #else
26639
26640 /* Version for PentiumII/PPro */
26641
26642 #define ROUND1(x) \
26643+ nop; nop; nop; \
26644 SRC(movl x(%esi), %ebx ) ; \
26645 addl %ebx, %eax ; \
26646- DST(movl %ebx, x(%edi) ) ;
26647+ DST(movl %ebx, %es:x(%edi)) ;
26648
26649 #define ROUND(x) \
26650+ nop; nop; nop; \
26651 SRC(movl x(%esi), %ebx ) ; \
26652 adcl %ebx, %eax ; \
26653- DST(movl %ebx, x(%edi) ) ;
26654+ DST(movl %ebx, %es:x(%edi)) ;
26655
26656 #define ARGBASE 12
26657-
26658-ENTRY(csum_partial_copy_generic)
26659+
26660+ENTRY(csum_partial_copy_generic_to_user)
26661 CFI_STARTPROC
26662+
26663+#ifdef CONFIG_PAX_MEMORY_UDEREF
26664+ pushl_cfi %gs
26665+ popl_cfi %es
26666+ jmp csum_partial_copy_generic
26667+#endif
26668+
26669+ENTRY(csum_partial_copy_generic_from_user)
26670+
26671+#ifdef CONFIG_PAX_MEMORY_UDEREF
26672+ pushl_cfi %gs
26673+ popl_cfi %ds
26674+#endif
26675+
26676+ENTRY(csum_partial_copy_generic)
26677 pushl_cfi %ebx
26678 CFI_REL_OFFSET ebx, 0
26679 pushl_cfi %edi
26680@@ -461,7 +498,7 @@ ENTRY(csum_partial_copy_generic)
26681 subl %ebx, %edi
26682 lea -1(%esi),%edx
26683 andl $-32,%edx
26684- lea 3f(%ebx,%ebx), %ebx
26685+ lea 3f(%ebx,%ebx,2), %ebx
26686 testl %esi, %esi
26687 jmp *%ebx
26688 1: addl $64,%esi
26689@@ -482,19 +519,19 @@ ENTRY(csum_partial_copy_generic)
26690 jb 5f
26691 SRC( movw (%esi), %dx )
26692 leal 2(%esi), %esi
26693-DST( movw %dx, (%edi) )
26694+DST( movw %dx, %es:(%edi) )
26695 leal 2(%edi), %edi
26696 je 6f
26697 shll $16,%edx
26698 5:
26699 SRC( movb (%esi), %dl )
26700-DST( movb %dl, (%edi) )
26701+DST( movb %dl, %es:(%edi) )
26702 6: addl %edx, %eax
26703 adcl $0, %eax
26704 7:
26705 .section .fixup, "ax"
26706 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
26707- movl $-EFAULT, (%ebx)
26708+ movl $-EFAULT, %ss:(%ebx)
26709 # zero the complete destination (computing the rest is too much work)
26710 movl ARGBASE+8(%esp),%edi # dst
26711 movl ARGBASE+12(%esp),%ecx # len
26712@@ -502,10 +539,17 @@ DST( movb %dl, (%edi) )
26713 rep; stosb
26714 jmp 7b
26715 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
26716- movl $-EFAULT, (%ebx)
26717+ movl $-EFAULT, %ss:(%ebx)
26718 jmp 7b
26719 .previous
26720
26721+#ifdef CONFIG_PAX_MEMORY_UDEREF
26722+ pushl_cfi %ss
26723+ popl_cfi %ds
26724+ pushl_cfi %ss
26725+ popl_cfi %es
26726+#endif
26727+
26728 popl_cfi %esi
26729 CFI_RESTORE esi
26730 popl_cfi %edi
26731@@ -514,7 +558,7 @@ DST( movb %dl, (%edi) )
26732 CFI_RESTORE ebx
26733 ret
26734 CFI_ENDPROC
26735-ENDPROC(csum_partial_copy_generic)
26736+ENDPROC(csum_partial_copy_generic_to_user)
26737
26738 #undef ROUND
26739 #undef ROUND1
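
The checksum_32.S rework shows how i386 UDEREF reaches userland: the kernel normally runs with %ds/%es limited to kernel space, a userland descriptor is parked in %gs, and the new _to_user/_from_user entry points load it into %es or %ds before falling through into the common body, whose memory operands gain explicit %es:/%ss: overrides. The three nops added to each ROUND, together with the adjusted lea 3f(%ebx,%ebx,2) stride, keep the computed jump into the unrolled loop consistent once the segment prefixes grow the instructions. An illustrative single-byte accessor in the same spirit (not code from the patch):

	/* Sketch of the i386 UDEREF idea: userland is only reachable
	 * through the %gs selector, so an explicit override is needed. */
	static inline unsigned char uderef_read_user_byte(const void __user *p)
	{
		unsigned int v;

		asm volatile("movzbl %%gs:%1, %0"
			     : "=r" (v)
			     : "m" (*(const unsigned char __force *)p));
		return v;
	}
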
26740diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
26741index f2145cf..cea889d 100644
26742--- a/arch/x86/lib/clear_page_64.S
26743+++ b/arch/x86/lib/clear_page_64.S
26744@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
26745 movl $4096/8,%ecx
26746 xorl %eax,%eax
26747 rep stosq
26748+ pax_force_retaddr
26749 ret
26750 CFI_ENDPROC
26751 ENDPROC(clear_page_c)
26752@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
26753 movl $4096,%ecx
26754 xorl %eax,%eax
26755 rep stosb
26756+ pax_force_retaddr
26757 ret
26758 CFI_ENDPROC
26759 ENDPROC(clear_page_c_e)
26760@@ -43,6 +45,7 @@ ENTRY(clear_page)
26761 leaq 64(%rdi),%rdi
26762 jnz .Lloop
26763 nop
26764+ pax_force_retaddr
26765 ret
26766 CFI_ENDPROC
26767 .Lclear_page_end:
26768@@ -58,7 +61,7 @@ ENDPROC(clear_page)
26769
26770 #include <asm/cpufeature.h>
26771
26772- .section .altinstr_replacement,"ax"
26773+ .section .altinstr_replacement,"a"
26774 1: .byte 0xeb /* jmp <disp8> */
26775 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
26776 2: .byte 0xeb /* jmp <disp8> */
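
Two recurring amd64 tweaks start here and repeat through the remaining lib/ hunks: every ret in the hand-written assembly is preceded by pax_force_retaddr, and .altinstr_replacement loses its "x" (executable) flag, since the replacement bytes are only ever copied by the alternatives patcher and never executed in place. pax_force_retaddr is the KERNEXEC-plugin return-address check: immediately before ret, the saved return address gets its top bit forced on, which leaves genuine kernel addresses (already in the upper half) unchanged but turns an address overwritten with a userland pointer into a non-canonical, faulting one. A C-level sketch of the BTS variant, assuming it runs right before ret while (%rsp) still holds the return address; in the patch it is a gas macro from alternative-asm.h, with an OR-with-register variant as well:

	static __always_inline void pax_force_retaddr_sketch(void)
	{
		/* only meaningful immediately before ret */
		asm volatile("btsq $63, (%%rsp)" ::: "cc", "memory");
	}
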
26777diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
26778index 1e572c5..2a162cd 100644
26779--- a/arch/x86/lib/cmpxchg16b_emu.S
26780+++ b/arch/x86/lib/cmpxchg16b_emu.S
26781@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
26782
26783 popf
26784 mov $1, %al
26785+ pax_force_retaddr
26786 ret
26787
26788 not_same:
26789 popf
26790 xor %al,%al
26791+ pax_force_retaddr
26792 ret
26793
26794 CFI_ENDPROC
26795diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
26796index 176cca6..1166c50 100644
26797--- a/arch/x86/lib/copy_page_64.S
26798+++ b/arch/x86/lib/copy_page_64.S
26799@@ -9,6 +9,7 @@ copy_page_rep:
26800 CFI_STARTPROC
26801 movl $4096/8, %ecx
26802 rep movsq
26803+ pax_force_retaddr
26804 ret
26805 CFI_ENDPROC
26806 ENDPROC(copy_page_rep)
26807@@ -20,12 +21,14 @@ ENDPROC(copy_page_rep)
26808
26809 ENTRY(copy_page)
26810 CFI_STARTPROC
26811- subq $2*8, %rsp
26812- CFI_ADJUST_CFA_OFFSET 2*8
26813+ subq $3*8, %rsp
26814+ CFI_ADJUST_CFA_OFFSET 3*8
26815 movq %rbx, (%rsp)
26816 CFI_REL_OFFSET rbx, 0
26817 movq %r12, 1*8(%rsp)
26818 CFI_REL_OFFSET r12, 1*8
26819+ movq %r13, 2*8(%rsp)
26820+ CFI_REL_OFFSET r13, 2*8
26821
26822 movl $(4096/64)-5, %ecx
26823 .p2align 4
26824@@ -36,7 +39,7 @@ ENTRY(copy_page)
26825 movq 0x8*2(%rsi), %rdx
26826 movq 0x8*3(%rsi), %r8
26827 movq 0x8*4(%rsi), %r9
26828- movq 0x8*5(%rsi), %r10
26829+ movq 0x8*5(%rsi), %r13
26830 movq 0x8*6(%rsi), %r11
26831 movq 0x8*7(%rsi), %r12
26832
26833@@ -47,7 +50,7 @@ ENTRY(copy_page)
26834 movq %rdx, 0x8*2(%rdi)
26835 movq %r8, 0x8*3(%rdi)
26836 movq %r9, 0x8*4(%rdi)
26837- movq %r10, 0x8*5(%rdi)
26838+ movq %r13, 0x8*5(%rdi)
26839 movq %r11, 0x8*6(%rdi)
26840 movq %r12, 0x8*7(%rdi)
26841
26842@@ -66,7 +69,7 @@ ENTRY(copy_page)
26843 movq 0x8*2(%rsi), %rdx
26844 movq 0x8*3(%rsi), %r8
26845 movq 0x8*4(%rsi), %r9
26846- movq 0x8*5(%rsi), %r10
26847+ movq 0x8*5(%rsi), %r13
26848 movq 0x8*6(%rsi), %r11
26849 movq 0x8*7(%rsi), %r12
26850
26851@@ -75,7 +78,7 @@ ENTRY(copy_page)
26852 movq %rdx, 0x8*2(%rdi)
26853 movq %r8, 0x8*3(%rdi)
26854 movq %r9, 0x8*4(%rdi)
26855- movq %r10, 0x8*5(%rdi)
26856+ movq %r13, 0x8*5(%rdi)
26857 movq %r11, 0x8*6(%rdi)
26858 movq %r12, 0x8*7(%rdi)
26859
26860@@ -87,8 +90,11 @@ ENTRY(copy_page)
26861 CFI_RESTORE rbx
26862 movq 1*8(%rsp), %r12
26863 CFI_RESTORE r12
26864- addq $2*8, %rsp
26865- CFI_ADJUST_CFA_OFFSET -2*8
26866+ movq 2*8(%rsp), %r13
26867+ CFI_RESTORE r13
26868+ addq $3*8, %rsp
26869+ CFI_ADJUST_CFA_OFFSET -3*8
26870+ pax_force_retaddr
26871 ret
26872 .Lcopy_page_end:
26873 CFI_ENDPROC
26874@@ -99,7 +105,7 @@ ENDPROC(copy_page)
26875
26876 #include <asm/cpufeature.h>
26877
26878- .section .altinstr_replacement,"ax"
26879+ .section .altinstr_replacement,"a"
26880 1: .byte 0xeb /* jmp <disp8> */
26881 .byte (copy_page_rep - copy_page) - (2f - 1b) /* offset */
26882 2:
26883diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
26884index a30ca15..6b3f4e1 100644
26885--- a/arch/x86/lib/copy_user_64.S
26886+++ b/arch/x86/lib/copy_user_64.S
26887@@ -18,31 +18,7 @@
26888 #include <asm/alternative-asm.h>
26889 #include <asm/asm.h>
26890 #include <asm/smap.h>
26891-
26892-/*
26893- * By placing feature2 after feature1 in altinstructions section, we logically
26894- * implement:
26895- * If CPU has feature2, jmp to alt2 is used
26896- * else if CPU has feature1, jmp to alt1 is used
26897- * else jmp to orig is used.
26898- */
26899- .macro ALTERNATIVE_JUMP feature1,feature2,orig,alt1,alt2
26900-0:
26901- .byte 0xe9 /* 32bit jump */
26902- .long \orig-1f /* by default jump to orig */
26903-1:
26904- .section .altinstr_replacement,"ax"
26905-2: .byte 0xe9 /* near jump with 32bit immediate */
26906- .long \alt1-1b /* offset */ /* or alternatively to alt1 */
26907-3: .byte 0xe9 /* near jump with 32bit immediate */
26908- .long \alt2-1b /* offset */ /* or alternatively to alt2 */
26909- .previous
26910-
26911- .section .altinstructions,"a"
26912- altinstruction_entry 0b,2b,\feature1,5,5
26913- altinstruction_entry 0b,3b,\feature2,5,5
26914- .previous
26915- .endm
26916+#include <asm/pgtable.h>
26917
26918 .macro ALIGN_DESTINATION
26919 #ifdef FIX_ALIGNMENT
26920@@ -70,52 +46,6 @@
26921 #endif
26922 .endm
26923
26924-/* Standard copy_to_user with segment limit checking */
26925-ENTRY(_copy_to_user)
26926- CFI_STARTPROC
26927- GET_THREAD_INFO(%rax)
26928- movq %rdi,%rcx
26929- addq %rdx,%rcx
26930- jc bad_to_user
26931- cmpq TI_addr_limit(%rax),%rcx
26932- ja bad_to_user
26933- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
26934- copy_user_generic_unrolled,copy_user_generic_string, \
26935- copy_user_enhanced_fast_string
26936- CFI_ENDPROC
26937-ENDPROC(_copy_to_user)
26938-
26939-/* Standard copy_from_user with segment limit checking */
26940-ENTRY(_copy_from_user)
26941- CFI_STARTPROC
26942- GET_THREAD_INFO(%rax)
26943- movq %rsi,%rcx
26944- addq %rdx,%rcx
26945- jc bad_from_user
26946- cmpq TI_addr_limit(%rax),%rcx
26947- ja bad_from_user
26948- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
26949- copy_user_generic_unrolled,copy_user_generic_string, \
26950- copy_user_enhanced_fast_string
26951- CFI_ENDPROC
26952-ENDPROC(_copy_from_user)
26953-
26954- .section .fixup,"ax"
26955- /* must zero dest */
26956-ENTRY(bad_from_user)
26957-bad_from_user:
26958- CFI_STARTPROC
26959- movl %edx,%ecx
26960- xorl %eax,%eax
26961- rep
26962- stosb
26963-bad_to_user:
26964- movl %edx,%eax
26965- ret
26966- CFI_ENDPROC
26967-ENDPROC(bad_from_user)
26968- .previous
26969-
26970 /*
26971 * copy_user_generic_unrolled - memory copy with exception handling.
26972 * This version is for CPUs like P4 that don't have efficient micro
26973@@ -131,6 +61,7 @@ ENDPROC(bad_from_user)
26974 */
26975 ENTRY(copy_user_generic_unrolled)
26976 CFI_STARTPROC
26977+ ASM_PAX_OPEN_USERLAND
26978 ASM_STAC
26979 cmpl $8,%edx
26980 jb 20f /* less then 8 bytes, go to byte copy loop */
26981@@ -141,19 +72,19 @@ ENTRY(copy_user_generic_unrolled)
26982 jz 17f
26983 1: movq (%rsi),%r8
26984 2: movq 1*8(%rsi),%r9
26985-3: movq 2*8(%rsi),%r10
26986+3: movq 2*8(%rsi),%rax
26987 4: movq 3*8(%rsi),%r11
26988 5: movq %r8,(%rdi)
26989 6: movq %r9,1*8(%rdi)
26990-7: movq %r10,2*8(%rdi)
26991+7: movq %rax,2*8(%rdi)
26992 8: movq %r11,3*8(%rdi)
26993 9: movq 4*8(%rsi),%r8
26994 10: movq 5*8(%rsi),%r9
26995-11: movq 6*8(%rsi),%r10
26996+11: movq 6*8(%rsi),%rax
26997 12: movq 7*8(%rsi),%r11
26998 13: movq %r8,4*8(%rdi)
26999 14: movq %r9,5*8(%rdi)
27000-15: movq %r10,6*8(%rdi)
27001+15: movq %rax,6*8(%rdi)
27002 16: movq %r11,7*8(%rdi)
27003 leaq 64(%rsi),%rsi
27004 leaq 64(%rdi),%rdi
27005@@ -180,6 +111,8 @@ ENTRY(copy_user_generic_unrolled)
27006 jnz 21b
27007 23: xor %eax,%eax
27008 ASM_CLAC
27009+ ASM_PAX_CLOSE_USERLAND
27010+ pax_force_retaddr
27011 ret
27012
27013 .section .fixup,"ax"
27014@@ -235,6 +168,7 @@ ENDPROC(copy_user_generic_unrolled)
27015 */
27016 ENTRY(copy_user_generic_string)
27017 CFI_STARTPROC
27018+ ASM_PAX_OPEN_USERLAND
27019 ASM_STAC
27020 andl %edx,%edx
27021 jz 4f
27022@@ -251,6 +185,8 @@ ENTRY(copy_user_generic_string)
27023 movsb
27024 4: xorl %eax,%eax
27025 ASM_CLAC
27026+ ASM_PAX_CLOSE_USERLAND
27027+ pax_force_retaddr
27028 ret
27029
27030 .section .fixup,"ax"
27031@@ -278,6 +214,7 @@ ENDPROC(copy_user_generic_string)
27032 */
27033 ENTRY(copy_user_enhanced_fast_string)
27034 CFI_STARTPROC
27035+ ASM_PAX_OPEN_USERLAND
27036 ASM_STAC
27037 andl %edx,%edx
27038 jz 2f
27039@@ -286,6 +223,8 @@ ENTRY(copy_user_enhanced_fast_string)
27040 movsb
27041 2: xorl %eax,%eax
27042 ASM_CLAC
27043+ ASM_PAX_CLOSE_USERLAND
27044+ pax_force_retaddr
27045 ret
27046
27047 .section .fixup,"ax"
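
copy_user_64.S drops the asm _copy_to_user/_copy_from_user entry points (their exports were removed in the x8664_ksyms_64.c hunk earlier) and wraps each low-level copier in ASM_PAX_OPEN_USERLAND/ASM_PAX_CLOSE_USERLAND; the limit checking moves into C elsewhere in the patch, where it can be combined with the PaX usercopy object-size checks. A sketch of the C-side shape, assuming that split (the real definitions live in the uaccess headers, not in this section):

	static inline unsigned long
	_copy_from_user_sketch(void *to, const void __user *from, unsigned long n)
	{
		if (access_ok(VERIFY_READ, from, n))
			return __copy_from_user(to, from, n);
		memset(to, 0, n);	/* same must-zero-dest rule the old
					 * bad_from_user asm implemented */
		return n;
	}

The %r10 -> %rax/%r13 register substitutions in the unrolled loops here and in copy_page_64.S exist because the KERNEXEC plugin reserves a register for its return-address mask.
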
27048diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
27049index 6a4f43c..55d26f2 100644
27050--- a/arch/x86/lib/copy_user_nocache_64.S
27051+++ b/arch/x86/lib/copy_user_nocache_64.S
27052@@ -8,6 +8,7 @@
27053
27054 #include <linux/linkage.h>
27055 #include <asm/dwarf2.h>
27056+#include <asm/alternative-asm.h>
27057
27058 #define FIX_ALIGNMENT 1
27059
27060@@ -16,6 +17,7 @@
27061 #include <asm/thread_info.h>
27062 #include <asm/asm.h>
27063 #include <asm/smap.h>
27064+#include <asm/pgtable.h>
27065
27066 .macro ALIGN_DESTINATION
27067 #ifdef FIX_ALIGNMENT
27068@@ -49,6 +51,16 @@
27069 */
27070 ENTRY(__copy_user_nocache)
27071 CFI_STARTPROC
27072+
27073+#ifdef CONFIG_PAX_MEMORY_UDEREF
27074+ mov pax_user_shadow_base,%rcx
27075+ cmp %rcx,%rsi
27076+ jae 1f
27077+ add %rcx,%rsi
27078+1:
27079+#endif
27080+
27081+ ASM_PAX_OPEN_USERLAND
27082 ASM_STAC
27083 cmpl $8,%edx
27084 jb 20f /* less then 8 bytes, go to byte copy loop */
27085@@ -59,19 +71,19 @@ ENTRY(__copy_user_nocache)
27086 jz 17f
27087 1: movq (%rsi),%r8
27088 2: movq 1*8(%rsi),%r9
27089-3: movq 2*8(%rsi),%r10
27090+3: movq 2*8(%rsi),%rax
27091 4: movq 3*8(%rsi),%r11
27092 5: movnti %r8,(%rdi)
27093 6: movnti %r9,1*8(%rdi)
27094-7: movnti %r10,2*8(%rdi)
27095+7: movnti %rax,2*8(%rdi)
27096 8: movnti %r11,3*8(%rdi)
27097 9: movq 4*8(%rsi),%r8
27098 10: movq 5*8(%rsi),%r9
27099-11: movq 6*8(%rsi),%r10
27100+11: movq 6*8(%rsi),%rax
27101 12: movq 7*8(%rsi),%r11
27102 13: movnti %r8,4*8(%rdi)
27103 14: movnti %r9,5*8(%rdi)
27104-15: movnti %r10,6*8(%rdi)
27105+15: movnti %rax,6*8(%rdi)
27106 16: movnti %r11,7*8(%rdi)
27107 leaq 64(%rsi),%rsi
27108 leaq 64(%rdi),%rdi
27109@@ -98,7 +110,9 @@ ENTRY(__copy_user_nocache)
27110 jnz 21b
27111 23: xorl %eax,%eax
27112 ASM_CLAC
27113+ ASM_PAX_CLOSE_USERLAND
27114 sfence
27115+ pax_force_retaddr
27116 ret
27117
27118 .section .fixup,"ax"
27119diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
27120index 2419d5f..953ee51 100644
27121--- a/arch/x86/lib/csum-copy_64.S
27122+++ b/arch/x86/lib/csum-copy_64.S
27123@@ -9,6 +9,7 @@
27124 #include <asm/dwarf2.h>
27125 #include <asm/errno.h>
27126 #include <asm/asm.h>
27127+#include <asm/alternative-asm.h>
27128
27129 /*
27130 * Checksum copy with exception handling.
27131@@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
27132 CFI_RESTORE rbp
27133 addq $7*8, %rsp
27134 CFI_ADJUST_CFA_OFFSET -7*8
27135+ pax_force_retaddr 0, 1
27136 ret
27137 CFI_RESTORE_STATE
27138
27139diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
27140index 25b7ae8..c40113e 100644
27141--- a/arch/x86/lib/csum-wrappers_64.c
27142+++ b/arch/x86/lib/csum-wrappers_64.c
27143@@ -52,8 +52,12 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
27144 len -= 2;
27145 }
27146 }
27147- isum = csum_partial_copy_generic((__force const void *)src,
27148+ pax_open_userland();
27149+ stac();
27150+ isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
27151 dst, len, isum, errp, NULL);
27152+ clac();
27153+ pax_close_userland();
27154 if (unlikely(*errp))
27155 goto out_err;
27156
27157@@ -105,8 +109,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
27158 }
27159
27160 *errp = 0;
27161- return csum_partial_copy_generic(src, (void __force *)dst,
27162+ pax_open_userland();
27163+ stac();
27164+ isum = csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
27165 len, isum, NULL, errp);
27166+ clac();
27167+ pax_close_userland();
27168+ return isum;
27169 }
27170 EXPORT_SYMBOL(csum_partial_copy_to_user);
27171
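
[The csum wrappers are the C-level spelling of the same bracket: pax_open_userland()/stac() before the asm helper, clac()/pax_close_userland() after, with the user pointer passed through ____m() and recast __force_kernel for sparse. The copy_to_user side also stops returning the helper's result directly; parking it in isum guarantees the close sequence runs on every path. A minimal sketch of that control-flow point, with all names stubbed rather than the kernel implementations:

    static void pax_open_userland(void)  { }
    static void pax_close_userland(void) { }
    static void stac(void) { }
    static void clac(void) { }

    /* Stub standing in for csum_partial_copy_generic(). */
    static unsigned int csum_helper(const void *s, void *d, int n)
    {
            (void)s; (void)d;
            return (unsigned int)n;
    }

    static unsigned int csum_copy_wrapped(const void *src, void *dst, int len)
    {
            unsigned int isum;

            pax_open_userland();
            stac();
            isum = csum_helper(src, dst, len);
            clac();
            pax_close_userland();
            return isum;   /* the close sequence above runs on every path */
    }
]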
27172diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
27173index a451235..1daa956 100644
27174--- a/arch/x86/lib/getuser.S
27175+++ b/arch/x86/lib/getuser.S
27176@@ -33,17 +33,40 @@
27177 #include <asm/thread_info.h>
27178 #include <asm/asm.h>
27179 #include <asm/smap.h>
27180+#include <asm/segment.h>
27181+#include <asm/pgtable.h>
27182+#include <asm/alternative-asm.h>
27183+
27184+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
27185+#define __copyuser_seg gs;
27186+#else
27187+#define __copyuser_seg
27188+#endif
27189
27190 .text
27191 ENTRY(__get_user_1)
27192 CFI_STARTPROC
27193+
27194+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
27195 GET_THREAD_INFO(%_ASM_DX)
27196 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
27197 jae bad_get_user
27198 ASM_STAC
27199-1: movzbl (%_ASM_AX),%edx
27200+
27201+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
27202+ mov pax_user_shadow_base,%_ASM_DX
27203+ cmp %_ASM_DX,%_ASM_AX
27204+ jae 1234f
27205+ add %_ASM_DX,%_ASM_AX
27206+1234:
27207+#endif
27208+
27209+#endif
27210+
27211+1: __copyuser_seg movzbl (%_ASM_AX),%edx
27212 xor %eax,%eax
27213 ASM_CLAC
27214+ pax_force_retaddr
27215 ret
27216 CFI_ENDPROC
27217 ENDPROC(__get_user_1)
27218@@ -51,14 +74,28 @@ ENDPROC(__get_user_1)
27219 ENTRY(__get_user_2)
27220 CFI_STARTPROC
27221 add $1,%_ASM_AX
27222+
27223+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
27224 jc bad_get_user
27225 GET_THREAD_INFO(%_ASM_DX)
27226 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
27227 jae bad_get_user
27228 ASM_STAC
27229-2: movzwl -1(%_ASM_AX),%edx
27230+
27231+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
27232+ mov pax_user_shadow_base,%_ASM_DX
27233+ cmp %_ASM_DX,%_ASM_AX
27234+ jae 1234f
27235+ add %_ASM_DX,%_ASM_AX
27236+1234:
27237+#endif
27238+
27239+#endif
27240+
27241+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
27242 xor %eax,%eax
27243 ASM_CLAC
27244+ pax_force_retaddr
27245 ret
27246 CFI_ENDPROC
27247 ENDPROC(__get_user_2)
27248@@ -66,14 +103,28 @@ ENDPROC(__get_user_2)
27249 ENTRY(__get_user_4)
27250 CFI_STARTPROC
27251 add $3,%_ASM_AX
27252+
27253+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
27254 jc bad_get_user
27255 GET_THREAD_INFO(%_ASM_DX)
27256 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
27257 jae bad_get_user
27258 ASM_STAC
27259-3: movl -3(%_ASM_AX),%edx
27260+
27261+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
27262+ mov pax_user_shadow_base,%_ASM_DX
27263+ cmp %_ASM_DX,%_ASM_AX
27264+ jae 1234f
27265+ add %_ASM_DX,%_ASM_AX
27266+1234:
27267+#endif
27268+
27269+#endif
27270+
27271+3: __copyuser_seg movl -3(%_ASM_AX),%edx
27272 xor %eax,%eax
27273 ASM_CLAC
27274+ pax_force_retaddr
27275 ret
27276 CFI_ENDPROC
27277 ENDPROC(__get_user_4)
27278@@ -86,10 +137,20 @@ ENTRY(__get_user_8)
27279 GET_THREAD_INFO(%_ASM_DX)
27280 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
27281 jae bad_get_user
27282+
27283+#ifdef CONFIG_PAX_MEMORY_UDEREF
27284+ mov pax_user_shadow_base,%_ASM_DX
27285+ cmp %_ASM_DX,%_ASM_AX
27286+ jae 1234f
27287+ add %_ASM_DX,%_ASM_AX
27288+1234:
27289+#endif
27290+
27291 ASM_STAC
27292 4: movq -7(%_ASM_AX),%rdx
27293 xor %eax,%eax
27294 ASM_CLAC
27295+ pax_force_retaddr
27296 ret
27297 #else
27298 add $7,%_ASM_AX
27299@@ -98,10 +159,11 @@ ENTRY(__get_user_8)
27300 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
27301 jae bad_get_user_8
27302 ASM_STAC
27303-4: movl -7(%_ASM_AX),%edx
27304-5: movl -3(%_ASM_AX),%ecx
27305+4: __copyuser_seg movl -7(%_ASM_AX),%edx
27306+5: __copyuser_seg movl -3(%_ASM_AX),%ecx
27307 xor %eax,%eax
27308 ASM_CLAC
27309+ pax_force_retaddr
27310 ret
27311 #endif
27312 CFI_ENDPROC
27313@@ -113,6 +175,7 @@ bad_get_user:
27314 xor %edx,%edx
27315 mov $(-EFAULT),%_ASM_AX
27316 ASM_CLAC
27317+ pax_force_retaddr
27318 ret
27319 CFI_ENDPROC
27320 END(bad_get_user)
27321@@ -124,6 +187,7 @@ bad_get_user_8:
27322 xor %ecx,%ecx
27323 mov $(-EFAULT),%_ASM_AX
27324 ASM_CLAC
27325+ pax_force_retaddr
27326 ret
27327 CFI_ENDPROC
27328 END(bad_get_user_8)
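
[getuser.S carries both UDEREF strategies side by side: on i386 the load itself is retargeted through %gs via the __copyuser_seg prefix, which makes the addr_limit compare unnecessary (hence the !defined(CONFIG_X86_32) guard around it), while on amd64 the pointer is rebased with the same shadow-base compare-and-add, using a local 1234: label. A compilable model of the amd64 address math; the limit and base values are illustrative stand-ins for TI_addr_limit and pax_user_shadow_base, and the model returns the effective address rather than dereferencing it:

    #include <stdint.h>
    #include <errno.h>

    #define ADDR_LIMIT_M  ((uint64_t)1 << 47)  /* illustrative TI_addr_limit */
    #define SHADOW_BASE_M ((uint64_t)1 << 47)  /* illustrative shadow base   */

    static int get_user_addr_model(uint64_t uaddr, uint64_t *fixed)
    {
            if (uaddr >= ADDR_LIMIT_M)      /* cmp TI_addr_limit(%rdx),%rax; jae bad */
                    return -EFAULT;
            if (uaddr < SHADOW_BASE_M)      /* cmp %rdx,%rax; jae 1234f              */
                    uaddr += SHADOW_BASE_M; /* add %rdx,%rax                         */
            *fixed = uaddr;                 /* address the movzbl would load from    */
            return 0;
    }
]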
27329diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
27330index 54fcffe..7be149e 100644
27331--- a/arch/x86/lib/insn.c
27332+++ b/arch/x86/lib/insn.c
27333@@ -20,8 +20,10 @@
27334
27335 #ifdef __KERNEL__
27336 #include <linux/string.h>
27337+#include <asm/pgtable_types.h>
27338 #else
27339 #include <string.h>
27340+#define ktla_ktva(addr) addr
27341 #endif
27342 #include <asm/inat.h>
27343 #include <asm/insn.h>
27344@@ -53,8 +55,8 @@
27345 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
27346 {
27347 memset(insn, 0, sizeof(*insn));
27348- insn->kaddr = kaddr;
27349- insn->next_byte = kaddr;
27350+ insn->kaddr = ktla_ktva(kaddr);
27351+ insn->next_byte = ktla_ktva(kaddr);
27352 insn->x86_64 = x86_64 ? 1 : 0;
27353 insn->opnd_bytes = 4;
27354 if (x86_64)
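
[insn_init() now routes the code address through ktla_ktva() ("kernel text linear address to kernel text virtual address"). Under i386 KERNEXEC the executable mapping of kernel text does not coincide with the writable linear one, so a decoder handed a linear address must translate it before fetching bytes; for the userspace build of the decoder the added #define keeps it an identity. A hedged sketch of the plausible shape, reconstructed rather than quoted from the patch; the delta is illustrative:

    #include <stdint.h>

    #ifdef CONFIG_PAX_KERNEXEC
    #define KTEXT_DELTA_M 0x01000000UL   /* illustrative split between mappings */
    #define ktla_ktva_model(addr) \
            ((const void *)((uintptr_t)(addr) + KTEXT_DELTA_M))
    #else
    #define ktla_ktva_model(addr) (addr) /* identity, as in the !__KERNEL__ case */
    #endif
]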
27355diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
27356index 05a95e7..326f2fa 100644
27357--- a/arch/x86/lib/iomap_copy_64.S
27358+++ b/arch/x86/lib/iomap_copy_64.S
27359@@ -17,6 +17,7 @@
27360
27361 #include <linux/linkage.h>
27362 #include <asm/dwarf2.h>
27363+#include <asm/alternative-asm.h>
27364
27365 /*
27366 * override generic version in lib/iomap_copy.c
27367@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
27368 CFI_STARTPROC
27369 movl %edx,%ecx
27370 rep movsd
27371+ pax_force_retaddr
27372 ret
27373 CFI_ENDPROC
27374 ENDPROC(__iowrite32_copy)
27375diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
27376index 56313a3..9b59269 100644
27377--- a/arch/x86/lib/memcpy_64.S
27378+++ b/arch/x86/lib/memcpy_64.S
27379@@ -24,7 +24,7 @@
27380 * This gets patched over the unrolled variant (below) via the
27381 * alternative instructions framework:
27382 */
27383- .section .altinstr_replacement, "ax", @progbits
27384+ .section .altinstr_replacement, "a", @progbits
27385 .Lmemcpy_c:
27386 movq %rdi, %rax
27387 movq %rdx, %rcx
27388@@ -33,6 +33,7 @@
27389 rep movsq
27390 movl %edx, %ecx
27391 rep movsb
27392+ pax_force_retaddr
27393 ret
27394 .Lmemcpy_e:
27395 .previous
27396@@ -44,11 +45,12 @@
27397 * This gets patched over the unrolled variant (below) via the
27398 * alternative instructions framework:
27399 */
27400- .section .altinstr_replacement, "ax", @progbits
27401+ .section .altinstr_replacement, "a", @progbits
27402 .Lmemcpy_c_e:
27403 movq %rdi, %rax
27404 movq %rdx, %rcx
27405 rep movsb
27406+ pax_force_retaddr
27407 ret
27408 .Lmemcpy_e_e:
27409 .previous
27410@@ -76,13 +78,13 @@ ENTRY(memcpy)
27411 */
27412 movq 0*8(%rsi), %r8
27413 movq 1*8(%rsi), %r9
27414- movq 2*8(%rsi), %r10
27415+ movq 2*8(%rsi), %rcx
27416 movq 3*8(%rsi), %r11
27417 leaq 4*8(%rsi), %rsi
27418
27419 movq %r8, 0*8(%rdi)
27420 movq %r9, 1*8(%rdi)
27421- movq %r10, 2*8(%rdi)
27422+ movq %rcx, 2*8(%rdi)
27423 movq %r11, 3*8(%rdi)
27424 leaq 4*8(%rdi), %rdi
27425 jae .Lcopy_forward_loop
27426@@ -105,12 +107,12 @@ ENTRY(memcpy)
27427 subq $0x20, %rdx
27428 movq -1*8(%rsi), %r8
27429 movq -2*8(%rsi), %r9
27430- movq -3*8(%rsi), %r10
27431+ movq -3*8(%rsi), %rcx
27432 movq -4*8(%rsi), %r11
27433 leaq -4*8(%rsi), %rsi
27434 movq %r8, -1*8(%rdi)
27435 movq %r9, -2*8(%rdi)
27436- movq %r10, -3*8(%rdi)
27437+ movq %rcx, -3*8(%rdi)
27438 movq %r11, -4*8(%rdi)
27439 leaq -4*8(%rdi), %rdi
27440 jae .Lcopy_backward_loop
27441@@ -130,12 +132,13 @@ ENTRY(memcpy)
27442 */
27443 movq 0*8(%rsi), %r8
27444 movq 1*8(%rsi), %r9
27445- movq -2*8(%rsi, %rdx), %r10
27446+ movq -2*8(%rsi, %rdx), %rcx
27447 movq -1*8(%rsi, %rdx), %r11
27448 movq %r8, 0*8(%rdi)
27449 movq %r9, 1*8(%rdi)
27450- movq %r10, -2*8(%rdi, %rdx)
27451+ movq %rcx, -2*8(%rdi, %rdx)
27452 movq %r11, -1*8(%rdi, %rdx)
27453+ pax_force_retaddr
27454 retq
27455 .p2align 4
27456 .Lless_16bytes:
27457@@ -148,6 +151,7 @@ ENTRY(memcpy)
27458 movq -1*8(%rsi, %rdx), %r9
27459 movq %r8, 0*8(%rdi)
27460 movq %r9, -1*8(%rdi, %rdx)
27461+ pax_force_retaddr
27462 retq
27463 .p2align 4
27464 .Lless_8bytes:
27465@@ -161,6 +165,7 @@ ENTRY(memcpy)
27466 movl -4(%rsi, %rdx), %r8d
27467 movl %ecx, (%rdi)
27468 movl %r8d, -4(%rdi, %rdx)
27469+ pax_force_retaddr
27470 retq
27471 .p2align 4
27472 .Lless_3bytes:
27473@@ -179,6 +184,7 @@ ENTRY(memcpy)
27474 movb %cl, (%rdi)
27475
27476 .Lend:
27477+ pax_force_retaddr
27478 retq
27479 CFI_ENDPROC
27480 ENDPROC(memcpy)
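
[Two systematic changes appear in memcpy_64.S and recur in memmove_64.S, memset_64.S and msr-reg.S below. First, hand-written asm gives up %r10 (swapped for %rcx, %rax, %r9 or %r11): under the KERNEXEC gcc-plugin "or" method, %r10 is reserved kernel-wide to hold the mask that pax_force_retaddr ORs into the saved return address, so no asm may clobber it. Second, .altinstr_replacement loses the "x" flag: replacement instructions are only ever copied over the live code by the alternatives patcher, never executed in place, so under a strict W^X layout the section need not be mapped executable. A model of the masking idea; the reserved-register reading is a reconstruction, not quoted from the patch, and the mask constant is illustrative:

    #include <stdint.h>

    /* A mask kept in a fixed register is OR-ed into every saved return
     * address, forcing it into the kernel half of the address space so a
     * corrupted return cannot land in userland. */
    static uint64_t pax_force_retaddr_model(uint64_t saved_ret)
    {
            const uint64_t kernel_mask = 0xffff800000000000ULL; /* illustrative */
            return saved_ret | kernel_mask;                     /* orq %r10,(%rsp) */
    }
]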
27481diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
27482index 65268a6..5aa7815 100644
27483--- a/arch/x86/lib/memmove_64.S
27484+++ b/arch/x86/lib/memmove_64.S
27485@@ -61,13 +61,13 @@ ENTRY(memmove)
27486 5:
27487 sub $0x20, %rdx
27488 movq 0*8(%rsi), %r11
27489- movq 1*8(%rsi), %r10
27490+ movq 1*8(%rsi), %rcx
27491 movq 2*8(%rsi), %r9
27492 movq 3*8(%rsi), %r8
27493 leaq 4*8(%rsi), %rsi
27494
27495 movq %r11, 0*8(%rdi)
27496- movq %r10, 1*8(%rdi)
27497+ movq %rcx, 1*8(%rdi)
27498 movq %r9, 2*8(%rdi)
27499 movq %r8, 3*8(%rdi)
27500 leaq 4*8(%rdi), %rdi
27501@@ -81,10 +81,10 @@ ENTRY(memmove)
27502 4:
27503 movq %rdx, %rcx
27504 movq -8(%rsi, %rdx), %r11
27505- lea -8(%rdi, %rdx), %r10
27506+ lea -8(%rdi, %rdx), %r9
27507 shrq $3, %rcx
27508 rep movsq
27509- movq %r11, (%r10)
27510+ movq %r11, (%r9)
27511 jmp 13f
27512 .Lmemmove_end_forward:
27513
27514@@ -95,14 +95,14 @@ ENTRY(memmove)
27515 7:
27516 movq %rdx, %rcx
27517 movq (%rsi), %r11
27518- movq %rdi, %r10
27519+ movq %rdi, %r9
27520 leaq -8(%rsi, %rdx), %rsi
27521 leaq -8(%rdi, %rdx), %rdi
27522 shrq $3, %rcx
27523 std
27524 rep movsq
27525 cld
27526- movq %r11, (%r10)
27527+ movq %r11, (%r9)
27528 jmp 13f
27529
27530 /*
27531@@ -127,13 +127,13 @@ ENTRY(memmove)
27532 8:
27533 subq $0x20, %rdx
27534 movq -1*8(%rsi), %r11
27535- movq -2*8(%rsi), %r10
27536+ movq -2*8(%rsi), %rcx
27537 movq -3*8(%rsi), %r9
27538 movq -4*8(%rsi), %r8
27539 leaq -4*8(%rsi), %rsi
27540
27541 movq %r11, -1*8(%rdi)
27542- movq %r10, -2*8(%rdi)
27543+ movq %rcx, -2*8(%rdi)
27544 movq %r9, -3*8(%rdi)
27545 movq %r8, -4*8(%rdi)
27546 leaq -4*8(%rdi), %rdi
27547@@ -151,11 +151,11 @@ ENTRY(memmove)
27548 * Move data from 16 bytes to 31 bytes.
27549 */
27550 movq 0*8(%rsi), %r11
27551- movq 1*8(%rsi), %r10
27552+ movq 1*8(%rsi), %rcx
27553 movq -2*8(%rsi, %rdx), %r9
27554 movq -1*8(%rsi, %rdx), %r8
27555 movq %r11, 0*8(%rdi)
27556- movq %r10, 1*8(%rdi)
27557+ movq %rcx, 1*8(%rdi)
27558 movq %r9, -2*8(%rdi, %rdx)
27559 movq %r8, -1*8(%rdi, %rdx)
27560 jmp 13f
27561@@ -167,9 +167,9 @@ ENTRY(memmove)
27562 * Move data from 8 bytes to 15 bytes.
27563 */
27564 movq 0*8(%rsi), %r11
27565- movq -1*8(%rsi, %rdx), %r10
27566+ movq -1*8(%rsi, %rdx), %r9
27567 movq %r11, 0*8(%rdi)
27568- movq %r10, -1*8(%rdi, %rdx)
27569+ movq %r9, -1*8(%rdi, %rdx)
27570 jmp 13f
27571 10:
27572 cmpq $4, %rdx
27573@@ -178,9 +178,9 @@ ENTRY(memmove)
27574 * Move data from 4 bytes to 7 bytes.
27575 */
27576 movl (%rsi), %r11d
27577- movl -4(%rsi, %rdx), %r10d
27578+ movl -4(%rsi, %rdx), %r9d
27579 movl %r11d, (%rdi)
27580- movl %r10d, -4(%rdi, %rdx)
27581+ movl %r9d, -4(%rdi, %rdx)
27582 jmp 13f
27583 11:
27584 cmp $2, %rdx
27585@@ -189,9 +189,9 @@ ENTRY(memmove)
27586 * Move data from 2 bytes to 3 bytes.
27587 */
27588 movw (%rsi), %r11w
27589- movw -2(%rsi, %rdx), %r10w
27590+ movw -2(%rsi, %rdx), %r9w
27591 movw %r11w, (%rdi)
27592- movw %r10w, -2(%rdi, %rdx)
27593+ movw %r9w, -2(%rdi, %rdx)
27594 jmp 13f
27595 12:
27596 cmp $1, %rdx
27597@@ -202,14 +202,16 @@ ENTRY(memmove)
27598 movb (%rsi), %r11b
27599 movb %r11b, (%rdi)
27600 13:
27601+ pax_force_retaddr
27602 retq
27603 CFI_ENDPROC
27604
27605- .section .altinstr_replacement,"ax"
27606+ .section .altinstr_replacement,"a"
27607 .Lmemmove_begin_forward_efs:
27608 /* Forward moving data. */
27609 movq %rdx, %rcx
27610 rep movsb
27611+ pax_force_retaddr
27612 retq
27613 .Lmemmove_end_forward_efs:
27614 .previous
27615diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
27616index 2dcb380..50a78bc 100644
27617--- a/arch/x86/lib/memset_64.S
27618+++ b/arch/x86/lib/memset_64.S
27619@@ -16,7 +16,7 @@
27620 *
27621 * rax original destination
27622 */
27623- .section .altinstr_replacement, "ax", @progbits
27624+ .section .altinstr_replacement, "a", @progbits
27625 .Lmemset_c:
27626 movq %rdi,%r9
27627 movq %rdx,%rcx
27628@@ -30,6 +30,7 @@
27629 movl %edx,%ecx
27630 rep stosb
27631 movq %r9,%rax
27632+ pax_force_retaddr
27633 ret
27634 .Lmemset_e:
27635 .previous
27636@@ -45,13 +46,14 @@
27637 *
27638 * rax original destination
27639 */
27640- .section .altinstr_replacement, "ax", @progbits
27641+ .section .altinstr_replacement, "a", @progbits
27642 .Lmemset_c_e:
27643 movq %rdi,%r9
27644 movb %sil,%al
27645 movq %rdx,%rcx
27646 rep stosb
27647 movq %r9,%rax
27648+ pax_force_retaddr
27649 ret
27650 .Lmemset_e_e:
27651 .previous
27652@@ -59,7 +61,7 @@
27653 ENTRY(memset)
27654 ENTRY(__memset)
27655 CFI_STARTPROC
27656- movq %rdi,%r10
27657+ movq %rdi,%r11
27658
27659 /* expand byte value */
27660 movzbl %sil,%ecx
27661@@ -117,7 +119,8 @@ ENTRY(__memset)
27662 jnz .Lloop_1
27663
27664 .Lende:
27665- movq %r10,%rax
27666+ movq %r11,%rax
27667+ pax_force_retaddr
27668 ret
27669
27670 CFI_RESTORE_STATE
27671diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
27672index c9f2d9b..e7fd2c0 100644
27673--- a/arch/x86/lib/mmx_32.c
27674+++ b/arch/x86/lib/mmx_32.c
27675@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
27676 {
27677 void *p;
27678 int i;
27679+ unsigned long cr0;
27680
27681 if (unlikely(in_interrupt()))
27682 return __memcpy(to, from, len);
27683@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
27684 kernel_fpu_begin();
27685
27686 __asm__ __volatile__ (
27687- "1: prefetch (%0)\n" /* This set is 28 bytes */
27688- " prefetch 64(%0)\n"
27689- " prefetch 128(%0)\n"
27690- " prefetch 192(%0)\n"
27691- " prefetch 256(%0)\n"
27692+ "1: prefetch (%1)\n" /* This set is 28 bytes */
27693+ " prefetch 64(%1)\n"
27694+ " prefetch 128(%1)\n"
27695+ " prefetch 192(%1)\n"
27696+ " prefetch 256(%1)\n"
27697 "2: \n"
27698 ".section .fixup, \"ax\"\n"
27699- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
27700+ "3: \n"
27701+
27702+#ifdef CONFIG_PAX_KERNEXEC
27703+ " movl %%cr0, %0\n"
27704+ " movl %0, %%eax\n"
27705+ " andl $0xFFFEFFFF, %%eax\n"
27706+ " movl %%eax, %%cr0\n"
27707+#endif
27708+
27709+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
27710+
27711+#ifdef CONFIG_PAX_KERNEXEC
27712+ " movl %0, %%cr0\n"
27713+#endif
27714+
27715 " jmp 2b\n"
27716 ".previous\n"
27717 _ASM_EXTABLE(1b, 3b)
27718- : : "r" (from));
27719+ : "=&r" (cr0) : "r" (from) : "ax");
27720
27721 for ( ; i > 5; i--) {
27722 __asm__ __volatile__ (
27723- "1: prefetch 320(%0)\n"
27724- "2: movq (%0), %%mm0\n"
27725- " movq 8(%0), %%mm1\n"
27726- " movq 16(%0), %%mm2\n"
27727- " movq 24(%0), %%mm3\n"
27728- " movq %%mm0, (%1)\n"
27729- " movq %%mm1, 8(%1)\n"
27730- " movq %%mm2, 16(%1)\n"
27731- " movq %%mm3, 24(%1)\n"
27732- " movq 32(%0), %%mm0\n"
27733- " movq 40(%0), %%mm1\n"
27734- " movq 48(%0), %%mm2\n"
27735- " movq 56(%0), %%mm3\n"
27736- " movq %%mm0, 32(%1)\n"
27737- " movq %%mm1, 40(%1)\n"
27738- " movq %%mm2, 48(%1)\n"
27739- " movq %%mm3, 56(%1)\n"
27740+ "1: prefetch 320(%1)\n"
27741+ "2: movq (%1), %%mm0\n"
27742+ " movq 8(%1), %%mm1\n"
27743+ " movq 16(%1), %%mm2\n"
27744+ " movq 24(%1), %%mm3\n"
27745+ " movq %%mm0, (%2)\n"
27746+ " movq %%mm1, 8(%2)\n"
27747+ " movq %%mm2, 16(%2)\n"
27748+ " movq %%mm3, 24(%2)\n"
27749+ " movq 32(%1), %%mm0\n"
27750+ " movq 40(%1), %%mm1\n"
27751+ " movq 48(%1), %%mm2\n"
27752+ " movq 56(%1), %%mm3\n"
27753+ " movq %%mm0, 32(%2)\n"
27754+ " movq %%mm1, 40(%2)\n"
27755+ " movq %%mm2, 48(%2)\n"
27756+ " movq %%mm3, 56(%2)\n"
27757 ".section .fixup, \"ax\"\n"
27758- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
27759+ "3:\n"
27760+
27761+#ifdef CONFIG_PAX_KERNEXEC
27762+ " movl %%cr0, %0\n"
27763+ " movl %0, %%eax\n"
27764+ " andl $0xFFFEFFFF, %%eax\n"
27765+ " movl %%eax, %%cr0\n"
27766+#endif
27767+
27768+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
27769+
27770+#ifdef CONFIG_PAX_KERNEXEC
27771+ " movl %0, %%cr0\n"
27772+#endif
27773+
27774 " jmp 2b\n"
27775 ".previous\n"
27776 _ASM_EXTABLE(1b, 3b)
27777- : : "r" (from), "r" (to) : "memory");
27778+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
27779
27780 from += 64;
27781 to += 64;
27782@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
27783 static void fast_copy_page(void *to, void *from)
27784 {
27785 int i;
27786+ unsigned long cr0;
27787
27788 kernel_fpu_begin();
27789
27790@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
27791 * but that is for later. -AV
27792 */
27793 __asm__ __volatile__(
27794- "1: prefetch (%0)\n"
27795- " prefetch 64(%0)\n"
27796- " prefetch 128(%0)\n"
27797- " prefetch 192(%0)\n"
27798- " prefetch 256(%0)\n"
27799+ "1: prefetch (%1)\n"
27800+ " prefetch 64(%1)\n"
27801+ " prefetch 128(%1)\n"
27802+ " prefetch 192(%1)\n"
27803+ " prefetch 256(%1)\n"
27804 "2: \n"
27805 ".section .fixup, \"ax\"\n"
27806- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
27807+ "3: \n"
27808+
27809+#ifdef CONFIG_PAX_KERNEXEC
27810+ " movl %%cr0, %0\n"
27811+ " movl %0, %%eax\n"
27812+ " andl $0xFFFEFFFF, %%eax\n"
27813+ " movl %%eax, %%cr0\n"
27814+#endif
27815+
27816+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
27817+
27818+#ifdef CONFIG_PAX_KERNEXEC
27819+ " movl %0, %%cr0\n"
27820+#endif
27821+
27822 " jmp 2b\n"
27823 ".previous\n"
27824- _ASM_EXTABLE(1b, 3b) : : "r" (from));
27825+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
27826
27827 for (i = 0; i < (4096-320)/64; i++) {
27828 __asm__ __volatile__ (
27829- "1: prefetch 320(%0)\n"
27830- "2: movq (%0), %%mm0\n"
27831- " movntq %%mm0, (%1)\n"
27832- " movq 8(%0), %%mm1\n"
27833- " movntq %%mm1, 8(%1)\n"
27834- " movq 16(%0), %%mm2\n"
27835- " movntq %%mm2, 16(%1)\n"
27836- " movq 24(%0), %%mm3\n"
27837- " movntq %%mm3, 24(%1)\n"
27838- " movq 32(%0), %%mm4\n"
27839- " movntq %%mm4, 32(%1)\n"
27840- " movq 40(%0), %%mm5\n"
27841- " movntq %%mm5, 40(%1)\n"
27842- " movq 48(%0), %%mm6\n"
27843- " movntq %%mm6, 48(%1)\n"
27844- " movq 56(%0), %%mm7\n"
27845- " movntq %%mm7, 56(%1)\n"
27846+ "1: prefetch 320(%1)\n"
27847+ "2: movq (%1), %%mm0\n"
27848+ " movntq %%mm0, (%2)\n"
27849+ " movq 8(%1), %%mm1\n"
27850+ " movntq %%mm1, 8(%2)\n"
27851+ " movq 16(%1), %%mm2\n"
27852+ " movntq %%mm2, 16(%2)\n"
27853+ " movq 24(%1), %%mm3\n"
27854+ " movntq %%mm3, 24(%2)\n"
27855+ " movq 32(%1), %%mm4\n"
27856+ " movntq %%mm4, 32(%2)\n"
27857+ " movq 40(%1), %%mm5\n"
27858+ " movntq %%mm5, 40(%2)\n"
27859+ " movq 48(%1), %%mm6\n"
27860+ " movntq %%mm6, 48(%2)\n"
27861+ " movq 56(%1), %%mm7\n"
27862+ " movntq %%mm7, 56(%2)\n"
27863 ".section .fixup, \"ax\"\n"
27864- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
27865+ "3:\n"
27866+
27867+#ifdef CONFIG_PAX_KERNEXEC
27868+ " movl %%cr0, %0\n"
27869+ " movl %0, %%eax\n"
27870+ " andl $0xFFFEFFFF, %%eax\n"
27871+ " movl %%eax, %%cr0\n"
27872+#endif
27873+
27874+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
27875+
27876+#ifdef CONFIG_PAX_KERNEXEC
27877+ " movl %0, %%cr0\n"
27878+#endif
27879+
27880 " jmp 2b\n"
27881 ".previous\n"
27882- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
27883+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
27884
27885 from += 64;
27886 to += 64;
27887@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
27888 static void fast_copy_page(void *to, void *from)
27889 {
27890 int i;
27891+ unsigned long cr0;
27892
27893 kernel_fpu_begin();
27894
27895 __asm__ __volatile__ (
27896- "1: prefetch (%0)\n"
27897- " prefetch 64(%0)\n"
27898- " prefetch 128(%0)\n"
27899- " prefetch 192(%0)\n"
27900- " prefetch 256(%0)\n"
27901+ "1: prefetch (%1)\n"
27902+ " prefetch 64(%1)\n"
27903+ " prefetch 128(%1)\n"
27904+ " prefetch 192(%1)\n"
27905+ " prefetch 256(%1)\n"
27906 "2: \n"
27907 ".section .fixup, \"ax\"\n"
27908- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
27909+ "3: \n"
27910+
27911+#ifdef CONFIG_PAX_KERNEXEC
27912+ " movl %%cr0, %0\n"
27913+ " movl %0, %%eax\n"
27914+ " andl $0xFFFEFFFF, %%eax\n"
27915+ " movl %%eax, %%cr0\n"
27916+#endif
27917+
27918+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
27919+
27920+#ifdef CONFIG_PAX_KERNEXEC
27921+ " movl %0, %%cr0\n"
27922+#endif
27923+
27924 " jmp 2b\n"
27925 ".previous\n"
27926- _ASM_EXTABLE(1b, 3b) : : "r" (from));
27927+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
27928
27929 for (i = 0; i < 4096/64; i++) {
27930 __asm__ __volatile__ (
27931- "1: prefetch 320(%0)\n"
27932- "2: movq (%0), %%mm0\n"
27933- " movq 8(%0), %%mm1\n"
27934- " movq 16(%0), %%mm2\n"
27935- " movq 24(%0), %%mm3\n"
27936- " movq %%mm0, (%1)\n"
27937- " movq %%mm1, 8(%1)\n"
27938- " movq %%mm2, 16(%1)\n"
27939- " movq %%mm3, 24(%1)\n"
27940- " movq 32(%0), %%mm0\n"
27941- " movq 40(%0), %%mm1\n"
27942- " movq 48(%0), %%mm2\n"
27943- " movq 56(%0), %%mm3\n"
27944- " movq %%mm0, 32(%1)\n"
27945- " movq %%mm1, 40(%1)\n"
27946- " movq %%mm2, 48(%1)\n"
27947- " movq %%mm3, 56(%1)\n"
27948+ "1: prefetch 320(%1)\n"
27949+ "2: movq (%1), %%mm0\n"
27950+ " movq 8(%1), %%mm1\n"
27951+ " movq 16(%1), %%mm2\n"
27952+ " movq 24(%1), %%mm3\n"
27953+ " movq %%mm0, (%2)\n"
27954+ " movq %%mm1, 8(%2)\n"
27955+ " movq %%mm2, 16(%2)\n"
27956+ " movq %%mm3, 24(%2)\n"
27957+ " movq 32(%1), %%mm0\n"
27958+ " movq 40(%1), %%mm1\n"
27959+ " movq 48(%1), %%mm2\n"
27960+ " movq 56(%1), %%mm3\n"
27961+ " movq %%mm0, 32(%2)\n"
27962+ " movq %%mm1, 40(%2)\n"
27963+ " movq %%mm2, 48(%2)\n"
27964+ " movq %%mm3, 56(%2)\n"
27965 ".section .fixup, \"ax\"\n"
27966- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
27967+ "3:\n"
27968+
27969+#ifdef CONFIG_PAX_KERNEXEC
27970+ " movl %%cr0, %0\n"
27971+ " movl %0, %%eax\n"
27972+ " andl $0xFFFEFFFF, %%eax\n"
27973+ " movl %%eax, %%cr0\n"
27974+#endif
27975+
27976+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
27977+
27978+#ifdef CONFIG_PAX_KERNEXEC
27979+ " movl %0, %%cr0\n"
27980+#endif
27981+
27982 " jmp 2b\n"
27983 ".previous\n"
27984 _ASM_EXTABLE(1b, 3b)
27985- : : "r" (from), "r" (to) : "memory");
27986+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
27987
27988 from += 64;
27989 to += 64;
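
[The mmx_32.c fixup paths are self-modifying: when a prefetch faults, the handler stores a two-byte short jmp over the instruction at label 1 (movw $0x1AEB writes EB 1A, a jmp skipping 26 bytes, matching the "jmp on 26 bytes" comment). With KERNEXEC the kernel text is read-only, so the patch brackets that store with a CR0 write-protect toggle: save %cr0 into the new output operand, clear bit 16 with the $0xFFFEFFFF mask, patch, restore; %eax joins the clobbers and the from/to inputs shift to %1/%2. A compilable model of the bit manipulation, with a shadow variable standing in for %cr0:

    #include <stdint.h>

    #define X86_CR0_WP (1u << 16)   /* the bit cleared by andl $0xFFFEFFFF */

    static uint32_t shadow_cr0 = 0x80050033u;  /* illustrative %cr0 value */

    static uint32_t open_kernel_text(void)  /* movl %%cr0,%0; andl; movl ...,%%cr0 */
    {
            uint32_t saved = shadow_cr0;
            shadow_cr0 = saved & ~X86_CR0_WP;  /* stores to r/o text now allowed */
            return saved;
    }

    static void close_kernel_text(uint32_t saved)  /* movl %0,%%cr0 */
    {
            shadow_cr0 = saved;
    }
]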
27990diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
27991index f6d13ee..aca5f0b 100644
27992--- a/arch/x86/lib/msr-reg.S
27993+++ b/arch/x86/lib/msr-reg.S
27994@@ -3,6 +3,7 @@
27995 #include <asm/dwarf2.h>
27996 #include <asm/asm.h>
27997 #include <asm/msr.h>
27998+#include <asm/alternative-asm.h>
27999
28000 #ifdef CONFIG_X86_64
28001 /*
28002@@ -16,7 +17,7 @@ ENTRY(\op\()_safe_regs)
28003 CFI_STARTPROC
28004 pushq_cfi %rbx
28005 pushq_cfi %rbp
28006- movq %rdi, %r10 /* Save pointer */
28007+ movq %rdi, %r9 /* Save pointer */
28008 xorl %r11d, %r11d /* Return value */
28009 movl (%rdi), %eax
28010 movl 4(%rdi), %ecx
28011@@ -27,16 +28,17 @@ ENTRY(\op\()_safe_regs)
28012 movl 28(%rdi), %edi
28013 CFI_REMEMBER_STATE
28014 1: \op
28015-2: movl %eax, (%r10)
28016+2: movl %eax, (%r9)
28017 movl %r11d, %eax /* Return value */
28018- movl %ecx, 4(%r10)
28019- movl %edx, 8(%r10)
28020- movl %ebx, 12(%r10)
28021- movl %ebp, 20(%r10)
28022- movl %esi, 24(%r10)
28023- movl %edi, 28(%r10)
28024+ movl %ecx, 4(%r9)
28025+ movl %edx, 8(%r9)
28026+ movl %ebx, 12(%r9)
28027+ movl %ebp, 20(%r9)
28028+ movl %esi, 24(%r9)
28029+ movl %edi, 28(%r9)
28030 popq_cfi %rbp
28031 popq_cfi %rbx
28032+ pax_force_retaddr
28033 ret
28034 3:
28035 CFI_RESTORE_STATE
28036diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
28037index fc6ba17..d4d989d 100644
28038--- a/arch/x86/lib/putuser.S
28039+++ b/arch/x86/lib/putuser.S
28040@@ -16,7 +16,9 @@
28041 #include <asm/errno.h>
28042 #include <asm/asm.h>
28043 #include <asm/smap.h>
28044-
28045+#include <asm/segment.h>
28046+#include <asm/pgtable.h>
28047+#include <asm/alternative-asm.h>
28048
28049 /*
28050 * __put_user_X
28051@@ -30,57 +32,125 @@
28052 * as they get called from within inline assembly.
28053 */
28054
28055-#define ENTER CFI_STARTPROC ; \
28056- GET_THREAD_INFO(%_ASM_BX)
28057-#define EXIT ASM_CLAC ; \
28058- ret ; \
28059+#define ENTER CFI_STARTPROC
28060+#define EXIT ASM_CLAC ; \
28061+ pax_force_retaddr ; \
28062+ ret ; \
28063 CFI_ENDPROC
28064
28065+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
28066+#define _DEST %_ASM_CX,%_ASM_BX
28067+#else
28068+#define _DEST %_ASM_CX
28069+#endif
28070+
28071+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
28072+#define __copyuser_seg gs;
28073+#else
28074+#define __copyuser_seg
28075+#endif
28076+
28077 .text
28078 ENTRY(__put_user_1)
28079 ENTER
28080+
28081+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
28082+ GET_THREAD_INFO(%_ASM_BX)
28083 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
28084 jae bad_put_user
28085 ASM_STAC
28086-1: movb %al,(%_ASM_CX)
28087+
28088+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
28089+ mov pax_user_shadow_base,%_ASM_BX
28090+ cmp %_ASM_BX,%_ASM_CX
28091+ jb 1234f
28092+ xor %ebx,%ebx
28093+1234:
28094+#endif
28095+
28096+#endif
28097+
28098+1: __copyuser_seg movb %al,(_DEST)
28099 xor %eax,%eax
28100 EXIT
28101 ENDPROC(__put_user_1)
28102
28103 ENTRY(__put_user_2)
28104 ENTER
28105+
28106+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
28107+ GET_THREAD_INFO(%_ASM_BX)
28108 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
28109 sub $1,%_ASM_BX
28110 cmp %_ASM_BX,%_ASM_CX
28111 jae bad_put_user
28112 ASM_STAC
28113-2: movw %ax,(%_ASM_CX)
28114+
28115+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
28116+ mov pax_user_shadow_base,%_ASM_BX
28117+ cmp %_ASM_BX,%_ASM_CX
28118+ jb 1234f
28119+ xor %ebx,%ebx
28120+1234:
28121+#endif
28122+
28123+#endif
28124+
28125+2: __copyuser_seg movw %ax,(_DEST)
28126 xor %eax,%eax
28127 EXIT
28128 ENDPROC(__put_user_2)
28129
28130 ENTRY(__put_user_4)
28131 ENTER
28132+
28133+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
28134+ GET_THREAD_INFO(%_ASM_BX)
28135 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
28136 sub $3,%_ASM_BX
28137 cmp %_ASM_BX,%_ASM_CX
28138 jae bad_put_user
28139 ASM_STAC
28140-3: movl %eax,(%_ASM_CX)
28141+
28142+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
28143+ mov pax_user_shadow_base,%_ASM_BX
28144+ cmp %_ASM_BX,%_ASM_CX
28145+ jb 1234f
28146+ xor %ebx,%ebx
28147+1234:
28148+#endif
28149+
28150+#endif
28151+
28152+3: __copyuser_seg movl %eax,(_DEST)
28153 xor %eax,%eax
28154 EXIT
28155 ENDPROC(__put_user_4)
28156
28157 ENTRY(__put_user_8)
28158 ENTER
28159+
28160+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
28161+ GET_THREAD_INFO(%_ASM_BX)
28162 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
28163 sub $7,%_ASM_BX
28164 cmp %_ASM_BX,%_ASM_CX
28165 jae bad_put_user
28166 ASM_STAC
28167-4: mov %_ASM_AX,(%_ASM_CX)
28168+
28169+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
28170+ mov pax_user_shadow_base,%_ASM_BX
28171+ cmp %_ASM_BX,%_ASM_CX
28172+ jb 1234f
28173+ xor %ebx,%ebx
28174+1234:
28175+#endif
28176+
28177+#endif
28178+
28179+4: __copyuser_seg mov %_ASM_AX,(_DEST)
28180 #ifdef CONFIG_X86_32
28181-5: movl %edx,4(%_ASM_CX)
28182+5: __copyuser_seg movl %edx,4(_DEST)
28183 #endif
28184 xor %eax,%eax
28185 EXIT
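
[putuser.S solves the amd64 rebase with addressing instead of arithmetic: _DEST expands to %_ASM_CX,%_ASM_BX, a base+index operand, and %_ASM_BX is loaded with pax_user_shadow_base then zeroed (xor %ebx,%ebx) when the pointer is already at or above it, so the single store instruction serves both cases. Note the inverted branch sense relative to getuser.S: jb keeps the base where getuser.S used jae and an explicit add. A compilable model that returns the effective address (base value illustrative):

    #include <stdint.h>

    static const uint64_t shadow_base_m = (uint64_t)1 << 47;  /* illustrative */

    static uint64_t put_user_dest_model(uint64_t uaddr)
    {
            uint64_t idx = shadow_base_m;  /* mov pax_user_shadow_base,%rbx */
            if (uaddr >= idx)              /* cmp %rbx,%rcx; jb 1234f       */
                    idx = 0;               /* xor %ebx,%ebx                 */
            return uaddr + idx;            /* the (%_ASM_CX,%_ASM_BX) operand */
    }
]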
28186diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
28187index 1cad221..de671ee 100644
28188--- a/arch/x86/lib/rwlock.S
28189+++ b/arch/x86/lib/rwlock.S
28190@@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
28191 FRAME
28192 0: LOCK_PREFIX
28193 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
28194+
28195+#ifdef CONFIG_PAX_REFCOUNT
28196+ jno 1234f
28197+ LOCK_PREFIX
28198+ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
28199+ int $4
28200+1234:
28201+ _ASM_EXTABLE(1234b, 1234b)
28202+#endif
28203+
28204 1: rep; nop
28205 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
28206 jne 1b
28207 LOCK_PREFIX
28208 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
28209+
28210+#ifdef CONFIG_PAX_REFCOUNT
28211+ jno 1234f
28212+ LOCK_PREFIX
28213+ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
28214+ int $4
28215+1234:
28216+ _ASM_EXTABLE(1234b, 1234b)
28217+#endif
28218+
28219 jnz 0b
28220 ENDFRAME
28221+ pax_force_retaddr
28222 ret
28223 CFI_ENDPROC
28224 END(__write_lock_failed)
28225@@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
28226 FRAME
28227 0: LOCK_PREFIX
28228 READ_LOCK_SIZE(inc) (%__lock_ptr)
28229+
28230+#ifdef CONFIG_PAX_REFCOUNT
28231+ jno 1234f
28232+ LOCK_PREFIX
28233+ READ_LOCK_SIZE(dec) (%__lock_ptr)
28234+ int $4
28235+1234:
28236+ _ASM_EXTABLE(1234b, 1234b)
28237+#endif
28238+
28239 1: rep; nop
28240 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
28241 js 1b
28242 LOCK_PREFIX
28243 READ_LOCK_SIZE(dec) (%__lock_ptr)
28244+
28245+#ifdef CONFIG_PAX_REFCOUNT
28246+ jno 1234f
28247+ LOCK_PREFIX
28248+ READ_LOCK_SIZE(inc) (%__lock_ptr)
28249+ int $4
28250+1234:
28251+ _ASM_EXTABLE(1234b, 1234b)
28252+#endif
28253+
28254 js 0b
28255 ENDFRAME
28256+ pax_force_retaddr
28257 ret
28258 CFI_ENDPROC
28259 END(__read_lock_failed)
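
[The rwlock changes are the canonical PAX_REFCOUNT instrumentation: after each lock-prefixed add/inc, jno skips ahead when no signed overflow occurred; otherwise the operation is undone with the matching sub/dec and int $4 raises the overflow exception, whose handler reports the refcount overflow, while _ASM_EXTABLE(1234b, 1234b) registers the spot as its own fixup so execution resumes right after. A compilable C model of detect, roll back, report; abort() stands in for whatever the #OF handler does to the task:

    #include <stdio.h>
    #include <stdlib.h>

    /* Model of:  lock incl (%lock); jno 1234f; lock decl (%lock); int $4; 1234:
     * The counter is rolled back before the event is reported, so it never
     * stays wrapped. */
    static void refcount_inc_checked(int *cnt)
    {
            int old = *cnt;
            if (__builtin_add_overflow(old, 1, cnt)) {  /* overflow: jno not taken */
                    *cnt = old;                          /* undo: lock decl        */
                    fprintf(stderr, "refcount overflow\n"); /* the int $4 handler  */
                    abort();
            }
    }
]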
28260diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
28261index 5dff5f0..cadebf4 100644
28262--- a/arch/x86/lib/rwsem.S
28263+++ b/arch/x86/lib/rwsem.S
28264@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
28265 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
28266 CFI_RESTORE __ASM_REG(dx)
28267 restore_common_regs
28268+ pax_force_retaddr
28269 ret
28270 CFI_ENDPROC
28271 ENDPROC(call_rwsem_down_read_failed)
28272@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
28273 movq %rax,%rdi
28274 call rwsem_down_write_failed
28275 restore_common_regs
28276+ pax_force_retaddr
28277 ret
28278 CFI_ENDPROC
28279 ENDPROC(call_rwsem_down_write_failed)
28280@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
28281 movq %rax,%rdi
28282 call rwsem_wake
28283 restore_common_regs
28284-1: ret
28285+1: pax_force_retaddr
28286+ ret
28287 CFI_ENDPROC
28288 ENDPROC(call_rwsem_wake)
28289
28290@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
28291 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
28292 CFI_RESTORE __ASM_REG(dx)
28293 restore_common_regs
28294+ pax_force_retaddr
28295 ret
28296 CFI_ENDPROC
28297 ENDPROC(call_rwsem_downgrade_wake)
28298diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
28299index a63efd6..ccecad8 100644
28300--- a/arch/x86/lib/thunk_64.S
28301+++ b/arch/x86/lib/thunk_64.S
28302@@ -8,6 +8,7 @@
28303 #include <linux/linkage.h>
28304 #include <asm/dwarf2.h>
28305 #include <asm/calling.h>
28306+#include <asm/alternative-asm.h>
28307
28308 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
28309 .macro THUNK name, func, put_ret_addr_in_rdi=0
28310@@ -41,5 +42,6 @@
28311 SAVE_ARGS
28312 restore:
28313 RESTORE_ARGS
28314+ pax_force_retaddr
28315 ret
28316 CFI_ENDPROC
28317diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
28318index 3eb18ac..6890bc3 100644
28319--- a/arch/x86/lib/usercopy_32.c
28320+++ b/arch/x86/lib/usercopy_32.c
28321@@ -42,11 +42,13 @@ do { \
28322 int __d0; \
28323 might_fault(); \
28324 __asm__ __volatile__( \
28325+ __COPYUSER_SET_ES \
28326 ASM_STAC "\n" \
28327 "0: rep; stosl\n" \
28328 " movl %2,%0\n" \
28329 "1: rep; stosb\n" \
28330 "2: " ASM_CLAC "\n" \
28331+ __COPYUSER_RESTORE_ES \
28332 ".section .fixup,\"ax\"\n" \
28333 "3: lea 0(%2,%0,4),%0\n" \
28334 " jmp 2b\n" \
28335@@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user);
28336
28337 #ifdef CONFIG_X86_INTEL_USERCOPY
28338 static unsigned long
28339-__copy_user_intel(void __user *to, const void *from, unsigned long size)
28340+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
28341 {
28342 int d0, d1;
28343 __asm__ __volatile__(
28344@@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
28345 " .align 2,0x90\n"
28346 "3: movl 0(%4), %%eax\n"
28347 "4: movl 4(%4), %%edx\n"
28348- "5: movl %%eax, 0(%3)\n"
28349- "6: movl %%edx, 4(%3)\n"
28350+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
28351+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
28352 "7: movl 8(%4), %%eax\n"
28353 "8: movl 12(%4),%%edx\n"
28354- "9: movl %%eax, 8(%3)\n"
28355- "10: movl %%edx, 12(%3)\n"
28356+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
28357+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
28358 "11: movl 16(%4), %%eax\n"
28359 "12: movl 20(%4), %%edx\n"
28360- "13: movl %%eax, 16(%3)\n"
28361- "14: movl %%edx, 20(%3)\n"
28362+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
28363+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
28364 "15: movl 24(%4), %%eax\n"
28365 "16: movl 28(%4), %%edx\n"
28366- "17: movl %%eax, 24(%3)\n"
28367- "18: movl %%edx, 28(%3)\n"
28368+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
28369+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
28370 "19: movl 32(%4), %%eax\n"
28371 "20: movl 36(%4), %%edx\n"
28372- "21: movl %%eax, 32(%3)\n"
28373- "22: movl %%edx, 36(%3)\n"
28374+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
28375+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
28376 "23: movl 40(%4), %%eax\n"
28377 "24: movl 44(%4), %%edx\n"
28378- "25: movl %%eax, 40(%3)\n"
28379- "26: movl %%edx, 44(%3)\n"
28380+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
28381+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
28382 "27: movl 48(%4), %%eax\n"
28383 "28: movl 52(%4), %%edx\n"
28384- "29: movl %%eax, 48(%3)\n"
28385- "30: movl %%edx, 52(%3)\n"
28386+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
28387+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
28388 "31: movl 56(%4), %%eax\n"
28389 "32: movl 60(%4), %%edx\n"
28390- "33: movl %%eax, 56(%3)\n"
28391- "34: movl %%edx, 60(%3)\n"
28392+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
28393+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
28394 " addl $-64, %0\n"
28395 " addl $64, %4\n"
28396 " addl $64, %3\n"
28397@@ -149,10 +151,12 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
28398 " shrl $2, %0\n"
28399 " andl $3, %%eax\n"
28400 " cld\n"
28401+ __COPYUSER_SET_ES
28402 "99: rep; movsl\n"
28403 "36: movl %%eax, %0\n"
28404 "37: rep; movsb\n"
28405 "100:\n"
28406+ __COPYUSER_RESTORE_ES
28407 ".section .fixup,\"ax\"\n"
28408 "101: lea 0(%%eax,%0,4),%0\n"
28409 " jmp 100b\n"
28410@@ -202,46 +206,150 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
28411 }
28412
28413 static unsigned long
28414+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
28415+{
28416+ int d0, d1;
28417+ __asm__ __volatile__(
28418+ " .align 2,0x90\n"
28419+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
28420+ " cmpl $67, %0\n"
28421+ " jbe 3f\n"
28422+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
28423+ " .align 2,0x90\n"
28424+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
28425+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
28426+ "5: movl %%eax, 0(%3)\n"
28427+ "6: movl %%edx, 4(%3)\n"
28428+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
28429+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
28430+ "9: movl %%eax, 8(%3)\n"
28431+ "10: movl %%edx, 12(%3)\n"
28432+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
28433+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
28434+ "13: movl %%eax, 16(%3)\n"
28435+ "14: movl %%edx, 20(%3)\n"
28436+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
28437+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
28438+ "17: movl %%eax, 24(%3)\n"
28439+ "18: movl %%edx, 28(%3)\n"
28440+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
28441+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
28442+ "21: movl %%eax, 32(%3)\n"
28443+ "22: movl %%edx, 36(%3)\n"
28444+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
28445+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
28446+ "25: movl %%eax, 40(%3)\n"
28447+ "26: movl %%edx, 44(%3)\n"
28448+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
28449+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
28450+ "29: movl %%eax, 48(%3)\n"
28451+ "30: movl %%edx, 52(%3)\n"
28452+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
28453+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
28454+ "33: movl %%eax, 56(%3)\n"
28455+ "34: movl %%edx, 60(%3)\n"
28456+ " addl $-64, %0\n"
28457+ " addl $64, %4\n"
28458+ " addl $64, %3\n"
28459+ " cmpl $63, %0\n"
28460+ " ja 1b\n"
28461+ "35: movl %0, %%eax\n"
28462+ " shrl $2, %0\n"
28463+ " andl $3, %%eax\n"
28464+ " cld\n"
28465+ "99: rep; "__copyuser_seg" movsl\n"
28466+ "36: movl %%eax, %0\n"
28467+ "37: rep; "__copyuser_seg" movsb\n"
28468+ "100:\n"
28469+ ".section .fixup,\"ax\"\n"
28470+ "101: lea 0(%%eax,%0,4),%0\n"
28471+ " jmp 100b\n"
28472+ ".previous\n"
28473+ _ASM_EXTABLE(1b,100b)
28474+ _ASM_EXTABLE(2b,100b)
28475+ _ASM_EXTABLE(3b,100b)
28476+ _ASM_EXTABLE(4b,100b)
28477+ _ASM_EXTABLE(5b,100b)
28478+ _ASM_EXTABLE(6b,100b)
28479+ _ASM_EXTABLE(7b,100b)
28480+ _ASM_EXTABLE(8b,100b)
28481+ _ASM_EXTABLE(9b,100b)
28482+ _ASM_EXTABLE(10b,100b)
28483+ _ASM_EXTABLE(11b,100b)
28484+ _ASM_EXTABLE(12b,100b)
28485+ _ASM_EXTABLE(13b,100b)
28486+ _ASM_EXTABLE(14b,100b)
28487+ _ASM_EXTABLE(15b,100b)
28488+ _ASM_EXTABLE(16b,100b)
28489+ _ASM_EXTABLE(17b,100b)
28490+ _ASM_EXTABLE(18b,100b)
28491+ _ASM_EXTABLE(19b,100b)
28492+ _ASM_EXTABLE(20b,100b)
28493+ _ASM_EXTABLE(21b,100b)
28494+ _ASM_EXTABLE(22b,100b)
28495+ _ASM_EXTABLE(23b,100b)
28496+ _ASM_EXTABLE(24b,100b)
28497+ _ASM_EXTABLE(25b,100b)
28498+ _ASM_EXTABLE(26b,100b)
28499+ _ASM_EXTABLE(27b,100b)
28500+ _ASM_EXTABLE(28b,100b)
28501+ _ASM_EXTABLE(29b,100b)
28502+ _ASM_EXTABLE(30b,100b)
28503+ _ASM_EXTABLE(31b,100b)
28504+ _ASM_EXTABLE(32b,100b)
28505+ _ASM_EXTABLE(33b,100b)
28506+ _ASM_EXTABLE(34b,100b)
28507+ _ASM_EXTABLE(35b,100b)
28508+ _ASM_EXTABLE(36b,100b)
28509+ _ASM_EXTABLE(37b,100b)
28510+ _ASM_EXTABLE(99b,101b)
28511+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
28512+ : "1"(to), "2"(from), "0"(size)
28513+ : "eax", "edx", "memory");
28514+ return size;
28515+}
28516+
28517+static unsigned long __size_overflow(3)
28518 __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
28519 {
28520 int d0, d1;
28521 __asm__ __volatile__(
28522 " .align 2,0x90\n"
28523- "0: movl 32(%4), %%eax\n"
28524+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
28525 " cmpl $67, %0\n"
28526 " jbe 2f\n"
28527- "1: movl 64(%4), %%eax\n"
28528+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
28529 " .align 2,0x90\n"
28530- "2: movl 0(%4), %%eax\n"
28531- "21: movl 4(%4), %%edx\n"
28532+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
28533+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
28534 " movl %%eax, 0(%3)\n"
28535 " movl %%edx, 4(%3)\n"
28536- "3: movl 8(%4), %%eax\n"
28537- "31: movl 12(%4),%%edx\n"
28538+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
28539+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
28540 " movl %%eax, 8(%3)\n"
28541 " movl %%edx, 12(%3)\n"
28542- "4: movl 16(%4), %%eax\n"
28543- "41: movl 20(%4), %%edx\n"
28544+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
28545+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
28546 " movl %%eax, 16(%3)\n"
28547 " movl %%edx, 20(%3)\n"
28548- "10: movl 24(%4), %%eax\n"
28549- "51: movl 28(%4), %%edx\n"
28550+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
28551+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
28552 " movl %%eax, 24(%3)\n"
28553 " movl %%edx, 28(%3)\n"
28554- "11: movl 32(%4), %%eax\n"
28555- "61: movl 36(%4), %%edx\n"
28556+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
28557+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
28558 " movl %%eax, 32(%3)\n"
28559 " movl %%edx, 36(%3)\n"
28560- "12: movl 40(%4), %%eax\n"
28561- "71: movl 44(%4), %%edx\n"
28562+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
28563+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
28564 " movl %%eax, 40(%3)\n"
28565 " movl %%edx, 44(%3)\n"
28566- "13: movl 48(%4), %%eax\n"
28567- "81: movl 52(%4), %%edx\n"
28568+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
28569+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
28570 " movl %%eax, 48(%3)\n"
28571 " movl %%edx, 52(%3)\n"
28572- "14: movl 56(%4), %%eax\n"
28573- "91: movl 60(%4), %%edx\n"
28574+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
28575+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
28576 " movl %%eax, 56(%3)\n"
28577 " movl %%edx, 60(%3)\n"
28578 " addl $-64, %0\n"
28579@@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
28580 " shrl $2, %0\n"
28581 " andl $3, %%eax\n"
28582 " cld\n"
28583- "6: rep; movsl\n"
28584+ "6: rep; "__copyuser_seg" movsl\n"
28585 " movl %%eax,%0\n"
28586- "7: rep; movsb\n"
28587+ "7: rep; "__copyuser_seg" movsb\n"
28588 "8:\n"
28589 ".section .fixup,\"ax\"\n"
28590 "9: lea 0(%%eax,%0,4),%0\n"
28591@@ -298,48 +406,48 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
28592 * hyoshiok@miraclelinux.com
28593 */
28594
28595-static unsigned long __copy_user_zeroing_intel_nocache(void *to,
28596+static unsigned long __size_overflow(3) __copy_user_zeroing_intel_nocache(void *to,
28597 const void __user *from, unsigned long size)
28598 {
28599 int d0, d1;
28600
28601 __asm__ __volatile__(
28602 " .align 2,0x90\n"
28603- "0: movl 32(%4), %%eax\n"
28604+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
28605 " cmpl $67, %0\n"
28606 " jbe 2f\n"
28607- "1: movl 64(%4), %%eax\n"
28608+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
28609 " .align 2,0x90\n"
28610- "2: movl 0(%4), %%eax\n"
28611- "21: movl 4(%4), %%edx\n"
28612+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
28613+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
28614 " movnti %%eax, 0(%3)\n"
28615 " movnti %%edx, 4(%3)\n"
28616- "3: movl 8(%4), %%eax\n"
28617- "31: movl 12(%4),%%edx\n"
28618+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
28619+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
28620 " movnti %%eax, 8(%3)\n"
28621 " movnti %%edx, 12(%3)\n"
28622- "4: movl 16(%4), %%eax\n"
28623- "41: movl 20(%4), %%edx\n"
28624+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
28625+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
28626 " movnti %%eax, 16(%3)\n"
28627 " movnti %%edx, 20(%3)\n"
28628- "10: movl 24(%4), %%eax\n"
28629- "51: movl 28(%4), %%edx\n"
28630+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
28631+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
28632 " movnti %%eax, 24(%3)\n"
28633 " movnti %%edx, 28(%3)\n"
28634- "11: movl 32(%4), %%eax\n"
28635- "61: movl 36(%4), %%edx\n"
28636+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
28637+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
28638 " movnti %%eax, 32(%3)\n"
28639 " movnti %%edx, 36(%3)\n"
28640- "12: movl 40(%4), %%eax\n"
28641- "71: movl 44(%4), %%edx\n"
28642+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
28643+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
28644 " movnti %%eax, 40(%3)\n"
28645 " movnti %%edx, 44(%3)\n"
28646- "13: movl 48(%4), %%eax\n"
28647- "81: movl 52(%4), %%edx\n"
28648+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
28649+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
28650 " movnti %%eax, 48(%3)\n"
28651 " movnti %%edx, 52(%3)\n"
28652- "14: movl 56(%4), %%eax\n"
28653- "91: movl 60(%4), %%edx\n"
28654+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
28655+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
28656 " movnti %%eax, 56(%3)\n"
28657 " movnti %%edx, 60(%3)\n"
28658 " addl $-64, %0\n"
28659@@ -352,9 +460,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
28660 " shrl $2, %0\n"
28661 " andl $3, %%eax\n"
28662 " cld\n"
28663- "6: rep; movsl\n"
28664+ "6: rep; "__copyuser_seg" movsl\n"
28665 " movl %%eax,%0\n"
28666- "7: rep; movsb\n"
28667+ "7: rep; "__copyuser_seg" movsb\n"
28668 "8:\n"
28669 ".section .fixup,\"ax\"\n"
28670 "9: lea 0(%%eax,%0,4),%0\n"
28671@@ -392,48 +500,48 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
28672 return size;
28673 }
28674
28675-static unsigned long __copy_user_intel_nocache(void *to,
28676+static unsigned long __size_overflow(3) __copy_user_intel_nocache(void *to,
28677 const void __user *from, unsigned long size)
28678 {
28679 int d0, d1;
28680
28681 __asm__ __volatile__(
28682 " .align 2,0x90\n"
28683- "0: movl 32(%4), %%eax\n"
28684+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
28685 " cmpl $67, %0\n"
28686 " jbe 2f\n"
28687- "1: movl 64(%4), %%eax\n"
28688+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
28689 " .align 2,0x90\n"
28690- "2: movl 0(%4), %%eax\n"
28691- "21: movl 4(%4), %%edx\n"
28692+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
28693+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
28694 " movnti %%eax, 0(%3)\n"
28695 " movnti %%edx, 4(%3)\n"
28696- "3: movl 8(%4), %%eax\n"
28697- "31: movl 12(%4),%%edx\n"
28698+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
28699+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
28700 " movnti %%eax, 8(%3)\n"
28701 " movnti %%edx, 12(%3)\n"
28702- "4: movl 16(%4), %%eax\n"
28703- "41: movl 20(%4), %%edx\n"
28704+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
28705+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
28706 " movnti %%eax, 16(%3)\n"
28707 " movnti %%edx, 20(%3)\n"
28708- "10: movl 24(%4), %%eax\n"
28709- "51: movl 28(%4), %%edx\n"
28710+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
28711+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
28712 " movnti %%eax, 24(%3)\n"
28713 " movnti %%edx, 28(%3)\n"
28714- "11: movl 32(%4), %%eax\n"
28715- "61: movl 36(%4), %%edx\n"
28716+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
28717+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
28718 " movnti %%eax, 32(%3)\n"
28719 " movnti %%edx, 36(%3)\n"
28720- "12: movl 40(%4), %%eax\n"
28721- "71: movl 44(%4), %%edx\n"
28722+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
28723+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
28724 " movnti %%eax, 40(%3)\n"
28725 " movnti %%edx, 44(%3)\n"
28726- "13: movl 48(%4), %%eax\n"
28727- "81: movl 52(%4), %%edx\n"
28728+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
28729+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
28730 " movnti %%eax, 48(%3)\n"
28731 " movnti %%edx, 52(%3)\n"
28732- "14: movl 56(%4), %%eax\n"
28733- "91: movl 60(%4), %%edx\n"
28734+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
28735+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
28736 " movnti %%eax, 56(%3)\n"
28737 " movnti %%edx, 60(%3)\n"
28738 " addl $-64, %0\n"
28739@@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
28740 " shrl $2, %0\n"
28741 " andl $3, %%eax\n"
28742 " cld\n"
28743- "6: rep; movsl\n"
28744+ "6: rep; "__copyuser_seg" movsl\n"
28745 " movl %%eax,%0\n"
28746- "7: rep; movsb\n"
28747+ "7: rep; "__copyuser_seg" movsb\n"
28748 "8:\n"
28749 ".section .fixup,\"ax\"\n"
28750 "9: lea 0(%%eax,%0,4),%0\n"
28751@@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
28752 */
28753 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
28754 unsigned long size);
28755-unsigned long __copy_user_intel(void __user *to, const void *from,
28756+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
28757+ unsigned long size);
28758+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
28759 unsigned long size);
28760 unsigned long __copy_user_zeroing_intel_nocache(void *to,
28761 const void __user *from, unsigned long size);
28762 #endif /* CONFIG_X86_INTEL_USERCOPY */
28763
28764 /* Generic arbitrary sized copy. */
28765-#define __copy_user(to, from, size) \
28766+#define __copy_user(to, from, size, prefix, set, restore) \
28767 do { \
28768 int __d0, __d1, __d2; \
28769 __asm__ __volatile__( \
28770+ set \
28771 " cmp $7,%0\n" \
28772 " jbe 1f\n" \
28773 " movl %1,%0\n" \
28774 " negl %0\n" \
28775 " andl $7,%0\n" \
28776 " subl %0,%3\n" \
28777- "4: rep; movsb\n" \
28778+ "4: rep; "prefix"movsb\n" \
28779 " movl %3,%0\n" \
28780 " shrl $2,%0\n" \
28781 " andl $3,%3\n" \
28782 " .align 2,0x90\n" \
28783- "0: rep; movsl\n" \
28784+ "0: rep; "prefix"movsl\n" \
28785 " movl %3,%0\n" \
28786- "1: rep; movsb\n" \
28787+ "1: rep; "prefix"movsb\n" \
28788 "2:\n" \
28789+ restore \
28790 ".section .fixup,\"ax\"\n" \
28791 "5: addl %3,%0\n" \
28792 " jmp 2b\n" \
28793@@ -538,14 +650,14 @@ do { \
28794 " negl %0\n" \
28795 " andl $7,%0\n" \
28796 " subl %0,%3\n" \
28797- "4: rep; movsb\n" \
28798+ "4: rep; "__copyuser_seg"movsb\n" \
28799 " movl %3,%0\n" \
28800 " shrl $2,%0\n" \
28801 " andl $3,%3\n" \
28802 " .align 2,0x90\n" \
28803- "0: rep; movsl\n" \
28804+ "0: rep; "__copyuser_seg"movsl\n" \
28805 " movl %3,%0\n" \
28806- "1: rep; movsb\n" \
28807+ "1: rep; "__copyuser_seg"movsb\n" \
28808 "2:\n" \
28809 ".section .fixup,\"ax\"\n" \
28810 "5: addl %3,%0\n" \
28811@@ -572,9 +684,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
28812 {
28813 stac();
28814 if (movsl_is_ok(to, from, n))
28815- __copy_user(to, from, n);
28816+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
28817 else
28818- n = __copy_user_intel(to, from, n);
28819+ n = __generic_copy_to_user_intel(to, from, n);
28820 clac();
28821 return n;
28822 }
28823@@ -598,10 +710,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
28824 {
28825 stac();
28826 if (movsl_is_ok(to, from, n))
28827- __copy_user(to, from, n);
28828+ __copy_user(to, from, n, __copyuser_seg, "", "");
28829 else
28830- n = __copy_user_intel((void __user *)to,
28831- (const void *)from, n);
28832+ n = __generic_copy_from_user_intel(to, from, n);
28833 clac();
28834 return n;
28835 }
28836@@ -632,60 +743,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
28837 if (n > 64 && cpu_has_xmm2)
28838 n = __copy_user_intel_nocache(to, from, n);
28839 else
28840- __copy_user(to, from, n);
28841+ __copy_user(to, from, n, __copyuser_seg, "", "");
28842 #else
28843- __copy_user(to, from, n);
28844+ __copy_user(to, from, n, __copyuser_seg, "", "");
28845 #endif
28846 clac();
28847 return n;
28848 }
28849 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
28850
28851-/**
28852- * copy_to_user: - Copy a block of data into user space.
28853- * @to: Destination address, in user space.
28854- * @from: Source address, in kernel space.
28855- * @n: Number of bytes to copy.
28856- *
28857- * Context: User context only. This function may sleep.
28858- *
28859- * Copy data from kernel space to user space.
28860- *
28861- * Returns number of bytes that could not be copied.
28862- * On success, this will be zero.
28863- */
28864-unsigned long
28865-copy_to_user(void __user *to, const void *from, unsigned long n)
28866+#ifdef CONFIG_PAX_MEMORY_UDEREF
28867+void __set_fs(mm_segment_t x)
28868 {
28869- if (access_ok(VERIFY_WRITE, to, n))
28870- n = __copy_to_user(to, from, n);
28871- return n;
28872+ switch (x.seg) {
28873+ case 0:
28874+ loadsegment(gs, 0);
28875+ break;
28876+ case TASK_SIZE_MAX:
28877+ loadsegment(gs, __USER_DS);
28878+ break;
28879+ case -1UL:
28880+ loadsegment(gs, __KERNEL_DS);
28881+ break;
28882+ default:
28883+ BUG();
28884+ }
28885 }
28886-EXPORT_SYMBOL(copy_to_user);
28887+EXPORT_SYMBOL(__set_fs);
28888
28889-/**
28890- * copy_from_user: - Copy a block of data from user space.
28891- * @to: Destination address, in kernel space.
28892- * @from: Source address, in user space.
28893- * @n: Number of bytes to copy.
28894- *
28895- * Context: User context only. This function may sleep.
28896- *
28897- * Copy data from user space to kernel space.
28898- *
28899- * Returns number of bytes that could not be copied.
28900- * On success, this will be zero.
28901- *
28902- * If some data could not be copied, this function will pad the copied
28903- * data to the requested size using zero bytes.
28904- */
28905-unsigned long
28906-_copy_from_user(void *to, const void __user *from, unsigned long n)
28907+void set_fs(mm_segment_t x)
28908 {
28909- if (access_ok(VERIFY_READ, from, n))
28910- n = __copy_from_user(to, from, n);
28911- else
28912- memset(to, 0, n);
28913- return n;
28914+ current_thread_info()->addr_limit = x;
28915+ __set_fs(x);
28916 }
28917-EXPORT_SYMBOL(_copy_from_user);
28918+EXPORT_SYMBOL(set_fs);
28919+#endif
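
[The tail of usercopy_32.c removes the generic copy_to_user()/_copy_from_user() definitions (the patch presumably supplies checked replacements elsewhere) and adds the i386 UDEREF segment plumbing in their place: __set_fs() maps the three possible addr_limit values onto %gs selectors, a null selector for seg 0, __USER_DS for TASK_SIZE_MAX (USER_DS) and __KERNEL_DS for -1UL (KERNEL_DS), so every gs-prefixed __copyuser_seg access is bounds-checked by segmentation rather than by an explicit limit compare; set_fs() keeps thread_info's addr_limit in sync before reloading the segment. A compilable model of the dispatch, with symbolic selector values:

    #include <assert.h>

    #define TASK_SIZE_MAX_M 0xC0000000UL  /* illustrative 3G/1G split */

    enum seg_model { SEG_NULL, SEG_USER_DS, SEG_KERNEL_DS };

    static enum seg_model set_fs_model(unsigned long addr_limit)
    {
            switch (addr_limit) {
            case 0:                 return SEG_NULL;      /* loadsegment(gs, 0)           */
            case TASK_SIZE_MAX_M:   return SEG_USER_DS;   /* loadsegment(gs, __USER_DS)   */
            case (unsigned long)-1: return SEG_KERNEL_DS; /* loadsegment(gs, __KERNEL_DS) */
            default: assert(0 && "BUG()"); return SEG_NULL;
            }
    }
]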
28920diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
28921index 906fea3..0194a18 100644
28922--- a/arch/x86/lib/usercopy_64.c
28923+++ b/arch/x86/lib/usercopy_64.c
28924@@ -18,6 +18,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
28925 might_fault();
28926 /* no memory constraint because it doesn't change any memory gcc knows
28927 about */
28928+ pax_open_userland();
28929 stac();
28930 asm volatile(
28931 " testq %[size8],%[size8]\n"
28932@@ -39,9 +40,10 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
28933 _ASM_EXTABLE(0b,3b)
28934 _ASM_EXTABLE(1b,2b)
28935 : [size8] "=&c"(size), [dst] "=&D" (__d0)
28936- : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
28937+ : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)),
28938 [zero] "r" (0UL), [eight] "r" (8UL));
28939 clac();
28940+ pax_close_userland();
28941 return size;
28942 }
28943 EXPORT_SYMBOL(__clear_user);
28944@@ -54,12 +56,11 @@ unsigned long clear_user(void __user *to, unsigned long n)
28945 }
28946 EXPORT_SYMBOL(clear_user);
28947
28948-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
28949+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
28950 {
28951- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
28952- return copy_user_generic((__force void *)to, (__force void *)from, len);
28953- }
28954- return len;
28955+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
28956+ return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len);
28957+ return len;
28958 }
28959 EXPORT_SYMBOL(copy_in_user);
28960
28961@@ -69,11 +70,13 @@ EXPORT_SYMBOL(copy_in_user);
28962 * it is not necessary to optimize tail handling.
28963 */
28964 unsigned long
28965-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
28966+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
28967 {
28968 char c;
28969 unsigned zero_len;
28970
28971+ clac();
28972+ pax_close_userland();
28973 for (; len; --len, to++) {
28974 if (__get_user_nocheck(c, from++, sizeof(char)))
28975 break;
28976@@ -84,6 +87,5 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
28977 for (c = 0, zero_len = len; zerorest && zero_len; --zero_len)
28978 if (__put_user_nocheck(c, to++, sizeof(char)))
28979 break;
28980- clac();
28981 return len;
28982 }
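
[usercopy_64.c shows the C-side shadow handling: user pointers headed into the raw copy/clear routines are wrapped in ____m(), the C counterpart of the asm compare-and-add rebase, and copy_user_handle_tail() now issues clac(); pax_close_userland() at entry instead of clac() at exit, since it is reached from the copy routines' fixup paths with the access window still open and its own __get_user_nocheck()/__put_user_nocheck() calls manage the window per access. A hedged sketch of what ____m() plausibly expands to, reconstructed rather than quoted:

    #include <stdint.h>

    unsigned long pax_user_shadow_base_m = (unsigned long)1 << 47;  /* illustrative */

    /* GCC statement expression; with UDEREF disabled this would collapse
     * to the identity. */
    #define ____m_model(p) ({                              \
            uintptr_t __p = (uintptr_t)(p);                \
            if (__p < pax_user_shadow_base_m)              \
                    __p += pax_user_shadow_base_m;         \
            (void *)__p;                                   \
    })
]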
28983diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
28984index 23d8e5f..9ccc13a 100644
28985--- a/arch/x86/mm/Makefile
28986+++ b/arch/x86/mm/Makefile
28987@@ -28,3 +28,7 @@ obj-$(CONFIG_ACPI_NUMA) += srat.o
28988 obj-$(CONFIG_NUMA_EMU) += numa_emulation.o
28989
28990 obj-$(CONFIG_MEMTEST) += memtest.o
28991+
28992+quote:="
28993+obj-$(CONFIG_X86_64) += uderef_64.o
28994+CFLAGS_uderef_64.o := $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
28995diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
28996index 903ec1e..c4166b2 100644
28997--- a/arch/x86/mm/extable.c
28998+++ b/arch/x86/mm/extable.c
28999@@ -6,12 +6,24 @@
29000 static inline unsigned long
29001 ex_insn_addr(const struct exception_table_entry *x)
29002 {
29003- return (unsigned long)&x->insn + x->insn;
29004+ unsigned long reloc = 0;
29005+
29006+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
29007+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
29008+#endif
29009+
29010+ return (unsigned long)&x->insn + x->insn + reloc;
29011 }
29012 static inline unsigned long
29013 ex_fixup_addr(const struct exception_table_entry *x)
29014 {
29015- return (unsigned long)&x->fixup + x->fixup;
29016+ unsigned long reloc = 0;
29017+
29018+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
29019+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
29020+#endif
29021+
29022+ return (unsigned long)&x->fixup + x->fixup + reloc;
29023 }
29024
29025 int fixup_exception(struct pt_regs *regs)
29026@@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
29027 unsigned long new_ip;
29028
29029 #ifdef CONFIG_PNPBIOS
29030- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
29031+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
29032 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
29033 extern u32 pnp_bios_is_utter_crap;
29034 pnp_bios_is_utter_crap = 1;
29035@@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start,
29036 i += 4;
29037 p->fixup -= i;
29038 i += 4;
29039+
29040+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
29041+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT));
29042+ p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
29043+ p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
29044+#endif
29045+
29046 }
29047 }
29048
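
The extable hunks above turn the 32-bit self-relative insn/fixup fields back into absolute addresses, adding a load-address delta when KERNEXEC relocates a 32-bit kernel. A rough standalone sketch of that decoding (the struct layout and the reloc parameter are illustrative; in the patch, reloc corresponds to ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR and is zero with KERNEXEC off):

#include <stdint.h>

/* self-relative entry: each field stores (target - &field) */
struct ex_entry {
	int32_t insn;
	int32_t fixup;
};

static uintptr_t ex_insn_addr_model(const struct ex_entry *e, intptr_t reloc)
{
	return (uintptr_t)&e->insn + e->insn + reloc;
}

static uintptr_t ex_fixup_addr_model(const struct ex_entry *e, intptr_t reloc)
{
	return (uintptr_t)&e->fixup + e->fixup + reloc;
}

With reloc forced to zero this degenerates to the stock kernel computation, which is why the sort_extable() hunk has to compensate by the same delta at build-sort time.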
29049diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
29050index 654be4a..a4a3da1 100644
29051--- a/arch/x86/mm/fault.c
29052+++ b/arch/x86/mm/fault.c
29053@@ -14,11 +14,18 @@
29054 #include <linux/hugetlb.h> /* hstate_index_to_shift */
29055 #include <linux/prefetch.h> /* prefetchw */
29056 #include <linux/context_tracking.h> /* exception_enter(), ... */
29057+#include <linux/unistd.h>
29058+#include <linux/compiler.h>
29059
29060 #include <asm/traps.h> /* dotraplinkage, ... */
29061 #include <asm/pgalloc.h> /* pgd_*(), ... */
29062 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
29063 #include <asm/fixmap.h> /* VSYSCALL_START */
29064+#include <asm/tlbflush.h>
29065+
29066+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
29067+#include <asm/stacktrace.h>
29068+#endif
29069
29070 /*
29071 * Page fault error code bits:
29072@@ -56,7 +63,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
29073 int ret = 0;
29074
29075 /* kprobe_running() needs smp_processor_id() */
29076- if (kprobes_built_in() && !user_mode_vm(regs)) {
29077+ if (kprobes_built_in() && !user_mode(regs)) {
29078 preempt_disable();
29079 if (kprobe_running() && kprobe_fault_handler(regs, 14))
29080 ret = 1;
29081@@ -117,7 +124,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
29082 return !instr_lo || (instr_lo>>1) == 1;
29083 case 0x00:
29084 /* Prefetch instruction is 0x0F0D or 0x0F18 */
29085- if (probe_kernel_address(instr, opcode))
29086+ if (user_mode(regs)) {
29087+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
29088+ return 0;
29089+ } else if (probe_kernel_address(instr, opcode))
29090 return 0;
29091
29092 *prefetch = (instr_lo == 0xF) &&
29093@@ -151,7 +161,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
29094 while (instr < max_instr) {
29095 unsigned char opcode;
29096
29097- if (probe_kernel_address(instr, opcode))
29098+ if (user_mode(regs)) {
29099+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
29100+ break;
29101+ } else if (probe_kernel_address(instr, opcode))
29102 break;
29103
29104 instr++;
29105@@ -182,6 +195,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
29106 force_sig_info(si_signo, &info, tsk);
29107 }
29108
29109+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
29110+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
29111+#endif
29112+
29113+#ifdef CONFIG_PAX_EMUTRAMP
29114+static int pax_handle_fetch_fault(struct pt_regs *regs);
29115+#endif
29116+
29117+#ifdef CONFIG_PAX_PAGEEXEC
29118+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
29119+{
29120+ pgd_t *pgd;
29121+ pud_t *pud;
29122+ pmd_t *pmd;
29123+
29124+ pgd = pgd_offset(mm, address);
29125+ if (!pgd_present(*pgd))
29126+ return NULL;
29127+ pud = pud_offset(pgd, address);
29128+ if (!pud_present(*pud))
29129+ return NULL;
29130+ pmd = pmd_offset(pud, address);
29131+ if (!pmd_present(*pmd))
29132+ return NULL;
29133+ return pmd;
29134+}
29135+#endif
29136+
29137 DEFINE_SPINLOCK(pgd_lock);
29138 LIST_HEAD(pgd_list);
29139
29140@@ -232,10 +273,27 @@ void vmalloc_sync_all(void)
29141 for (address = VMALLOC_START & PMD_MASK;
29142 address >= TASK_SIZE && address < FIXADDR_TOP;
29143 address += PMD_SIZE) {
29144+
29145+#ifdef CONFIG_PAX_PER_CPU_PGD
29146+ unsigned long cpu;
29147+#else
29148 struct page *page;
29149+#endif
29150
29151 spin_lock(&pgd_lock);
29152+
29153+#ifdef CONFIG_PAX_PER_CPU_PGD
29154+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
29155+ pgd_t *pgd = get_cpu_pgd(cpu, user);
29156+ pmd_t *ret;
29157+
29158+ ret = vmalloc_sync_one(pgd, address);
29159+ if (!ret)
29160+ break;
29161+ pgd = get_cpu_pgd(cpu, kernel);
29162+#else
29163 list_for_each_entry(page, &pgd_list, lru) {
29164+ pgd_t *pgd;
29165 spinlock_t *pgt_lock;
29166 pmd_t *ret;
29167
29168@@ -243,8 +301,14 @@ void vmalloc_sync_all(void)
29169 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
29170
29171 spin_lock(pgt_lock);
29172- ret = vmalloc_sync_one(page_address(page), address);
29173+ pgd = page_address(page);
29174+#endif
29175+
29176+ ret = vmalloc_sync_one(pgd, address);
29177+
29178+#ifndef CONFIG_PAX_PER_CPU_PGD
29179 spin_unlock(pgt_lock);
29180+#endif
29181
29182 if (!ret)
29183 break;
29184@@ -278,6 +342,12 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
29185 * an interrupt in the middle of a task switch..
29186 */
29187 pgd_paddr = read_cr3();
29188+
29189+#ifdef CONFIG_PAX_PER_CPU_PGD
29190+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (pgd_paddr & __PHYSICAL_MASK));
29191+ vmalloc_sync_one(__va(pgd_paddr + PAGE_SIZE), address);
29192+#endif
29193+
29194 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
29195 if (!pmd_k)
29196 return -1;
29197@@ -373,11 +443,25 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
29198 * happen within a race in page table update. In the later
29199 * case just flush:
29200 */
29201- pgd = pgd_offset(current->active_mm, address);
29202+
29203 pgd_ref = pgd_offset_k(address);
29204 if (pgd_none(*pgd_ref))
29205 return -1;
29206
29207+#ifdef CONFIG_PAX_PER_CPU_PGD
29208+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (read_cr3() & __PHYSICAL_MASK));
29209+ pgd = pgd_offset_cpu(smp_processor_id(), user, address);
29210+ if (pgd_none(*pgd)) {
29211+ set_pgd(pgd, *pgd_ref);
29212+ arch_flush_lazy_mmu_mode();
29213+ } else {
29214+ BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
29215+ }
29216+ pgd = pgd_offset_cpu(smp_processor_id(), kernel, address);
29217+#else
29218+ pgd = pgd_offset(current->active_mm, address);
29219+#endif
29220+
29221 if (pgd_none(*pgd)) {
29222 set_pgd(pgd, *pgd_ref);
29223 arch_flush_lazy_mmu_mode();
29224@@ -543,7 +627,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
29225 static int is_errata100(struct pt_regs *regs, unsigned long address)
29226 {
29227 #ifdef CONFIG_X86_64
29228- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
29229+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
29230 return 1;
29231 #endif
29232 return 0;
29233@@ -570,7 +654,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
29234 }
29235
29236 static const char nx_warning[] = KERN_CRIT
29237-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
29238+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
29239
29240 static void
29241 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
29242@@ -579,15 +663,27 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
29243 if (!oops_may_print())
29244 return;
29245
29246- if (error_code & PF_INSTR) {
29247+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
29248 unsigned int level;
29249
29250 pte_t *pte = lookup_address(address, &level);
29251
29252 if (pte && pte_present(*pte) && !pte_exec(*pte))
29253- printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
29254+ printk(nx_warning, from_kuid_munged(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
29255 }
29256
29257+#ifdef CONFIG_PAX_KERNEXEC
29258+ if (init_mm.start_code <= address && address < init_mm.end_code) {
29259+ if (current->signal->curr_ip)
29260+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
29261+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
29262+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
29263+ else
29264+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
29265+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
29266+ }
29267+#endif
29268+
29269 printk(KERN_ALERT "BUG: unable to handle kernel ");
29270 if (address < PAGE_SIZE)
29271 printk(KERN_CONT "NULL pointer dereference");
29272@@ -750,6 +846,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
29273 return;
29274 }
29275 #endif
29276+
29277+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
29278+ if (pax_is_fetch_fault(regs, error_code, address)) {
29279+
29280+#ifdef CONFIG_PAX_EMUTRAMP
29281+ switch (pax_handle_fetch_fault(regs)) {
29282+ case 2:
29283+ return;
29284+ }
29285+#endif
29286+
29287+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
29288+ do_group_exit(SIGKILL);
29289+ }
29290+#endif
29291+
29292 /* Kernel addresses are always protection faults: */
29293 if (address >= TASK_SIZE)
29294 error_code |= PF_PROT;
29295@@ -835,7 +947,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
29296 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
29297 printk(KERN_ERR
29298 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
29299- tsk->comm, tsk->pid, address);
29300+ tsk->comm, task_pid_nr(tsk), address);
29301 code = BUS_MCEERR_AR;
29302 }
29303 #endif
29304@@ -898,6 +1010,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
29305 return 1;
29306 }
29307
29308+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
29309+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
29310+{
29311+ pte_t *pte;
29312+ pmd_t *pmd;
29313+ spinlock_t *ptl;
29314+ unsigned char pte_mask;
29315+
29316+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
29317+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
29318+ return 0;
29319+
29320+ /* PaX: it's our fault, let's handle it if we can */
29321+
29322+ /* PaX: take a look at read faults before acquiring any locks */
29323+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
29324+ /* instruction fetch attempt from a protected page in user mode */
29325+ up_read(&mm->mmap_sem);
29326+
29327+#ifdef CONFIG_PAX_EMUTRAMP
29328+ switch (pax_handle_fetch_fault(regs)) {
29329+ case 2:
29330+ return 1;
29331+ }
29332+#endif
29333+
29334+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
29335+ do_group_exit(SIGKILL);
29336+ }
29337+
29338+ pmd = pax_get_pmd(mm, address);
29339+ if (unlikely(!pmd))
29340+ return 0;
29341+
29342+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
29343+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
29344+ pte_unmap_unlock(pte, ptl);
29345+ return 0;
29346+ }
29347+
29348+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
29349+ /* write attempt to a protected page in user mode */
29350+ pte_unmap_unlock(pte, ptl);
29351+ return 0;
29352+ }
29353+
29354+#ifdef CONFIG_SMP
29355+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
29356+#else
29357+ if (likely(address > get_limit(regs->cs)))
29358+#endif
29359+ {
29360+ set_pte(pte, pte_mkread(*pte));
29361+ __flush_tlb_one(address);
29362+ pte_unmap_unlock(pte, ptl);
29363+ up_read(&mm->mmap_sem);
29364+ return 1;
29365+ }
29366+
29367+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
29368+
29369+ /*
29370+ * PaX: fill DTLB with user rights and retry
29371+ */
29372+ __asm__ __volatile__ (
29373+ "orb %2,(%1)\n"
29374+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
29375+/*
29376+ * PaX: let this uncommented 'invlpg' remind us of the behaviour of Intel's
29377+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
29378+ * page fault when examined during a TLB load attempt. this is true not only
29379+ * for PTEs holding a non-present entry but also for present entries that will
29380+ * raise a page fault (such as those set up by PaX, or the copy-on-write
29381+ * mechanism). in effect it means that we do *not* need to flush the TLBs
29382+ * for our target pages since their PTEs are simply not in the TLBs at all.
29383+ *
29384+ * the best thing about omitting it is that we gain around 15-20% speed in the
29385+ * fast path of the page fault handler and can get rid of tracing since we
29386+ * can no longer flush unintended entries.
29387+ */
29388+ "invlpg (%0)\n"
29389+#endif
29390+ __copyuser_seg"testb $0,(%0)\n"
29391+ "xorb %3,(%1)\n"
29392+ :
29393+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
29394+ : "memory", "cc");
29395+ pte_unmap_unlock(pte, ptl);
29396+ up_read(&mm->mmap_sem);
29397+ return 1;
29398+}
29399+#endif
29400+
29401 /*
29402 * Handle a spurious fault caused by a stale TLB entry.
29403 *
29404@@ -964,6 +1169,9 @@ int show_unhandled_signals = 1;
29405 static inline int
29406 access_error(unsigned long error_code, struct vm_area_struct *vma)
29407 {
29408+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
29409+ return 1;
29410+
29411 if (error_code & PF_WRITE) {
29412 /* write, present and write, not present: */
29413 if (unlikely(!(vma->vm_flags & VM_WRITE)))
29414@@ -992,7 +1200,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
29415 if (error_code & PF_USER)
29416 return false;
29417
29418- if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
29419+ if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
29420 return false;
29421
29422 return true;
29423@@ -1008,18 +1216,33 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
29424 {
29425 struct vm_area_struct *vma;
29426 struct task_struct *tsk;
29427- unsigned long address;
29428 struct mm_struct *mm;
29429 int fault;
29430 int write = error_code & PF_WRITE;
29431 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
29432 (write ? FAULT_FLAG_WRITE : 0);
29433
29434- tsk = current;
29435- mm = tsk->mm;
29436-
29437 /* Get the faulting address: */
29438- address = read_cr2();
29439+ unsigned long address = read_cr2();
29440+
29441+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
29442+ if (!user_mode(regs) && address < 2 * pax_user_shadow_base) {
29443+ if (!search_exception_tables(regs->ip)) {
29444+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
29445+ bad_area_nosemaphore(regs, error_code, address);
29446+ return;
29447+ }
29448+ if (address < pax_user_shadow_base) {
29449+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
29450+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
29451+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
29452+ } else
29453+ address -= pax_user_shadow_base;
29454+ }
29455+#endif
29456+
29457+ tsk = current;
29458+ mm = tsk->mm;
29459
29460 /*
29461 * Detect and handle instructions that would cause a page fault for
29462@@ -1080,7 +1303,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
29463 * User-mode registers count as a user access even for any
29464 * potential system fault or CPU buglet:
29465 */
29466- if (user_mode_vm(regs)) {
29467+ if (user_mode(regs)) {
29468 local_irq_enable();
29469 error_code |= PF_USER;
29470 } else {
29471@@ -1142,6 +1365,11 @@ retry:
29472 might_sleep();
29473 }
29474
29475+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
29476+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
29477+ return;
29478+#endif
29479+
29480 vma = find_vma(mm, address);
29481 if (unlikely(!vma)) {
29482 bad_area(regs, error_code, address);
29483@@ -1153,18 +1381,24 @@ retry:
29484 bad_area(regs, error_code, address);
29485 return;
29486 }
29487- if (error_code & PF_USER) {
29488- /*
29489- * Accessing the stack below %sp is always a bug.
29490- * The large cushion allows instructions like enter
29491- * and pusha to work. ("enter $65535, $31" pushes
29492- * 32 pointers and then decrements %sp by 65535.)
29493- */
29494- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
29495- bad_area(regs, error_code, address);
29496- return;
29497- }
29498+ /*
29499+ * Accessing the stack below %sp is always a bug.
29500+ * The large cushion allows instructions like enter
29501+ * and pusha to work. ("enter $65535, $31" pushes
29502+ * 32 pointers and then decrements %sp by 65535.)
29503+ */
29504+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
29505+ bad_area(regs, error_code, address);
29506+ return;
29507 }
29508+
29509+#ifdef CONFIG_PAX_SEGMEXEC
29510+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
29511+ bad_area(regs, error_code, address);
29512+ return;
29513+ }
29514+#endif
29515+
29516 if (unlikely(expand_stack(vma, address))) {
29517 bad_area(regs, error_code, address);
29518 return;
29519@@ -1230,3 +1464,292 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
29520 __do_page_fault(regs, error_code);
29521 exception_exit(prev_state);
29522 }
29523+
29524+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
29525+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
29526+{
29527+ struct mm_struct *mm = current->mm;
29528+ unsigned long ip = regs->ip;
29529+
29530+ if (v8086_mode(regs))
29531+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
29532+
29533+#ifdef CONFIG_PAX_PAGEEXEC
29534+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
29535+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
29536+ return true;
29537+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
29538+ return true;
29539+ return false;
29540+ }
29541+#endif
29542+
29543+#ifdef CONFIG_PAX_SEGMEXEC
29544+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
29545+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
29546+ return true;
29547+ return false;
29548+ }
29549+#endif
29550+
29551+ return false;
29552+}
29553+#endif
29554+
29555+#ifdef CONFIG_PAX_EMUTRAMP
29556+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
29557+{
29558+ int err;
29559+
29560+ do { /* PaX: libffi trampoline emulation */
29561+ unsigned char mov, jmp;
29562+ unsigned int addr1, addr2;
29563+
29564+#ifdef CONFIG_X86_64
29565+ if ((regs->ip + 9) >> 32)
29566+ break;
29567+#endif
29568+
29569+ err = get_user(mov, (unsigned char __user *)regs->ip);
29570+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
29571+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
29572+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
29573+
29574+ if (err)
29575+ break;
29576+
29577+ if (mov == 0xB8 && jmp == 0xE9) {
29578+ regs->ax = addr1;
29579+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
29580+ return 2;
29581+ }
29582+ } while (0);
29583+
29584+ do { /* PaX: gcc trampoline emulation #1 */
29585+ unsigned char mov1, mov2;
29586+ unsigned short jmp;
29587+ unsigned int addr1, addr2;
29588+
29589+#ifdef CONFIG_X86_64
29590+ if ((regs->ip + 11) >> 32)
29591+ break;
29592+#endif
29593+
29594+ err = get_user(mov1, (unsigned char __user *)regs->ip);
29595+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
29596+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
29597+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
29598+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
29599+
29600+ if (err)
29601+ break;
29602+
29603+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
29604+ regs->cx = addr1;
29605+ regs->ax = addr2;
29606+ regs->ip = addr2;
29607+ return 2;
29608+ }
29609+ } while (0);
29610+
29611+ do { /* PaX: gcc trampoline emulation #2 */
29612+ unsigned char mov, jmp;
29613+ unsigned int addr1, addr2;
29614+
29615+#ifdef CONFIG_X86_64
29616+ if ((regs->ip + 9) >> 32)
29617+ break;
29618+#endif
29619+
29620+ err = get_user(mov, (unsigned char __user *)regs->ip);
29621+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
29622+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
29623+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
29624+
29625+ if (err)
29626+ break;
29627+
29628+ if (mov == 0xB9 && jmp == 0xE9) {
29629+ regs->cx = addr1;
29630+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
29631+ return 2;
29632+ }
29633+ } while (0);
29634+
29635+ return 1; /* PaX in action */
29636+}
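/*
 * Illustrative sketch, not part of the patch: the matcher above looks for
 * the 10-byte libffi trampoline "mov $imm32,%eax; jmp rel32", i.e. opcode
 * 0xB8 at offset 0 and 0xE9 at offset 5, and emulates it by loading EAX
 * and adding the displacement to the address just past the pair (+10).
 * A user-space model over a plain byte buffer (assumes little-endian x86;
 * the function name is hypothetical):
 */
static int emulate_libffi_tramp32(const unsigned char *ip, unsigned int *eax,
				  unsigned int *next_ip, unsigned int cur_ip)
{
	unsigned int imm, disp;

	if (ip[0] != 0xB8 || ip[5] != 0xE9)
		return 0;			/* not this trampoline */
	__builtin_memcpy(&imm, ip + 1, 4);	/* mov immediate */
	__builtin_memcpy(&disp, ip + 6, 4);	/* jmp rel32 displacement */
	*eax = imm;
	*next_ip = cur_ip + disp + 10;		/* rel32 is from end of pair */
	return 1;
}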
29637+
29638+#ifdef CONFIG_X86_64
29639+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
29640+{
29641+ int err;
29642+
29643+ do { /* PaX: libffi trampoline emulation */
29644+ unsigned short mov1, mov2, jmp1;
29645+ unsigned char stcclc, jmp2;
29646+ unsigned long addr1, addr2;
29647+
29648+ err = get_user(mov1, (unsigned short __user *)regs->ip);
29649+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
29650+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
29651+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
29652+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
29653+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
29654+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
29655+
29656+ if (err)
29657+ break;
29658+
29659+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
29660+ regs->r11 = addr1;
29661+ regs->r10 = addr2;
29662+ if (stcclc == 0xF8)
29663+ regs->flags &= ~X86_EFLAGS_CF;
29664+ else
29665+ regs->flags |= X86_EFLAGS_CF;
29666+ regs->ip = addr1;
29667+ return 2;
29668+ }
29669+ } while (0);
29670+
29671+ do { /* PaX: gcc trampoline emulation #1 */
29672+ unsigned short mov1, mov2, jmp1;
29673+ unsigned char jmp2;
29674+ unsigned int addr1;
29675+ unsigned long addr2;
29676+
29677+ err = get_user(mov1, (unsigned short __user *)regs->ip);
29678+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
29679+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
29680+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
29681+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
29682+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
29683+
29684+ if (err)
29685+ break;
29686+
29687+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
29688+ regs->r11 = addr1;
29689+ regs->r10 = addr2;
29690+ regs->ip = addr1;
29691+ return 2;
29692+ }
29693+ } while (0);
29694+
29695+ do { /* PaX: gcc trampoline emulation #2 */
29696+ unsigned short mov1, mov2, jmp1;
29697+ unsigned char jmp2;
29698+ unsigned long addr1, addr2;
29699+
29700+ err = get_user(mov1, (unsigned short __user *)regs->ip);
29701+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
29702+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
29703+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
29704+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
29705+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
29706+
29707+ if (err)
29708+ break;
29709+
29710+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
29711+ regs->r11 = addr1;
29712+ regs->r10 = addr2;
29713+ regs->ip = addr1;
29714+ return 2;
29715+ }
29716+ } while (0);
29717+
29718+ return 1; /* PaX in action */
29719+}
29720+#endif
29721+
29722+/*
29723+ * PaX: decide what to do with offenders (regs->ip = fault address)
29724+ *
29725+ * returns 1 when task should be killed
29726+ * 2 when gcc trampoline was detected
29727+ */
29728+static int pax_handle_fetch_fault(struct pt_regs *regs)
29729+{
29730+ if (v8086_mode(regs))
29731+ return 1;
29732+
29733+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
29734+ return 1;
29735+
29736+#ifdef CONFIG_X86_32
29737+ return pax_handle_fetch_fault_32(regs);
29738+#else
29739+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
29740+ return pax_handle_fetch_fault_32(regs);
29741+ else
29742+ return pax_handle_fetch_fault_64(regs);
29743+#endif
29744+}
29745+#endif
29746+
29747+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
29748+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
29749+{
29750+ long i;
29751+
29752+ printk(KERN_ERR "PAX: bytes at PC: ");
29753+ for (i = 0; i < 20; i++) {
29754+ unsigned char c;
29755+ if (get_user(c, (unsigned char __force_user *)pc+i))
29756+ printk(KERN_CONT "?? ");
29757+ else
29758+ printk(KERN_CONT "%02x ", c);
29759+ }
29760+ printk("\n");
29761+
29762+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
29763+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
29764+ unsigned long c;
29765+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
29766+#ifdef CONFIG_X86_32
29767+ printk(KERN_CONT "???????? ");
29768+#else
29769+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
29770+ printk(KERN_CONT "???????? ???????? ");
29771+ else
29772+ printk(KERN_CONT "???????????????? ");
29773+#endif
29774+ } else {
29775+#ifdef CONFIG_X86_64
29776+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
29777+ printk(KERN_CONT "%08x ", (unsigned int)c);
29778+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
29779+ } else
29780+#endif
29781+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
29782+ }
29783+ }
29784+ printk("\n");
29785+}
29786+#endif
29787+
29788+/**
29789+ * probe_kernel_write(): safely attempt to write to a location
29790+ * @dst: address to write to
29791+ * @src: pointer to the data that shall be written
29792+ * @size: size of the data chunk
29793+ *
29794+ * Safely write to address @dst from the buffer at @src. If a kernel fault
29795+ * happens, handle that and return -EFAULT.
29796+ */
29797+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
29798+{
29799+ long ret;
29800+ mm_segment_t old_fs = get_fs();
29801+
29802+ set_fs(KERNEL_DS);
29803+ pagefault_disable();
29804+ pax_open_kernel();
29805+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
29806+ pax_close_kernel();
29807+ pagefault_enable();
29808+ set_fs(old_fs);
29809+
29810+ return ret ? -EFAULT : 0;
29811+}
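
The probe_kernel_write() above mirrors the generic mm/maccess.c helper but opens a KERNEXEC write window (pax_open_kernel/pax_close_kernel) around the copy, so it can also patch otherwise read-only kernel data. A typical call pattern, sketched with a hypothetical patch_word() wrapper:

/* patch one word of (possibly read-only) kernel memory;
 * returns 0 on success, -EFAULT if the destination faulted */
static int patch_word(void *patch_site, unsigned long new_value)
{
	return probe_kernel_write(patch_site, &new_value, sizeof(new_value));
}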
29812diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
29813index dd74e46..7d26398 100644
29814--- a/arch/x86/mm/gup.c
29815+++ b/arch/x86/mm/gup.c
29816@@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
29817 addr = start;
29818 len = (unsigned long) nr_pages << PAGE_SHIFT;
29819 end = start + len;
29820- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
29821+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
29822 (void __user *)start, len)))
29823 return 0;
29824
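
The gup.c change swaps access_ok() for __access_ok(), keeping only the raw range test on the fast path. That test amounts to an overflow-safe bounds check; a minimal model of what such a check computes (names and the exact comparison are illustrative, not the kernel's macro):

/* reject ranges that wrap around or extend past the user-space limit */
static int range_ok_model(unsigned long addr, unsigned long len,
			  unsigned long limit)
{
	return addr + len >= addr && addr + len <= limit;
}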
29825diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
29826index 252b8f5..4dcfdc1 100644
29827--- a/arch/x86/mm/highmem_32.c
29828+++ b/arch/x86/mm/highmem_32.c
29829@@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
29830 idx = type + KM_TYPE_NR*smp_processor_id();
29831 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
29832 BUG_ON(!pte_none(*(kmap_pte-idx)));
29833+
29834+ pax_open_kernel();
29835 set_pte(kmap_pte-idx, mk_pte(page, prot));
29836+ pax_close_kernel();
29837+
29838 arch_flush_lazy_mmu_mode();
29839
29840 return (void *)vaddr;
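
This hunk, like the iomap_32.c one further below, wraps set_pte() in pax_open_kernel()/pax_close_kernel(): under KERNEXEC the kernel page tables are read-only, so writes happen inside a short write-enable window (for example by toggling CR0.WP). A stub sketch of the pattern, with toggle_wp() as a hypothetical stand-in for the real primitive:

static void toggle_wp(int on) { (void)on; /* stub; real code flips CR0.WP */ }

static void write_protected_word(unsigned long *p, unsigned long v)
{
	toggle_wp(0);	/* open the write window */
	*p = v;
	toggle_wp(1);	/* close it again */
}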
29841diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
29842index ae1aa71..d9bea75 100644
29843--- a/arch/x86/mm/hugetlbpage.c
29844+++ b/arch/x86/mm/hugetlbpage.c
29845@@ -271,23 +271,30 @@ follow_huge_pud(struct mm_struct *mm, unsigned long address,
29846 #ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
29847 static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
29848 unsigned long addr, unsigned long len,
29849- unsigned long pgoff, unsigned long flags)
29850+ unsigned long pgoff, unsigned long flags, unsigned long offset)
29851 {
29852 struct hstate *h = hstate_file(file);
29853 struct vm_unmapped_area_info info;
29854-
29855+
29856 info.flags = 0;
29857 info.length = len;
29858 info.low_limit = TASK_UNMAPPED_BASE;
29859+
29860+#ifdef CONFIG_PAX_RANDMMAP
29861+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
29862+ info.low_limit += current->mm->delta_mmap;
29863+#endif
29864+
29865 info.high_limit = TASK_SIZE;
29866 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
29867 info.align_offset = 0;
29868+ info.threadstack_offset = offset;
29869 return vm_unmapped_area(&info);
29870 }
29871
29872 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
29873 unsigned long addr0, unsigned long len,
29874- unsigned long pgoff, unsigned long flags)
29875+ unsigned long pgoff, unsigned long flags, unsigned long offset)
29876 {
29877 struct hstate *h = hstate_file(file);
29878 struct vm_unmapped_area_info info;
29879@@ -299,6 +306,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
29880 info.high_limit = current->mm->mmap_base;
29881 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
29882 info.align_offset = 0;
29883+ info.threadstack_offset = offset;
29884 addr = vm_unmapped_area(&info);
29885
29886 /*
29887@@ -311,6 +319,12 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
29888 VM_BUG_ON(addr != -ENOMEM);
29889 info.flags = 0;
29890 info.low_limit = TASK_UNMAPPED_BASE;
29891+
29892+#ifdef CONFIG_PAX_RANDMMAP
29893+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
29894+ info.low_limit += current->mm->delta_mmap;
29895+#endif
29896+
29897 info.high_limit = TASK_SIZE;
29898 addr = vm_unmapped_area(&info);
29899 }
29900@@ -325,10 +339,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
29901 struct hstate *h = hstate_file(file);
29902 struct mm_struct *mm = current->mm;
29903 struct vm_area_struct *vma;
29904+ unsigned long pax_task_size = TASK_SIZE;
29905+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
29906
29907 if (len & ~huge_page_mask(h))
29908 return -EINVAL;
29909- if (len > TASK_SIZE)
29910+
29911+#ifdef CONFIG_PAX_SEGMEXEC
29912+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
29913+ pax_task_size = SEGMEXEC_TASK_SIZE;
29914+#endif
29915+
29916+ pax_task_size -= PAGE_SIZE;
29917+
29918+ if (len > pax_task_size)
29919 return -ENOMEM;
29920
29921 if (flags & MAP_FIXED) {
29922@@ -337,19 +361,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
29923 return addr;
29924 }
29925
29926+#ifdef CONFIG_PAX_RANDMMAP
29927+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
29928+#endif
29929+
29930 if (addr) {
29931 addr = ALIGN(addr, huge_page_size(h));
29932 vma = find_vma(mm, addr);
29933- if (TASK_SIZE - len >= addr &&
29934- (!vma || addr + len <= vma->vm_start))
29935+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
29936 return addr;
29937 }
29938 if (mm->get_unmapped_area == arch_get_unmapped_area)
29939 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
29940- pgoff, flags);
29941+ pgoff, flags, offset);
29942 else
29943 return hugetlb_get_unmapped_area_topdown(file, addr, len,
29944- pgoff, flags);
29945+ pgoff, flags, offset);
29946 }
29947
29948 #endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/
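
The RANDMMAP branches above raise the bottom-up search floor by a per-mm random delta so hugetlb mappings do not start at a predictable base. The adjustment itself is a one-liner; as a sketch (delta_mmap is assumed to be a page-aligned random offset chosen at exec time):

static unsigned long randomized_floor(unsigned long base, int randmmap,
				      unsigned long delta_mmap)
{
	return randmmap ? base + delta_mmap : base;
}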
29949diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
29950index 1f34e92..c97b98f 100644
29951--- a/arch/x86/mm/init.c
29952+++ b/arch/x86/mm/init.c
29953@@ -4,6 +4,7 @@
29954 #include <linux/swap.h>
29955 #include <linux/memblock.h>
29956 #include <linux/bootmem.h> /* for max_low_pfn */
29957+#include <linux/tboot.h>
29958
29959 #include <asm/cacheflush.h>
29960 #include <asm/e820.h>
29961@@ -17,6 +18,8 @@
29962 #include <asm/proto.h>
29963 #include <asm/dma.h> /* for MAX_DMA_PFN */
29964 #include <asm/microcode.h>
29965+#include <asm/desc.h>
29966+#include <asm/bios_ebda.h>
29967
29968 #include "mm_internal.h"
29969
29970@@ -465,7 +468,18 @@ void __init init_mem_mapping(void)
29971 early_ioremap_page_table_range_init();
29972 #endif
29973
29974+#ifdef CONFIG_PAX_PER_CPU_PGD
29975+ clone_pgd_range(get_cpu_pgd(0, kernel) + KERNEL_PGD_BOUNDARY,
29976+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
29977+ KERNEL_PGD_PTRS);
29978+ clone_pgd_range(get_cpu_pgd(0, user) + KERNEL_PGD_BOUNDARY,
29979+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
29980+ KERNEL_PGD_PTRS);
29981+ load_cr3(get_cpu_pgd(0, kernel));
29982+#else
29983 load_cr3(swapper_pg_dir);
29984+#endif
29985+
29986 __flush_tlb_all();
29987
29988 early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
29989@@ -481,10 +495,40 @@ void __init init_mem_mapping(void)
29990 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
29991 * mmio resources as well as potential bios/acpi data regions.
29992 */
29993+
29994+#ifdef CONFIG_GRKERNSEC_KMEM
29995+static unsigned int ebda_start __read_only;
29996+static unsigned int ebda_end __read_only;
29997+#endif
29998+
29999 int devmem_is_allowed(unsigned long pagenr)
30000 {
30001- if (pagenr < 256)
30002+#ifdef CONFIG_GRKERNSEC_KMEM
30003+ /* allow BDA */
30004+ if (!pagenr)
30005 return 1;
30006+ /* allow EBDA */
30007+ if (pagenr >= ebda_start && pagenr < ebda_end)
30008+ return 1;
30009+ /* if tboot is in use, allow access to its hardcoded serial log range */
30010+ if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT)))
30011+ return 1;
30012+#else
30013+ if (!pagenr)
30014+ return 1;
30015+#ifdef CONFIG_VM86
30016+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
30017+ return 1;
30018+#endif
30019+#endif
30020+
30021+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
30022+ return 1;
30023+#ifdef CONFIG_GRKERNSEC_KMEM
30024+ /* throw out everything else below 1MB */
30025+ if (pagenr <= 256)
30026+ return 0;
30027+#endif
30028 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
30029 return 0;
30030 if (!page_is_ram(pagenr))
30031@@ -538,8 +582,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
30032 #endif
30033 }
30034
30035+#ifdef CONFIG_GRKERNSEC_KMEM
30036+static inline void gr_init_ebda(void)
30037+{
30038+ unsigned int ebda_addr;
30039+ unsigned int ebda_size = 0;
30040+
30041+ ebda_addr = get_bios_ebda();
30042+ if (ebda_addr) {
30043+ ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
30044+ ebda_size <<= 10;
30045+ }
30046+ if (ebda_addr && ebda_size) {
30047+ ebda_start = ebda_addr >> PAGE_SHIFT;
30048+ ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
30049+ } else {
30050+ ebda_start = 0x9f000 >> PAGE_SHIFT;
30051+ ebda_end = 0xa0000 >> PAGE_SHIFT;
30052+ }
30053+}
30054+#else
30055+static inline void gr_init_ebda(void) { }
30056+#endif
30057+
30058 void free_initmem(void)
30059 {
30060+#ifdef CONFIG_PAX_KERNEXEC
30061+#ifdef CONFIG_X86_32
30062+ /* PaX: limit KERNEL_CS to actual size */
30063+ unsigned long addr, limit;
30064+ struct desc_struct d;
30065+ int cpu;
30066+#else
30067+ pgd_t *pgd;
30068+ pud_t *pud;
30069+ pmd_t *pmd;
30070+ unsigned long addr, end;
30071+#endif
30072+#endif
30073+
30074+ gr_init_ebda();
30075+
30076+#ifdef CONFIG_PAX_KERNEXEC
30077+#ifdef CONFIG_X86_32
30078+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
30079+ limit = (limit - 1UL) >> PAGE_SHIFT;
30080+
30081+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
30082+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
30083+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
30084+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
30085+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
30086+ }
30087+
30088+ /* PaX: make KERNEL_CS read-only */
30089+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
30090+ if (!paravirt_enabled())
30091+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
30092+/*
30093+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
30094+ pgd = pgd_offset_k(addr);
30095+ pud = pud_offset(pgd, addr);
30096+ pmd = pmd_offset(pud, addr);
30097+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
30098+ }
30099+*/
30100+#ifdef CONFIG_X86_PAE
30101+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
30102+/*
30103+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
30104+ pgd = pgd_offset_k(addr);
30105+ pud = pud_offset(pgd, addr);
30106+ pmd = pmd_offset(pud, addr);
30107+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
30108+ }
30109+*/
30110+#endif
30111+
30112+#ifdef CONFIG_MODULES
30113+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
30114+#endif
30115+
30116+#else
30117+ /* PaX: make kernel code/rodata read-only, rest non-executable */
30118+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
30119+ pgd = pgd_offset_k(addr);
30120+ pud = pud_offset(pgd, addr);
30121+ pmd = pmd_offset(pud, addr);
30122+ if (!pmd_present(*pmd))
30123+ continue;
30124+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
30125+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
30126+ else
30127+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
30128+ }
30129+
30130+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
30131+ end = addr + KERNEL_IMAGE_SIZE;
30132+ for (; addr < end; addr += PMD_SIZE) {
30133+ pgd = pgd_offset_k(addr);
30134+ pud = pud_offset(pgd, addr);
30135+ pmd = pmd_offset(pud, addr);
30136+ if (!pmd_present(*pmd))
30137+ continue;
30138+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
30139+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
30140+ }
30141+#endif
30142+
30143+ flush_tlb_all();
30144+#endif
30145+
30146 free_init_pages("unused kernel memory",
30147 (unsigned long)(&__init_begin),
30148 (unsigned long)(&__init_end));
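
With GRKERNSEC_KMEM, devmem_is_allowed() above whitelists only the BDA page, the detected EBDA range, tboot's hardcoded serial-log window, and the ISA hole, rejecting everything else below 1MB. A user-space decision model (4KiB pages assumed; the tboot case is omitted):

static int devmem_allowed_model(unsigned long pagenr,
				unsigned long ebda_start, unsigned long ebda_end)
{
	if (!pagenr)
		return 1;				/* BDA */
	if (pagenr >= ebda_start && pagenr < ebda_end)
		return 1;				/* EBDA */
	if (pagenr >= (0xa0000 >> 12) && pagenr < (0x100000 >> 12))
		return 1;				/* ISA hole */
	if (pagenr <= 256)
		return 0;				/* rest of low 1MB */
	return 1;	/* above 1MB: still subject to the later checks */
}

The gr_init_ebda() hunk shows where ebda_start/ebda_end come from: the BIOS EBDA pointer, with a 0x9f000-0xa0000 fallback when the BIOS reports nothing usable.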
30149diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
30150index 3ac7e31..89611b7 100644
30151--- a/arch/x86/mm/init_32.c
30152+++ b/arch/x86/mm/init_32.c
30153@@ -62,33 +62,6 @@ static noinline int do_test_wp_bit(void);
30154 bool __read_mostly __vmalloc_start_set = false;
30155
30156 /*
30157- * Creates a middle page table and puts a pointer to it in the
30158- * given global directory entry. This only returns the gd entry
30159- * in non-PAE compilation mode, since the middle layer is folded.
30160- */
30161-static pmd_t * __init one_md_table_init(pgd_t *pgd)
30162-{
30163- pud_t *pud;
30164- pmd_t *pmd_table;
30165-
30166-#ifdef CONFIG_X86_PAE
30167- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
30168- pmd_table = (pmd_t *)alloc_low_page();
30169- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
30170- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
30171- pud = pud_offset(pgd, 0);
30172- BUG_ON(pmd_table != pmd_offset(pud, 0));
30173-
30174- return pmd_table;
30175- }
30176-#endif
30177- pud = pud_offset(pgd, 0);
30178- pmd_table = pmd_offset(pud, 0);
30179-
30180- return pmd_table;
30181-}
30182-
30183-/*
30184 * Create a page table and place a pointer to it in a middle page
30185 * directory entry:
30186 */
30187@@ -98,13 +71,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
30188 pte_t *page_table = (pte_t *)alloc_low_page();
30189
30190 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
30191+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
30192+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
30193+#else
30194 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
30195+#endif
30196 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
30197 }
30198
30199 return pte_offset_kernel(pmd, 0);
30200 }
30201
30202+static pmd_t * __init one_md_table_init(pgd_t *pgd)
30203+{
30204+ pud_t *pud;
30205+ pmd_t *pmd_table;
30206+
30207+ pud = pud_offset(pgd, 0);
30208+ pmd_table = pmd_offset(pud, 0);
30209+
30210+ return pmd_table;
30211+}
30212+
30213 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
30214 {
30215 int pgd_idx = pgd_index(vaddr);
30216@@ -208,6 +196,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
30217 int pgd_idx, pmd_idx;
30218 unsigned long vaddr;
30219 pgd_t *pgd;
30220+ pud_t *pud;
30221 pmd_t *pmd;
30222 pte_t *pte = NULL;
30223 unsigned long count = page_table_range_init_count(start, end);
30224@@ -222,8 +211,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
30225 pgd = pgd_base + pgd_idx;
30226
30227 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
30228- pmd = one_md_table_init(pgd);
30229- pmd = pmd + pmd_index(vaddr);
30230+ pud = pud_offset(pgd, vaddr);
30231+ pmd = pmd_offset(pud, vaddr);
30232+
30233+#ifdef CONFIG_X86_PAE
30234+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
30235+#endif
30236+
30237 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
30238 pmd++, pmd_idx++) {
30239 pte = page_table_kmap_check(one_page_table_init(pmd),
30240@@ -235,11 +229,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
30241 }
30242 }
30243
30244-static inline int is_kernel_text(unsigned long addr)
30245+static inline int is_kernel_text(unsigned long start, unsigned long end)
30246 {
30247- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
30248- return 1;
30249- return 0;
30250+ if ((start > ktla_ktva((unsigned long)_etext) ||
30251+ end <= ktla_ktva((unsigned long)_stext)) &&
30252+ (start > ktla_ktva((unsigned long)_einittext) ||
30253+ end <= ktla_ktva((unsigned long)_sinittext)) &&
30254+
30255+#ifdef CONFIG_ACPI_SLEEP
30256+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
30257+#endif
30258+
30259+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
30260+ return 0;
30261+ return 1;
30262 }
30263
30264 /*
30265@@ -256,9 +259,10 @@ kernel_physical_mapping_init(unsigned long start,
30266 unsigned long last_map_addr = end;
30267 unsigned long start_pfn, end_pfn;
30268 pgd_t *pgd_base = swapper_pg_dir;
30269- int pgd_idx, pmd_idx, pte_ofs;
30270+ unsigned int pgd_idx, pmd_idx, pte_ofs;
30271 unsigned long pfn;
30272 pgd_t *pgd;
30273+ pud_t *pud;
30274 pmd_t *pmd;
30275 pte_t *pte;
30276 unsigned pages_2m, pages_4k;
30277@@ -291,8 +295,13 @@ repeat:
30278 pfn = start_pfn;
30279 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
30280 pgd = pgd_base + pgd_idx;
30281- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
30282- pmd = one_md_table_init(pgd);
30283+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
30284+ pud = pud_offset(pgd, 0);
30285+ pmd = pmd_offset(pud, 0);
30286+
30287+#ifdef CONFIG_X86_PAE
30288+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
30289+#endif
30290
30291 if (pfn >= end_pfn)
30292 continue;
30293@@ -304,14 +313,13 @@ repeat:
30294 #endif
30295 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
30296 pmd++, pmd_idx++) {
30297- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
30298+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
30299
30300 /*
30301 * Map with big pages if possible, otherwise
30302 * create normal page tables:
30303 */
30304 if (use_pse) {
30305- unsigned int addr2;
30306 pgprot_t prot = PAGE_KERNEL_LARGE;
30307 /*
30308 * first pass will use the same initial
30309@@ -322,11 +330,7 @@ repeat:
30310 _PAGE_PSE);
30311
30312 pfn &= PMD_MASK >> PAGE_SHIFT;
30313- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
30314- PAGE_OFFSET + PAGE_SIZE-1;
30315-
30316- if (is_kernel_text(addr) ||
30317- is_kernel_text(addr2))
30318+ if (is_kernel_text(address, address + PMD_SIZE))
30319 prot = PAGE_KERNEL_LARGE_EXEC;
30320
30321 pages_2m++;
30322@@ -343,7 +347,7 @@ repeat:
30323 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
30324 pte += pte_ofs;
30325 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
30326- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
30327+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
30328 pgprot_t prot = PAGE_KERNEL;
30329 /*
30330 * first pass will use the same initial
30331@@ -351,7 +355,7 @@ repeat:
30332 */
30333 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
30334
30335- if (is_kernel_text(addr))
30336+ if (is_kernel_text(address, address + PAGE_SIZE))
30337 prot = PAGE_KERNEL_EXEC;
30338
30339 pages_4k++;
30340@@ -474,7 +478,7 @@ void __init native_pagetable_init(void)
30341
30342 pud = pud_offset(pgd, va);
30343 pmd = pmd_offset(pud, va);
30344- if (!pmd_present(*pmd))
30345+ if (!pmd_present(*pmd)) // PAX TODO || pmd_large(*pmd))
30346 break;
30347
30348 /* should not be large page here */
30349@@ -532,12 +536,10 @@ void __init early_ioremap_page_table_range_init(void)
30350
30351 static void __init pagetable_init(void)
30352 {
30353- pgd_t *pgd_base = swapper_pg_dir;
30354-
30355- permanent_kmaps_init(pgd_base);
30356+ permanent_kmaps_init(swapper_pg_dir);
30357 }
30358
30359-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
30360+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
30361 EXPORT_SYMBOL_GPL(__supported_pte_mask);
30362
30363 /* user-defined highmem size */
30364@@ -772,7 +774,7 @@ void __init mem_init(void)
30365 after_bootmem = 1;
30366
30367 codesize = (unsigned long) &_etext - (unsigned long) &_text;
30368- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
30369+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
30370 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
30371
30372 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
30373@@ -813,10 +815,10 @@ void __init mem_init(void)
30374 ((unsigned long)&__init_end -
30375 (unsigned long)&__init_begin) >> 10,
30376
30377- (unsigned long)&_etext, (unsigned long)&_edata,
30378- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
30379+ (unsigned long)&_sdata, (unsigned long)&_edata,
30380+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
30381
30382- (unsigned long)&_text, (unsigned long)&_etext,
30383+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
30384 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
30385
30386 /*
30387@@ -906,6 +908,7 @@ void set_kernel_text_rw(void)
30388 if (!kernel_set_to_readonly)
30389 return;
30390
30391+ start = ktla_ktva(start);
30392 pr_debug("Set kernel text: %lx - %lx for read write\n",
30393 start, start+size);
30394
30395@@ -920,6 +923,7 @@ void set_kernel_text_ro(void)
30396 if (!kernel_set_to_readonly)
30397 return;
30398
30399+ start = ktla_ktva(start);
30400 pr_debug("Set kernel text: %lx - %lx for read only\n",
30401 start, start+size);
30402
30403@@ -948,6 +952,7 @@ void mark_rodata_ro(void)
30404 unsigned long start = PFN_ALIGN(_text);
30405 unsigned long size = PFN_ALIGN(_etext) - start;
30406
30407+ start = ktla_ktva(start);
30408 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
30409 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
30410 size >> 10);
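
The new is_kernel_text(start, end) above is a chain of interval-overlap tests: a [start, end) range counts as kernel text if it overlaps the text section, the init text, the ACPI wakeup trampoline, or the low ROM window. Each clause is the standard no-overlap test; roughly (modulo the patch's inclusive bound on one side):

/* [a, b) overlaps [lo, hi) unless it lies entirely before or after it */
static int overlaps(unsigned long a, unsigned long b,
		    unsigned long lo, unsigned long hi)
{
	return !(a >= hi || b <= lo);
}

Testing a whole range instead of two sample addresses is what lets the PSE path above drop the old addr/addr2 pair.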
30411diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
30412index bb00c46..bf91a67 100644
30413--- a/arch/x86/mm/init_64.c
30414+++ b/arch/x86/mm/init_64.c
30415@@ -151,7 +151,7 @@ early_param("gbpages", parse_direct_gbpages_on);
30416 * around without checking the pgd every time.
30417 */
30418
30419-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
30420+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
30421 EXPORT_SYMBOL_GPL(__supported_pte_mask);
30422
30423 int force_personality32;
30424@@ -184,12 +184,29 @@ void sync_global_pgds(unsigned long start, unsigned long end)
30425
30426 for (address = start; address <= end; address += PGDIR_SIZE) {
30427 const pgd_t *pgd_ref = pgd_offset_k(address);
30428+
30429+#ifdef CONFIG_PAX_PER_CPU_PGD
30430+ unsigned long cpu;
30431+#else
30432 struct page *page;
30433+#endif
30434
30435 if (pgd_none(*pgd_ref))
30436 continue;
30437
30438 spin_lock(&pgd_lock);
30439+
30440+#ifdef CONFIG_PAX_PER_CPU_PGD
30441+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
30442+ pgd_t *pgd = pgd_offset_cpu(cpu, user, address);
30443+
30444+ if (pgd_none(*pgd))
30445+ set_pgd(pgd, *pgd_ref);
30446+ else
30447+ BUG_ON(pgd_page_vaddr(*pgd)
30448+ != pgd_page_vaddr(*pgd_ref));
30449+ pgd = pgd_offset_cpu(cpu, kernel, address);
30450+#else
30451 list_for_each_entry(page, &pgd_list, lru) {
30452 pgd_t *pgd;
30453 spinlock_t *pgt_lock;
30454@@ -198,6 +215,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
30455 /* the pgt_lock only for Xen */
30456 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
30457 spin_lock(pgt_lock);
30458+#endif
30459
30460 if (pgd_none(*pgd))
30461 set_pgd(pgd, *pgd_ref);
30462@@ -205,7 +223,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
30463 BUG_ON(pgd_page_vaddr(*pgd)
30464 != pgd_page_vaddr(*pgd_ref));
30465
30466+#ifndef CONFIG_PAX_PER_CPU_PGD
30467 spin_unlock(pgt_lock);
30468+#endif
30469+
30470 }
30471 spin_unlock(&pgd_lock);
30472 }
30473@@ -238,7 +259,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
30474 {
30475 if (pgd_none(*pgd)) {
30476 pud_t *pud = (pud_t *)spp_getpage();
30477- pgd_populate(&init_mm, pgd, pud);
30478+ pgd_populate_kernel(&init_mm, pgd, pud);
30479 if (pud != pud_offset(pgd, 0))
30480 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
30481 pud, pud_offset(pgd, 0));
30482@@ -250,7 +271,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
30483 {
30484 if (pud_none(*pud)) {
30485 pmd_t *pmd = (pmd_t *) spp_getpage();
30486- pud_populate(&init_mm, pud, pmd);
30487+ pud_populate_kernel(&init_mm, pud, pmd);
30488 if (pmd != pmd_offset(pud, 0))
30489 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
30490 pmd, pmd_offset(pud, 0));
30491@@ -279,7 +300,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
30492 pmd = fill_pmd(pud, vaddr);
30493 pte = fill_pte(pmd, vaddr);
30494
30495+ pax_open_kernel();
30496 set_pte(pte, new_pte);
30497+ pax_close_kernel();
30498
30499 /*
30500 * It's enough to flush this one mapping.
30501@@ -338,14 +361,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
30502 pgd = pgd_offset_k((unsigned long)__va(phys));
30503 if (pgd_none(*pgd)) {
30504 pud = (pud_t *) spp_getpage();
30505- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
30506- _PAGE_USER));
30507+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
30508 }
30509 pud = pud_offset(pgd, (unsigned long)__va(phys));
30510 if (pud_none(*pud)) {
30511 pmd = (pmd_t *) spp_getpage();
30512- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
30513- _PAGE_USER));
30514+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
30515 }
30516 pmd = pmd_offset(pud, phys);
30517 BUG_ON(!pmd_none(*pmd));
30518@@ -586,7 +607,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
30519 prot);
30520
30521 spin_lock(&init_mm.page_table_lock);
30522- pud_populate(&init_mm, pud, pmd);
30523+ pud_populate_kernel(&init_mm, pud, pmd);
30524 spin_unlock(&init_mm.page_table_lock);
30525 }
30526 __flush_tlb_all();
30527@@ -627,7 +648,7 @@ kernel_physical_mapping_init(unsigned long start,
30528 page_size_mask);
30529
30530 spin_lock(&init_mm.page_table_lock);
30531- pgd_populate(&init_mm, pgd, pud);
30532+ pgd_populate_kernel(&init_mm, pgd, pud);
30533 spin_unlock(&init_mm.page_table_lock);
30534 pgd_changed = true;
30535 }
30536@@ -1221,8 +1242,8 @@ int kern_addr_valid(unsigned long addr)
30537 static struct vm_area_struct gate_vma = {
30538 .vm_start = VSYSCALL_START,
30539 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
30540- .vm_page_prot = PAGE_READONLY_EXEC,
30541- .vm_flags = VM_READ | VM_EXEC
30542+ .vm_page_prot = PAGE_READONLY,
30543+ .vm_flags = VM_READ
30544 };
30545
30546 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
30547@@ -1256,7 +1277,7 @@ int in_gate_area_no_mm(unsigned long addr)
30548
30549 const char *arch_vma_name(struct vm_area_struct *vma)
30550 {
30551- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
30552+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
30553 return "[vdso]";
30554 if (vma == &gate_vma)
30555 return "[vsyscall]";
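
Under PAX_PER_CPU_PGD, sync_global_pgds() above walks every CPU's private pgd pair instead of pgd_list, copying the kernel's entry where it is missing and asserting agreement where it is not. A toy model over plain arrays (sizes and types are placeholders for the real per-cpu page directories):

/* ncpus page directories of 512 entries each; idx selects one slot */
static void sync_pgds_model(const unsigned long *kernel_pgd,
			    unsigned long (*percpu_pgd)[512],
			    int ncpus, int idx)
{
	int cpu;

	for (cpu = 0; cpu < ncpus; cpu++) {
		if (!percpu_pgd[cpu][idx])
			percpu_pgd[cpu][idx] = kernel_pgd[idx];
		/* else: must already equal kernel_pgd[idx] (the BUG_ON above) */
	}
}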
30556diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
30557index 7b179b4..6bd17777 100644
30558--- a/arch/x86/mm/iomap_32.c
30559+++ b/arch/x86/mm/iomap_32.c
30560@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
30561 type = kmap_atomic_idx_push();
30562 idx = type + KM_TYPE_NR * smp_processor_id();
30563 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
30564+
30565+ pax_open_kernel();
30566 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
30567+ pax_close_kernel();
30568+
30569 arch_flush_lazy_mmu_mode();
30570
30571 return (void *)vaddr;
30572diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
30573index 9a1e658..da003f3 100644
30574--- a/arch/x86/mm/ioremap.c
30575+++ b/arch/x86/mm/ioremap.c
30576@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
30577 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
30578 int is_ram = page_is_ram(pfn);
30579
30580- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
30581+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
30582 return NULL;
30583 WARN_ON_ONCE(is_ram);
30584 }
30585@@ -256,7 +256,7 @@ EXPORT_SYMBOL(ioremap_prot);
30586 *
30587 * Caller must ensure there is only one unmapping for the same pointer.
30588 */
30589-void iounmap(volatile void __iomem *addr)
30590+void iounmap(const volatile void __iomem *addr)
30591 {
30592 struct vm_struct *p, *o;
30593
30594@@ -310,6 +310,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
30595
30596 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
30597 if (page_is_ram(start >> PAGE_SHIFT))
30598+#ifdef CONFIG_HIGHMEM
30599+ if ((start >> PAGE_SHIFT) < max_low_pfn)
30600+#endif
30601 return __va(phys);
30602
30603 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
30604@@ -322,6 +325,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
30605 void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
30606 {
30607 if (page_is_ram(phys >> PAGE_SHIFT))
30608+#ifdef CONFIG_HIGHMEM
30609+ if ((phys >> PAGE_SHIFT) < max_low_pfn)
30610+#endif
30611 return;
30612
30613 iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
30614@@ -339,7 +345,7 @@ static int __init early_ioremap_debug_setup(char *str)
30615 early_param("early_ioremap_debug", early_ioremap_debug_setup);
30616
30617 static __initdata int after_paging_init;
30618-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
30619+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
30620
30621 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
30622 {
30623@@ -376,8 +382,7 @@ void __init early_ioremap_init(void)
30624 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
30625
30626 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
30627- memset(bm_pte, 0, sizeof(bm_pte));
30628- pmd_populate_kernel(&init_mm, pmd, bm_pte);
30629+ pmd_populate_user(&init_mm, pmd, bm_pte);
30630
30631 /*
30632 * The boot-ioremap range spans multiple pmds, for which
30633diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
30634index d87dd6d..bf3fa66 100644
30635--- a/arch/x86/mm/kmemcheck/kmemcheck.c
30636+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
30637@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
30638 * memory (e.g. tracked pages)? For now, we need this to avoid
30639 * invoking kmemcheck for PnP BIOS calls.
30640 */
30641- if (regs->flags & X86_VM_MASK)
30642+ if (v8086_mode(regs))
30643 return false;
30644- if (regs->cs != __KERNEL_CS)
30645+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
30646 return false;
30647
30648 pte = kmemcheck_pte_lookup(address);
30649diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
30650index 845df68..1d8d29f 100644
30651--- a/arch/x86/mm/mmap.c
30652+++ b/arch/x86/mm/mmap.c
30653@@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
30654 * Leave at least a ~128 MB hole with possible stack randomization.
30655 */
30656 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
30657-#define MAX_GAP (TASK_SIZE/6*5)
30658+#define MAX_GAP (pax_task_size/6*5)
30659
30660 static int mmap_is_legacy(void)
30661 {
30662@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
30663 return rnd << PAGE_SHIFT;
30664 }
30665
30666-static unsigned long mmap_base(void)
30667+static unsigned long mmap_base(struct mm_struct *mm)
30668 {
30669 unsigned long gap = rlimit(RLIMIT_STACK);
30670+ unsigned long pax_task_size = TASK_SIZE;
30671+
30672+#ifdef CONFIG_PAX_SEGMEXEC
30673+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
30674+ pax_task_size = SEGMEXEC_TASK_SIZE;
30675+#endif
30676
30677 if (gap < MIN_GAP)
30678 gap = MIN_GAP;
30679 else if (gap > MAX_GAP)
30680 gap = MAX_GAP;
30681
30682- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
30683+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
30684 }
30685
30686 /*
30687 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
30688 * does, but not when emulating X86_32
30689 */
30690-static unsigned long mmap_legacy_base(void)
30691+static unsigned long mmap_legacy_base(struct mm_struct *mm)
30692 {
30693- if (mmap_is_ia32())
30694+ if (mmap_is_ia32()) {
30695+
30696+#ifdef CONFIG_PAX_SEGMEXEC
30697+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
30698+ return SEGMEXEC_TASK_UNMAPPED_BASE;
30699+ else
30700+#endif
30701+
30702 return TASK_UNMAPPED_BASE;
30703- else
30704+ } else
30705 return TASK_UNMAPPED_BASE + mmap_rnd();
30706 }
30707
30708@@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void)
30709 void arch_pick_mmap_layout(struct mm_struct *mm)
30710 {
30711 if (mmap_is_legacy()) {
30712- mm->mmap_base = mmap_legacy_base();
30713+ mm->mmap_base = mmap_legacy_base(mm);
30714+
30715+#ifdef CONFIG_PAX_RANDMMAP
30716+ if (mm->pax_flags & MF_PAX_RANDMMAP)
30717+ mm->mmap_base += mm->delta_mmap;
30718+#endif
30719+
30720 mm->get_unmapped_area = arch_get_unmapped_area;
30721 mm->unmap_area = arch_unmap_area;
30722 } else {
30723- mm->mmap_base = mmap_base();
30724+ mm->mmap_base = mmap_base(mm);
30725+
30726+#ifdef CONFIG_PAX_RANDMMAP
30727+ if (mm->pax_flags & MF_PAX_RANDMMAP)
30728+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
30729+#endif
30730+
30731 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
30732 mm->unmap_area = arch_unmap_area_topdown;
30733 }
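
Under PAX_RANDMMAP the hunks above skew the two layouts in opposite directions: the bottom-up base moves up by mm->delta_mmap, while the top-down base moves down by mm->delta_mmap + mm->delta_stack to stay clear of the randomized stack gap. A sketch of the arithmetic, assuming delta_mmap and delta_stack are page-aligned random offsets chosen at exec time (the MF_PAX_RANDMMAP guard from the patch is omitted here):

/* Sketch of the RANDMMAP skew applied in arch_pick_mmap_layout(). */
static unsigned long randomized_mmap_base(struct mm_struct *mm, bool legacy)
{
	if (legacy)
		return mmap_legacy_base(mm) + mm->delta_mmap;
	return mmap_base(mm) - (mm->delta_mmap + mm->delta_stack);
}
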
30734diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
30735index dc0b727..f612039 100644
30736--- a/arch/x86/mm/mmio-mod.c
30737+++ b/arch/x86/mm/mmio-mod.c
30738@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
30739 break;
30740 default:
30741 {
30742- unsigned char *ip = (unsigned char *)instptr;
30743+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
30744 my_trace->opcode = MMIO_UNKNOWN_OP;
30745 my_trace->width = 0;
30746 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
30747@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
30748 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
30749 void __iomem *addr)
30750 {
30751- static atomic_t next_id;
30752+ static atomic_unchecked_t next_id;
30753 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
30754 /* These are page-unaligned. */
30755 struct mmiotrace_map map = {
30756@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
30757 .private = trace
30758 },
30759 .phys = offset,
30760- .id = atomic_inc_return(&next_id)
30761+ .id = atomic_inc_return_unchecked(&next_id)
30762 };
30763 map.map_id = trace->id;
30764
30765@@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
30766 ioremap_trace_core(offset, size, addr);
30767 }
30768
30769-static void iounmap_trace_core(volatile void __iomem *addr)
30770+static void iounmap_trace_core(const volatile void __iomem *addr)
30771 {
30772 struct mmiotrace_map map = {
30773 .phys = 0,
30774@@ -328,7 +328,7 @@ not_enabled:
30775 }
30776 }
30777
30778-void mmiotrace_iounmap(volatile void __iomem *addr)
30779+void mmiotrace_iounmap(const volatile void __iomem *addr)
30780 {
30781 might_sleep();
30782 if (is_enabled()) /* recheck and proper locking in *_core() */
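
The atomic_unchecked_t conversion above is the PaX REFCOUNT idiom: plain atomic_t gains overflow trapping, so counters that may legitimately wrap (here a trace ID) are switched to the *_unchecked variants to opt out. A sketch of the pattern; alloc_trace_id is an illustrative name:

/* A wrapping ID source, exempt from REFCOUNT overflow checks (sketch). */
static atomic_unchecked_t next_trace_id;

static int alloc_trace_id(void)
{
	return atomic_inc_return_unchecked(&next_trace_id);
}
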
30783diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
30784index a71c4e2..301ae44 100644
30785--- a/arch/x86/mm/numa.c
30786+++ b/arch/x86/mm/numa.c
30787@@ -474,7 +474,7 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
30788 return true;
30789 }
30790
30791-static int __init numa_register_memblks(struct numa_meminfo *mi)
30792+static int __init __intentional_overflow(-1) numa_register_memblks(struct numa_meminfo *mi)
30793 {
30794 unsigned long uninitialized_var(pfn_align);
30795 int i, nid;
30796diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
30797index d0b1773..4c3327c 100644
30798--- a/arch/x86/mm/pageattr-test.c
30799+++ b/arch/x86/mm/pageattr-test.c
30800@@ -36,7 +36,7 @@ enum {
30801
30802 static int pte_testbit(pte_t pte)
30803 {
30804- return pte_flags(pte) & _PAGE_UNUSED1;
30805+ return pte_flags(pte) & _PAGE_CPA_TEST;
30806 }
30807
30808 struct split_state {
30809diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
30810index bb32480..75f2f5e 100644
30811--- a/arch/x86/mm/pageattr.c
30812+++ b/arch/x86/mm/pageattr.c
30813@@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
30814 */
30815 #ifdef CONFIG_PCI_BIOS
30816 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
30817- pgprot_val(forbidden) |= _PAGE_NX;
30818+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
30819 #endif
30820
30821 /*
30822@@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
30823 * Does not cover __inittext since that is gone later on. On
30824 * 64bit we do not enforce !NX on the low mapping
30825 */
30826- if (within(address, (unsigned long)_text, (unsigned long)_etext))
30827- pgprot_val(forbidden) |= _PAGE_NX;
30828+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
30829+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
30830
30831+#ifdef CONFIG_DEBUG_RODATA
30832 /*
30833 * The .rodata section needs to be read-only. Using the pfn
30834 * catches all aliases.
30835@@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
30836 if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
30837 __pa_symbol(__end_rodata) >> PAGE_SHIFT))
30838 pgprot_val(forbidden) |= _PAGE_RW;
30839+#endif
30840
30841 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
30842 /*
30843@@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
30844 }
30845 #endif
30846
30847+#ifdef CONFIG_PAX_KERNEXEC
30848+ if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)), __pa((unsigned long)&_sdata))) {
30849+ pgprot_val(forbidden) |= _PAGE_RW;
30850+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
30851+ }
30852+#endif
30853+
30854 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
30855
30856 return prot;
30857@@ -400,23 +409,37 @@ EXPORT_SYMBOL_GPL(slow_virt_to_phys);
30858 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
30859 {
30860 /* change init_mm */
30861+ pax_open_kernel();
30862 set_pte_atomic(kpte, pte);
30863+
30864 #ifdef CONFIG_X86_32
30865 if (!SHARED_KERNEL_PMD) {
30866+
30867+#ifdef CONFIG_PAX_PER_CPU_PGD
30868+ unsigned long cpu;
30869+#else
30870 struct page *page;
30871+#endif
30872
30873+#ifdef CONFIG_PAX_PER_CPU_PGD
30874+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
30875+ pgd_t *pgd = get_cpu_pgd(cpu, kernel);
30876+#else
30877 list_for_each_entry(page, &pgd_list, lru) {
30878- pgd_t *pgd;
30879+ pgd_t *pgd = (pgd_t *)page_address(page);
30880+#endif
30881+
30882 pud_t *pud;
30883 pmd_t *pmd;
30884
30885- pgd = (pgd_t *)page_address(page) + pgd_index(address);
30886+ pgd += pgd_index(address);
30887 pud = pud_offset(pgd, address);
30888 pmd = pmd_offset(pud, address);
30889 set_pte_atomic((pte_t *)pmd, pte);
30890 }
30891 }
30892 #endif
30893+ pax_close_kernel();
30894 }
30895
30896 static int
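
The KERNEXEC hunk in static_protections() above adds [_text, _sdata) to the protected ranges: any pfn aliasing kernel text/rodata has _PAGE_RW forbidden (and _PAGE_NX masked by __supported_pte_mask), whatever the caller requested. A sketch of the clamp, with the range half-open as in pageattr.c's within(); clamp_prot is a hypothetical helper:

/* Clamp a requested protection against a protected pfn range (sketch). */
static pgprot_t clamp_prot(pgprot_t prot, unsigned long pfn,
			   unsigned long start, unsigned long end)
{
	pgprot_t forbidden = __pgprot(0);

	if (start <= pfn && pfn < end) {
		pgprot_val(forbidden) |= _PAGE_RW;
		pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
	}
	return __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
}
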
30897diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
30898index 6574388..87e9bef 100644
30899--- a/arch/x86/mm/pat.c
30900+++ b/arch/x86/mm/pat.c
30901@@ -376,7 +376,7 @@ int free_memtype(u64 start, u64 end)
30902
30903 if (!entry) {
30904 printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
30905- current->comm, current->pid, start, end - 1);
30906+ current->comm, task_pid_nr(current), start, end - 1);
30907 return -EINVAL;
30908 }
30909
30910@@ -506,8 +506,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
30911
30912 while (cursor < to) {
30913 if (!devmem_is_allowed(pfn)) {
30914- printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
30915- current->comm, from, to - 1);
30916+ printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx)\n",
30917+ current->comm, from, to - 1, cursor);
30918 return 0;
30919 }
30920 cursor += PAGE_SIZE;
30921@@ -577,7 +577,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
30922 if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
30923 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
30924 "for [mem %#010Lx-%#010Lx]\n",
30925- current->comm, current->pid,
30926+ current->comm, task_pid_nr(current),
30927 cattr_name(flags),
30928 base, (unsigned long long)(base + size-1));
30929 return -EINVAL;
30930@@ -612,7 +612,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
30931 flags = lookup_memtype(paddr);
30932 if (want_flags != flags) {
30933 printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
30934- current->comm, current->pid,
30935+ current->comm, task_pid_nr(current),
30936 cattr_name(want_flags),
30937 (unsigned long long)paddr,
30938 (unsigned long long)(paddr + size - 1),
30939@@ -634,7 +634,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
30940 free_memtype(paddr, paddr + size);
30941 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
30942 " for [mem %#010Lx-%#010Lx], got %s\n",
30943- current->comm, current->pid,
30944+ current->comm, task_pid_nr(current),
30945 cattr_name(want_flags),
30946 (unsigned long long)paddr,
30947 (unsigned long long)(paddr + size - 1),
30948diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
30949index 415f6c4..d319983 100644
30950--- a/arch/x86/mm/pat_rbtree.c
30951+++ b/arch/x86/mm/pat_rbtree.c
30952@@ -160,7 +160,7 @@ success:
30953
30954 failure:
30955 printk(KERN_INFO "%s:%d conflicting memory types "
30956- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, start,
30957+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), start,
30958 end, cattr_name(found_type), cattr_name(match->type));
30959 return -EBUSY;
30960 }
30961diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
30962index 9f0614d..92ae64a 100644
30963--- a/arch/x86/mm/pf_in.c
30964+++ b/arch/x86/mm/pf_in.c
30965@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
30966 int i;
30967 enum reason_type rv = OTHERS;
30968
30969- p = (unsigned char *)ins_addr;
30970+ p = (unsigned char *)ktla_ktva(ins_addr);
30971 p += skip_prefix(p, &prf);
30972 p += get_opcode(p, &opcode);
30973
30974@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
30975 struct prefix_bits prf;
30976 int i;
30977
30978- p = (unsigned char *)ins_addr;
30979+ p = (unsigned char *)ktla_ktva(ins_addr);
30980 p += skip_prefix(p, &prf);
30981 p += get_opcode(p, &opcode);
30982
30983@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
30984 struct prefix_bits prf;
30985 int i;
30986
30987- p = (unsigned char *)ins_addr;
30988+ p = (unsigned char *)ktla_ktva(ins_addr);
30989 p += skip_prefix(p, &prf);
30990 p += get_opcode(p, &opcode);
30991
30992@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
30993 struct prefix_bits prf;
30994 int i;
30995
30996- p = (unsigned char *)ins_addr;
30997+ p = (unsigned char *)ktla_ktva(ins_addr);
30998 p += skip_prefix(p, &prf);
30999 p += get_opcode(p, &opcode);
31000 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
31001@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
31002 struct prefix_bits prf;
31003 int i;
31004
31005- p = (unsigned char *)ins_addr;
31006+ p = (unsigned char *)ktla_ktva(ins_addr);
31007 p += skip_prefix(p, &prf);
31008 p += get_opcode(p, &opcode);
31009 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
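
Every decoder above now reads instruction bytes through ktla_ktva(). Under KERNEXEC on i386 the kernel text is assumed to be mapped at two addresses, and ktla_ktva() translates a text address to its readable alias, degenerating to the identity when KERNEXEC is off. A sketch of the access pattern; peek_insn_byte is illustrative only:

static u8 peek_insn_byte(unsigned long ins_addr)
{
	/* read the opcode via the readable text alias (sketch) */
	const u8 *p = (const u8 *)ktla_ktva(ins_addr);
	return *p;
}
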
31010diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
31011index 17fda6a..f7d54a0 100644
31012--- a/arch/x86/mm/pgtable.c
31013+++ b/arch/x86/mm/pgtable.c
31014@@ -91,10 +91,67 @@ static inline void pgd_list_del(pgd_t *pgd)
31015 list_del(&page->lru);
31016 }
31017
31018-#define UNSHARED_PTRS_PER_PGD \
31019- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
31020+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31021+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
31022
31023+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
31024+{
31025+ unsigned int count = USER_PGD_PTRS;
31026
31027+ if (!pax_user_shadow_base)
31028+ return;
31029+
31030+ while (count--)
31031+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
31032+}
31033+#endif
31034+
31035+#ifdef CONFIG_PAX_PER_CPU_PGD
31036+void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
31037+{
31038+ unsigned int count = USER_PGD_PTRS;
31039+
31040+ while (count--) {
31041+ pgd_t pgd;
31042+
31043+#ifdef CONFIG_X86_64
31044+ pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
31045+#else
31046+ pgd = *src++;
31047+#endif
31048+
31049+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31050+ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
31051+#endif
31052+
31053+ *dst++ = pgd;
31054+ }
31055+
31056+}
31057+#endif
31058+
31059+#ifdef CONFIG_X86_64
31060+#define pxd_t pud_t
31061+#define pyd_t pgd_t
31062+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
31063+#define pxd_free(mm, pud) pud_free((mm), (pud))
31064+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
31065+#define pyd_offset(mm, address) pgd_offset((mm), (address))
31066+#define PYD_SIZE PGDIR_SIZE
31067+#else
31068+#define pxd_t pmd_t
31069+#define pyd_t pud_t
31070+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
31071+#define pxd_free(mm, pud) pmd_free((mm), (pud))
31072+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
31073+#define pyd_offset(mm, address) pud_offset((mm), (address))
31074+#define PYD_SIZE PUD_SIZE
31075+#endif
31076+
31077+#ifdef CONFIG_PAX_PER_CPU_PGD
31078+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
31079+static inline void pgd_dtor(pgd_t *pgd) {}
31080+#else
31081 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
31082 {
31083 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
31084@@ -135,6 +192,7 @@ static void pgd_dtor(pgd_t *pgd)
31085 pgd_list_del(pgd);
31086 spin_unlock(&pgd_lock);
31087 }
31088+#endif
31089
31090 /*
31091 * List of all pgd's needed for non-PAE so it can invalidate entries
31092@@ -147,7 +205,7 @@ static void pgd_dtor(pgd_t *pgd)
31093 * -- nyc
31094 */
31095
31096-#ifdef CONFIG_X86_PAE
31097+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
31098 /*
31099 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
31100 * updating the top-level pagetable entries to guarantee the
31101@@ -159,7 +217,7 @@ static void pgd_dtor(pgd_t *pgd)
31102 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
31103 * and initialize the kernel pmds here.
31104 */
31105-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
31106+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
31107
31108 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
31109 {
31110@@ -177,36 +235,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
31111 */
31112 flush_tlb_mm(mm);
31113 }
31114+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
31115+#define PREALLOCATED_PXDS USER_PGD_PTRS
31116 #else /* !CONFIG_X86_PAE */
31117
31118 /* No need to prepopulate any pagetable entries in non-PAE modes. */
31119-#define PREALLOCATED_PMDS 0
31120+#define PREALLOCATED_PXDS 0
31121
31122 #endif /* CONFIG_X86_PAE */
31123
31124-static void free_pmds(pmd_t *pmds[])
31125+static void free_pxds(pxd_t *pxds[])
31126 {
31127 int i;
31128
31129- for(i = 0; i < PREALLOCATED_PMDS; i++)
31130- if (pmds[i])
31131- free_page((unsigned long)pmds[i]);
31132+ for(i = 0; i < PREALLOCATED_PXDS; i++)
31133+ if (pxds[i])
31134+ free_page((unsigned long)pxds[i]);
31135 }
31136
31137-static int preallocate_pmds(pmd_t *pmds[])
31138+static int preallocate_pxds(pxd_t *pxds[])
31139 {
31140 int i;
31141 bool failed = false;
31142
31143- for(i = 0; i < PREALLOCATED_PMDS; i++) {
31144- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
31145- if (pmd == NULL)
31146+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
31147+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
31148+ if (pxd == NULL)
31149 failed = true;
31150- pmds[i] = pmd;
31151+ pxds[i] = pxd;
31152 }
31153
31154 if (failed) {
31155- free_pmds(pmds);
31156+ free_pxds(pxds);
31157 return -ENOMEM;
31158 }
31159
31160@@ -219,51 +279,55 @@ static int preallocate_pmds(pmd_t *pmds[])
31161 * preallocate which never got a corresponding vma will need to be
31162 * freed manually.
31163 */
31164-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
31165+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
31166 {
31167 int i;
31168
31169- for(i = 0; i < PREALLOCATED_PMDS; i++) {
31170+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
31171 pgd_t pgd = pgdp[i];
31172
31173 if (pgd_val(pgd) != 0) {
31174- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
31175+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
31176
31177- pgdp[i] = native_make_pgd(0);
31178+ set_pgd(pgdp + i, native_make_pgd(0));
31179
31180- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
31181- pmd_free(mm, pmd);
31182+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
31183+ pxd_free(mm, pxd);
31184 }
31185 }
31186 }
31187
31188-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
31189+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
31190 {
31191- pud_t *pud;
31192+ pyd_t *pyd;
31193 unsigned long addr;
31194 int i;
31195
31196- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
31197+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
31198 return;
31199
31200- pud = pud_offset(pgd, 0);
31201+#ifdef CONFIG_X86_64
31202+ pyd = pyd_offset(mm, 0L);
31203+#else
31204+ pyd = pyd_offset(pgd, 0L);
31205+#endif
31206
31207- for (addr = i = 0; i < PREALLOCATED_PMDS;
31208- i++, pud++, addr += PUD_SIZE) {
31209- pmd_t *pmd = pmds[i];
31210+ for (addr = i = 0; i < PREALLOCATED_PXDS;
31211+ i++, pyd++, addr += PYD_SIZE) {
31212+ pxd_t *pxd = pxds[i];
31213
31214 if (i >= KERNEL_PGD_BOUNDARY)
31215- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
31216- sizeof(pmd_t) * PTRS_PER_PMD);
31217+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
31218+ sizeof(pxd_t) * PTRS_PER_PMD);
31219
31220- pud_populate(mm, pud, pmd);
31221+ pyd_populate(mm, pyd, pxd);
31222 }
31223 }
31224
31225 pgd_t *pgd_alloc(struct mm_struct *mm)
31226 {
31227 pgd_t *pgd;
31228- pmd_t *pmds[PREALLOCATED_PMDS];
31229+ pxd_t *pxds[PREALLOCATED_PXDS];
31230
31231 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
31232
31233@@ -272,11 +336,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
31234
31235 mm->pgd = pgd;
31236
31237- if (preallocate_pmds(pmds) != 0)
31238+ if (preallocate_pxds(pxds) != 0)
31239 goto out_free_pgd;
31240
31241 if (paravirt_pgd_alloc(mm) != 0)
31242- goto out_free_pmds;
31243+ goto out_free_pxds;
31244
31245 /*
31246 * Make sure that pre-populating the pmds is atomic with
31247@@ -286,14 +350,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
31248 spin_lock(&pgd_lock);
31249
31250 pgd_ctor(mm, pgd);
31251- pgd_prepopulate_pmd(mm, pgd, pmds);
31252+ pgd_prepopulate_pxd(mm, pgd, pxds);
31253
31254 spin_unlock(&pgd_lock);
31255
31256 return pgd;
31257
31258-out_free_pmds:
31259- free_pmds(pmds);
31260+out_free_pxds:
31261+ free_pxds(pxds);
31262 out_free_pgd:
31263 free_page((unsigned long)pgd);
31264 out:
31265@@ -302,7 +366,7 @@ out:
31266
31267 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
31268 {
31269- pgd_mop_up_pmds(mm, pgd);
31270+ pgd_mop_up_pxds(mm, pgd);
31271 pgd_dtor(pgd);
31272 paravirt_pgd_free(mm, pgd);
31273 free_page((unsigned long)pgd);
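
The pxd/pyd renaming above lets one prepopulation path serve both worlds; the aliases resolve per configuration exactly as the #define block near the top of the hunk spells out. In summary, a comment-style sketch:

/*
 * Level aliases used by pgd_prepopulate_pxd() and friends:
 *
 *            pxd_t (child)   pyd_t (parent)   PYD_SIZE
 *  x86_64    pud_t           pgd_t            PGDIR_SIZE
 *  x86_32    pmd_t           pud_t            PUD_SIZE
 *
 * On x86_64 with PAX_PER_CPU_PGD the first USER_PGD_PTRS entries are
 * preallocated; on 32-bit PAE it is the usual kernel pmd set.
 */
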
31274diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
31275index a69bcb8..19068ab 100644
31276--- a/arch/x86/mm/pgtable_32.c
31277+++ b/arch/x86/mm/pgtable_32.c
31278@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
31279 return;
31280 }
31281 pte = pte_offset_kernel(pmd, vaddr);
31282+
31283+ pax_open_kernel();
31284 if (pte_val(pteval))
31285 set_pte_at(&init_mm, vaddr, pte, pteval);
31286 else
31287 pte_clear(&init_mm, vaddr, pte);
31288+ pax_close_kernel();
31289
31290 /*
31291 * It's enough to flush this one mapping.
31292diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
31293index e666cbb..61788c45 100644
31294--- a/arch/x86/mm/physaddr.c
31295+++ b/arch/x86/mm/physaddr.c
31296@@ -10,7 +10,7 @@
31297 #ifdef CONFIG_X86_64
31298
31299 #ifdef CONFIG_DEBUG_VIRTUAL
31300-unsigned long __phys_addr(unsigned long x)
31301+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
31302 {
31303 unsigned long y = x - __START_KERNEL_map;
31304
31305@@ -67,7 +67,7 @@ EXPORT_SYMBOL(__virt_addr_valid);
31306 #else
31307
31308 #ifdef CONFIG_DEBUG_VIRTUAL
31309-unsigned long __phys_addr(unsigned long x)
31310+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
31311 {
31312 unsigned long phys_addr = x - PAGE_OFFSET;
31313 /* VMALLOC_* aren't constants */
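
__intentional_overflow(-1), applied to __phys_addr() above (and to numa_register_memblks() earlier), is consumed by the size_overflow GCC plugin. A sketch of the assumed fallback definition; the exact semantics of the -1 argument (mark the whole function, return value included, as allowed to wrap) are an assumption here:

/* Sketch (assumption): without the size_overflow plugin the marker
 * vanishes; with it, -1 exempts the whole function from overflow
 * instrumentation. */
#ifndef __intentional_overflow
#define __intentional_overflow(...)
#endif
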
31314diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
31315index 410531d..0f16030 100644
31316--- a/arch/x86/mm/setup_nx.c
31317+++ b/arch/x86/mm/setup_nx.c
31318@@ -5,8 +5,10 @@
31319 #include <asm/pgtable.h>
31320 #include <asm/proto.h>
31321
31322+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
31323 static int disable_nx __cpuinitdata;
31324
31325+#ifndef CONFIG_PAX_PAGEEXEC
31326 /*
31327 * noexec = on|off
31328 *
31329@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
31330 return 0;
31331 }
31332 early_param("noexec", noexec_setup);
31333+#endif
31334+
31335+#endif
31336
31337 void __cpuinit x86_configure_nx(void)
31338 {
31339+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
31340 if (cpu_has_nx && !disable_nx)
31341 __supported_pte_mask |= _PAGE_NX;
31342 else
31343+#endif
31344 __supported_pte_mask &= ~_PAGE_NX;
31345 }
31346
31347diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
31348index 282375f..e03a98f 100644
31349--- a/arch/x86/mm/tlb.c
31350+++ b/arch/x86/mm/tlb.c
31351@@ -48,7 +48,11 @@ void leave_mm(int cpu)
31352 BUG();
31353 if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
31354 cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
31355+
31356+#ifndef CONFIG_PAX_PER_CPU_PGD
31357 load_cr3(swapper_pg_dir);
31358+#endif
31359+
31360 }
31361 }
31362 EXPORT_SYMBOL_GPL(leave_mm);
31363diff --git a/arch/x86/mm/uderef_64.c b/arch/x86/mm/uderef_64.c
31364new file mode 100644
31365index 0000000..dace51c
31366--- /dev/null
31367+++ b/arch/x86/mm/uderef_64.c
31368@@ -0,0 +1,37 @@
31369+#include <linux/mm.h>
31370+#include <asm/pgtable.h>
31371+#include <asm/uaccess.h>
31372+
31373+#ifdef CONFIG_PAX_MEMORY_UDEREF
31374+/* PaX: due to the special call convention these functions must
31375+ * - remain leaf functions under all configurations,
31376+ * - never be called directly, only dereferenced from the wrappers.
31377+ */
31378+void __pax_open_userland(void)
31379+{
31380+ unsigned int cpu;
31381+
31382+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
31383+ return;
31384+
31385+ cpu = raw_get_cpu();
31386+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_KERNEL);
31387+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
31388+ raw_put_cpu_no_resched();
31389+}
31390+EXPORT_SYMBOL(__pax_open_userland);
31391+
31392+void __pax_close_userland(void)
31393+{
31394+ unsigned int cpu;
31395+
31396+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
31397+ return;
31398+
31399+ cpu = raw_get_cpu();
31400+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_USER);
31401+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
31402+ raw_put_cpu_no_resched();
31403+}
31404+EXPORT_SYMBOL(__pax_close_userland);
31405+#endif
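
The new file above is the UDEREF/PCID fast path: opening userland switches cr3 to the per-cpu user pgd with PCID_USER | PCID_NOFLUSH (so no TLB flush is paid), and closing it switches back to the kernel pgd, with the BUG_ON asserting the expected PCID on entry. Callers are expected to go through wrapper macros rather than call these leaves directly; a sketch of the intended bracketing, where pax_open_userland()/pax_close_userland() are the assumed wrapper names and fetch_user_word is illustrative:

static long fetch_user_word(const unsigned long __user *uaddr,
			    unsigned long *val)
{
	long ret;

	pax_open_userland();		/* cr3 := per-cpu user pgd, no flush */
	ret = __get_user(*val, uaddr);
	pax_close_userland();		/* cr3 := per-cpu kernel pgd */
	return ret;
}
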
31406diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
31407index 877b9a1..a8ecf42 100644
31408--- a/arch/x86/net/bpf_jit.S
31409+++ b/arch/x86/net/bpf_jit.S
31410@@ -9,6 +9,7 @@
31411 */
31412 #include <linux/linkage.h>
31413 #include <asm/dwarf2.h>
31414+#include <asm/alternative-asm.h>
31415
31416 /*
31417 * Calling convention :
31418@@ -35,6 +36,7 @@ sk_load_word_positive_offset:
31419 jle bpf_slow_path_word
31420 mov (SKBDATA,%rsi),%eax
31421 bswap %eax /* ntohl() */
31422+ pax_force_retaddr
31423 ret
31424
31425 sk_load_half:
31426@@ -52,6 +54,7 @@ sk_load_half_positive_offset:
31427 jle bpf_slow_path_half
31428 movzwl (SKBDATA,%rsi),%eax
31429 rol $8,%ax # ntohs()
31430+ pax_force_retaddr
31431 ret
31432
31433 sk_load_byte:
31434@@ -66,6 +69,7 @@ sk_load_byte_positive_offset:
31435 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
31436 jle bpf_slow_path_byte
31437 movzbl (SKBDATA,%rsi),%eax
31438+ pax_force_retaddr
31439 ret
31440
31441 /**
31442@@ -87,6 +91,7 @@ sk_load_byte_msh_positive_offset:
31443 movzbl (SKBDATA,%rsi),%ebx
31444 and $15,%bl
31445 shl $2,%bl
31446+ pax_force_retaddr
31447 ret
31448
31449 /* rsi contains offset and can be scratched */
31450@@ -109,6 +114,7 @@ bpf_slow_path_word:
31451 js bpf_error
31452 mov -12(%rbp),%eax
31453 bswap %eax
31454+ pax_force_retaddr
31455 ret
31456
31457 bpf_slow_path_half:
31458@@ -117,12 +123,14 @@ bpf_slow_path_half:
31459 mov -12(%rbp),%ax
31460 rol $8,%ax
31461 movzwl %ax,%eax
31462+ pax_force_retaddr
31463 ret
31464
31465 bpf_slow_path_byte:
31466 bpf_slow_path_common(1)
31467 js bpf_error
31468 movzbl -12(%rbp),%eax
31469+ pax_force_retaddr
31470 ret
31471
31472 bpf_slow_path_byte_msh:
31473@@ -133,6 +141,7 @@ bpf_slow_path_byte_msh:
31474 and $15,%al
31475 shl $2,%al
31476 xchg %eax,%ebx
31477+ pax_force_retaddr
31478 ret
31479
31480 #define sk_negative_common(SIZE) \
31481@@ -157,6 +166,7 @@ sk_load_word_negative_offset:
31482 sk_negative_common(4)
31483 mov (%rax), %eax
31484 bswap %eax
31485+ pax_force_retaddr
31486 ret
31487
31488 bpf_slow_path_half_neg:
31489@@ -168,6 +178,7 @@ sk_load_half_negative_offset:
31490 mov (%rax),%ax
31491 rol $8,%ax
31492 movzwl %ax,%eax
31493+ pax_force_retaddr
31494 ret
31495
31496 bpf_slow_path_byte_neg:
31497@@ -177,6 +188,7 @@ sk_load_byte_negative_offset:
31498 .globl sk_load_byte_negative_offset
31499 sk_negative_common(1)
31500 movzbl (%rax), %eax
31501+ pax_force_retaddr
31502 ret
31503
31504 bpf_slow_path_byte_msh_neg:
31505@@ -190,6 +202,7 @@ sk_load_byte_msh_negative_offset:
31506 and $15,%al
31507 shl $2,%al
31508 xchg %eax,%ebx
31509+ pax_force_retaddr
31510 ret
31511
31512 bpf_error:
31513@@ -197,4 +210,5 @@ bpf_error:
31514 xor %eax,%eax
31515 mov -8(%rbp),%rbx
31516 leaveq
31517+ pax_force_retaddr
31518 ret
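
Every ret in the JIT helpers above is now preceded by pax_force_retaddr. Under KERNEXEC's return-address masking the macro is assumed to rewrite the saved return address on the stack so a corrupted value still lands inside kernel text, and to expand to nothing when the feature is off. A comment-style sketch of one plausible expansion (an assumption, not taken from this patch):

/*
 *	pax_force_retaddr		(sketch, assumption)
 *	  ->	orq %r12,(%rsp)		# %r12 holds the text-range mask
 *	ret
 */
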
31519diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
31520index f66b540..3e88dfb 100644
31521--- a/arch/x86/net/bpf_jit_comp.c
31522+++ b/arch/x86/net/bpf_jit_comp.c
31523@@ -12,6 +12,7 @@
31524 #include <linux/netdevice.h>
31525 #include <linux/filter.h>
31526 #include <linux/if_vlan.h>
31527+#include <linux/random.h>
31528
31529 /*
31530 * Conventions :
31531@@ -49,13 +50,90 @@ static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
31532 return ptr + len;
31533 }
31534
31535+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
31536+#define MAX_INSTR_CODE_SIZE 96
31537+#else
31538+#define MAX_INSTR_CODE_SIZE 64
31539+#endif
31540+
31541 #define EMIT(bytes, len) do { prog = emit_code(prog, bytes, len); } while (0)
31542
31543 #define EMIT1(b1) EMIT(b1, 1)
31544 #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
31545 #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
31546 #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
31547+
31548+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
31549+/* original constant will appear in ecx */
31550+#define DILUTE_CONST_SEQUENCE(_off, _key) \
31551+do { \
31552+ /* mov ecx, randkey */ \
31553+ EMIT1(0xb9); \
31554+ EMIT(_key, 4); \
31555+ /* xor ecx, randkey ^ off */ \
31556+ EMIT2(0x81, 0xf1); \
31557+ EMIT((_key) ^ (_off), 4); \
31558+} while (0)
31559+
31560+#define EMIT1_off32(b1, _off) \
31561+do { \
31562+ switch (b1) { \
31563+ case 0x05: /* add eax, imm32 */ \
31564+ case 0x2d: /* sub eax, imm32 */ \
31565+ case 0x25: /* and eax, imm32 */ \
31566+ case 0x0d: /* or eax, imm32 */ \
31567+ case 0xb8: /* mov eax, imm32 */ \
31568+ case 0x35: /* xor eax, imm32 */ \
31569+ case 0x3d: /* cmp eax, imm32 */ \
31570+ case 0xa9: /* test eax, imm32 */ \
31571+ DILUTE_CONST_SEQUENCE(_off, randkey); \
31572+ EMIT2((b1) - 4, 0xc8); /* convert imm instruction to eax, ecx */\
31573+ break; \
31574+ case 0xbb: /* mov ebx, imm32 */ \
31575+ DILUTE_CONST_SEQUENCE(_off, randkey); \
31576+ /* mov ebx, ecx */ \
31577+ EMIT2(0x89, 0xcb); \
31578+ break; \
31579+ case 0xbe: /* mov esi, imm32 */ \
31580+ DILUTE_CONST_SEQUENCE(_off, randkey); \
31581+ /* mov esi, ecx */ \
31582+ EMIT2(0x89, 0xce); \
31583+ break; \
31584+ case 0xe8: /* call rel imm32, always to known funcs */ \
31585+ EMIT1(b1); \
31586+ EMIT(_off, 4); \
31587+ break; \
31588+ case 0xe9: /* jmp rel imm32 */ \
31589+ EMIT1(b1); \
31590+ EMIT(_off, 4); \
31591+ /* prevent fall-through, we're not called if off = 0 */ \
31592+ EMIT(0xcccccccc, 4); \
31593+ EMIT(0xcccccccc, 4); \
31594+ break; \
31595+ default: \
31596+ BUILD_BUG(); \
31597+ } \
31598+} while (0)
31599+
31600+#define EMIT2_off32(b1, b2, _off) \
31601+do { \
31602+ if ((b1) == 0x8d && (b2) == 0xb3) { /* lea esi, [rbx+imm32] */ \
31603+ EMIT2(0x8d, 0xb3); /* lea esi, [rbx+randkey] */ \
31604+ EMIT(randkey, 4); \
31605+ EMIT2(0x8d, 0xb6); /* lea esi, [esi+off-randkey] */ \
31606+ EMIT((_off) - randkey, 4); \
31607+ } else if ((b1) == 0x69 && (b2) == 0xc0) { /* imul eax, imm32 */\
31608+ DILUTE_CONST_SEQUENCE(_off, randkey); \
31609+ /* imul eax, ecx */ \
31610+ EMIT3(0x0f, 0xaf, 0xc1); \
31611+ } else { \
31612+ BUILD_BUG(); \
31613+ } \
31614+} while (0)
31615+#else
31616 #define EMIT1_off32(b1, off) do { EMIT1(b1); EMIT(off, 4);} while (0)
31617+#define EMIT2_off32(b1, b2, off) do { EMIT2(b1, b2); EMIT(off, 4);} while (0)
31618+#endif
31619
31620 #define CLEAR_A() EMIT2(0x31, 0xc0) /* xor %eax,%eax */
31621 #define CLEAR_X() EMIT2(0x31, 0xdb) /* xor %ebx,%ebx */
31622@@ -90,6 +168,24 @@ do { \
31623 #define X86_JBE 0x76
31624 #define X86_JA 0x77
31625
31626+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
31627+#define APPEND_FLOW_VERIFY() \
31628+do { \
31629+ /* mov ecx, randkey */ \
31630+ EMIT1(0xb9); \
31631+ EMIT(randkey, 4); \
31632+ /* cmp ecx, randkey */ \
31633+ EMIT2(0x81, 0xf9); \
31634+ EMIT(randkey, 4); \
31635+ /* jz after 8 int 3s */ \
31636+ EMIT2(0x74, 0x08); \
31637+ EMIT(0xcccccccc, 4); \
31638+ EMIT(0xcccccccc, 4); \
31639+} while (0)
31640+#else
31641+#define APPEND_FLOW_VERIFY() do { } while (0)
31642+#endif
31643+
31644 #define EMIT_COND_JMP(op, offset) \
31645 do { \
31646 if (is_near(offset)) \
31647@@ -97,6 +193,7 @@ do { \
31648 else { \
31649 EMIT2(0x0f, op + 0x10); \
31650 EMIT(offset, 4); /* jxx .+off32 */ \
31651+ APPEND_FLOW_VERIFY(); \
31652 } \
31653 } while (0)
31654
31655@@ -121,6 +218,11 @@ static inline void bpf_flush_icache(void *start, void *end)
31656 set_fs(old_fs);
31657 }
31658
31659+struct bpf_jit_work {
31660+ struct work_struct work;
31661+ void *image;
31662+};
31663+
31664 #define CHOOSE_LOAD_FUNC(K, func) \
31665 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
31666
31667@@ -146,7 +248,7 @@ static int pkt_type_offset(void)
31668
31669 void bpf_jit_compile(struct sk_filter *fp)
31670 {
31671- u8 temp[64];
31672+ u8 temp[MAX_INSTR_CODE_SIZE];
31673 u8 *prog;
31674 unsigned int proglen, oldproglen = 0;
31675 int ilen, i;
31676@@ -159,6 +261,9 @@ void bpf_jit_compile(struct sk_filter *fp)
31677 unsigned int *addrs;
31678 const struct sock_filter *filter = fp->insns;
31679 int flen = fp->len;
31680+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
31681+ unsigned int randkey;
31682+#endif
31683
31684 if (!bpf_jit_enable)
31685 return;
31686@@ -167,11 +272,19 @@ void bpf_jit_compile(struct sk_filter *fp)
31687 if (addrs == NULL)
31688 return;
31689
31690+ fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
31691+ if (!fp->work)
31692+ goto out;
31693+
31694+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
31695+ randkey = get_random_int();
31696+#endif
31697+
31698 /* Before first pass, make a rough estimation of addrs[]
31699- * each bpf instruction is translated to less than 64 bytes
31700+ * each bpf instruction is translated to less than MAX_INSTR_CODE_SIZE bytes
31701 */
31702 for (proglen = 0, i = 0; i < flen; i++) {
31703- proglen += 64;
31704+ proglen += MAX_INSTR_CODE_SIZE;
31705 addrs[i] = proglen;
31706 }
31707 cleanup_addr = proglen; /* epilogue address */
31708@@ -282,10 +395,8 @@ void bpf_jit_compile(struct sk_filter *fp)
31709 case BPF_S_ALU_MUL_K: /* A *= K */
31710 if (is_imm8(K))
31711 EMIT3(0x6b, 0xc0, K); /* imul imm8,%eax,%eax */
31712- else {
31713- EMIT2(0x69, 0xc0); /* imul imm32,%eax */
31714- EMIT(K, 4);
31715- }
31716+ else
31717+ EMIT2_off32(0x69, 0xc0, K); /* imul imm32,%eax */
31718 break;
31719 case BPF_S_ALU_DIV_X: /* A /= X; */
31720 seen |= SEEN_XREG;
31721@@ -325,13 +436,23 @@ void bpf_jit_compile(struct sk_filter *fp)
31722 break;
31723 case BPF_S_ALU_MOD_K: /* A %= K; */
31724 EMIT2(0x31, 0xd2); /* xor %edx,%edx */
31725+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
31726+ DILUTE_CONST_SEQUENCE(K, randkey);
31727+#else
31728 EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
31729+#endif
31730 EMIT2(0xf7, 0xf1); /* div %ecx */
31731 EMIT2(0x89, 0xd0); /* mov %edx,%eax */
31732 break;
31733 case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
31734+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
31735+ DILUTE_CONST_SEQUENCE(K, randkey);
31736+ // imul rax, rcx
31737+ EMIT4(0x48, 0x0f, 0xaf, 0xc1);
31738+#else
31739 EMIT3(0x48, 0x69, 0xc0); /* imul imm32,%rax,%rax */
31740 EMIT(K, 4);
31741+#endif
31742 EMIT4(0x48, 0xc1, 0xe8, 0x20); /* shr $0x20,%rax */
31743 break;
31744 case BPF_S_ALU_AND_X:
31745@@ -602,8 +723,7 @@ common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG;
31746 if (is_imm8(K)) {
31747 EMIT3(0x8d, 0x73, K); /* lea imm8(%rbx), %esi */
31748 } else {
31749- EMIT2(0x8d, 0xb3); /* lea imm32(%rbx),%esi */
31750- EMIT(K, 4);
31751+ EMIT2_off32(0x8d, 0xb3, K); /* lea imm32(%rbx),%esi */
31752 }
31753 } else {
31754 EMIT2(0x89,0xde); /* mov %ebx,%esi */
31755@@ -686,17 +806,18 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
31756 break;
31757 default:
31758 /* hmm, too complex filter, give up with jit compiler */
31759- goto out;
31760+ goto error;
31761 }
31762 ilen = prog - temp;
31763 if (image) {
31764 if (unlikely(proglen + ilen > oldproglen)) {
31765 pr_err("bpf_jit_compile fatal error\n");
31766- kfree(addrs);
31767- module_free(NULL, image);
31768- return;
31769+ module_free_exec(NULL, image);
31770+ goto error;
31771 }
31772+ pax_open_kernel();
31773 memcpy(image + proglen, temp, ilen);
31774+ pax_close_kernel();
31775 }
31776 proglen += ilen;
31777 addrs[i] = proglen;
31778@@ -717,11 +838,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
31779 break;
31780 }
31781 if (proglen == oldproglen) {
31782- image = module_alloc(max_t(unsigned int,
31783- proglen,
31784- sizeof(struct work_struct)));
31785+ image = module_alloc_exec(proglen);
31786 if (!image)
31787- goto out;
31788+ goto error;
31789 }
31790 oldproglen = proglen;
31791 }
31792@@ -732,7 +851,10 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
31793 if (image) {
31794 bpf_flush_icache(image, image + proglen);
31795 fp->bpf_func = (void *)image;
31796- }
31797+ } else
31798+error:
31799+ kfree(fp->work);
31800+
31801 out:
31802 kfree(addrs);
31803 return;
31804@@ -740,18 +862,20 @@ out:
31805
31806 static void jit_free_defer(struct work_struct *arg)
31807 {
31808- module_free(NULL, arg);
31809+ module_free_exec(NULL, ((struct bpf_jit_work *)arg)->image);
31810+ kfree(arg);
31811 }
31812
31813 /* run from softirq, we must use a work_struct to call
31814- * module_free() from process context
31815+ * module_free_exec() from process context
31816 */
31817 void bpf_jit_free(struct sk_filter *fp)
31818 {
31819 if (fp->bpf_func != sk_run_filter) {
31820- struct work_struct *work = (struct work_struct *)fp->bpf_func;
31821+ struct work_struct *work = &fp->work->work;
31822
31823 INIT_WORK(work, jit_free_defer);
31824+ fp->work->image = fp->bpf_func;
31825 schedule_work(work);
31826 }
31827 }
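
Two things happen in the bpf_jit_comp.c changes above: immediates are "diluted" so attacker-chosen 32-bit constants never appear verbatim in the executable JIT image, and the image itself moves to module_alloc_exec() memory that is only written between pax_open_kernel()/pax_close_kernel(). The dilution works because XOR cancels the random key; a standalone sketch of the identity behind DILUTE_CONST_SEQUENCE (dilute is an illustrative userspace function):

#include <stdint.h>

/* mov ecx, key ; xor ecx, key ^ off  =>  ecx == off (sketch) */
static uint32_t dilute(uint32_t off, uint32_t key)
{
	uint32_t ecx = key;	/* only 'key' is encoded in the image  */
	ecx ^= key ^ off;	/* only 'key ^ off' is encoded         */
	return ecx;		/* == key ^ key ^ off == off at runtime */
}
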
31828diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
31829index d6aa6e8..266395a 100644
31830--- a/arch/x86/oprofile/backtrace.c
31831+++ b/arch/x86/oprofile/backtrace.c
31832@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
31833 struct stack_frame_ia32 *fp;
31834 unsigned long bytes;
31835
31836- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
31837+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
31838 if (bytes != sizeof(bufhead))
31839 return NULL;
31840
31841- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
31842+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
31843
31844 oprofile_add_trace(bufhead[0].return_address);
31845
31846@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
31847 struct stack_frame bufhead[2];
31848 unsigned long bytes;
31849
31850- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
31851+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
31852 if (bytes != sizeof(bufhead))
31853 return NULL;
31854
31855@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
31856 {
31857 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
31858
31859- if (!user_mode_vm(regs)) {
31860+ if (!user_mode(regs)) {
31861 unsigned long stack = kernel_stack_pointer(regs);
31862 if (depth)
31863 dump_trace(NULL, regs, (unsigned long *)stack, 0,
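
The __force_user/__force_kernel casts above are sparse address-space annotations: the frame pointer arrives as a kernel-typed value but actually points into user memory, so it is re-labelled explicitly before the checked copy_from_user_nmi() call rather than converted silently. A sketch of the pattern; copy_frame is illustrative only:

/* Make the user-space origin of 'head' explicit for sparse (sketch). */
static unsigned long copy_frame(void *dst, const void *head, size_t n)
{
	return copy_from_user_nmi(dst, (const char __force_user *)head, n);
}
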
31864diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
31865index 48768df..ba9143c 100644
31866--- a/arch/x86/oprofile/nmi_int.c
31867+++ b/arch/x86/oprofile/nmi_int.c
31868@@ -23,6 +23,7 @@
31869 #include <asm/nmi.h>
31870 #include <asm/msr.h>
31871 #include <asm/apic.h>
31872+#include <asm/pgtable.h>
31873
31874 #include "op_counter.h"
31875 #include "op_x86_model.h"
31876@@ -774,8 +775,11 @@ int __init op_nmi_init(struct oprofile_operations *ops)
31877 if (ret)
31878 return ret;
31879
31880- if (!model->num_virt_counters)
31881- model->num_virt_counters = model->num_counters;
31882+ if (!model->num_virt_counters) {
31883+ pax_open_kernel();
31884+ *(unsigned int *)&model->num_virt_counters = model->num_counters;
31885+ pax_close_kernel();
31886+ }
31887
31888 mux_init(ops);
31889
31890diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
31891index b2b9443..be58856 100644
31892--- a/arch/x86/oprofile/op_model_amd.c
31893+++ b/arch/x86/oprofile/op_model_amd.c
31894@@ -519,9 +519,11 @@ static int op_amd_init(struct oprofile_operations *ops)
31895 num_counters = AMD64_NUM_COUNTERS;
31896 }
31897
31898- op_amd_spec.num_counters = num_counters;
31899- op_amd_spec.num_controls = num_counters;
31900- op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
31901+ pax_open_kernel();
31902+ *(unsigned int *)&op_amd_spec.num_counters = num_counters;
31903+ *(unsigned int *)&op_amd_spec.num_controls = num_counters;
31904+ *(unsigned int *)&op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
31905+ pax_close_kernel();
31906
31907 return 0;
31908 }
31909diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
31910index d90528e..0127e2b 100644
31911--- a/arch/x86/oprofile/op_model_ppro.c
31912+++ b/arch/x86/oprofile/op_model_ppro.c
31913@@ -19,6 +19,7 @@
31914 #include <asm/msr.h>
31915 #include <asm/apic.h>
31916 #include <asm/nmi.h>
31917+#include <asm/pgtable.h>
31918
31919 #include "op_x86_model.h"
31920 #include "op_counter.h"
31921@@ -221,8 +222,10 @@ static void arch_perfmon_setup_counters(void)
31922
31923 num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
31924
31925- op_arch_perfmon_spec.num_counters = num_counters;
31926- op_arch_perfmon_spec.num_controls = num_counters;
31927+ pax_open_kernel();
31928+ *(unsigned int *)&op_arch_perfmon_spec.num_counters = num_counters;
31929+ *(unsigned int *)&op_arch_perfmon_spec.num_controls = num_counters;
31930+ pax_close_kernel();
31931 }
31932
31933 static int arch_perfmon_init(struct oprofile_operations *ignore)
31934diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
31935index 71e8a67..6a313bb 100644
31936--- a/arch/x86/oprofile/op_x86_model.h
31937+++ b/arch/x86/oprofile/op_x86_model.h
31938@@ -52,7 +52,7 @@ struct op_x86_model_spec {
31939 void (*switch_ctrl)(struct op_x86_model_spec const *model,
31940 struct op_msrs const * const msrs);
31941 #endif
31942-};
31943+} __do_const;
31944
31945 struct op_counter_config;
31946
31947diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
31948index e9e6ed5..e47ae67 100644
31949--- a/arch/x86/pci/amd_bus.c
31950+++ b/arch/x86/pci/amd_bus.c
31951@@ -337,7 +337,7 @@ static int __cpuinit amd_cpu_notify(struct notifier_block *self,
31952 return NOTIFY_OK;
31953 }
31954
31955-static struct notifier_block __cpuinitdata amd_cpu_notifier = {
31956+static struct notifier_block amd_cpu_notifier = {
31957 .notifier_call = amd_cpu_notify,
31958 };
31959
31960diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
31961index 372e9b8..e775a6c 100644
31962--- a/arch/x86/pci/irq.c
31963+++ b/arch/x86/pci/irq.c
31964@@ -50,7 +50,7 @@ struct irq_router {
31965 struct irq_router_handler {
31966 u16 vendor;
31967 int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
31968-};
31969+} __do_const;
31970
31971 int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
31972 void (*pcibios_disable_irq)(struct pci_dev *dev) = NULL;
31973@@ -794,7 +794,7 @@ static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router
31974 return 0;
31975 }
31976
31977-static __initdata struct irq_router_handler pirq_routers[] = {
31978+static __initconst const struct irq_router_handler pirq_routers[] = {
31979 { PCI_VENDOR_ID_INTEL, intel_router_probe },
31980 { PCI_VENDOR_ID_AL, ali_router_probe },
31981 { PCI_VENDOR_ID_ITE, ite_router_probe },
31982@@ -821,7 +821,7 @@ static struct pci_dev *pirq_router_dev;
31983 static void __init pirq_find_router(struct irq_router *r)
31984 {
31985 struct irq_routing_table *rt = pirq_table;
31986- struct irq_router_handler *h;
31987+ const struct irq_router_handler *h;
31988
31989 #ifdef CONFIG_PCI_BIOS
31990 if (!rt->signature) {
31991@@ -1094,7 +1094,7 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d)
31992 return 0;
31993 }
31994
31995-static struct dmi_system_id __initdata pciirq_dmi_table[] = {
31996+static const struct dmi_system_id __initconst pciirq_dmi_table[] = {
31997 {
31998 .callback = fix_broken_hp_bios_irq9,
31999 .ident = "HP Pavilion N5400 Series Laptop",
32000diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
32001index 6eb18c4..20d83de 100644
32002--- a/arch/x86/pci/mrst.c
32003+++ b/arch/x86/pci/mrst.c
32004@@ -238,7 +238,9 @@ int __init pci_mrst_init(void)
32005 printk(KERN_INFO "Intel MID platform detected, using MID PCI ops\n");
32006 pci_mmcfg_late_init();
32007 pcibios_enable_irq = mrst_pci_irq_enable;
32008- pci_root_ops = pci_mrst_ops;
32009+ pax_open_kernel();
32010+ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
32011+ pax_close_kernel();
32012 pci_soc_mode = 1;
32013 /* Continue with standard init */
32014 return 1;
32015diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
32016index c77b24a..c979855 100644
32017--- a/arch/x86/pci/pcbios.c
32018+++ b/arch/x86/pci/pcbios.c
32019@@ -79,7 +79,7 @@ union bios32 {
32020 static struct {
32021 unsigned long address;
32022 unsigned short segment;
32023-} bios32_indirect = { 0, __KERNEL_CS };
32024+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
32025
32026 /*
32027 * Returns the entry point for the given service, NULL on error
32028@@ -92,37 +92,80 @@ static unsigned long bios32_service(unsigned long service)
32029 unsigned long length; /* %ecx */
32030 unsigned long entry; /* %edx */
32031 unsigned long flags;
32032+ struct desc_struct d, *gdt;
32033
32034 local_irq_save(flags);
32035- __asm__("lcall *(%%edi); cld"
32036+
32037+ gdt = get_cpu_gdt_table(smp_processor_id());
32038+
32039+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
32040+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
32041+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
32042+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
32043+
32044+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
32045 : "=a" (return_code),
32046 "=b" (address),
32047 "=c" (length),
32048 "=d" (entry)
32049 : "0" (service),
32050 "1" (0),
32051- "D" (&bios32_indirect));
32052+ "D" (&bios32_indirect),
32053+ "r"(__PCIBIOS_DS)
32054+ : "memory");
32055+
32056+ pax_open_kernel();
32057+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
32058+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
32059+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
32060+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
32061+ pax_close_kernel();
32062+
32063 local_irq_restore(flags);
32064
32065 switch (return_code) {
32066- case 0:
32067- return address + entry;
32068- case 0x80: /* Not present */
32069- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
32070- return 0;
32071- default: /* Shouldn't happen */
32072- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
32073- service, return_code);
32074+ case 0: {
32075+ int cpu;
32076+ unsigned char flags;
32077+
32078+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
32079+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
32080+ printk(KERN_WARNING "bios32_service: not valid\n");
32081 return 0;
32082+ }
32083+ address = address + PAGE_OFFSET;
32084+ length += 16UL; /* some BIOSs underreport this... */
32085+ flags = 4;
32086+ if (length >= 64*1024*1024) {
32087+ length >>= PAGE_SHIFT;
32088+ flags |= 8;
32089+ }
32090+
32091+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
32092+ gdt = get_cpu_gdt_table(cpu);
32093+ pack_descriptor(&d, address, length, 0x9b, flags);
32094+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
32095+ pack_descriptor(&d, address, length, 0x93, flags);
32096+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
32097+ }
32098+ return entry;
32099+ }
32100+ case 0x80: /* Not present */
32101+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
32102+ return 0;
32103+ default: /* Shouldn't happen */
32104+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
32105+ service, return_code);
32106+ return 0;
32107 }
32108 }
32109
32110 static struct {
32111 unsigned long address;
32112 unsigned short segment;
32113-} pci_indirect = { 0, __KERNEL_CS };
32114+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
32115
32116-static int pci_bios_present;
32117+static int pci_bios_present __read_only;
32118
32119 static int check_pcibios(void)
32120 {
32121@@ -131,11 +174,13 @@ static int check_pcibios(void)
32122 unsigned long flags, pcibios_entry;
32123
32124 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
32125- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
32126+ pci_indirect.address = pcibios_entry;
32127
32128 local_irq_save(flags);
32129- __asm__(
32130- "lcall *(%%edi); cld\n\t"
32131+ __asm__("movw %w6, %%ds\n\t"
32132+ "lcall *%%ss:(%%edi); cld\n\t"
32133+ "push %%ss\n\t"
32134+ "pop %%ds\n\t"
32135 "jc 1f\n\t"
32136 "xor %%ah, %%ah\n"
32137 "1:"
32138@@ -144,7 +189,8 @@ static int check_pcibios(void)
32139 "=b" (ebx),
32140 "=c" (ecx)
32141 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
32142- "D" (&pci_indirect)
32143+ "D" (&pci_indirect),
32144+ "r" (__PCIBIOS_DS)
32145 : "memory");
32146 local_irq_restore(flags);
32147
32148@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
32149
32150 switch (len) {
32151 case 1:
32152- __asm__("lcall *(%%esi); cld\n\t"
32153+ __asm__("movw %w6, %%ds\n\t"
32154+ "lcall *%%ss:(%%esi); cld\n\t"
32155+ "push %%ss\n\t"
32156+ "pop %%ds\n\t"
32157 "jc 1f\n\t"
32158 "xor %%ah, %%ah\n"
32159 "1:"
32160@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
32161 : "1" (PCIBIOS_READ_CONFIG_BYTE),
32162 "b" (bx),
32163 "D" ((long)reg),
32164- "S" (&pci_indirect));
32165+ "S" (&pci_indirect),
32166+ "r" (__PCIBIOS_DS));
32167 /*
32168 * Zero-extend the result beyond 8 bits, do not trust the
32169 * BIOS having done it:
32170@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
32171 *value &= 0xff;
32172 break;
32173 case 2:
32174- __asm__("lcall *(%%esi); cld\n\t"
32175+ __asm__("movw %w6, %%ds\n\t"
32176+ "lcall *%%ss:(%%esi); cld\n\t"
32177+ "push %%ss\n\t"
32178+ "pop %%ds\n\t"
32179 "jc 1f\n\t"
32180 "xor %%ah, %%ah\n"
32181 "1:"
32182@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
32183 : "1" (PCIBIOS_READ_CONFIG_WORD),
32184 "b" (bx),
32185 "D" ((long)reg),
32186- "S" (&pci_indirect));
32187+ "S" (&pci_indirect),
32188+ "r" (__PCIBIOS_DS));
32189 /*
32190 * Zero-extend the result beyond 16 bits, do not trust the
32191 * BIOS having done it:
32192@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
32193 *value &= 0xffff;
32194 break;
32195 case 4:
32196- __asm__("lcall *(%%esi); cld\n\t"
32197+ __asm__("movw %w6, %%ds\n\t"
32198+ "lcall *%%ss:(%%esi); cld\n\t"
32199+ "push %%ss\n\t"
32200+ "pop %%ds\n\t"
32201 "jc 1f\n\t"
32202 "xor %%ah, %%ah\n"
32203 "1:"
32204@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
32205 : "1" (PCIBIOS_READ_CONFIG_DWORD),
32206 "b" (bx),
32207 "D" ((long)reg),
32208- "S" (&pci_indirect));
32209+ "S" (&pci_indirect),
32210+ "r" (__PCIBIOS_DS));
32211 break;
32212 }
32213
32214@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
32215
32216 switch (len) {
32217 case 1:
32218- __asm__("lcall *(%%esi); cld\n\t"
32219+ __asm__("movw %w6, %%ds\n\t"
32220+ "lcall *%%ss:(%%esi); cld\n\t"
32221+ "push %%ss\n\t"
32222+ "pop %%ds\n\t"
32223 "jc 1f\n\t"
32224 "xor %%ah, %%ah\n"
32225 "1:"
32226@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
32227 "c" (value),
32228 "b" (bx),
32229 "D" ((long)reg),
32230- "S" (&pci_indirect));
32231+ "S" (&pci_indirect),
32232+ "r" (__PCIBIOS_DS));
32233 break;
32234 case 2:
32235- __asm__("lcall *(%%esi); cld\n\t"
32236+ __asm__("movw %w6, %%ds\n\t"
32237+ "lcall *%%ss:(%%esi); cld\n\t"
32238+ "push %%ss\n\t"
32239+ "pop %%ds\n\t"
32240 "jc 1f\n\t"
32241 "xor %%ah, %%ah\n"
32242 "1:"
32243@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
32244 "c" (value),
32245 "b" (bx),
32246 "D" ((long)reg),
32247- "S" (&pci_indirect));
32248+ "S" (&pci_indirect),
32249+ "r" (__PCIBIOS_DS));
32250 break;
32251 case 4:
32252- __asm__("lcall *(%%esi); cld\n\t"
32253+ __asm__("movw %w6, %%ds\n\t"
32254+ "lcall *%%ss:(%%esi); cld\n\t"
32255+ "push %%ss\n\t"
32256+ "pop %%ds\n\t"
32257 "jc 1f\n\t"
32258 "xor %%ah, %%ah\n"
32259 "1:"
32260@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
32261 "c" (value),
32262 "b" (bx),
32263 "D" ((long)reg),
32264- "S" (&pci_indirect));
32265+ "S" (&pci_indirect),
32266+ "r" (__PCIBIOS_DS));
32267 break;
32268 }
32269
32270@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
32271
32272 DBG("PCI: Fetching IRQ routing table... ");
32273 __asm__("push %%es\n\t"
32274+ "movw %w8, %%ds\n\t"
32275 "push %%ds\n\t"
32276 "pop %%es\n\t"
32277- "lcall *(%%esi); cld\n\t"
32278+ "lcall *%%ss:(%%esi); cld\n\t"
32279 "pop %%es\n\t"
32280+ "push %%ss\n\t"
32281+ "pop %%ds\n"
32282 "jc 1f\n\t"
32283 "xor %%ah, %%ah\n"
32284 "1:"
32285@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
32286 "1" (0),
32287 "D" ((long) &opt),
32288 "S" (&pci_indirect),
32289- "m" (opt)
32290+ "m" (opt),
32291+ "r" (__PCIBIOS_DS)
32292 : "memory");
32293 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
32294 if (ret & 0xff00)
32295@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
32296 {
32297 int ret;
32298
32299- __asm__("lcall *(%%esi); cld\n\t"
32300+ __asm__("movw %w5, %%ds\n\t"
32301+ "lcall *%%ss:(%%esi); cld\n\t"
32302+ "push %%ss\n\t"
32303+ "pop %%ds\n"
32304 "jc 1f\n\t"
32305 "xor %%ah, %%ah\n"
32306 "1:"
32307@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
32308 : "0" (PCIBIOS_SET_PCI_HW_INT),
32309 "b" ((dev->bus->number << 8) | dev->devfn),
32310 "c" ((irq << 8) | (pin + 10)),
32311- "S" (&pci_indirect));
32312+ "S" (&pci_indirect),
32313+ "r" (__PCIBIOS_DS));
32314 return !(ret & 0xff00);
32315 }
32316 EXPORT_SYMBOL(pcibios_set_irq_routing);
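
All BIOS32/PCI BIOS entry points above are rewritten to one pattern: load the dedicated flat data segment, make the far call through the now read-only indirect pointer, then restore %ds from %ss. The hardened kernel segments no longer give the BIOS the flat 4 GB %ds it assumes, so scratch __PCIBIOS_CS/__PCIBIOS_DS descriptors are installed in the GDT just for these calls. The recurring asm skeleton, as used in the hunks:

	movw	%w<imm>, %ds		/* enter: flat __PCIBIOS_DS  */
	lcall	*%ss:(%esi)		/* far call via pci_indirect */
	push	%ss
	pop	%ds			/* leave: kernel %ds again   */
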
32317diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
32318index 40e4469..d915bf9 100644
32319--- a/arch/x86/platform/efi/efi_32.c
32320+++ b/arch/x86/platform/efi/efi_32.c
32321@@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
32322 {
32323 struct desc_ptr gdt_descr;
32324
32325+#ifdef CONFIG_PAX_KERNEXEC
32326+ struct desc_struct d;
32327+#endif
32328+
32329 local_irq_save(efi_rt_eflags);
32330
32331 load_cr3(initial_page_table);
32332 __flush_tlb_all();
32333
32334+#ifdef CONFIG_PAX_KERNEXEC
32335+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
32336+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
32337+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
32338+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
32339+#endif
32340+
32341 gdt_descr.address = __pa(get_cpu_gdt_table(0));
32342 gdt_descr.size = GDT_SIZE - 1;
32343 load_gdt(&gdt_descr);
32344@@ -58,11 +69,24 @@ void efi_call_phys_epilog(void)
32345 {
32346 struct desc_ptr gdt_descr;
32347
32348+#ifdef CONFIG_PAX_KERNEXEC
32349+ struct desc_struct d;
32350+
32351+ memset(&d, 0, sizeof d);
32352+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
32353+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
32354+#endif
32355+
32356 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
32357 gdt_descr.size = GDT_SIZE - 1;
32358 load_gdt(&gdt_descr);
32359
32360+#ifdef CONFIG_PAX_PER_CPU_PGD
32361+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
32362+#else
32363 load_cr3(swapper_pg_dir);
32364+#endif
32365+
32366 __flush_tlb_all();
32367
32368 local_irq_restore(efi_rt_eflags);
32369diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
32370index 39a0e7f1..872396e 100644
32371--- a/arch/x86/platform/efi/efi_64.c
32372+++ b/arch/x86/platform/efi/efi_64.c
32373@@ -76,6 +76,11 @@ void __init efi_call_phys_prelog(void)
32374 vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
32375 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
32376 }
32377+
32378+#ifdef CONFIG_PAX_PER_CPU_PGD
32379+ load_cr3(swapper_pg_dir);
32380+#endif
32381+
32382 __flush_tlb_all();
32383 }
32384
32385@@ -89,6 +94,11 @@ void __init efi_call_phys_epilog(void)
32386 for (pgd = 0; pgd < n_pgds; pgd++)
32387 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
32388 kfree(save_pgd);
32389+
32390+#ifdef CONFIG_PAX_PER_CPU_PGD
32391+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
32392+#endif
32393+
32394 __flush_tlb_all();
32395 local_irq_restore(efi_flags);
32396 early_code_mapping_set_exec(0);
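
On both 32- and 64-bit, CONFIG_PAX_PER_CPU_PGD gives every CPU its own kernel page directory, so code that temporarily needs the global tables (here, the identity mapping for the EFI call) must switch back to the per-CPU PGD explicitly rather than reloading swapper_pg_dir. A condensed sketch of the bracket, with get_cpu_pgd() as defined elsewhere in this patch:

        load_cr3(swapper_pg_dir);       /* global tables while firmware runs */
        __flush_tlb_all();
        /* ... EFI runtime service call ... */
        load_cr3(get_cpu_pgd(smp_processor_id(), kernel));  /* back to this CPU */
        __flush_tlb_all();
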
32397diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
32398index fbe66e6..eae5e38 100644
32399--- a/arch/x86/platform/efi/efi_stub_32.S
32400+++ b/arch/x86/platform/efi/efi_stub_32.S
32401@@ -6,7 +6,9 @@
32402 */
32403
32404 #include <linux/linkage.h>
32405+#include <linux/init.h>
32406 #include <asm/page_types.h>
32407+#include <asm/segment.h>
32408
32409 /*
32410 * efi_call_phys(void *, ...) is a function with variable parameters.
32411@@ -20,7 +22,7 @@
32412 * service functions will comply with gcc calling convention, too.
32413 */
32414
32415-.text
32416+__INIT
32417 ENTRY(efi_call_phys)
32418 /*
32419 * 0. The function can only be called in Linux kernel. So CS has been
32420@@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
32421 * The mapping of lower virtual memory has been created in prelog and
32422 * epilog.
32423 */
32424- movl $1f, %edx
32425- subl $__PAGE_OFFSET, %edx
32426- jmp *%edx
32427+#ifdef CONFIG_PAX_KERNEXEC
32428+ movl $(__KERNEXEC_EFI_DS), %edx
32429+ mov %edx, %ds
32430+ mov %edx, %es
32431+ mov %edx, %ss
32432+ addl $2f,(1f)
32433+ ljmp *(1f)
32434+
32435+__INITDATA
32436+1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
32437+.previous
32438+
32439+2:
32440+ subl $2b,(1b)
32441+#else
32442+ jmp 1f-__PAGE_OFFSET
32443 1:
32444+#endif
32445
32446 /*
32447 * 2. Now on the top of stack is the return
32448@@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
32449 * parameter 2, ..., param n. To make things easy, we save the return
32450 * address of efi_call_phys in a global variable.
32451 */
32452- popl %edx
32453- movl %edx, saved_return_addr
32454- /* get the function pointer into ECX*/
32455- popl %ecx
32456- movl %ecx, efi_rt_function_ptr
32457- movl $2f, %edx
32458- subl $__PAGE_OFFSET, %edx
32459- pushl %edx
32460+ popl (saved_return_addr)
32461+ popl (efi_rt_function_ptr)
32462
32463 /*
32464 * 3. Clear PG bit in %CR0.
32465@@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
32466 /*
32467 * 5. Call the physical function.
32468 */
32469- jmp *%ecx
32470+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
32471
32472-2:
32473 /*
32474 * 6. After EFI runtime service returns, control will return to
32475 * following instruction. We'd better readjust stack pointer first.
32476@@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
32477 movl %cr0, %edx
32478 orl $0x80000000, %edx
32479 movl %edx, %cr0
32480- jmp 1f
32481-1:
32482+
32483 /*
32484 * 8. Now restore the virtual mode from flat mode by
32485 * adding EIP with PAGE_OFFSET.
32486 */
32487- movl $1f, %edx
32488- jmp *%edx
32489+#ifdef CONFIG_PAX_KERNEXEC
32490+ movl $(__KERNEL_DS), %edx
32491+ mov %edx, %ds
32492+ mov %edx, %es
32493+ mov %edx, %ss
32494+ ljmp $(__KERNEL_CS),$1f
32495+#else
32496+ jmp 1f+__PAGE_OFFSET
32497+#endif
32498 1:
32499
32500 /*
 32501 	 * 9. Balance the stack. And because EAX contains the return value,
32502 * we'd better not clobber it.
32503 */
32504- leal efi_rt_function_ptr, %edx
32505- movl (%edx), %ecx
32506- pushl %ecx
32507+ pushl (efi_rt_function_ptr)
32508
32509 /*
32510- * 10. Push the saved return address onto the stack and return.
32511+ * 10. Return to the saved return address.
32512 */
32513- leal saved_return_addr, %edx
32514- movl (%edx), %ecx
32515- pushl %ecx
32516- ret
32517+ jmpl *(saved_return_addr)
32518 ENDPROC(efi_call_phys)
32519 .previous
32520
32521-.data
32522+__INITDATA
32523 saved_return_addr:
32524 .long 0
32525 efi_rt_function_ptr:
32526diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
32527index 4c07cca..2c8427d 100644
32528--- a/arch/x86/platform/efi/efi_stub_64.S
32529+++ b/arch/x86/platform/efi/efi_stub_64.S
32530@@ -7,6 +7,7 @@
32531 */
32532
32533 #include <linux/linkage.h>
32534+#include <asm/alternative-asm.h>
32535
32536 #define SAVE_XMM \
32537 mov %rsp, %rax; \
32538@@ -40,6 +41,7 @@ ENTRY(efi_call0)
32539 call *%rdi
32540 addq $32, %rsp
32541 RESTORE_XMM
32542+ pax_force_retaddr 0, 1
32543 ret
32544 ENDPROC(efi_call0)
32545
32546@@ -50,6 +52,7 @@ ENTRY(efi_call1)
32547 call *%rdi
32548 addq $32, %rsp
32549 RESTORE_XMM
32550+ pax_force_retaddr 0, 1
32551 ret
32552 ENDPROC(efi_call1)
32553
32554@@ -60,6 +63,7 @@ ENTRY(efi_call2)
32555 call *%rdi
32556 addq $32, %rsp
32557 RESTORE_XMM
32558+ pax_force_retaddr 0, 1
32559 ret
32560 ENDPROC(efi_call2)
32561
32562@@ -71,6 +75,7 @@ ENTRY(efi_call3)
32563 call *%rdi
32564 addq $32, %rsp
32565 RESTORE_XMM
32566+ pax_force_retaddr 0, 1
32567 ret
32568 ENDPROC(efi_call3)
32569
32570@@ -83,6 +88,7 @@ ENTRY(efi_call4)
32571 call *%rdi
32572 addq $32, %rsp
32573 RESTORE_XMM
32574+ pax_force_retaddr 0, 1
32575 ret
32576 ENDPROC(efi_call4)
32577
32578@@ -96,6 +102,7 @@ ENTRY(efi_call5)
32579 call *%rdi
32580 addq $48, %rsp
32581 RESTORE_XMM
32582+ pax_force_retaddr 0, 1
32583 ret
32584 ENDPROC(efi_call5)
32585
32586@@ -112,5 +119,6 @@ ENTRY(efi_call6)
32587 call *%rdi
32588 addq $48, %rsp
32589 RESTORE_XMM
32590+ pax_force_retaddr 0, 1
32591 ret
32592 ENDPROC(efi_call6)
32593diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
32594index a0a0a43..a48e233 100644
32595--- a/arch/x86/platform/mrst/mrst.c
32596+++ b/arch/x86/platform/mrst/mrst.c
32597@@ -78,13 +78,15 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
32598 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
32599 int sfi_mrtc_num;
32600
32601-static void mrst_power_off(void)
32602+static __noreturn void mrst_power_off(void)
32603 {
32604+ BUG();
32605 }
32606
32607-static void mrst_reboot(void)
32608+static __noreturn void mrst_reboot(void)
32609 {
32610 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
32611+ BUG();
32612 }
32613
32614 /* parse all the mtimer info to a static mtimer array */
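
Marking the power-off and reboot callbacks __noreturn only holds if the body really cannot fall through, which is why both now end in BUG(): the stubbed power-off has nothing else to do, and the reboot path must not return even if the SCU command fails. A generic sketch of the convention (issue_platform_poweroff() is a hypothetical stand-in):

        static __noreturn void example_power_off(void)
        {
                issue_platform_poweroff();      /* hypothetical firmware request */
                BUG();                          /* guarantees we never return */
        }
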
32615diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
32616index d6ee929..3637cb5 100644
32617--- a/arch/x86/platform/olpc/olpc_dt.c
32618+++ b/arch/x86/platform/olpc/olpc_dt.c
32619@@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned long size)
32620 return res;
32621 }
32622
32623-static struct of_pdt_ops prom_olpc_ops __initdata = {
32624+static struct of_pdt_ops prom_olpc_ops __initconst = {
32625 .nextprop = olpc_dt_nextprop,
32626 .getproplen = olpc_dt_getproplen,
32627 .getproperty = olpc_dt_getproperty,
32628diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
32629index 1cf5b30..fd45732 100644
32630--- a/arch/x86/power/cpu.c
32631+++ b/arch/x86/power/cpu.c
32632@@ -137,11 +137,8 @@ static void do_fpu_end(void)
32633 static void fix_processor_context(void)
32634 {
32635 int cpu = smp_processor_id();
32636- struct tss_struct *t = &per_cpu(init_tss, cpu);
32637-#ifdef CONFIG_X86_64
32638- struct desc_struct *desc = get_cpu_gdt_table(cpu);
32639- tss_desc tss;
32640-#endif
32641+ struct tss_struct *t = init_tss + cpu;
32642+
32643 set_tss_desc(cpu, t); /*
32644 * This just modifies memory; should not be
32645 * necessary. But... This is necessary, because
32646@@ -150,10 +147,6 @@ static void fix_processor_context(void)
32647 */
32648
32649 #ifdef CONFIG_X86_64
32650- memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
32651- tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91 */
32652- write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
32653-
32654 syscall_init(); /* This sets MSR_*STAR and related */
32655 #endif
32656 load_TR_desc(); /* This does ltr */
32657diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
32658index a44f457..9140171 100644
32659--- a/arch/x86/realmode/init.c
32660+++ b/arch/x86/realmode/init.c
32661@@ -70,7 +70,13 @@ void __init setup_real_mode(void)
32662 __va(real_mode_header->trampoline_header);
32663
32664 #ifdef CONFIG_X86_32
32665- trampoline_header->start = __pa_symbol(startup_32_smp);
32666+ trampoline_header->start = __pa_symbol(ktla_ktva(startup_32_smp));
32667+
32668+#ifdef CONFIG_PAX_KERNEXEC
32669+ trampoline_header->start -= LOAD_PHYSICAL_ADDR;
32670+#endif
32671+
32672+ trampoline_header->boot_cs = __BOOT_CS;
32673 trampoline_header->gdt_limit = __BOOT_DS + 7;
32674 trampoline_header->gdt_base = __pa_symbol(boot_gdt);
32675 #else
32676@@ -86,7 +92,7 @@ void __init setup_real_mode(void)
32677 *trampoline_cr4_features = read_cr4();
32678
32679 trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
32680- trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
32681+ trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd & ~_PAGE_NX;
32682 trampoline_pgd[511] = init_level4_pgt[511].pgd;
32683 #endif
32684 }
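
The 64-bit trampoline reuses the kernel's identity PGD entry, but that entry normally carries _PAGE_NX; since the AP fetches real-mode trampoline code through this mapping, the bit has to be stripped or the first instruction fetch would fault. Sketch of the adjustment:

        /* reuse the identity mapping, but make it executable again */
        trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd & ~_PAGE_NX;
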
32685diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
32686index 8869287..d577672 100644
32687--- a/arch/x86/realmode/rm/Makefile
32688+++ b/arch/x86/realmode/rm/Makefile
32689@@ -78,5 +78,8 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ -D_WAKEUP \
32690 $(call cc-option, -fno-unit-at-a-time)) \
32691 $(call cc-option, -fno-stack-protector) \
32692 $(call cc-option, -mpreferred-stack-boundary=2)
32693+ifdef CONSTIFY_PLUGIN
32694+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
32695+endif
32696 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
32697 GCOV_PROFILE := n
32698diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
32699index a28221d..93c40f1 100644
32700--- a/arch/x86/realmode/rm/header.S
32701+++ b/arch/x86/realmode/rm/header.S
32702@@ -30,7 +30,9 @@ GLOBAL(real_mode_header)
32703 #endif
32704 /* APM/BIOS reboot */
32705 .long pa_machine_real_restart_asm
32706-#ifdef CONFIG_X86_64
32707+#ifdef CONFIG_X86_32
32708+ .long __KERNEL_CS
32709+#else
32710 .long __KERNEL32_CS
32711 #endif
32712 END(real_mode_header)
32713diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
32714index c1b2791..f9e31c7 100644
32715--- a/arch/x86/realmode/rm/trampoline_32.S
32716+++ b/arch/x86/realmode/rm/trampoline_32.S
32717@@ -25,6 +25,12 @@
32718 #include <asm/page_types.h>
32719 #include "realmode.h"
32720
32721+#ifdef CONFIG_PAX_KERNEXEC
32722+#define ta(X) (X)
32723+#else
32724+#define ta(X) (pa_ ## X)
32725+#endif
32726+
32727 .text
32728 .code16
32729
32730@@ -39,8 +45,6 @@ ENTRY(trampoline_start)
32731
32732 cli # We should be safe anyway
32733
32734- movl tr_start, %eax # where we need to go
32735-
32736 movl $0xA5A5A5A5, trampoline_status
32737 # write marker for master knows we're running
32738
32739@@ -56,7 +60,7 @@ ENTRY(trampoline_start)
32740 movw $1, %dx # protected mode (PE) bit
32741 lmsw %dx # into protected mode
32742
32743- ljmpl $__BOOT_CS, $pa_startup_32
32744+ ljmpl *(trampoline_header)
32745
32746 .section ".text32","ax"
32747 .code32
32748@@ -67,7 +71,7 @@ ENTRY(startup_32) # note: also used from wakeup_asm.S
32749 .balign 8
32750 GLOBAL(trampoline_header)
32751 tr_start: .space 4
32752- tr_gdt_pad: .space 2
32753+ tr_boot_cs: .space 2
32754 tr_gdt: .space 6
32755 END(trampoline_header)
32756
32757diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
32758index bb360dc..d0fd8f8 100644
32759--- a/arch/x86/realmode/rm/trampoline_64.S
32760+++ b/arch/x86/realmode/rm/trampoline_64.S
32761@@ -94,6 +94,7 @@ ENTRY(startup_32)
32762 movl %edx, %gs
32763
32764 movl pa_tr_cr4, %eax
32765+ andl $~X86_CR4_PCIDE, %eax
32766 movl %eax, %cr4 # Enable PAE mode
32767
32768 # Setup trampoline 4 level pagetables
32769@@ -107,7 +108,7 @@ ENTRY(startup_32)
32770 wrmsr
32771
32772 # Enable paging and in turn activate Long Mode
32773- movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
32774+ movl $(X86_CR0_PG | X86_CR0_PE), %eax
32775 movl %eax, %cr0
32776
32777 /*
32778diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
32779index e812034..c747134 100644
32780--- a/arch/x86/tools/Makefile
32781+++ b/arch/x86/tools/Makefile
32782@@ -37,7 +37,7 @@ $(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/in
32783
32784 $(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
32785
32786-HOST_EXTRACFLAGS += -I$(srctree)/tools/include
32787+HOST_EXTRACFLAGS += -I$(srctree)/tools/include -ggdb
32788 hostprogs-y += relocs
32789 relocs-objs := relocs_32.o relocs_64.o relocs_common.o
32790 relocs: $(obj)/relocs
32791diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
32792index f7bab68..b6d9886 100644
32793--- a/arch/x86/tools/relocs.c
32794+++ b/arch/x86/tools/relocs.c
32795@@ -1,5 +1,7 @@
32796 /* This is included from relocs_32/64.c */
32797
32798+#include "../../../include/generated/autoconf.h"
32799+
32800 #define ElfW(type) _ElfW(ELF_BITS, type)
32801 #define _ElfW(bits, type) __ElfW(bits, type)
32802 #define __ElfW(bits, type) Elf##bits##_##type
32803@@ -11,6 +13,7 @@
32804 #define Elf_Sym ElfW(Sym)
32805
32806 static Elf_Ehdr ehdr;
32807+static Elf_Phdr *phdr;
32808
32809 struct relocs {
32810 uint32_t *offset;
32811@@ -383,9 +386,39 @@ static void read_ehdr(FILE *fp)
32812 }
32813 }
32814
32815+static void read_phdrs(FILE *fp)
32816+{
32817+ unsigned int i;
32818+
32819+ phdr = calloc(ehdr.e_phnum, sizeof(Elf_Phdr));
32820+ if (!phdr) {
32821+ die("Unable to allocate %d program headers\n",
32822+ ehdr.e_phnum);
32823+ }
32824+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
32825+ die("Seek to %d failed: %s\n",
32826+ ehdr.e_phoff, strerror(errno));
32827+ }
32828+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
32829+ die("Cannot read ELF program headers: %s\n",
32830+ strerror(errno));
32831+ }
32832+ for(i = 0; i < ehdr.e_phnum; i++) {
32833+ phdr[i].p_type = elf_word_to_cpu(phdr[i].p_type);
32834+ phdr[i].p_offset = elf_off_to_cpu(phdr[i].p_offset);
32835+ phdr[i].p_vaddr = elf_addr_to_cpu(phdr[i].p_vaddr);
32836+ phdr[i].p_paddr = elf_addr_to_cpu(phdr[i].p_paddr);
32837+ phdr[i].p_filesz = elf_word_to_cpu(phdr[i].p_filesz);
32838+ phdr[i].p_memsz = elf_word_to_cpu(phdr[i].p_memsz);
32839+ phdr[i].p_flags = elf_word_to_cpu(phdr[i].p_flags);
32840+ phdr[i].p_align = elf_word_to_cpu(phdr[i].p_align);
32841+ }
32842+
32843+}
32844+
32845 static void read_shdrs(FILE *fp)
32846 {
32847- int i;
32848+ unsigned int i;
32849 Elf_Shdr shdr;
32850
32851 secs = calloc(ehdr.e_shnum, sizeof(struct section));
32852@@ -420,7 +453,7 @@ static void read_shdrs(FILE *fp)
32853
32854 static void read_strtabs(FILE *fp)
32855 {
32856- int i;
32857+ unsigned int i;
32858 for (i = 0; i < ehdr.e_shnum; i++) {
32859 struct section *sec = &secs[i];
32860 if (sec->shdr.sh_type != SHT_STRTAB) {
32861@@ -445,7 +478,7 @@ static void read_strtabs(FILE *fp)
32862
32863 static void read_symtabs(FILE *fp)
32864 {
32865- int i,j;
32866+ unsigned int i,j;
32867 for (i = 0; i < ehdr.e_shnum; i++) {
32868 struct section *sec = &secs[i];
32869 if (sec->shdr.sh_type != SHT_SYMTAB) {
32870@@ -476,9 +509,11 @@ static void read_symtabs(FILE *fp)
32871 }
32872
32873
32874-static void read_relocs(FILE *fp)
32875+static void read_relocs(FILE *fp, int use_real_mode)
32876 {
32877- int i,j;
32878+ unsigned int i,j;
32879+ uint32_t base;
32880+
32881 for (i = 0; i < ehdr.e_shnum; i++) {
32882 struct section *sec = &secs[i];
32883 if (sec->shdr.sh_type != SHT_REL_TYPE) {
32884@@ -498,9 +533,22 @@ static void read_relocs(FILE *fp)
32885 die("Cannot read symbol table: %s\n",
32886 strerror(errno));
32887 }
32888+ base = 0;
32889+
32890+#ifdef CONFIG_X86_32
32891+ for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
32892+ if (phdr[j].p_type != PT_LOAD )
32893+ continue;
32894+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
32895+ continue;
32896+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
32897+ break;
32898+ }
32899+#endif
32900+
32901 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
32902 Elf_Rel *rel = &sec->reltab[j];
32903- rel->r_offset = elf_addr_to_cpu(rel->r_offset);
32904+ rel->r_offset = elf_addr_to_cpu(rel->r_offset) + base;
32905 rel->r_info = elf_xword_to_cpu(rel->r_info);
32906 #if (SHT_REL_TYPE == SHT_RELA)
32907 rel->r_addend = elf_xword_to_cpu(rel->r_addend);
32908@@ -512,7 +560,7 @@ static void read_relocs(FILE *fp)
32909
32910 static void print_absolute_symbols(void)
32911 {
32912- int i;
32913+ unsigned int i;
32914 const char *format;
32915
32916 if (ELF_BITS == 64)
32917@@ -525,7 +573,7 @@ static void print_absolute_symbols(void)
32918 for (i = 0; i < ehdr.e_shnum; i++) {
32919 struct section *sec = &secs[i];
32920 char *sym_strtab;
32921- int j;
32922+ unsigned int j;
32923
32924 if (sec->shdr.sh_type != SHT_SYMTAB) {
32925 continue;
32926@@ -552,7 +600,7 @@ static void print_absolute_symbols(void)
32927
32928 static void print_absolute_relocs(void)
32929 {
32930- int i, printed = 0;
32931+ unsigned int i, printed = 0;
32932 const char *format;
32933
32934 if (ELF_BITS == 64)
32935@@ -565,7 +613,7 @@ static void print_absolute_relocs(void)
32936 struct section *sec_applies, *sec_symtab;
32937 char *sym_strtab;
32938 Elf_Sym *sh_symtab;
32939- int j;
32940+ unsigned int j;
32941 if (sec->shdr.sh_type != SHT_REL_TYPE) {
32942 continue;
32943 }
32944@@ -642,13 +690,13 @@ static void add_reloc(struct relocs *r, uint32_t offset)
32945 static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
32946 Elf_Sym *sym, const char *symname))
32947 {
32948- int i;
32949+ unsigned int i;
32950 /* Walk through the relocations */
32951 for (i = 0; i < ehdr.e_shnum; i++) {
32952 char *sym_strtab;
32953 Elf_Sym *sh_symtab;
32954 struct section *sec_applies, *sec_symtab;
32955- int j;
32956+ unsigned int j;
32957 struct section *sec = &secs[i];
32958
32959 if (sec->shdr.sh_type != SHT_REL_TYPE) {
32960@@ -812,6 +860,23 @@ static int do_reloc32(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
32961 {
32962 unsigned r_type = ELF32_R_TYPE(rel->r_info);
32963 int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname);
32964+ char *sym_strtab = sec->link->link->strtab;
32965+
 32966+	/* Don't relocate actual per-cpu variables; they are absolute indices, not addresses */
32967+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
32968+ return 0;
32969+
32970+#ifdef CONFIG_PAX_KERNEXEC
 32971+	/* Don't relocate actual code; it is relocated implicitly by the base address of KERNEL_CS */
32972+ if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
32973+ return 0;
32974+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
32975+ return 0;
32976+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
32977+ return 0;
32978+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
32979+ return 0;
32980+#endif
32981
32982 switch (r_type) {
32983 case R_386_NONE:
32984@@ -950,7 +1015,7 @@ static int write32_as_text(uint32_t v, FILE *f)
32985
32986 static void emit_relocs(int as_text, int use_real_mode)
32987 {
32988- int i;
32989+ unsigned int i;
32990 int (*write_reloc)(uint32_t, FILE *) = write32;
32991 int (*do_reloc)(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
32992 const char *symname);
32993@@ -1026,10 +1091,11 @@ void process(FILE *fp, int use_real_mode, int as_text,
32994 {
32995 regex_init(use_real_mode);
32996 read_ehdr(fp);
32997+ read_phdrs(fp);
32998 read_shdrs(fp);
32999 read_strtabs(fp);
33000 read_symtabs(fp);
33001- read_relocs(fp);
33002+ read_relocs(fp, use_real_mode);
33003 if (ELF_BITS == 64)
33004 percpu_init();
33005 if (show_absolute_syms) {
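
The new read_phdrs() follows the same shape as the existing read_shdrs(): allocate e_phnum records, seek to e_phoff, read them in, and convert every field from file to host byte order; read_relocs() then scans the PT_LOAD entries on 32-bit to rebase each relocation by CONFIG_PAGE_OFFSET + p_paddr - p_vaddr. A standalone userspace sketch of the loading step for a plain Elf32 file (error handling reduced):

        #include <elf.h>
        #include <stdio.h>
        #include <stdlib.h>

        static Elf32_Phdr *read_phdrs32(FILE *fp, const Elf32_Ehdr *ehdr)
        {
                Elf32_Phdr *phdr = calloc(ehdr->e_phnum, sizeof(*phdr));

                if (!phdr)
                        return NULL;
                if (fseek(fp, ehdr->e_phoff, SEEK_SET) < 0 ||
                    fread(phdr, sizeof(*phdr), ehdr->e_phnum, fp) != ehdr->e_phnum) {
                        free(phdr);
                        return NULL;
                }
                /* fields are still in file byte order here; the relocs tool
                 * converts each one with the elf_*_to_cpu() helpers */
                return phdr;
        }
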
33006diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c
33007index 80ffa5b..a33bd15 100644
33008--- a/arch/x86/um/tls_32.c
33009+++ b/arch/x86/um/tls_32.c
33010@@ -260,7 +260,7 @@ out:
33011 if (unlikely(task == current &&
33012 !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
33013 printk(KERN_ERR "get_tls_entry: task with pid %d got here "
33014- "without flushed TLS.", current->pid);
33015+ "without flushed TLS.", task_pid_nr(current));
33016 }
33017
33018 return 0;
33019diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
33020index fd14be1..e3c79c0 100644
33021--- a/arch/x86/vdso/Makefile
33022+++ b/arch/x86/vdso/Makefile
33023@@ -181,7 +181,7 @@ quiet_cmd_vdso = VDSO $@
33024 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
33025 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
33026
33027-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
33028+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
33029 GCOV_PROFILE := n
33030
33031 #
33032diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
33033index 0faad64..39ef157 100644
33034--- a/arch/x86/vdso/vdso32-setup.c
33035+++ b/arch/x86/vdso/vdso32-setup.c
33036@@ -25,6 +25,7 @@
33037 #include <asm/tlbflush.h>
33038 #include <asm/vdso.h>
33039 #include <asm/proto.h>
33040+#include <asm/mman.h>
33041
33042 enum {
33043 VDSO_DISABLED = 0,
33044@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
33045 void enable_sep_cpu(void)
33046 {
33047 int cpu = get_cpu();
33048- struct tss_struct *tss = &per_cpu(init_tss, cpu);
33049+ struct tss_struct *tss = init_tss + cpu;
33050
33051 if (!boot_cpu_has(X86_FEATURE_SEP)) {
33052 put_cpu();
33053@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
33054 gate_vma.vm_start = FIXADDR_USER_START;
33055 gate_vma.vm_end = FIXADDR_USER_END;
33056 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
33057- gate_vma.vm_page_prot = __P101;
33058+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
33059
33060 return 0;
33061 }
33062@@ -330,14 +331,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
33063 if (compat)
33064 addr = VDSO_HIGH_BASE;
33065 else {
33066- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
33067+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
33068 if (IS_ERR_VALUE(addr)) {
33069 ret = addr;
33070 goto up_fail;
33071 }
33072 }
33073
33074- current->mm->context.vdso = (void *)addr;
33075+ current->mm->context.vdso = addr;
33076
33077 if (compat_uses_vma || !compat) {
33078 /*
33079@@ -353,11 +354,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
33080 }
33081
33082 current_thread_info()->sysenter_return =
33083- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
33084+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
33085
33086 up_fail:
33087 if (ret)
33088- current->mm->context.vdso = NULL;
33089+ current->mm->context.vdso = 0;
33090
33091 up_write(&mm->mmap_sem);
33092
33093@@ -404,8 +405,14 @@ __initcall(ia32_binfmt_init);
33094
33095 const char *arch_vma_name(struct vm_area_struct *vma)
33096 {
33097- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
33098+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
33099 return "[vdso]";
33100+
33101+#ifdef CONFIG_PAX_SEGMEXEC
33102+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
33103+ return "[vdso]";
33104+#endif
33105+
33106 return NULL;
33107 }
33108
33109@@ -415,7 +422,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
33110 * Check to see if the corresponding task was created in compat vdso
33111 * mode.
33112 */
33113- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
33114+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
33115 return &gate_vma;
33116 return NULL;
33117 }
33118diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
33119index 431e875..cbb23f3 100644
33120--- a/arch/x86/vdso/vma.c
33121+++ b/arch/x86/vdso/vma.c
33122@@ -16,8 +16,6 @@
33123 #include <asm/vdso.h>
33124 #include <asm/page.h>
33125
33126-unsigned int __read_mostly vdso_enabled = 1;
33127-
33128 extern char vdso_start[], vdso_end[];
33129 extern unsigned short vdso_sync_cpuid;
33130
33131@@ -141,7 +139,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
33132 * unaligned here as a result of stack start randomization.
33133 */
33134 addr = PAGE_ALIGN(addr);
33135- addr = align_vdso_addr(addr);
33136
33137 return addr;
33138 }
33139@@ -154,30 +151,31 @@ static int setup_additional_pages(struct linux_binprm *bprm,
33140 unsigned size)
33141 {
33142 struct mm_struct *mm = current->mm;
33143- unsigned long addr;
33144+ unsigned long addr = 0;
33145 int ret;
33146
33147- if (!vdso_enabled)
33148- return 0;
33149-
33150 down_write(&mm->mmap_sem);
33151+
33152+#ifdef CONFIG_PAX_RANDMMAP
33153+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
33154+#endif
33155+
33156 addr = vdso_addr(mm->start_stack, size);
33157+ addr = align_vdso_addr(addr);
33158 addr = get_unmapped_area(NULL, addr, size, 0, 0);
33159 if (IS_ERR_VALUE(addr)) {
33160 ret = addr;
33161 goto up_fail;
33162 }
33163
33164- current->mm->context.vdso = (void *)addr;
33165+ mm->context.vdso = addr;
33166
33167 ret = install_special_mapping(mm, addr, size,
33168 VM_READ|VM_EXEC|
33169 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
33170 pages);
33171- if (ret) {
33172- current->mm->context.vdso = NULL;
33173- goto up_fail;
33174- }
33175+ if (ret)
33176+ mm->context.vdso = 0;
33177
33178 up_fail:
33179 up_write(&mm->mmap_sem);
33180@@ -197,10 +195,3 @@ int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
33181 vdsox32_size);
33182 }
33183 #endif
33184-
33185-static __init int vdso_setup(char *s)
33186-{
33187- vdso_enabled = simple_strtoul(s, NULL, 0);
33188- return 0;
33189-}
33190-__setup("vdso=", vdso_setup);
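
After the rewrite the placement logic reads: addr starts at 0, the conventional stack-relative hint is computed only when the task is not under PAX_RANDMMAP, and get_unmapped_area() therefore picks a randomized slot for hardened tasks. A condensed view of the resulting flow (note the unbraced if guards just the hint computation):

        unsigned long addr = 0;

        #ifdef CONFIG_PAX_RANDMMAP
        if (!(mm->pax_flags & MF_PAX_RANDMMAP))
        #endif
                addr = vdso_addr(mm->start_stack, size);

        addr = align_vdso_addr(addr);   /* a zero hint stays zero */
        addr = get_unmapped_area(NULL, addr, size, 0, 0);
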
33191diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
33192index a492be2..08678da 100644
33193--- a/arch/x86/xen/enlighten.c
33194+++ b/arch/x86/xen/enlighten.c
33195@@ -123,8 +123,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
33196
33197 struct shared_info xen_dummy_shared_info;
33198
33199-void *xen_initial_gdt;
33200-
33201 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
33202 __read_mostly int xen_have_vector_callback;
33203 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
33204@@ -542,8 +540,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
33205 {
33206 unsigned long va = dtr->address;
33207 unsigned int size = dtr->size + 1;
33208- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
33209- unsigned long frames[pages];
33210+ unsigned long frames[65536 / PAGE_SIZE];
33211 int f;
33212
33213 /*
33214@@ -591,8 +588,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
33215 {
33216 unsigned long va = dtr->address;
33217 unsigned int size = dtr->size + 1;
33218- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
33219- unsigned long frames[pages];
33220+ unsigned long frames[(GDT_SIZE + PAGE_SIZE - 1) / PAGE_SIZE];
33221 int f;
33222
33223 /*
33224@@ -600,7 +596,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
33225 * 8-byte entries, or 16 4k pages..
33226 */
33227
33228- BUG_ON(size > 65536);
33229+ BUG_ON(size > GDT_SIZE);
33230 BUG_ON(va & ~PAGE_MASK);
33231
33232 for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
33233@@ -985,7 +981,7 @@ static u32 xen_safe_apic_wait_icr_idle(void)
33234 return 0;
33235 }
33236
33237-static void set_xen_basic_apic_ops(void)
33238+static void __init set_xen_basic_apic_ops(void)
33239 {
33240 apic->read = xen_apic_read;
33241 apic->write = xen_apic_write;
33242@@ -1290,30 +1286,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
33243 #endif
33244 };
33245
33246-static void xen_reboot(int reason)
33247+static __noreturn void xen_reboot(int reason)
33248 {
33249 struct sched_shutdown r = { .reason = reason };
33250
33251- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
33252- BUG();
33253+ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
33254+ BUG();
33255 }
33256
33257-static void xen_restart(char *msg)
33258+static __noreturn void xen_restart(char *msg)
33259 {
33260 xen_reboot(SHUTDOWN_reboot);
33261 }
33262
33263-static void xen_emergency_restart(void)
33264+static __noreturn void xen_emergency_restart(void)
33265 {
33266 xen_reboot(SHUTDOWN_reboot);
33267 }
33268
33269-static void xen_machine_halt(void)
33270+static __noreturn void xen_machine_halt(void)
33271 {
33272 xen_reboot(SHUTDOWN_poweroff);
33273 }
33274
33275-static void xen_machine_power_off(void)
33276+static __noreturn void xen_machine_power_off(void)
33277 {
33278 if (pm_power_off)
33279 pm_power_off();
33280@@ -1464,7 +1460,17 @@ asmlinkage void __init xen_start_kernel(void)
33281 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
33282
33283 /* Work out if we support NX */
33284- x86_configure_nx();
33285+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
33286+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
33287+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
33288+ unsigned l, h;
33289+
33290+ __supported_pte_mask |= _PAGE_NX;
33291+ rdmsr(MSR_EFER, l, h);
33292+ l |= EFER_NX;
33293+ wrmsr(MSR_EFER, l, h);
33294+ }
33295+#endif
33296
33297 xen_setup_features();
33298
33299@@ -1495,13 +1501,6 @@ asmlinkage void __init xen_start_kernel(void)
33300
33301 machine_ops = xen_machine_ops;
33302
33303- /*
33304- * The only reliable way to retain the initial address of the
33305- * percpu gdt_page is to remember it here, so we can go and
33306- * mark it RW later, when the initial percpu area is freed.
33307- */
33308- xen_initial_gdt = &per_cpu(gdt_page, 0);
33309-
33310 xen_smp_init();
33311
33312 #ifdef CONFIG_ACPI_NUMA
33313@@ -1700,7 +1699,7 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
33314 return NOTIFY_OK;
33315 }
33316
33317-static struct notifier_block xen_hvm_cpu_notifier __cpuinitdata = {
33318+static struct notifier_block xen_hvm_cpu_notifier = {
33319 .notifier_call = xen_hvm_cpu_notify,
33320 };
33321
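
Both xen_load_gdt() variants drop a variable-length array whose size came from the guest-visible descriptor-table limit and use a compile-time worst case instead: a GDT is at most 64 KiB, and the boot variant additionally tightens its BUG_ON to GDT_SIZE. Sketch of the transformation:

        /* before: stack usage scales with dtr->size (a VLA) */
        /*   unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE; */
        /*   unsigned long frames[pages];                          */

        /* after: fixed worst-case bound, size checked explicitly */
        unsigned long frames[(GDT_SIZE + PAGE_SIZE - 1) / PAGE_SIZE];
        BUG_ON(size > GDT_SIZE);
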
33322diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
33323index fdc3ba2..3daee39 100644
33324--- a/arch/x86/xen/mmu.c
33325+++ b/arch/x86/xen/mmu.c
33326@@ -1894,6 +1894,9 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
33327 /* L3_k[510] -> level2_kernel_pgt
33328 * L3_i[511] -> level2_fixmap_pgt */
33329 convert_pfn_mfn(level3_kernel_pgt);
33330+ convert_pfn_mfn(level3_vmalloc_start_pgt);
33331+ convert_pfn_mfn(level3_vmalloc_end_pgt);
33332+ convert_pfn_mfn(level3_vmemmap_pgt);
33333
33334 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
33335 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
33336@@ -1923,8 +1926,12 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
33337 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
33338 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
33339 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
33340+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
33341+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
33342+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
33343 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
33344 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
33345+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
33346 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
33347 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
33348
33349@@ -2108,6 +2115,7 @@ static void __init xen_post_allocator_init(void)
33350 pv_mmu_ops.set_pud = xen_set_pud;
33351 #if PAGETABLE_LEVELS == 4
33352 pv_mmu_ops.set_pgd = xen_set_pgd;
33353+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
33354 #endif
33355
33356 /* This will work as long as patching hasn't happened yet
33357@@ -2186,6 +2194,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
33358 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
33359 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
33360 .set_pgd = xen_set_pgd_hyper,
33361+ .set_pgd_batched = xen_set_pgd_hyper,
33362
33363 .alloc_pud = xen_alloc_pmd_init,
33364 .release_pud = xen_release_pmd_init,
33365diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
33366index d99cae8..18401e1 100644
33367--- a/arch/x86/xen/smp.c
33368+++ b/arch/x86/xen/smp.c
33369@@ -240,11 +240,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
33370 {
33371 BUG_ON(smp_processor_id() != 0);
33372 native_smp_prepare_boot_cpu();
33373-
33374- /* We've switched to the "real" per-cpu gdt, so make sure the
33375- old memory can be recycled */
33376- make_lowmem_page_readwrite(xen_initial_gdt);
33377-
33378 xen_filter_cpu_maps();
33379 xen_setup_vcpu_info_placement();
33380 }
33381@@ -314,7 +309,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
33382 ctxt->user_regs.ss = __KERNEL_DS;
33383 #ifdef CONFIG_X86_32
33384 ctxt->user_regs.fs = __KERNEL_PERCPU;
33385- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
33386+ savesegment(gs, ctxt->user_regs.gs);
33387 #else
33388 ctxt->gs_base_kernel = per_cpu_offset(cpu);
33389 #endif
33390@@ -324,8 +319,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
33391
33392 {
33393 ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
33394- ctxt->user_regs.ds = __USER_DS;
33395- ctxt->user_regs.es = __USER_DS;
33396+ ctxt->user_regs.ds = __KERNEL_DS;
33397+ ctxt->user_regs.es = __KERNEL_DS;
33398
33399 xen_copy_trap_info(ctxt->trap_ctxt);
33400
33401@@ -370,13 +365,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu, struct task_struct *idle)
33402 int rc;
33403
33404 per_cpu(current_task, cpu) = idle;
33405+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
33406 #ifdef CONFIG_X86_32
33407 irq_ctx_init(cpu);
33408 #else
33409 clear_tsk_thread_flag(idle, TIF_FORK);
33410- per_cpu(kernel_stack, cpu) =
33411- (unsigned long)task_stack_page(idle) -
33412- KERNEL_STACK_OFFSET + THREAD_SIZE;
33413+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
33414 #endif
33415 xen_setup_runstate_info(cpu);
33416 xen_setup_timer(cpu);
33417@@ -651,7 +645,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
33418
33419 void __init xen_smp_init(void)
33420 {
33421- smp_ops = xen_smp_ops;
33422+ memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);
33423 xen_fill_possible_map();
33424 xen_init_spinlocks();
33425 }
33426diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
33427index 33ca6e4..0ded929 100644
33428--- a/arch/x86/xen/xen-asm_32.S
33429+++ b/arch/x86/xen/xen-asm_32.S
33430@@ -84,14 +84,14 @@ ENTRY(xen_iret)
33431 ESP_OFFSET=4 # bytes pushed onto stack
33432
33433 /*
33434- * Store vcpu_info pointer for easy access. Do it this way to
33435- * avoid having to reload %fs
33436+ * Store vcpu_info pointer for easy access.
33437 */
33438 #ifdef CONFIG_SMP
33439- GET_THREAD_INFO(%eax)
33440- movl %ss:TI_cpu(%eax), %eax
33441- movl %ss:__per_cpu_offset(,%eax,4), %eax
33442- mov %ss:xen_vcpu(%eax), %eax
33443+ push %fs
33444+ mov $(__KERNEL_PERCPU), %eax
33445+ mov %eax, %fs
33446+ mov PER_CPU_VAR(xen_vcpu), %eax
33447+ pop %fs
33448 #else
33449 movl %ss:xen_vcpu, %eax
33450 #endif
33451diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
33452index 7faed58..ba4427c 100644
33453--- a/arch/x86/xen/xen-head.S
33454+++ b/arch/x86/xen/xen-head.S
33455@@ -19,6 +19,17 @@ ENTRY(startup_xen)
33456 #ifdef CONFIG_X86_32
33457 mov %esi,xen_start_info
33458 mov $init_thread_union+THREAD_SIZE,%esp
33459+#ifdef CONFIG_SMP
33460+ movl $cpu_gdt_table,%edi
33461+ movl $__per_cpu_load,%eax
33462+ movw %ax,__KERNEL_PERCPU + 2(%edi)
33463+ rorl $16,%eax
33464+ movb %al,__KERNEL_PERCPU + 4(%edi)
33465+ movb %ah,__KERNEL_PERCPU + 7(%edi)
33466+ movl $__per_cpu_end - 1,%eax
33467+ subl $__per_cpu_start,%eax
33468+ movw %ax,__KERNEL_PERCPU + 0(%edi)
33469+#endif
33470 #else
33471 mov %rsi,xen_start_info
33472 mov $init_thread_union+THREAD_SIZE,%rsp
33473diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
33474index a95b417..b6dbd0b 100644
33475--- a/arch/x86/xen/xen-ops.h
33476+++ b/arch/x86/xen/xen-ops.h
33477@@ -10,8 +10,6 @@
33478 extern const char xen_hypervisor_callback[];
33479 extern const char xen_failsafe_callback[];
33480
33481-extern void *xen_initial_gdt;
33482-
33483 struct trap_info;
33484 void xen_copy_trap_info(struct trap_info *traps);
33485
33486diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
33487index 525bd3d..ef888b1 100644
33488--- a/arch/xtensa/variants/dc232b/include/variant/core.h
33489+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
33490@@ -119,9 +119,9 @@
33491 ----------------------------------------------------------------------*/
33492
33493 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
33494-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
33495 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
33496 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
33497+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
33498
33499 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
33500 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
33501diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
33502index 2f33760..835e50a 100644
33503--- a/arch/xtensa/variants/fsf/include/variant/core.h
33504+++ b/arch/xtensa/variants/fsf/include/variant/core.h
33505@@ -11,6 +11,7 @@
33506 #ifndef _XTENSA_CORE_H
33507 #define _XTENSA_CORE_H
33508
33509+#include <linux/const.h>
33510
33511 /****************************************************************************
33512 Parameters Useful for Any Code, USER or PRIVILEGED
33513@@ -112,9 +113,9 @@
33514 ----------------------------------------------------------------------*/
33515
33516 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
33517-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
33518 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
33519 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
33520+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
33521
33522 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
33523 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
33524diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
33525index af00795..2bb8105 100644
33526--- a/arch/xtensa/variants/s6000/include/variant/core.h
33527+++ b/arch/xtensa/variants/s6000/include/variant/core.h
33528@@ -11,6 +11,7 @@
33529 #ifndef _XTENSA_CORE_CONFIGURATION_H
33530 #define _XTENSA_CORE_CONFIGURATION_H
33531
33532+#include <linux/const.h>
33533
33534 /****************************************************************************
33535 Parameters Useful for Any Code, USER or PRIVILEGED
33536@@ -118,9 +119,9 @@
33537 ----------------------------------------------------------------------*/
33538
33539 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
33540-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
33541 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
33542 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
33543+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
33544
33545 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
33546 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
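
All three xtensa core variants get the same treatment: the D-cache line size is re-derived from its log2 width through _AC(), so the constant carries a UL suffix in C code while staying a plain number when the header is pulled into assembly. The resulting pattern:

        #include <linux/const.h>

        #define XCHAL_DCACHE_LINEWIDTH  5       /* log2(D line size in bytes) */
        #define XCHAL_DCACHE_LINESIZE   (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH)
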
33547diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
33548index 58916af..eb9dbcf6 100644
33549--- a/block/blk-iopoll.c
33550+++ b/block/blk-iopoll.c
33551@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
33552 }
33553 EXPORT_SYMBOL(blk_iopoll_complete);
33554
33555-static void blk_iopoll_softirq(struct softirq_action *h)
33556+static void blk_iopoll_softirq(void)
33557 {
33558 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
33559 int rearm = 0, budget = blk_iopoll_budget;
33560@@ -209,7 +209,7 @@ static int __cpuinit blk_iopoll_cpu_notify(struct notifier_block *self,
33561 return NOTIFY_OK;
33562 }
33563
33564-static struct notifier_block __cpuinitdata blk_iopoll_cpu_notifier = {
33565+static struct notifier_block blk_iopoll_cpu_notifier = {
33566 .notifier_call = blk_iopoll_cpu_notify,
33567 };
33568
33569diff --git a/block/blk-map.c b/block/blk-map.c
33570index 623e1cd..ca1e109 100644
33571--- a/block/blk-map.c
33572+++ b/block/blk-map.c
33573@@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
33574 if (!len || !kbuf)
33575 return -EINVAL;
33576
33577- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
33578+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
33579 if (do_copy)
33580 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
33581 else
33582diff --git a/block/blk-softirq.c b/block/blk-softirq.c
33583index 467c8de..f3628c5 100644
33584--- a/block/blk-softirq.c
33585+++ b/block/blk-softirq.c
33586@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
33587 * Softirq action handler - move entries to local list and loop over them
33588 * while passing them to the queue registered handler.
33589 */
33590-static void blk_done_softirq(struct softirq_action *h)
33591+static void blk_done_softirq(void)
33592 {
33593 struct list_head *cpu_list, local_list;
33594
33595@@ -98,7 +98,7 @@ static int __cpuinit blk_cpu_notify(struct notifier_block *self,
33596 return NOTIFY_OK;
33597 }
33598
33599-static struct notifier_block __cpuinitdata blk_cpu_notifier = {
33600+static struct notifier_block blk_cpu_notifier = {
33601 .notifier_call = blk_cpu_notify,
33602 };
33603
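
Both block softirq handlers lose their struct softirq_action * parameter; nothing ever used it, and dropping it lets the softirq vector table be constified with exact function types elsewhere in this patch. The mechanical signature change:

        /* before */ static void blk_done_softirq(struct softirq_action *h);
        /* after  */ static void blk_done_softirq(void);
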
33604diff --git a/block/bsg.c b/block/bsg.c
33605index 420a5a9..23834aa 100644
33606--- a/block/bsg.c
33607+++ b/block/bsg.c
33608@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
33609 struct sg_io_v4 *hdr, struct bsg_device *bd,
33610 fmode_t has_write_perm)
33611 {
33612+ unsigned char tmpcmd[sizeof(rq->__cmd)];
33613+ unsigned char *cmdptr;
33614+
33615 if (hdr->request_len > BLK_MAX_CDB) {
33616 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
33617 if (!rq->cmd)
33618 return -ENOMEM;
33619- }
33620+ cmdptr = rq->cmd;
33621+ } else
33622+ cmdptr = tmpcmd;
33623
33624- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
33625+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
33626 hdr->request_len))
33627 return -EFAULT;
33628
33629+ if (cmdptr != rq->cmd)
33630+ memcpy(rq->cmd, cmdptr, hdr->request_len);
33631+
33632 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
33633 if (blk_verify_command(rq->cmd, has_write_perm))
33634 return -EPERM;
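
blk_fill_sgv4_hdr_rq() used to copy_from_user() straight into rq->cmd with a user-supplied length; the hardened version stages short commands through a stack buffer sized exactly like the inline rq->__cmd, so the copy can never run past the inline buffer, and only commits the bytes with memcpy() afterwards. scsi_ioctl.c below gets the same tmpcmd treatment. A reduced sketch of the pattern (user_cmd and len stand in for the per-call source and length):

        unsigned char tmpcmd[sizeof(rq->__cmd)];
        unsigned char *cmdptr;

        /* heap-allocated commands are already sized to the request */
        cmdptr = (rq->cmd != rq->__cmd) ? rq->cmd : tmpcmd;

        if (copy_from_user(cmdptr, user_cmd, len))
                return -EFAULT;
        if (cmdptr != rq->cmd)
                memcpy(rq->cmd, cmdptr, len);
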
33635diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
33636index 7c668c8..db3521c 100644
33637--- a/block/compat_ioctl.c
33638+++ b/block/compat_ioctl.c
33639@@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
33640 err |= __get_user(f->spec1, &uf->spec1);
33641 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
33642 err |= __get_user(name, &uf->name);
33643- f->name = compat_ptr(name);
33644+ f->name = (void __force_kernel *)compat_ptr(name);
33645 if (err) {
33646 err = -EFAULT;
33647 goto out;
33648diff --git a/block/genhd.c b/block/genhd.c
33649index cdeb527..10aa34db 100644
33650--- a/block/genhd.c
33651+++ b/block/genhd.c
33652@@ -467,21 +467,24 @@ static char *bdevt_str(dev_t devt, char *buf)
33653
33654 /*
33655 * Register device numbers dev..(dev+range-1)
33656- * range must be nonzero
 33657+ * No-op if @range is zero.
33658 * The hash chain is sorted on range, so that subranges can override.
33659 */
33660 void blk_register_region(dev_t devt, unsigned long range, struct module *module,
33661 struct kobject *(*probe)(dev_t, int *, void *),
33662 int (*lock)(dev_t, void *), void *data)
33663 {
33664- kobj_map(bdev_map, devt, range, module, probe, lock, data);
33665+ if (range)
33666+ kobj_map(bdev_map, devt, range, module, probe, lock, data);
33667 }
33668
33669 EXPORT_SYMBOL(blk_register_region);
33670
 33671+/* undo blk_register_region(); no-op if @range is zero */
33672 void blk_unregister_region(dev_t devt, unsigned long range)
33673 {
33674- kobj_unmap(bdev_map, devt, range);
33675+ if (range)
33676+ kobj_unmap(bdev_map, devt, range);
33677 }
33678
33679 EXPORT_SYMBOL(blk_unregister_region);
33680diff --git a/block/partitions/efi.c b/block/partitions/efi.c
33681index c85fc89..51e690b 100644
33682--- a/block/partitions/efi.c
33683+++ b/block/partitions/efi.c
33684@@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
33685 if (!gpt)
33686 return NULL;
33687
33688+ if (!le32_to_cpu(gpt->num_partition_entries))
33689+ return NULL;
33690+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
33691+ if (!pte)
33692+ return NULL;
33693+
33694 count = le32_to_cpu(gpt->num_partition_entries) *
33695 le32_to_cpu(gpt->sizeof_partition_entry);
33696- if (!count)
33697- return NULL;
33698- pte = kmalloc(count, GFP_KERNEL);
33699- if (!pte)
33700- return NULL;
33701-
33702 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
33703 (u8 *) pte,
33704 count) < count) {
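
The reordered allocation closes an integer-overflow window: num_partition_entries and sizeof_partition_entry both come from the on-disk GPT header, and the old count = a * b could wrap before kmalloc(count), leaving a short buffer for read_lba() to overrun. kcalloc() performs the multiplication with an overflow check and returns NULL instead. The general rule:

        /* unsafe: n * size may wrap to a small value before allocating */
        buf = kmalloc(n * size, GFP_KERNEL);

        /* safe: kcalloc() fails cleanly if n * size would overflow */
        buf = kcalloc(n, size, GFP_KERNEL);
        if (!buf)
                return NULL;
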
33705diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
33706index a5ffcc9..3cedc9c 100644
33707--- a/block/scsi_ioctl.c
33708+++ b/block/scsi_ioctl.c
33709@@ -224,8 +224,20 @@ EXPORT_SYMBOL(blk_verify_command);
33710 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
33711 struct sg_io_hdr *hdr, fmode_t mode)
33712 {
33713- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
33714+ unsigned char tmpcmd[sizeof(rq->__cmd)];
33715+ unsigned char *cmdptr;
33716+
33717+ if (rq->cmd != rq->__cmd)
33718+ cmdptr = rq->cmd;
33719+ else
33720+ cmdptr = tmpcmd;
33721+
33722+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
33723 return -EFAULT;
33724+
33725+ if (cmdptr != rq->cmd)
33726+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
33727+
33728 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
33729 return -EPERM;
33730
33731@@ -434,6 +446,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
33732 int err;
33733 unsigned int in_len, out_len, bytes, opcode, cmdlen;
33734 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
33735+ unsigned char tmpcmd[sizeof(rq->__cmd)];
33736+ unsigned char *cmdptr;
33737
33738 if (!sic)
33739 return -EINVAL;
33740@@ -467,9 +481,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
33741 */
33742 err = -EFAULT;
33743 rq->cmd_len = cmdlen;
33744- if (copy_from_user(rq->cmd, sic->data, cmdlen))
33745+
33746+ if (rq->cmd != rq->__cmd)
33747+ cmdptr = rq->cmd;
33748+ else
33749+ cmdptr = tmpcmd;
33750+
33751+ if (copy_from_user(cmdptr, sic->data, cmdlen))
33752 goto error;
33753
33754+ if (rq->cmd != cmdptr)
33755+ memcpy(rq->cmd, cmdptr, cmdlen);
33756+
33757 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
33758 goto error;
33759
33760diff --git a/crypto/cryptd.c b/crypto/cryptd.c
33761index 7bdd61b..afec999 100644
33762--- a/crypto/cryptd.c
33763+++ b/crypto/cryptd.c
33764@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
33765
33766 struct cryptd_blkcipher_request_ctx {
33767 crypto_completion_t complete;
33768-};
33769+} __no_const;
33770
33771 struct cryptd_hash_ctx {
33772 struct crypto_shash *child;
33773@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
33774
33775 struct cryptd_aead_request_ctx {
33776 crypto_completion_t complete;
33777-};
33778+} __no_const;
33779
33780 static void cryptd_queue_worker(struct work_struct *work);
33781
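
__no_const is the escape hatch for the constify plugin: structures consisting only of function pointers are normally forced into read-only memory, but these request contexts rewrite their completion pointer per request, so the type opts out. Compare apei-internal.h just below, where __do_const goes the other way and pins struct apei_exec_ins_type read-only. The annotation in isolation:

        struct cryptd_blkcipher_request_ctx {
                crypto_completion_t complete;   /* reassigned per request */
        } __no_const;                           /* keep it out of .rodata */
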
33782diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
33783index b2c99dc..476c9fb 100644
33784--- a/crypto/pcrypt.c
33785+++ b/crypto/pcrypt.c
33786@@ -440,7 +440,7 @@ static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
33787 int ret;
33788
33789 pinst->kobj.kset = pcrypt_kset;
33790- ret = kobject_add(&pinst->kobj, NULL, name);
33791+ ret = kobject_add(&pinst->kobj, NULL, "%s", name);
33792 if (!ret)
33793 kobject_uevent(&pinst->kobj, KOBJ_ADD);
33794
33795@@ -455,8 +455,8 @@ static int pcrypt_init_padata(struct padata_pcrypt *pcrypt,
33796
33797 get_online_cpus();
33798
33799- pcrypt->wq = alloc_workqueue(name,
33800- WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
33801+ pcrypt->wq = alloc_workqueue("%s",
33802+ WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1, name);
33803 if (!pcrypt->wq)
33804 goto err;
33805
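
Both pcrypt hunks fix the same bug class: kobject_add() and alloc_workqueue() take printf-style format strings, so passing a caller-supplied name directly lets any '%' sequence in it be interpreted as a conversion. The mechanical fix:

        /* unsafe: name itself is the format string */
        ret = kobject_add(&pinst->kobj, NULL, name);

        /* safe: constant format, name is an ordinary argument */
        ret = kobject_add(&pinst->kobj, NULL, "%s", name);
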
33806diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
33807index f220d64..d359ad6 100644
33808--- a/drivers/acpi/apei/apei-internal.h
33809+++ b/drivers/acpi/apei/apei-internal.h
33810@@ -20,7 +20,7 @@ typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
33811 struct apei_exec_ins_type {
33812 u32 flags;
33813 apei_exec_ins_func_t run;
33814-};
33815+} __do_const;
33816
33817 struct apei_exec_context {
33818 u32 ip;
33819diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
33820index 33dc6a0..4b24b47 100644
33821--- a/drivers/acpi/apei/cper.c
33822+++ b/drivers/acpi/apei/cper.c
33823@@ -39,12 +39,12 @@
33824 */
33825 u64 cper_next_record_id(void)
33826 {
33827- static atomic64_t seq;
33828+ static atomic64_unchecked_t seq;
33829
33830- if (!atomic64_read(&seq))
33831- atomic64_set(&seq, ((u64)get_seconds()) << 32);
33832+ if (!atomic64_read_unchecked(&seq))
33833+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
33834
33835- return atomic64_inc_return(&seq);
33836+ return atomic64_inc_return_unchecked(&seq);
33837 }
33838 EXPORT_SYMBOL_GPL(cper_next_record_id);
33839
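
With PAX_REFCOUNT, atomic64_t operations trap on overflow to catch reference-count bugs; counters whose wraparound is benign, like this record-ID sequence, are moved to the *_unchecked variants so the protection never fires on them. The resulting convention (atomic64_unchecked_t is introduced elsewhere in this patch):

        static atomic64_unchecked_t seq;

        u64 next_record_id(void)
        {
                if (!atomic64_read_unchecked(&seq))
                        atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
                return atomic64_inc_return_unchecked(&seq);
        }
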
33840diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
33841index be60399..778b33e8 100644
33842--- a/drivers/acpi/bgrt.c
33843+++ b/drivers/acpi/bgrt.c
33844@@ -87,8 +87,10 @@ static int __init bgrt_init(void)
33845 return -ENODEV;
33846
33847 sysfs_bin_attr_init(&image_attr);
33848- image_attr.private = bgrt_image;
33849- image_attr.size = bgrt_image_size;
33850+ pax_open_kernel();
33851+ *(void **)&image_attr.private = bgrt_image;
33852+ *(size_t *)&image_attr.size = bgrt_image_size;
33853+ pax_close_kernel();
33854
33855 bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
33856 if (!bgrt_kobj)
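
image_attr lives in memory the constify plugin marks read-only, so plain member assignment would fail to compile (const) and fault at runtime; pax_open_kernel()/pax_close_kernel() open a brief write window, and the stores go through casts that drop the implied const. The same idiom recurs in libata-core.c and pata_arasan_cf.c below:

        pax_open_kernel();
        *(void **)&image_attr.private = bgrt_image;     /* write via non-const view */
        *(size_t *)&image_attr.size   = bgrt_image_size;
        pax_close_kernel();
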
33857diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
33858index cb96296..b81293b 100644
33859--- a/drivers/acpi/blacklist.c
33860+++ b/drivers/acpi/blacklist.c
33861@@ -52,7 +52,7 @@ struct acpi_blacklist_item {
33862 u32 is_critical_error;
33863 };
33864
33865-static struct dmi_system_id acpi_osi_dmi_table[] __initdata;
33866+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst;
33867
33868 /*
33869 * POLICY: If *anything* doesn't work, put it on the blacklist.
33870@@ -193,7 +193,7 @@ static int __init dmi_disable_osi_win7(const struct dmi_system_id *d)
33871 return 0;
33872 }
33873
33874-static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
33875+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
33876 {
33877 .callback = dmi_disable_osi_vista,
33878 .ident = "Fujitsu Siemens",
33879diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
33880index 7586544..636a2f0 100644
33881--- a/drivers/acpi/ec_sys.c
33882+++ b/drivers/acpi/ec_sys.c
33883@@ -12,6 +12,7 @@
33884 #include <linux/acpi.h>
33885 #include <linux/debugfs.h>
33886 #include <linux/module.h>
33887+#include <linux/uaccess.h>
33888 #include "internal.h"
33889
33890 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
33891@@ -34,7 +35,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
33892 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
33893 */
33894 unsigned int size = EC_SPACE_SIZE;
33895- u8 *data = (u8 *) buf;
33896+ u8 data;
33897 loff_t init_off = *off;
33898 int err = 0;
33899
33900@@ -47,9 +48,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
33901 size = count;
33902
33903 while (size) {
33904- err = ec_read(*off, &data[*off - init_off]);
33905+ err = ec_read(*off, &data);
33906 if (err)
33907 return err;
33908+ if (put_user(data, &buf[*off - init_off]))
33909+ return -EFAULT;
33910 *off += 1;
33911 size--;
33912 }
33913@@ -65,7 +68,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
33914
33915 unsigned int size = count;
33916 loff_t init_off = *off;
33917- u8 *data = (u8 *) buf;
33918 int err = 0;
33919
33920 if (*off >= EC_SPACE_SIZE)
33921@@ -76,7 +78,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
33922 }
33923
33924 while (size) {
33925- u8 byte_write = data[*off - init_off];
33926+ u8 byte_write;
33927+ if (get_user(byte_write, &buf[*off - init_off]))
33928+ return -EFAULT;
33929 err = ec_write(*off, byte_write);
33930 if (err)
33931 return err;
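
The old read/write paths cast the __user buffer to u8 * and dereferenced it from kernel context, an address-space violation that oopses (or worse) under SMAP; the fix moves each byte with get_user()/put_user(), which run through the exception-fixup machinery. Reduced sketch of the read side:

        u8 data;

        while (size) {
                err = ec_read(*off, &data);             /* one EC register */
                if (err)
                        return err;
                if (put_user(data, &buf[*off - init_off]))
                        return -EFAULT;                 /* user page not writable */
                *off += 1;
                size--;
        }
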
33932diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
33933index eb133c7..f571552 100644
33934--- a/drivers/acpi/processor_idle.c
33935+++ b/drivers/acpi/processor_idle.c
33936@@ -994,7 +994,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
33937 {
33938 int i, count = CPUIDLE_DRIVER_STATE_START;
33939 struct acpi_processor_cx *cx;
33940- struct cpuidle_state *state;
33941+ cpuidle_state_no_const *state;
33942 struct cpuidle_driver *drv = &acpi_idle_driver;
33943
33944 if (!pr->flags.power_setup_done)
33945diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
33946index fcae5fa..e9f71ea 100644
33947--- a/drivers/acpi/sysfs.c
33948+++ b/drivers/acpi/sysfs.c
33949@@ -423,11 +423,11 @@ static u32 num_counters;
33950 static struct attribute **all_attrs;
33951 static u32 acpi_gpe_count;
33952
33953-static struct attribute_group interrupt_stats_attr_group = {
33954+static attribute_group_no_const interrupt_stats_attr_group = {
33955 .name = "interrupts",
33956 };
33957
33958-static struct kobj_attribute *counter_attrs;
33959+static kobj_attribute_no_const *counter_attrs;
33960
33961 static void delete_gpe_attr_array(void)
33962 {
33963diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
33964index 7b9bdd8..37638ca 100644
33965--- a/drivers/ata/libahci.c
33966+++ b/drivers/ata/libahci.c
33967@@ -1230,7 +1230,7 @@ int ahci_kick_engine(struct ata_port *ap)
33968 }
33969 EXPORT_SYMBOL_GPL(ahci_kick_engine);
33970
33971-static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
33972+static int __intentional_overflow(-1) ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
33973 struct ata_taskfile *tf, int is_cmd, u16 flags,
33974 unsigned long timeout_msec)
33975 {
33976diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
33977index adf002a..39bb8f9 100644
33978--- a/drivers/ata/libata-core.c
33979+++ b/drivers/ata/libata-core.c
33980@@ -4792,7 +4792,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
33981 struct ata_port *ap;
33982 unsigned int tag;
33983
33984- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
33985+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
33986 ap = qc->ap;
33987
33988 qc->flags = 0;
33989@@ -4808,7 +4808,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
33990 struct ata_port *ap;
33991 struct ata_link *link;
33992
33993- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
33994+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
33995 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
33996 ap = qc->ap;
33997 link = qc->dev->link;
33998@@ -5926,6 +5926,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
33999 return;
34000
34001 spin_lock(&lock);
34002+ pax_open_kernel();
34003
34004 for (cur = ops->inherits; cur; cur = cur->inherits) {
34005 void **inherit = (void **)cur;
34006@@ -5939,8 +5940,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
34007 if (IS_ERR(*pp))
34008 *pp = NULL;
34009
34010- ops->inherits = NULL;
34011+ *(struct ata_port_operations **)&ops->inherits = NULL;
34012
34013+ pax_close_kernel();
34014 spin_unlock(&lock);
34015 }
34016
34017diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
34018index 7638121..357a965 100644
34019--- a/drivers/ata/pata_arasan_cf.c
34020+++ b/drivers/ata/pata_arasan_cf.c
34021@@ -865,7 +865,9 @@ static int arasan_cf_probe(struct platform_device *pdev)
34022 /* Handle platform specific quirks */
34023 if (quirk) {
34024 if (quirk & CF_BROKEN_PIO) {
34025- ap->ops->set_piomode = NULL;
34026+ pax_open_kernel();
34027+ *(void **)&ap->ops->set_piomode = NULL;
34028+ pax_close_kernel();
34029 ap->pio_mask = 0;
34030 }
34031 if (quirk & CF_BROKEN_MWDMA)
34032diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
34033index f9b983a..887b9d8 100644
34034--- a/drivers/atm/adummy.c
34035+++ b/drivers/atm/adummy.c
34036@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
34037 vcc->pop(vcc, skb);
34038 else
34039 dev_kfree_skb_any(skb);
34040- atomic_inc(&vcc->stats->tx);
34041+ atomic_inc_unchecked(&vcc->stats->tx);
34042
34043 return 0;
34044 }
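
The adummy hunk opens a long, mechanical run through the ATM drivers: every VCC statistics update moves from atomic_inc()/atomic_add() to the *_unchecked variants. Under PAX_REFCOUNT, plain atomic_t arithmetic is instrumented to trap on overflow (defeating refcount-wrap exploits), so counters that are allowed to wrap, such as tx/rx tallies, are retyped atomic_unchecked_t with uninstrumented operations. A small userspace rendering of the two flavors; the kernel does the check inside its asm, this sketch approximates it with a C11-style atomic add:

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

typedef struct { volatile int counter; } atomic_t;
typedef struct { volatile int counter; } atomic_unchecked_t;

static void atomic_inc(atomic_t *v)      /* checked: trap on wrap    */
{
        if (__atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED)
            == INT_MAX)
                abort();   /* kernel raises a trap and logs instead  */
}

static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
        __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
}

int main(void)
{
        atomic_unchecked_t rx = { INT_MAX };
        atomic_inc_unchecked(&rx);       /* statistics may wrap      */
        printf("rx=%d\n", rx.counter);   /* INT_MIN, and that is fine */

        atomic_t ref = { 0 };
        atomic_inc(&ref);                /* refcounts stay checked   */
        return 0;
}
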
34045diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
34046index 77a7480d..05cde58 100644
34047--- a/drivers/atm/ambassador.c
34048+++ b/drivers/atm/ambassador.c
34049@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
34050 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
34051
34052 // VC layer stats
34053- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
34054+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
34055
34056 // free the descriptor
34057 kfree (tx_descr);
34058@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
34059 dump_skb ("<<<", vc, skb);
34060
34061 // VC layer stats
34062- atomic_inc(&atm_vcc->stats->rx);
34063+ atomic_inc_unchecked(&atm_vcc->stats->rx);
34064 __net_timestamp(skb);
34065 // end of our responsibility
34066 atm_vcc->push (atm_vcc, skb);
34067@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
34068 } else {
34069 PRINTK (KERN_INFO, "dropped over-size frame");
34070 // should we count this?
34071- atomic_inc(&atm_vcc->stats->rx_drop);
34072+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
34073 }
34074
34075 } else {
34076@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
34077 }
34078
34079 if (check_area (skb->data, skb->len)) {
34080- atomic_inc(&atm_vcc->stats->tx_err);
34081+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
34082 return -ENOMEM; // ?
34083 }
34084
34085diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
34086index 0e3f8f9..765a7a5 100644
34087--- a/drivers/atm/atmtcp.c
34088+++ b/drivers/atm/atmtcp.c
34089@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
34090 if (vcc->pop) vcc->pop(vcc,skb);
34091 else dev_kfree_skb(skb);
34092 if (dev_data) return 0;
34093- atomic_inc(&vcc->stats->tx_err);
34094+ atomic_inc_unchecked(&vcc->stats->tx_err);
34095 return -ENOLINK;
34096 }
34097 size = skb->len+sizeof(struct atmtcp_hdr);
34098@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
34099 if (!new_skb) {
34100 if (vcc->pop) vcc->pop(vcc,skb);
34101 else dev_kfree_skb(skb);
34102- atomic_inc(&vcc->stats->tx_err);
34103+ atomic_inc_unchecked(&vcc->stats->tx_err);
34104 return -ENOBUFS;
34105 }
34106 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
34107@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
34108 if (vcc->pop) vcc->pop(vcc,skb);
34109 else dev_kfree_skb(skb);
34110 out_vcc->push(out_vcc,new_skb);
34111- atomic_inc(&vcc->stats->tx);
34112- atomic_inc(&out_vcc->stats->rx);
34113+ atomic_inc_unchecked(&vcc->stats->tx);
34114+ atomic_inc_unchecked(&out_vcc->stats->rx);
34115 return 0;
34116 }
34117
34118@@ -299,7 +299,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
34119 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
34120 read_unlock(&vcc_sklist_lock);
34121 if (!out_vcc) {
34122- atomic_inc(&vcc->stats->tx_err);
34123+ atomic_inc_unchecked(&vcc->stats->tx_err);
34124 goto done;
34125 }
34126 skb_pull(skb,sizeof(struct atmtcp_hdr));
34127@@ -311,8 +311,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
34128 __net_timestamp(new_skb);
34129 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
34130 out_vcc->push(out_vcc,new_skb);
34131- atomic_inc(&vcc->stats->tx);
34132- atomic_inc(&out_vcc->stats->rx);
34133+ atomic_inc_unchecked(&vcc->stats->tx);
34134+ atomic_inc_unchecked(&out_vcc->stats->rx);
34135 done:
34136 if (vcc->pop) vcc->pop(vcc,skb);
34137 else dev_kfree_skb(skb);
34138diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
34139index b1955ba..b179940 100644
34140--- a/drivers/atm/eni.c
34141+++ b/drivers/atm/eni.c
34142@@ -522,7 +522,7 @@ static int rx_aal0(struct atm_vcc *vcc)
34143 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
34144 vcc->dev->number);
34145 length = 0;
34146- atomic_inc(&vcc->stats->rx_err);
34147+ atomic_inc_unchecked(&vcc->stats->rx_err);
34148 }
34149 else {
34150 length = ATM_CELL_SIZE-1; /* no HEC */
34151@@ -577,7 +577,7 @@ static int rx_aal5(struct atm_vcc *vcc)
34152 size);
34153 }
34154 eff = length = 0;
34155- atomic_inc(&vcc->stats->rx_err);
34156+ atomic_inc_unchecked(&vcc->stats->rx_err);
34157 }
34158 else {
34159 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
34160@@ -594,7 +594,7 @@ static int rx_aal5(struct atm_vcc *vcc)
34161 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
34162 vcc->dev->number,vcc->vci,length,size << 2,descr);
34163 length = eff = 0;
34164- atomic_inc(&vcc->stats->rx_err);
34165+ atomic_inc_unchecked(&vcc->stats->rx_err);
34166 }
34167 }
34168 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
34169@@ -767,7 +767,7 @@ rx_dequeued++;
34170 vcc->push(vcc,skb);
34171 pushed++;
34172 }
34173- atomic_inc(&vcc->stats->rx);
34174+ atomic_inc_unchecked(&vcc->stats->rx);
34175 }
34176 wake_up(&eni_dev->rx_wait);
34177 }
34178@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
34179 PCI_DMA_TODEVICE);
34180 if (vcc->pop) vcc->pop(vcc,skb);
34181 else dev_kfree_skb_irq(skb);
34182- atomic_inc(&vcc->stats->tx);
34183+ atomic_inc_unchecked(&vcc->stats->tx);
34184 wake_up(&eni_dev->tx_wait);
34185 dma_complete++;
34186 }
34187diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
34188index b41c948..a002b17 100644
34189--- a/drivers/atm/firestream.c
34190+++ b/drivers/atm/firestream.c
34191@@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
34192 }
34193 }
34194
34195- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
34196+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
34197
34198 fs_dprintk (FS_DEBUG_TXMEM, "i");
34199 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
34200@@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
34201 #endif
34202 skb_put (skb, qe->p1 & 0xffff);
34203 ATM_SKB(skb)->vcc = atm_vcc;
34204- atomic_inc(&atm_vcc->stats->rx);
34205+ atomic_inc_unchecked(&atm_vcc->stats->rx);
34206 __net_timestamp(skb);
34207 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
34208 atm_vcc->push (atm_vcc, skb);
34209@@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
34210 kfree (pe);
34211 }
34212 if (atm_vcc)
34213- atomic_inc(&atm_vcc->stats->rx_drop);
34214+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
34215 break;
34216 case 0x1f: /* Reassembly abort: no buffers. */
34217 /* Silently increment error counter. */
34218 if (atm_vcc)
34219- atomic_inc(&atm_vcc->stats->rx_drop);
34220+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
34221 break;
34222 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
34223 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
34224diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
34225index 204814e..cede831 100644
34226--- a/drivers/atm/fore200e.c
34227+++ b/drivers/atm/fore200e.c
34228@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
34229 #endif
34230 /* check error condition */
34231 if (*entry->status & STATUS_ERROR)
34232- atomic_inc(&vcc->stats->tx_err);
34233+ atomic_inc_unchecked(&vcc->stats->tx_err);
34234 else
34235- atomic_inc(&vcc->stats->tx);
34236+ atomic_inc_unchecked(&vcc->stats->tx);
34237 }
34238 }
34239
34240@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
34241 if (skb == NULL) {
34242 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
34243
34244- atomic_inc(&vcc->stats->rx_drop);
34245+ atomic_inc_unchecked(&vcc->stats->rx_drop);
34246 return -ENOMEM;
34247 }
34248
34249@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
34250
34251 dev_kfree_skb_any(skb);
34252
34253- atomic_inc(&vcc->stats->rx_drop);
34254+ atomic_inc_unchecked(&vcc->stats->rx_drop);
34255 return -ENOMEM;
34256 }
34257
34258 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
34259
34260 vcc->push(vcc, skb);
34261- atomic_inc(&vcc->stats->rx);
34262+ atomic_inc_unchecked(&vcc->stats->rx);
34263
34264 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
34265
34266@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
34267 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
34268 fore200e->atm_dev->number,
34269 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
34270- atomic_inc(&vcc->stats->rx_err);
34271+ atomic_inc_unchecked(&vcc->stats->rx_err);
34272 }
34273 }
34274
34275@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
34276 goto retry_here;
34277 }
34278
34279- atomic_inc(&vcc->stats->tx_err);
34280+ atomic_inc_unchecked(&vcc->stats->tx_err);
34281
34282 fore200e->tx_sat++;
34283 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
34284diff --git a/drivers/atm/he.c b/drivers/atm/he.c
34285index 507362a..a845e57 100644
34286--- a/drivers/atm/he.c
34287+++ b/drivers/atm/he.c
34288@@ -1698,7 +1698,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
34289
34290 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
34291 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
34292- atomic_inc(&vcc->stats->rx_drop);
34293+ atomic_inc_unchecked(&vcc->stats->rx_drop);
34294 goto return_host_buffers;
34295 }
34296
34297@@ -1725,7 +1725,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
34298 RBRQ_LEN_ERR(he_dev->rbrq_head)
34299 ? "LEN_ERR" : "",
34300 vcc->vpi, vcc->vci);
34301- atomic_inc(&vcc->stats->rx_err);
34302+ atomic_inc_unchecked(&vcc->stats->rx_err);
34303 goto return_host_buffers;
34304 }
34305
34306@@ -1777,7 +1777,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
34307 vcc->push(vcc, skb);
34308 spin_lock(&he_dev->global_lock);
34309
34310- atomic_inc(&vcc->stats->rx);
34311+ atomic_inc_unchecked(&vcc->stats->rx);
34312
34313 return_host_buffers:
34314 ++pdus_assembled;
34315@@ -2103,7 +2103,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
34316 tpd->vcc->pop(tpd->vcc, tpd->skb);
34317 else
34318 dev_kfree_skb_any(tpd->skb);
34319- atomic_inc(&tpd->vcc->stats->tx_err);
34320+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
34321 }
34322 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
34323 return;
34324@@ -2515,7 +2515,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
34325 vcc->pop(vcc, skb);
34326 else
34327 dev_kfree_skb_any(skb);
34328- atomic_inc(&vcc->stats->tx_err);
34329+ atomic_inc_unchecked(&vcc->stats->tx_err);
34330 return -EINVAL;
34331 }
34332
34333@@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
34334 vcc->pop(vcc, skb);
34335 else
34336 dev_kfree_skb_any(skb);
34337- atomic_inc(&vcc->stats->tx_err);
34338+ atomic_inc_unchecked(&vcc->stats->tx_err);
34339 return -EINVAL;
34340 }
34341 #endif
34342@@ -2538,7 +2538,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
34343 vcc->pop(vcc, skb);
34344 else
34345 dev_kfree_skb_any(skb);
34346- atomic_inc(&vcc->stats->tx_err);
34347+ atomic_inc_unchecked(&vcc->stats->tx_err);
34348 spin_unlock_irqrestore(&he_dev->global_lock, flags);
34349 return -ENOMEM;
34350 }
34351@@ -2580,7 +2580,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
34352 vcc->pop(vcc, skb);
34353 else
34354 dev_kfree_skb_any(skb);
34355- atomic_inc(&vcc->stats->tx_err);
34356+ atomic_inc_unchecked(&vcc->stats->tx_err);
34357 spin_unlock_irqrestore(&he_dev->global_lock, flags);
34358 return -ENOMEM;
34359 }
34360@@ -2611,7 +2611,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
34361 __enqueue_tpd(he_dev, tpd, cid);
34362 spin_unlock_irqrestore(&he_dev->global_lock, flags);
34363
34364- atomic_inc(&vcc->stats->tx);
34365+ atomic_inc_unchecked(&vcc->stats->tx);
34366
34367 return 0;
34368 }
34369diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
34370index 1dc0519..1aadaf7 100644
34371--- a/drivers/atm/horizon.c
34372+++ b/drivers/atm/horizon.c
34373@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
34374 {
34375 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
34376 // VC layer stats
34377- atomic_inc(&vcc->stats->rx);
34378+ atomic_inc_unchecked(&vcc->stats->rx);
34379 __net_timestamp(skb);
34380 // end of our responsibility
34381 vcc->push (vcc, skb);
34382@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
34383 dev->tx_iovec = NULL;
34384
34385 // VC layer stats
34386- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
34387+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
34388
34389 // free the skb
34390 hrz_kfree_skb (skb);
34391diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
34392index 272f009..a18ba55 100644
34393--- a/drivers/atm/idt77252.c
34394+++ b/drivers/atm/idt77252.c
34395@@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
34396 else
34397 dev_kfree_skb(skb);
34398
34399- atomic_inc(&vcc->stats->tx);
34400+ atomic_inc_unchecked(&vcc->stats->tx);
34401 }
34402
34403 atomic_dec(&scq->used);
34404@@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
34405 if ((sb = dev_alloc_skb(64)) == NULL) {
34406 printk("%s: Can't allocate buffers for aal0.\n",
34407 card->name);
34408- atomic_add(i, &vcc->stats->rx_drop);
34409+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
34410 break;
34411 }
34412 if (!atm_charge(vcc, sb->truesize)) {
34413 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
34414 card->name);
34415- atomic_add(i - 1, &vcc->stats->rx_drop);
34416+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
34417 dev_kfree_skb(sb);
34418 break;
34419 }
34420@@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
34421 ATM_SKB(sb)->vcc = vcc;
34422 __net_timestamp(sb);
34423 vcc->push(vcc, sb);
34424- atomic_inc(&vcc->stats->rx);
34425+ atomic_inc_unchecked(&vcc->stats->rx);
34426
34427 cell += ATM_CELL_PAYLOAD;
34428 }
34429@@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
34430 "(CDC: %08x)\n",
34431 card->name, len, rpp->len, readl(SAR_REG_CDC));
34432 recycle_rx_pool_skb(card, rpp);
34433- atomic_inc(&vcc->stats->rx_err);
34434+ atomic_inc_unchecked(&vcc->stats->rx_err);
34435 return;
34436 }
34437 if (stat & SAR_RSQE_CRC) {
34438 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
34439 recycle_rx_pool_skb(card, rpp);
34440- atomic_inc(&vcc->stats->rx_err);
34441+ atomic_inc_unchecked(&vcc->stats->rx_err);
34442 return;
34443 }
34444 if (skb_queue_len(&rpp->queue) > 1) {
34445@@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
34446 RXPRINTK("%s: Can't alloc RX skb.\n",
34447 card->name);
34448 recycle_rx_pool_skb(card, rpp);
34449- atomic_inc(&vcc->stats->rx_err);
34450+ atomic_inc_unchecked(&vcc->stats->rx_err);
34451 return;
34452 }
34453 if (!atm_charge(vcc, skb->truesize)) {
34454@@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
34455 __net_timestamp(skb);
34456
34457 vcc->push(vcc, skb);
34458- atomic_inc(&vcc->stats->rx);
34459+ atomic_inc_unchecked(&vcc->stats->rx);
34460
34461 return;
34462 }
34463@@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
34464 __net_timestamp(skb);
34465
34466 vcc->push(vcc, skb);
34467- atomic_inc(&vcc->stats->rx);
34468+ atomic_inc_unchecked(&vcc->stats->rx);
34469
34470 if (skb->truesize > SAR_FB_SIZE_3)
34471 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
34472@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
34473 if (vcc->qos.aal != ATM_AAL0) {
34474 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
34475 card->name, vpi, vci);
34476- atomic_inc(&vcc->stats->rx_drop);
34477+ atomic_inc_unchecked(&vcc->stats->rx_drop);
34478 goto drop;
34479 }
34480
34481 if ((sb = dev_alloc_skb(64)) == NULL) {
34482 printk("%s: Can't allocate buffers for AAL0.\n",
34483 card->name);
34484- atomic_inc(&vcc->stats->rx_err);
34485+ atomic_inc_unchecked(&vcc->stats->rx_err);
34486 goto drop;
34487 }
34488
34489@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
34490 ATM_SKB(sb)->vcc = vcc;
34491 __net_timestamp(sb);
34492 vcc->push(vcc, sb);
34493- atomic_inc(&vcc->stats->rx);
34494+ atomic_inc_unchecked(&vcc->stats->rx);
34495
34496 drop:
34497 skb_pull(queue, 64);
34498@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
34499
34500 if (vc == NULL) {
34501 printk("%s: NULL connection in send().\n", card->name);
34502- atomic_inc(&vcc->stats->tx_err);
34503+ atomic_inc_unchecked(&vcc->stats->tx_err);
34504 dev_kfree_skb(skb);
34505 return -EINVAL;
34506 }
34507 if (!test_bit(VCF_TX, &vc->flags)) {
34508 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
34509- atomic_inc(&vcc->stats->tx_err);
34510+ atomic_inc_unchecked(&vcc->stats->tx_err);
34511 dev_kfree_skb(skb);
34512 return -EINVAL;
34513 }
34514@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
34515 break;
34516 default:
34517 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
34518- atomic_inc(&vcc->stats->tx_err);
34519+ atomic_inc_unchecked(&vcc->stats->tx_err);
34520 dev_kfree_skb(skb);
34521 return -EINVAL;
34522 }
34523
34524 if (skb_shinfo(skb)->nr_frags != 0) {
34525 printk("%s: No scatter-gather yet.\n", card->name);
34526- atomic_inc(&vcc->stats->tx_err);
34527+ atomic_inc_unchecked(&vcc->stats->tx_err);
34528 dev_kfree_skb(skb);
34529 return -EINVAL;
34530 }
34531@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
34532
34533 err = queue_skb(card, vc, skb, oam);
34534 if (err) {
34535- atomic_inc(&vcc->stats->tx_err);
34536+ atomic_inc_unchecked(&vcc->stats->tx_err);
34537 dev_kfree_skb(skb);
34538 return err;
34539 }
34540@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
34541 skb = dev_alloc_skb(64);
34542 if (!skb) {
34543 printk("%s: Out of memory in send_oam().\n", card->name);
34544- atomic_inc(&vcc->stats->tx_err);
34545+ atomic_inc_unchecked(&vcc->stats->tx_err);
34546 return -ENOMEM;
34547 }
34548 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
34549diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
34550index 4217f29..88f547a 100644
34551--- a/drivers/atm/iphase.c
34552+++ b/drivers/atm/iphase.c
34553@@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
34554 status = (u_short) (buf_desc_ptr->desc_mode);
34555 if (status & (RX_CER | RX_PTE | RX_OFL))
34556 {
34557- atomic_inc(&vcc->stats->rx_err);
34558+ atomic_inc_unchecked(&vcc->stats->rx_err);
34559 IF_ERR(printk("IA: bad packet, dropping it");)
34560 if (status & RX_CER) {
34561 IF_ERR(printk(" cause: packet CRC error\n");)
34562@@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
34563 len = dma_addr - buf_addr;
34564 if (len > iadev->rx_buf_sz) {
34565 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
34566- atomic_inc(&vcc->stats->rx_err);
34567+ atomic_inc_unchecked(&vcc->stats->rx_err);
34568 goto out_free_desc;
34569 }
34570
34571@@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
34572 ia_vcc = INPH_IA_VCC(vcc);
34573 if (ia_vcc == NULL)
34574 {
34575- atomic_inc(&vcc->stats->rx_err);
34576+ atomic_inc_unchecked(&vcc->stats->rx_err);
34577 atm_return(vcc, skb->truesize);
34578 dev_kfree_skb_any(skb);
34579 goto INCR_DLE;
34580@@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
34581 if ((length > iadev->rx_buf_sz) || (length >
34582 (skb->len - sizeof(struct cpcs_trailer))))
34583 {
34584- atomic_inc(&vcc->stats->rx_err);
34585+ atomic_inc_unchecked(&vcc->stats->rx_err);
34586 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
34587 length, skb->len);)
34588 atm_return(vcc, skb->truesize);
34589@@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
34590
34591 IF_RX(printk("rx_dle_intr: skb push");)
34592 vcc->push(vcc,skb);
34593- atomic_inc(&vcc->stats->rx);
34594+ atomic_inc_unchecked(&vcc->stats->rx);
34595 iadev->rx_pkt_cnt++;
34596 }
34597 INCR_DLE:
34598@@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
34599 {
34600 struct k_sonet_stats *stats;
34601 stats = &PRIV(_ia_dev[board])->sonet_stats;
34602- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
34603- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
34604- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
34605- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
34606- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
34607- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
34608- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
34609- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
34610- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
34611+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
34612+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
34613+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
34614+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
34615+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
34616+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
34617+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
34618+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
34619+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
34620 }
34621 ia_cmds.status = 0;
34622 break;
34623@@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
34624 if ((desc == 0) || (desc > iadev->num_tx_desc))
34625 {
34626 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
34627- atomic_inc(&vcc->stats->tx);
34628+ atomic_inc_unchecked(&vcc->stats->tx);
34629 if (vcc->pop)
34630 vcc->pop(vcc, skb);
34631 else
34632@@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
34633 ATM_DESC(skb) = vcc->vci;
34634 skb_queue_tail(&iadev->tx_dma_q, skb);
34635
34636- atomic_inc(&vcc->stats->tx);
34637+ atomic_inc_unchecked(&vcc->stats->tx);
34638 iadev->tx_pkt_cnt++;
34639 /* Increment transaction counter */
34640 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
34641
34642 #if 0
34643 /* add flow control logic */
34644- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
34645+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
34646 if (iavcc->vc_desc_cnt > 10) {
34647 vcc->tx_quota = vcc->tx_quota * 3 / 4;
34648 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
34649diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
34650index fa7d701..1e404c7 100644
34651--- a/drivers/atm/lanai.c
34652+++ b/drivers/atm/lanai.c
34653@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
34654 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
34655 lanai_endtx(lanai, lvcc);
34656 lanai_free_skb(lvcc->tx.atmvcc, skb);
34657- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
34658+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
34659 }
34660
34661 /* Try to fill the buffer - don't call unless there is backlog */
34662@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
34663 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
34664 __net_timestamp(skb);
34665 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
34666- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
34667+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
34668 out:
34669 lvcc->rx.buf.ptr = end;
34670 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
34671@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
34672 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
34673 "vcc %d\n", lanai->number, (unsigned int) s, vci);
34674 lanai->stats.service_rxnotaal5++;
34675- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
34676+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
34677 return 0;
34678 }
34679 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
34680@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
34681 int bytes;
34682 read_unlock(&vcc_sklist_lock);
34683 DPRINTK("got trashed rx pdu on vci %d\n", vci);
34684- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
34685+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
34686 lvcc->stats.x.aal5.service_trash++;
34687 bytes = (SERVICE_GET_END(s) * 16) -
34688 (((unsigned long) lvcc->rx.buf.ptr) -
34689@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
34690 }
34691 if (s & SERVICE_STREAM) {
34692 read_unlock(&vcc_sklist_lock);
34693- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
34694+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
34695 lvcc->stats.x.aal5.service_stream++;
34696 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
34697 "PDU on VCI %d!\n", lanai->number, vci);
34698@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
34699 return 0;
34700 }
34701 DPRINTK("got rx crc error on vci %d\n", vci);
34702- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
34703+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
34704 lvcc->stats.x.aal5.service_rxcrc++;
34705 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
34706 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
34707diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
34708index 6587dc2..149833d 100644
34709--- a/drivers/atm/nicstar.c
34710+++ b/drivers/atm/nicstar.c
34711@@ -1641,7 +1641,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
34712 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
34713 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
34714 card->index);
34715- atomic_inc(&vcc->stats->tx_err);
34716+ atomic_inc_unchecked(&vcc->stats->tx_err);
34717 dev_kfree_skb_any(skb);
34718 return -EINVAL;
34719 }
34720@@ -1649,7 +1649,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
34721 if (!vc->tx) {
34722 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
34723 card->index);
34724- atomic_inc(&vcc->stats->tx_err);
34725+ atomic_inc_unchecked(&vcc->stats->tx_err);
34726 dev_kfree_skb_any(skb);
34727 return -EINVAL;
34728 }
34729@@ -1657,14 +1657,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
34730 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
34731 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
34732 card->index);
34733- atomic_inc(&vcc->stats->tx_err);
34734+ atomic_inc_unchecked(&vcc->stats->tx_err);
34735 dev_kfree_skb_any(skb);
34736 return -EINVAL;
34737 }
34738
34739 if (skb_shinfo(skb)->nr_frags != 0) {
34740 printk("nicstar%d: No scatter-gather yet.\n", card->index);
34741- atomic_inc(&vcc->stats->tx_err);
34742+ atomic_inc_unchecked(&vcc->stats->tx_err);
34743 dev_kfree_skb_any(skb);
34744 return -EINVAL;
34745 }
34746@@ -1712,11 +1712,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
34747 }
34748
34749 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
34750- atomic_inc(&vcc->stats->tx_err);
34751+ atomic_inc_unchecked(&vcc->stats->tx_err);
34752 dev_kfree_skb_any(skb);
34753 return -EIO;
34754 }
34755- atomic_inc(&vcc->stats->tx);
34756+ atomic_inc_unchecked(&vcc->stats->tx);
34757
34758 return 0;
34759 }
34760@@ -2033,14 +2033,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
34761 printk
34762 ("nicstar%d: Can't allocate buffers for aal0.\n",
34763 card->index);
34764- atomic_add(i, &vcc->stats->rx_drop);
34765+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
34766 break;
34767 }
34768 if (!atm_charge(vcc, sb->truesize)) {
34769 RXPRINTK
34770 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
34771 card->index);
34772- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
34773+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
34774 dev_kfree_skb_any(sb);
34775 break;
34776 }
34777@@ -2055,7 +2055,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
34778 ATM_SKB(sb)->vcc = vcc;
34779 __net_timestamp(sb);
34780 vcc->push(vcc, sb);
34781- atomic_inc(&vcc->stats->rx);
34782+ atomic_inc_unchecked(&vcc->stats->rx);
34783 cell += ATM_CELL_PAYLOAD;
34784 }
34785
34786@@ -2072,7 +2072,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
34787 if (iovb == NULL) {
34788 printk("nicstar%d: Out of iovec buffers.\n",
34789 card->index);
34790- atomic_inc(&vcc->stats->rx_drop);
34791+ atomic_inc_unchecked(&vcc->stats->rx_drop);
34792 recycle_rx_buf(card, skb);
34793 return;
34794 }
34795@@ -2096,7 +2096,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
34796 small or large buffer itself. */
34797 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
34798 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
34799- atomic_inc(&vcc->stats->rx_err);
34800+ atomic_inc_unchecked(&vcc->stats->rx_err);
34801 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
34802 NS_MAX_IOVECS);
34803 NS_PRV_IOVCNT(iovb) = 0;
34804@@ -2116,7 +2116,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
34805 ("nicstar%d: Expected a small buffer, and this is not one.\n",
34806 card->index);
34807 which_list(card, skb);
34808- atomic_inc(&vcc->stats->rx_err);
34809+ atomic_inc_unchecked(&vcc->stats->rx_err);
34810 recycle_rx_buf(card, skb);
34811 vc->rx_iov = NULL;
34812 recycle_iov_buf(card, iovb);
34813@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
34814 ("nicstar%d: Expected a large buffer, and this is not one.\n",
34815 card->index);
34816 which_list(card, skb);
34817- atomic_inc(&vcc->stats->rx_err);
34818+ atomic_inc_unchecked(&vcc->stats->rx_err);
34819 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
34820 NS_PRV_IOVCNT(iovb));
34821 vc->rx_iov = NULL;
34822@@ -2152,7 +2152,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
34823 printk(" - PDU size mismatch.\n");
34824 else
34825 printk(".\n");
34826- atomic_inc(&vcc->stats->rx_err);
34827+ atomic_inc_unchecked(&vcc->stats->rx_err);
34828 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
34829 NS_PRV_IOVCNT(iovb));
34830 vc->rx_iov = NULL;
34831@@ -2166,7 +2166,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
34832 /* skb points to a small buffer */
34833 if (!atm_charge(vcc, skb->truesize)) {
34834 push_rxbufs(card, skb);
34835- atomic_inc(&vcc->stats->rx_drop);
34836+ atomic_inc_unchecked(&vcc->stats->rx_drop);
34837 } else {
34838 skb_put(skb, len);
34839 dequeue_sm_buf(card, skb);
34840@@ -2176,7 +2176,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
34841 ATM_SKB(skb)->vcc = vcc;
34842 __net_timestamp(skb);
34843 vcc->push(vcc, skb);
34844- atomic_inc(&vcc->stats->rx);
34845+ atomic_inc_unchecked(&vcc->stats->rx);
34846 }
34847 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
34848 struct sk_buff *sb;
34849@@ -2187,7 +2187,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
34850 if (len <= NS_SMBUFSIZE) {
34851 if (!atm_charge(vcc, sb->truesize)) {
34852 push_rxbufs(card, sb);
34853- atomic_inc(&vcc->stats->rx_drop);
34854+ atomic_inc_unchecked(&vcc->stats->rx_drop);
34855 } else {
34856 skb_put(sb, len);
34857 dequeue_sm_buf(card, sb);
34858@@ -2197,7 +2197,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
34859 ATM_SKB(sb)->vcc = vcc;
34860 __net_timestamp(sb);
34861 vcc->push(vcc, sb);
34862- atomic_inc(&vcc->stats->rx);
34863+ atomic_inc_unchecked(&vcc->stats->rx);
34864 }
34865
34866 push_rxbufs(card, skb);
34867@@ -2206,7 +2206,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
34868
34869 if (!atm_charge(vcc, skb->truesize)) {
34870 push_rxbufs(card, skb);
34871- atomic_inc(&vcc->stats->rx_drop);
34872+ atomic_inc_unchecked(&vcc->stats->rx_drop);
34873 } else {
34874 dequeue_lg_buf(card, skb);
34875 #ifdef NS_USE_DESTRUCTORS
34876@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
34877 ATM_SKB(skb)->vcc = vcc;
34878 __net_timestamp(skb);
34879 vcc->push(vcc, skb);
34880- atomic_inc(&vcc->stats->rx);
34881+ atomic_inc_unchecked(&vcc->stats->rx);
34882 }
34883
34884 push_rxbufs(card, sb);
34885@@ -2240,7 +2240,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
34886 printk
34887 ("nicstar%d: Out of huge buffers.\n",
34888 card->index);
34889- atomic_inc(&vcc->stats->rx_drop);
34890+ atomic_inc_unchecked(&vcc->stats->rx_drop);
34891 recycle_iovec_rx_bufs(card,
34892 (struct iovec *)
34893 iovb->data,
34894@@ -2291,7 +2291,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
34895 card->hbpool.count++;
34896 } else
34897 dev_kfree_skb_any(hb);
34898- atomic_inc(&vcc->stats->rx_drop);
34899+ atomic_inc_unchecked(&vcc->stats->rx_drop);
34900 } else {
34901 /* Copy the small buffer to the huge buffer */
34902 sb = (struct sk_buff *)iov->iov_base;
34903@@ -2328,7 +2328,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
34904 #endif /* NS_USE_DESTRUCTORS */
34905 __net_timestamp(hb);
34906 vcc->push(vcc, hb);
34907- atomic_inc(&vcc->stats->rx);
34908+ atomic_inc_unchecked(&vcc->stats->rx);
34909 }
34910 }
34911
34912diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
34913index 32784d1..4a8434a 100644
34914--- a/drivers/atm/solos-pci.c
34915+++ b/drivers/atm/solos-pci.c
34916@@ -838,7 +838,7 @@ void solos_bh(unsigned long card_arg)
34917 }
34918 atm_charge(vcc, skb->truesize);
34919 vcc->push(vcc, skb);
34920- atomic_inc(&vcc->stats->rx);
34921+ atomic_inc_unchecked(&vcc->stats->rx);
34922 break;
34923
34924 case PKT_STATUS:
34925@@ -1116,7 +1116,7 @@ static uint32_t fpga_tx(struct solos_card *card)
34926 vcc = SKB_CB(oldskb)->vcc;
34927
34928 if (vcc) {
34929- atomic_inc(&vcc->stats->tx);
34930+ atomic_inc_unchecked(&vcc->stats->tx);
34931 solos_pop(vcc, oldskb);
34932 } else {
34933 dev_kfree_skb_irq(oldskb);
34934diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
34935index 0215934..ce9f5b1 100644
34936--- a/drivers/atm/suni.c
34937+++ b/drivers/atm/suni.c
34938@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
34939
34940
34941 #define ADD_LIMITED(s,v) \
34942- atomic_add((v),&stats->s); \
34943- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
34944+ atomic_add_unchecked((v),&stats->s); \
34945+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
34946
34947
34948 static void suni_hz(unsigned long from_timer)
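
suni.c's ADD_LIMITED keeps SONET error counters monotonic by clamping at INT_MAX once an addition wraps negative (uPD98402.c below repeats the idiom); the patch only swaps in the _unchecked primitives so this deliberate wrap-and-clamp is not misread as a refcount overflow. The same saturating idiom in standalone C; the kernel relies on its -fno-strict-overflow build flags, so here the wrap goes through unsigned math to stay well defined:

#include <stdio.h>
#include <limits.h>

#define ADD_LIMITED(s, v)                                           \
        do {                                                        \
                (s) = (int)((unsigned)(s) + (unsigned)(v));         \
                if ((s) < 0)          /* wrapped past INT_MAX */    \
                        (s) = INT_MAX;                              \
        } while (0)

int main(void)
{
        int section_bip = INT_MAX - 3;
        ADD_LIMITED(section_bip, 10);
        printf("%d\n", section_bip == INT_MAX);   /* 1: saturated */
        return 0;
}
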
34949diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
34950index 5120a96..e2572bd 100644
34951--- a/drivers/atm/uPD98402.c
34952+++ b/drivers/atm/uPD98402.c
34953@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
34954 struct sonet_stats tmp;
34955 int error = 0;
34956
34957- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
34958+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
34959 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
34960 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
34961 if (zero && !error) {
34962@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
34963
34964
34965 #define ADD_LIMITED(s,v) \
34966- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
34967- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
34968- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
34969+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
34970+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
34971+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
34972
34973
34974 static void stat_event(struct atm_dev *dev)
34975@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
34976 if (reason & uPD98402_INT_PFM) stat_event(dev);
34977 if (reason & uPD98402_INT_PCO) {
34978 (void) GET(PCOCR); /* clear interrupt cause */
34979- atomic_add(GET(HECCT),
34980+ atomic_add_unchecked(GET(HECCT),
34981 &PRIV(dev)->sonet_stats.uncorr_hcs);
34982 }
34983 if ((reason & uPD98402_INT_RFO) &&
34984@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
34985 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
34986 uPD98402_INT_LOS),PIMR); /* enable them */
34987 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
34988- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
34989- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
34990- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
34991+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
34992+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
34993+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
34994 return 0;
34995 }
34996
34997diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
34998index 969c3c2..9b72956 100644
34999--- a/drivers/atm/zatm.c
35000+++ b/drivers/atm/zatm.c
35001@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
35002 }
35003 if (!size) {
35004 dev_kfree_skb_irq(skb);
35005- if (vcc) atomic_inc(&vcc->stats->rx_err);
35006+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
35007 continue;
35008 }
35009 if (!atm_charge(vcc,skb->truesize)) {
35010@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
35011 skb->len = size;
35012 ATM_SKB(skb)->vcc = vcc;
35013 vcc->push(vcc,skb);
35014- atomic_inc(&vcc->stats->rx);
35015+ atomic_inc_unchecked(&vcc->stats->rx);
35016 }
35017 zout(pos & 0xffff,MTA(mbx));
35018 #if 0 /* probably a stupid idea */
35019@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
35020 skb_queue_head(&zatm_vcc->backlog,skb);
35021 break;
35022 }
35023- atomic_inc(&vcc->stats->tx);
35024+ atomic_inc_unchecked(&vcc->stats->tx);
35025 wake_up(&zatm_vcc->tx_wait);
35026 }
35027
35028diff --git a/drivers/base/attribute_container.c b/drivers/base/attribute_container.c
35029index d78b204..ecc1929 100644
35030--- a/drivers/base/attribute_container.c
35031+++ b/drivers/base/attribute_container.c
35032@@ -167,7 +167,7 @@ attribute_container_add_device(struct device *dev,
35033 ic->classdev.parent = get_device(dev);
35034 ic->classdev.class = cont->class;
35035 cont->class->dev_release = attribute_container_release;
35036- dev_set_name(&ic->classdev, dev_name(dev));
35037+ dev_set_name(&ic->classdev, "%s", dev_name(dev));
35038 if (fn)
35039 fn(cont, dev, &ic->classdev);
35040 else
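
dev_set_name(&ic->classdev, dev_name(dev)) passed a variable as the format string, so a device name containing % conversions would be parsed by the printf engine; routing it through a constant "%s" (the same fix applied to rtpm_status_show() further down) turns the name into plain data. The shape of the bug and the fix, reduced to standalone C:

#include <stdio.h>

int main(void)
{
        const char *name = "weird%x%ndevice";   /* hostile-looking   */

        /* vulnerable shape: printf(name); would interpret %x and %n */
        printf("%s\n", name);   /* fixed shape: constant format      */
        return 0;
}
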
35041diff --git a/drivers/base/bus.c b/drivers/base/bus.c
35042index d414331..b4dd4ba 100644
35043--- a/drivers/base/bus.c
35044+++ b/drivers/base/bus.c
35045@@ -1163,7 +1163,7 @@ int subsys_interface_register(struct subsys_interface *sif)
35046 return -EINVAL;
35047
35048 mutex_lock(&subsys->p->mutex);
35049- list_add_tail(&sif->node, &subsys->p->interfaces);
35050+ pax_list_add_tail((struct list_head *)&sif->node, &subsys->p->interfaces);
35051 if (sif->add_dev) {
35052 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
35053 while ((dev = subsys_dev_iter_next(&iter)))
35054@@ -1188,7 +1188,7 @@ void subsys_interface_unregister(struct subsys_interface *sif)
35055 subsys = sif->subsys;
35056
35057 mutex_lock(&subsys->p->mutex);
35058- list_del_init(&sif->node);
35059+ pax_list_del_init((struct list_head *)&sif->node);
35060 if (sif->remove_dev) {
35061 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
35062 while ((dev = subsys_dev_iter_next(&iter)))
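
subsys_interface nodes can live inside constified objects, so plain list_add_tail()/list_del_init() would fault on the write-protected link pointers; the pax_list_* wrappers do the same pointer surgery inside an open/close window. The linking logic, sketched standalone with stubbed protection toggles; the real helper bodies are inferred from the pattern, not quoted:

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void pax_open_kernel(void)  { }   /* stubs; real versions     */
static void pax_close_kernel(void) { }   /* toggle write protection  */

static void pax_list_add_tail(struct list_head *entry,
                              struct list_head *head)
{
        struct list_head *prev = head->prev;

        pax_open_kernel();
        entry->next = head;        /* identical links to the stock   */
        entry->prev = prev;        /* list_add_tail()                */
        prev->next  = entry;
        head->prev  = entry;
        pax_close_kernel();
}

int main(void)
{
        struct list_head head = { &head, &head }, node;

        pax_list_add_tail(&node, &head);
        printf("%d\n", head.prev == &node && head.next == &node);
        return 0;
}
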
35063diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
35064index 7413d06..79155fa 100644
35065--- a/drivers/base/devtmpfs.c
35066+++ b/drivers/base/devtmpfs.c
35067@@ -354,7 +354,7 @@ int devtmpfs_mount(const char *mntdir)
35068 if (!thread)
35069 return 0;
35070
35071- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
35072+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
35073 if (err)
35074 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
35075 else
35076@@ -380,11 +380,11 @@ static int devtmpfsd(void *p)
35077 *err = sys_unshare(CLONE_NEWNS);
35078 if (*err)
35079 goto out;
35080- *err = sys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options);
35081+ *err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)"/", (char __force_user *)"devtmpfs", MS_SILENT, (char __force_user *)options);
35082 if (*err)
35083 goto out;
35084- sys_chdir("/.."); /* will traverse into overmounted root */
35085- sys_chroot(".");
35086+ sys_chdir((char __force_user *)"/.."); /* will traverse into overmounted root */
35087+ sys_chroot((char __force_user *)".");
35088 complete(&setup_done);
35089 while (1) {
35090 spin_lock(&req_lock);
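
devtmpfs invokes sys_mount()/sys_chdir()/sys_chroot() from a kernel thread with kernel-space strings; those syscall prototypes take __user pointers, so with PaX's stricter user/kernel separation the mismatch must be cast away explicitly. __force_user is the usual sparse idiom (__force plus __user) declaring the crossing intentional. A sketch of the annotation plumbing; the macro definitions follow the conventional kernel ones and are assumed, not text from this patch:

#include <stdio.h>

#ifdef __CHECKER__          /* sparse sees the address spaces...     */
#define __user   __attribute__((noderef, address_space(1)))
#define __force  __attribute__((force))
#else                       /* ...a normal compile sees nothing      */
#define __user
#define __force
#endif
#define __force_user __force __user

static long sys_chroot(const char __user *path)   /* stand-in        */
{
        (void)path;
        return 0;
}

int main(void)
{
        /* kernel-side string deliberately passed as a __user arg    */
        long err = sys_chroot((const char __force_user *)".");
        printf("%ld\n", err);
        return 0;
}
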
35091diff --git a/drivers/base/node.c b/drivers/base/node.c
35092index 7616a77c..8f57f51 100644
35093--- a/drivers/base/node.c
35094+++ b/drivers/base/node.c
35095@@ -626,7 +626,7 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
35096 struct node_attr {
35097 struct device_attribute attr;
35098 enum node_states state;
35099-};
35100+} __do_const;
35101
35102 static ssize_t show_node_state(struct device *dev,
35103 struct device_attribute *attr, char *buf)
35104diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
35105index 7072404..76dcebd 100644
35106--- a/drivers/base/power/domain.c
35107+++ b/drivers/base/power/domain.c
35108@@ -1850,7 +1850,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
35109 {
35110 struct cpuidle_driver *cpuidle_drv;
35111 struct gpd_cpu_data *cpu_data;
35112- struct cpuidle_state *idle_state;
35113+ cpuidle_state_no_const *idle_state;
35114 int ret = 0;
35115
35116 if (IS_ERR_OR_NULL(genpd) || state < 0)
35117@@ -1918,7 +1918,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state)
35118 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
35119 {
35120 struct gpd_cpu_data *cpu_data;
35121- struct cpuidle_state *idle_state;
35122+ cpuidle_state_no_const *idle_state;
35123 int ret = 0;
35124
35125 if (IS_ERR_OR_NULL(genpd))
35126diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
35127index a53ebd2..8f73eeb 100644
35128--- a/drivers/base/power/sysfs.c
35129+++ b/drivers/base/power/sysfs.c
35130@@ -185,7 +185,7 @@ static ssize_t rtpm_status_show(struct device *dev,
35131 return -EIO;
35132 }
35133 }
35134- return sprintf(buf, p);
35135+ return sprintf(buf, "%s", p);
35136 }
35137
35138 static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
35139diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
35140index 79715e7..df06b3b 100644
35141--- a/drivers/base/power/wakeup.c
35142+++ b/drivers/base/power/wakeup.c
35143@@ -29,14 +29,14 @@ bool events_check_enabled __read_mostly;
35144 * They need to be modified together atomically, so it's better to use one
35145 * atomic variable to hold them both.
35146 */
35147-static atomic_t combined_event_count = ATOMIC_INIT(0);
35148+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
35149
35150 #define IN_PROGRESS_BITS (sizeof(int) * 4)
35151 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
35152
35153 static void split_counters(unsigned int *cnt, unsigned int *inpr)
35154 {
35155- unsigned int comb = atomic_read(&combined_event_count);
35156+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
35157
35158 *cnt = (comb >> IN_PROGRESS_BITS);
35159 *inpr = comb & MAX_IN_PROGRESS;
35160@@ -395,7 +395,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
35161 ws->start_prevent_time = ws->last_time;
35162
35163 /* Increment the counter of events in progress. */
35164- cec = atomic_inc_return(&combined_event_count);
35165+ cec = atomic_inc_return_unchecked(&combined_event_count);
35166
35167 trace_wakeup_source_activate(ws->name, cec);
35168 }
35169@@ -521,7 +521,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
35170 * Increment the counter of registered wakeup events and decrement the
35171 * counter of wakeup events in progress simultaneously.
35172 */
35173- cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
35174+ cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
35175 trace_wakeup_source_deactivate(ws->name, cec);
35176
35177 split_counters(&cnt, &inpr);
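
wakeup.c packs two counters into one atomic word so they always change together: the low IN_PROGRESS_BITS hold the number of events in progress, the high bits the total registered events, and adding MAX_IN_PROGRESS carries one out of the low half (total++) while taking one off it (in-progress--) in a single atomic operation. The patch merely retypes the word atomic_unchecked_t, since both halves may legitimately wrap. The packing itself, replayed in standalone C:

#include <stdio.h>

#define IN_PROGRESS_BITS (sizeof(int) * 4)     /* 16 with 32-bit int */
#define MAX_IN_PROGRESS  ((1u << IN_PROGRESS_BITS) - 1)

static unsigned combined;                      /* atomic in-kernel   */

static void split_counters(unsigned *cnt, unsigned *inpr)
{
        unsigned comb = combined;
        *cnt  = comb >> IN_PROGRESS_BITS;
        *inpr = comb & MAX_IN_PROGRESS;
}

int main(void)
{
        unsigned cnt, inpr;

        combined += 1;                  /* activate: in-progress++   */
        combined += MAX_IN_PROGRESS;    /* deactivate: total++,      */
                                        /* in-progress-- (via carry) */
        split_counters(&cnt, &inpr);
        printf("count=%u in_progress=%u\n", cnt, inpr);  /* 1 0 */
        return 0;
}
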
35178diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
35179index e8d11b6..7b1b36f 100644
35180--- a/drivers/base/syscore.c
35181+++ b/drivers/base/syscore.c
35182@@ -21,7 +21,7 @@ static DEFINE_MUTEX(syscore_ops_lock);
35183 void register_syscore_ops(struct syscore_ops *ops)
35184 {
35185 mutex_lock(&syscore_ops_lock);
35186- list_add_tail(&ops->node, &syscore_ops_list);
35187+ pax_list_add_tail((struct list_head *)&ops->node, &syscore_ops_list);
35188 mutex_unlock(&syscore_ops_lock);
35189 }
35190 EXPORT_SYMBOL_GPL(register_syscore_ops);
35191@@ -33,7 +33,7 @@ EXPORT_SYMBOL_GPL(register_syscore_ops);
35192 void unregister_syscore_ops(struct syscore_ops *ops)
35193 {
35194 mutex_lock(&syscore_ops_lock);
35195- list_del(&ops->node);
35196+ pax_list_del((struct list_head *)&ops->node);
35197 mutex_unlock(&syscore_ops_lock);
35198 }
35199 EXPORT_SYMBOL_GPL(unregister_syscore_ops);
35200diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
35201index 62b6c2c..4a11354 100644
35202--- a/drivers/block/cciss.c
35203+++ b/drivers/block/cciss.c
35204@@ -1189,6 +1189,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
35205 int err;
35206 u32 cp;
35207
35208+ memset(&arg64, 0, sizeof(arg64));
35209+
35210 err = 0;
35211 err |=
35212 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
35213@@ -3010,7 +3012,7 @@ static void start_io(ctlr_info_t *h)
35214 while (!list_empty(&h->reqQ)) {
35215 c = list_entry(h->reqQ.next, CommandList_struct, list);
35216 /* can't do anything if fifo is full */
35217- if ((h->access.fifo_full(h))) {
35218+ if ((h->access->fifo_full(h))) {
35219 dev_warn(&h->pdev->dev, "fifo full\n");
35220 break;
35221 }
35222@@ -3020,7 +3022,7 @@ static void start_io(ctlr_info_t *h)
35223 h->Qdepth--;
35224
35225 /* Tell the controller execute command */
35226- h->access.submit_command(h, c);
35227+ h->access->submit_command(h, c);
35228
35229 /* Put job onto the completed Q */
35230 addQ(&h->cmpQ, c);
35231@@ -3446,17 +3448,17 @@ startio:
35232
35233 static inline unsigned long get_next_completion(ctlr_info_t *h)
35234 {
35235- return h->access.command_completed(h);
35236+ return h->access->command_completed(h);
35237 }
35238
35239 static inline int interrupt_pending(ctlr_info_t *h)
35240 {
35241- return h->access.intr_pending(h);
35242+ return h->access->intr_pending(h);
35243 }
35244
35245 static inline long interrupt_not_for_us(ctlr_info_t *h)
35246 {
35247- return ((h->access.intr_pending(h) == 0) ||
35248+ return ((h->access->intr_pending(h) == 0) ||
35249 (h->interrupts_enabled == 0));
35250 }
35251
35252@@ -3489,7 +3491,7 @@ static inline u32 next_command(ctlr_info_t *h)
35253 u32 a;
35254
35255 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
35256- return h->access.command_completed(h);
35257+ return h->access->command_completed(h);
35258
35259 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
35260 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
35261@@ -4046,7 +4048,7 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
35262 trans_support & CFGTBL_Trans_use_short_tags);
35263
35264 /* Change the access methods to the performant access methods */
35265- h->access = SA5_performant_access;
35266+ h->access = &SA5_performant_access;
35267 h->transMethod = CFGTBL_Trans_Performant;
35268
35269 return;
35270@@ -4319,7 +4321,7 @@ static int cciss_pci_init(ctlr_info_t *h)
35271 if (prod_index < 0)
35272 return -ENODEV;
35273 h->product_name = products[prod_index].product_name;
35274- h->access = *(products[prod_index].access);
35275+ h->access = products[prod_index].access;
35276
35277 if (cciss_board_disabled(h)) {
35278 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
35279@@ -5051,7 +5053,7 @@ reinit_after_soft_reset:
35280 }
35281
35282 /* make sure the board interrupts are off */
35283- h->access.set_intr_mask(h, CCISS_INTR_OFF);
35284+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
35285 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
35286 if (rc)
35287 goto clean2;
35288@@ -5101,7 +5103,7 @@ reinit_after_soft_reset:
35289 * fake ones to scoop up any residual completions.
35290 */
35291 spin_lock_irqsave(&h->lock, flags);
35292- h->access.set_intr_mask(h, CCISS_INTR_OFF);
35293+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
35294 spin_unlock_irqrestore(&h->lock, flags);
35295 free_irq(h->intr[h->intr_mode], h);
35296 rc = cciss_request_irq(h, cciss_msix_discard_completions,
35297@@ -5121,9 +5123,9 @@ reinit_after_soft_reset:
35298 dev_info(&h->pdev->dev, "Board READY.\n");
35299 dev_info(&h->pdev->dev,
35300 "Waiting for stale completions to drain.\n");
35301- h->access.set_intr_mask(h, CCISS_INTR_ON);
35302+ h->access->set_intr_mask(h, CCISS_INTR_ON);
35303 msleep(10000);
35304- h->access.set_intr_mask(h, CCISS_INTR_OFF);
35305+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
35306
35307 rc = controller_reset_failed(h->cfgtable);
35308 if (rc)
35309@@ -5146,7 +5148,7 @@ reinit_after_soft_reset:
35310 cciss_scsi_setup(h);
35311
35312 /* Turn the interrupts on so we can service requests */
35313- h->access.set_intr_mask(h, CCISS_INTR_ON);
35314+ h->access->set_intr_mask(h, CCISS_INTR_ON);
35315
35316 /* Get the firmware version */
35317 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
35318@@ -5218,7 +5220,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
35319 kfree(flush_buf);
35320 if (return_code != IO_OK)
35321 dev_warn(&h->pdev->dev, "Error flushing cache\n");
35322- h->access.set_intr_mask(h, CCISS_INTR_OFF);
35323+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
35324 free_irq(h->intr[h->intr_mode], h);
35325 }
35326
35327diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
35328index 7fda30e..eb5dfe0 100644
35329--- a/drivers/block/cciss.h
35330+++ b/drivers/block/cciss.h
35331@@ -101,7 +101,7 @@ struct ctlr_info
35332 /* information about each logical volume */
35333 drive_info_struct *drv[CISS_MAX_LUN];
35334
35335- struct access_method access;
35336+ struct access_method *access;
35337
35338 /* queue and queue Info */
35339 struct list_head reqQ;
35340diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
35341index 639d26b..fd6ad1f 100644
35342--- a/drivers/block/cpqarray.c
35343+++ b/drivers/block/cpqarray.c
35344@@ -404,7 +404,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
35345 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
35346 goto Enomem4;
35347 }
35348- hba[i]->access.set_intr_mask(hba[i], 0);
35349+ hba[i]->access->set_intr_mask(hba[i], 0);
35350 if (request_irq(hba[i]->intr, do_ida_intr,
35351 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
35352 {
35353@@ -459,7 +459,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
35354 add_timer(&hba[i]->timer);
35355
35356 /* Enable IRQ now that spinlock and rate limit timer are set up */
35357- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
35358+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
35359
35360 for(j=0; j<NWD; j++) {
35361 struct gendisk *disk = ida_gendisk[i][j];
35362@@ -694,7 +694,7 @@ DBGINFO(
35363 for(i=0; i<NR_PRODUCTS; i++) {
35364 if (board_id == products[i].board_id) {
35365 c->product_name = products[i].product_name;
35366- c->access = *(products[i].access);
35367+ c->access = products[i].access;
35368 break;
35369 }
35370 }
35371@@ -792,7 +792,7 @@ static int cpqarray_eisa_detect(void)
35372 hba[ctlr]->intr = intr;
35373 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
35374 hba[ctlr]->product_name = products[j].product_name;
35375- hba[ctlr]->access = *(products[j].access);
35376+ hba[ctlr]->access = products[j].access;
35377 hba[ctlr]->ctlr = ctlr;
35378 hba[ctlr]->board_id = board_id;
35379 hba[ctlr]->pci_dev = NULL; /* not PCI */
35380@@ -978,7 +978,7 @@ static void start_io(ctlr_info_t *h)
35381
35382 while((c = h->reqQ) != NULL) {
35383 /* Can't do anything if we're busy */
35384- if (h->access.fifo_full(h) == 0)
35385+ if (h->access->fifo_full(h) == 0)
35386 return;
35387
35388 /* Get the first entry from the request Q */
35389@@ -986,7 +986,7 @@ static void start_io(ctlr_info_t *h)
35390 h->Qdepth--;
35391
35392 /* Tell the controller to do our bidding */
35393- h->access.submit_command(h, c);
35394+ h->access->submit_command(h, c);
35395
35396 /* Get onto the completion Q */
35397 addQ(&h->cmpQ, c);
35398@@ -1048,7 +1048,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
35399 unsigned long flags;
35400 __u32 a,a1;
35401
35402- istat = h->access.intr_pending(h);
35403+ istat = h->access->intr_pending(h);
35404 /* Is this interrupt for us? */
35405 if (istat == 0)
35406 return IRQ_NONE;
35407@@ -1059,7 +1059,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
35408 */
35409 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
35410 if (istat & FIFO_NOT_EMPTY) {
35411- while((a = h->access.command_completed(h))) {
35412+ while((a = h->access->command_completed(h))) {
35413 a1 = a; a &= ~3;
35414 if ((c = h->cmpQ) == NULL)
35415 {
35416@@ -1193,6 +1193,7 @@ out_passthru:
35417 ida_pci_info_struct pciinfo;
35418
35419 if (!arg) return -EINVAL;
35420+ memset(&pciinfo, 0, sizeof(pciinfo));
35421 pciinfo.bus = host->pci_dev->bus->number;
35422 pciinfo.dev_fn = host->pci_dev->devfn;
35423 pciinfo.board_id = host->board_id;
35424@@ -1447,11 +1448,11 @@ static int sendcmd(
35425 /*
35426 * Disable interrupt
35427 */
35428- info_p->access.set_intr_mask(info_p, 0);
35429+ info_p->access->set_intr_mask(info_p, 0);
35430 /* Make sure there is room in the command FIFO */
35431 /* Actually it should be completely empty at this time. */
35432 for (i = 200000; i > 0; i--) {
35433- temp = info_p->access.fifo_full(info_p);
35434+ temp = info_p->access->fifo_full(info_p);
35435 if (temp != 0) {
35436 break;
35437 }
35438@@ -1464,7 +1465,7 @@ DBG(
35439 /*
35440 * Send the cmd
35441 */
35442- info_p->access.submit_command(info_p, c);
35443+ info_p->access->submit_command(info_p, c);
35444 complete = pollcomplete(ctlr);
35445
35446 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
35447@@ -1547,9 +1548,9 @@ static int revalidate_allvol(ctlr_info_t *host)
35448 * we check the new geometry. Then turn interrupts back on when
35449 * we're done.
35450 */
35451- host->access.set_intr_mask(host, 0);
35452+ host->access->set_intr_mask(host, 0);
35453 getgeometry(ctlr);
35454- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
35455+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
35456
35457 for(i=0; i<NWD; i++) {
35458 struct gendisk *disk = ida_gendisk[ctlr][i];
35459@@ -1589,7 +1590,7 @@ static int pollcomplete(int ctlr)
35460 /* Wait (up to 2 seconds) for a command to complete */
35461
35462 for (i = 200000; i > 0; i--) {
35463- done = hba[ctlr]->access.command_completed(hba[ctlr]);
35464+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
35465 if (done == 0) {
35466 udelay(10); /* a short fixed delay */
35467 } else
35468diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
35469index be73e9d..7fbf140 100644
35470--- a/drivers/block/cpqarray.h
35471+++ b/drivers/block/cpqarray.h
35472@@ -99,7 +99,7 @@ struct ctlr_info {
35473 drv_info_t drv[NWD];
35474 struct proc_dir_entry *proc;
35475
35476- struct access_method access;
35477+ struct access_method *access;
35478
35479 cmdlist_t *reqQ;
35480 cmdlist_t *cmpQ;
35481diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
35482index f943aac..99bfd19 100644
35483--- a/drivers/block/drbd/drbd_int.h
35484+++ b/drivers/block/drbd/drbd_int.h
35485@@ -582,7 +582,7 @@ struct drbd_epoch {
35486 struct drbd_tconn *tconn;
35487 struct list_head list;
35488 unsigned int barrier_nr;
35489- atomic_t epoch_size; /* increased on every request added. */
35490+ atomic_unchecked_t epoch_size; /* increased on every request added. */
35491 atomic_t active; /* increased on every req. added, and dec on every finished. */
35492 unsigned long flags;
35493 };
35494@@ -1021,7 +1021,7 @@ struct drbd_conf {
35495 unsigned int al_tr_number;
35496 int al_tr_cycle;
35497 wait_queue_head_t seq_wait;
35498- atomic_t packet_seq;
35499+ atomic_unchecked_t packet_seq;
35500 unsigned int peer_seq;
35501 spinlock_t peer_seq_lock;
35502 unsigned int minor;
35503@@ -1562,7 +1562,7 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
35504 char __user *uoptval;
35505 int err;
35506
35507- uoptval = (char __user __force *)optval;
35508+ uoptval = (char __force_user *)optval;
35509
35510 set_fs(KERNEL_DS);
35511 if (level == SOL_SOCKET)
35512diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
35513index a5dca6a..bb27967 100644
35514--- a/drivers/block/drbd/drbd_main.c
35515+++ b/drivers/block/drbd/drbd_main.c
35516@@ -1317,7 +1317,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
35517 p->sector = sector;
35518 p->block_id = block_id;
35519 p->blksize = blksize;
35520- p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
35521+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&mdev->packet_seq));
35522 return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
35523 }
35524
35525@@ -1619,7 +1619,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
35526 return -EIO;
35527 p->sector = cpu_to_be64(req->i.sector);
35528 p->block_id = (unsigned long)req;
35529- p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
35530+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&mdev->packet_seq));
35531 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
35532 if (mdev->state.conn >= C_SYNC_SOURCE &&
35533 mdev->state.conn <= C_PAUSED_SYNC_T)
35534@@ -2574,8 +2574,8 @@ void conn_destroy(struct kref *kref)
35535 {
35536 struct drbd_tconn *tconn = container_of(kref, struct drbd_tconn, kref);
35537
35538- if (atomic_read(&tconn->current_epoch->epoch_size) != 0)
35539- conn_err(tconn, "epoch_size:%d\n", atomic_read(&tconn->current_epoch->epoch_size));
35540+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size) != 0)
35541+ conn_err(tconn, "epoch_size:%d\n", atomic_read_unchecked(&tconn->current_epoch->epoch_size));
35542 kfree(tconn->current_epoch);
35543
35544 idr_destroy(&tconn->volumes);
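
drbd's epoch_size and packet_seq are wire-sequence and statistics counters that may legitimately wrap, so the patch moves them to atomic_unchecked_t, the PaX/grsecurity type exempt from REFCOUNT overflow detection, while genuine reference counts such as epoch->active stay on the instrumented atomic_t. On a kernel without PaX the unchecked variants are expected to collapse to the plain operations; a compatibility sketch under that assumption (the patch's own definitions live elsewhere in the tree):

#include <linux/atomic.h>

/* Sketch: non-PaX fallbacks that simply alias the ordinary atomic ops. */
typedef atomic_t atomic_unchecked_t;

#define atomic_read_unchecked(v)	atomic_read(v)
#define atomic_set_unchecked(v, i)	atomic_set((v), (i))
#define atomic_inc_unchecked(v)		atomic_inc(v)
#define atomic_inc_return_unchecked(v)	atomic_inc_return(v)
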
35545diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
35546index 4222aff..1f79506 100644
35547--- a/drivers/block/drbd/drbd_receiver.c
35548+++ b/drivers/block/drbd/drbd_receiver.c
35549@@ -834,7 +834,7 @@ int drbd_connected(struct drbd_conf *mdev)
35550 {
35551 int err;
35552
35553- atomic_set(&mdev->packet_seq, 0);
35554+ atomic_set_unchecked(&mdev->packet_seq, 0);
35555 mdev->peer_seq = 0;
35556
35557 mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ?
35558@@ -1193,7 +1193,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
35559 do {
35560 next_epoch = NULL;
35561
35562- epoch_size = atomic_read(&epoch->epoch_size);
35563+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
35564
35565 switch (ev & ~EV_CLEANUP) {
35566 case EV_PUT:
35567@@ -1233,7 +1233,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
35568 rv = FE_DESTROYED;
35569 } else {
35570 epoch->flags = 0;
35571- atomic_set(&epoch->epoch_size, 0);
35572+ atomic_set_unchecked(&epoch->epoch_size, 0);
35573 /* atomic_set(&epoch->active, 0); is already zero */
35574 if (rv == FE_STILL_LIVE)
35575 rv = FE_RECYCLED;
35576@@ -1451,7 +1451,7 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
35577 conn_wait_active_ee_empty(tconn);
35578 drbd_flush(tconn);
35579
35580- if (atomic_read(&tconn->current_epoch->epoch_size)) {
35581+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size)) {
35582 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
35583 if (epoch)
35584 break;
35585@@ -1464,11 +1464,11 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
35586 }
35587
35588 epoch->flags = 0;
35589- atomic_set(&epoch->epoch_size, 0);
35590+ atomic_set_unchecked(&epoch->epoch_size, 0);
35591 atomic_set(&epoch->active, 0);
35592
35593 spin_lock(&tconn->epoch_lock);
35594- if (atomic_read(&tconn->current_epoch->epoch_size)) {
35595+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size)) {
35596 list_add(&epoch->list, &tconn->current_epoch->list);
35597 tconn->current_epoch = epoch;
35598 tconn->epochs++;
35599@@ -2172,7 +2172,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
35600
35601 err = wait_for_and_update_peer_seq(mdev, peer_seq);
35602 drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
35603- atomic_inc(&tconn->current_epoch->epoch_size);
35604+ atomic_inc_unchecked(&tconn->current_epoch->epoch_size);
35605 err2 = drbd_drain_block(mdev, pi->size);
35606 if (!err)
35607 err = err2;
35608@@ -2206,7 +2206,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
35609
35610 spin_lock(&tconn->epoch_lock);
35611 peer_req->epoch = tconn->current_epoch;
35612- atomic_inc(&peer_req->epoch->epoch_size);
35613+ atomic_inc_unchecked(&peer_req->epoch->epoch_size);
35614 atomic_inc(&peer_req->epoch->active);
35615 spin_unlock(&tconn->epoch_lock);
35616
35617@@ -4347,7 +4347,7 @@ struct data_cmd {
35618 int expect_payload;
35619 size_t pkt_size;
35620 int (*fn)(struct drbd_tconn *, struct packet_info *);
35621-};
35622+} __do_const;
35623
35624 static struct data_cmd drbd_cmd_handler[] = {
35625 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
35626@@ -4467,7 +4467,7 @@ static void conn_disconnect(struct drbd_tconn *tconn)
35627 if (!list_empty(&tconn->current_epoch->list))
35628 conn_err(tconn, "ASSERTION FAILED: tconn->current_epoch->list not empty\n");
35629 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
35630- atomic_set(&tconn->current_epoch->epoch_size, 0);
35631+ atomic_set_unchecked(&tconn->current_epoch->epoch_size, 0);
35632 tconn->send.seen_any_write_yet = false;
35633
35634 conn_info(tconn, "Connection closed\n");
35635@@ -5223,7 +5223,7 @@ static int tconn_finish_peer_reqs(struct drbd_tconn *tconn)
35636 struct asender_cmd {
35637 size_t pkt_size;
35638 int (*fn)(struct drbd_tconn *tconn, struct packet_info *);
35639-};
35640+} __do_const;
35641
35642 static struct asender_cmd asender_tbl[] = {
35643 [P_PING] = { 0, got_Ping },
35644diff --git a/drivers/block/loop.c b/drivers/block/loop.c
35645index d92d50f..a7e9d97 100644
35646--- a/drivers/block/loop.c
35647+++ b/drivers/block/loop.c
35648@@ -232,7 +232,7 @@ static int __do_lo_send_write(struct file *file,
35649
35650 file_start_write(file);
35651 set_fs(get_ds());
35652- bw = file->f_op->write(file, buf, len, &pos);
35653+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
35654 set_fs(old_fs);
35655 file_end_write(file);
35656 if (likely(bw == len))
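
__do_lo_send_write() calls file->f_op->write() with a kernel buffer while the address limit is widened by set_fs(get_ds()), so the __force_user cast (roughly __force __user) only tells sparse the mismatch is intentional; no behavior changes. The surrounding idiom, sketched for this 3.10-era kernel (kbuf, len, and pos are illustrative locals):

/* Sketch: writing a kernel buffer through a ->write() that expects
 * a __user pointer (the historical pre-kernel_write() idiom). */
mm_segment_t old_fs = get_fs();
ssize_t bw;

set_fs(get_ds());	/* usercopy checks now accept kernel addresses */
bw = file->f_op->write(file, (const char __force __user *)kbuf, len, &pos);
set_fs(old_fs);		/* always restore the previous limit */
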
35657diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
35658index f5d0ea1..c62380a 100644
35659--- a/drivers/block/pktcdvd.c
35660+++ b/drivers/block/pktcdvd.c
35661@@ -84,7 +84,7 @@
35662 #define MAX_SPEED 0xffff
35663
35664 #define ZONE(sector, pd) (((sector) + (pd)->offset) & \
35665- ~(sector_t)((pd)->settings.size - 1))
35666+ ~(sector_t)((pd)->settings.size - 1UL))
35667
35668 static DEFINE_MUTEX(pktcdvd_mutex);
35669 static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
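
The 1UL in the ZONE() mask widens the subtraction to unsigned long before the sector_t cast, so the complement is computed at full width; with the old 32-bit arithmetic, a degenerate settings.size of 0 would produce the mask 0xffffffff00000000 on 64-bit instead of 0. Doing the arithmetic at the final width is also the shape the size_overflow instrumentation accepts (an inference; the patch does not say so here). A userspace demonstration of the promotion difference, assuming a 32-bit u32 and 64-bit unsigned long:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t size = 0;	/* degenerate packet size, for illustration only */
	uint64_t narrow = ~(uint64_t)(size - 1);	/* 32-bit subtract, then widen */
	uint64_t wide   = ~(uint64_t)(size - 1UL);	/* 64-bit subtract throughout */

	printf("narrow mask: %#llx\n", (unsigned long long)narrow); /* 0xffffffff00000000 */
	printf("wide   mask: %#llx\n", (unsigned long long)wide);   /* 0 */
	return 0;
}
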
35670diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
35671index 8a3aff7..d7538c2 100644
35672--- a/drivers/cdrom/cdrom.c
35673+++ b/drivers/cdrom/cdrom.c
35674@@ -416,7 +416,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
35675 ENSURE(reset, CDC_RESET);
35676 ENSURE(generic_packet, CDC_GENERIC_PACKET);
35677 cdi->mc_flags = 0;
35678- cdo->n_minors = 0;
35679 cdi->options = CDO_USE_FFLAGS;
35680
35681 if (autoclose==1 && CDROM_CAN(CDC_CLOSE_TRAY))
35682@@ -436,8 +435,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
35683 else
35684 cdi->cdda_method = CDDA_OLD;
35685
35686- if (!cdo->generic_packet)
35687- cdo->generic_packet = cdrom_dummy_generic_packet;
35688+ if (!cdo->generic_packet) {
35689+ pax_open_kernel();
35690+ *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
35691+ pax_close_kernel();
35692+ }
35693
35694 cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
35695 mutex_lock(&cdrom_mutex);
35696@@ -458,7 +460,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
35697 if (cdi->exit)
35698 cdi->exit(cdi);
35699
35700- cdi->ops->n_minors--;
35701 cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
35702 }
35703
35704@@ -2107,7 +2108,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
35705 */
35706 nr = nframes;
35707 do {
35708- cgc.buffer = kmalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
35709+ cgc.buffer = kzalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
35710 if (cgc.buffer)
35711 break;
35712
35713@@ -3429,7 +3430,7 @@ static int cdrom_print_info(const char *header, int val, char *info,
35714 struct cdrom_device_info *cdi;
35715 int ret;
35716
35717- ret = scnprintf(info + *pos, max_size - *pos, header);
35718+ ret = scnprintf(info + *pos, max_size - *pos, "%s", header);
35719 if (!ret)
35720 return 1;
35721
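
Under the constify/KERNEXEC changes, ops tables such as cdrom_device_ops live in read-only memory, which explains both halves of this diff: the late fixup of generic_packet must be bracketed by pax_open_kernel()/pax_close_kernel(), which lift and restore write protection, with *(void **)& stripping const at the assignment site; and the runtime writes to n_minors are simply deleted, here and in the gdrom.c hunk below, because a shared const table can never be written. The recurring idiom, reduced:

/* Sketch of the rodata-patching idiom used throughout this patch;
 * on non-PaX kernels the open/close helpers are no-ops. */
if (!cdo->generic_packet) {
	pax_open_kernel();		/* allow writes to const data */
	*(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
	pax_close_kernel();		/* re-protect immediately */
}
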
35722diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
35723index 4afcb65..a68a32d 100644
35724--- a/drivers/cdrom/gdrom.c
35725+++ b/drivers/cdrom/gdrom.c
35726@@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = {
35727 .audio_ioctl = gdrom_audio_ioctl,
35728 .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
35729 CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
35730- .n_minors = 1,
35731 };
35732
35733 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
35734diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
35735index 3bb6fa3..34013fb 100644
35736--- a/drivers/char/Kconfig
35737+++ b/drivers/char/Kconfig
35738@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
35739
35740 config DEVKMEM
35741 bool "/dev/kmem virtual device support"
35742- default y
35743+ default n
35744+ depends on !GRKERNSEC_KMEM
35745 help
35746 Say Y here if you want to support the /dev/kmem device. The
35747 /dev/kmem device is rarely used, but can be used for certain
35748@@ -582,6 +583,7 @@ config DEVPORT
35749 bool
35750 depends on !M68K
35751 depends on ISA || PCI
35752+ depends on !GRKERNSEC_KMEM
35753 default y
35754
35755 source "drivers/s390/char/Kconfig"
35756diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
35757index a48e05b..6bac831 100644
35758--- a/drivers/char/agp/compat_ioctl.c
35759+++ b/drivers/char/agp/compat_ioctl.c
35760@@ -108,7 +108,7 @@ static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user
35761 return -ENOMEM;
35762 }
35763
35764- if (copy_from_user(usegment, (void __user *) ureserve.seg_list,
35765+ if (copy_from_user(usegment, (void __force_user *) ureserve.seg_list,
35766 sizeof(*usegment) * ureserve.seg_count)) {
35767 kfree(usegment);
35768 kfree(ksegment);
35769diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
35770index 2e04433..771f2cc 100644
35771--- a/drivers/char/agp/frontend.c
35772+++ b/drivers/char/agp/frontend.c
35773@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
35774 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
35775 return -EFAULT;
35776
35777- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
35778+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
35779 return -EFAULT;
35780
35781 client = agp_find_client_by_pid(reserve.pid);
35782@@ -847,7 +847,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
35783 if (segment == NULL)
35784 return -ENOMEM;
35785
35786- if (copy_from_user(segment, (void __user *) reserve.seg_list,
35787+ if (copy_from_user(segment, (void __force_user *) reserve.seg_list,
35788 sizeof(struct agp_segment) * reserve.seg_count)) {
35789 kfree(segment);
35790 return -EFAULT;
35791diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
35792index 4f94375..413694e 100644
35793--- a/drivers/char/genrtc.c
35794+++ b/drivers/char/genrtc.c
35795@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
35796 switch (cmd) {
35797
35798 case RTC_PLL_GET:
35799+ memset(&pll, 0, sizeof(pll));
35800 if (get_rtc_pll(&pll))
35801 return -EINVAL;
35802 else
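
The added memset() closes a kernel stack infoleak: gen_rtc_ioctl() copies the whole pll struct back to userspace, so any field or padding byte that get_rtc_pll() leaves untouched would otherwise carry stale stack contents out. The general shape for ioctl reply structs (argp is an illustrative user pointer):

/* Sketch: zero a reply struct before partially filling it, so padding
 * and untouched members never leak kernel stack data to userspace. */
struct rtc_pll_info pll;

memset(&pll, 0, sizeof(pll));		/* clears fields *and* padding */
if (get_rtc_pll(&pll))
	return -EINVAL;
if (copy_to_user(argp, &pll, sizeof(pll)))
	return -EFAULT;
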
35803diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
35804index d784650..e8bfd69 100644
35805--- a/drivers/char/hpet.c
35806+++ b/drivers/char/hpet.c
35807@@ -559,7 +559,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
35808 }
35809
35810 static int
35811-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
35812+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
35813 struct hpet_info *info)
35814 {
35815 struct hpet_timer __iomem *timer;
35816diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c
35817index 86fe45c..c0ea948 100644
35818--- a/drivers/char/hw_random/intel-rng.c
35819+++ b/drivers/char/hw_random/intel-rng.c
35820@@ -314,7 +314,7 @@ PFX "RNG, try using the 'no_fwh_detect' option.\n";
35821
35822 if (no_fwh_detect)
35823 return -ENODEV;
35824- printk(warning);
35825+ printk("%s", warning);
35826 return -EBUSY;
35827 }
35828
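
printk(warning) passes a variable as the format string; that is safe only as long as the string never contains '%', and it trips -Wformat-security, so the patch routes every such call through a constant "%s" format — the same fix applied to scnprintf() in cdrom.c above and to dev_set_name() in devfreq.c further down. In miniature:

const char *msg = get_message();	/* hypothetical; may contain '%' */

printk(msg);		/* fragile: msg is parsed as a format string */
printk("%s", msg);	/* safe: msg is printed verbatim */
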
35829diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
35830index 4445fa1..7c6de37 100644
35831--- a/drivers/char/ipmi/ipmi_msghandler.c
35832+++ b/drivers/char/ipmi/ipmi_msghandler.c
35833@@ -420,7 +420,7 @@ struct ipmi_smi {
35834 struct proc_dir_entry *proc_dir;
35835 char proc_dir_name[10];
35836
35837- atomic_t stats[IPMI_NUM_STATS];
35838+ atomic_unchecked_t stats[IPMI_NUM_STATS];
35839
35840 /*
35841 * run_to_completion duplicate of smb_info, smi_info
35842@@ -453,9 +453,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
35843
35844
35845 #define ipmi_inc_stat(intf, stat) \
35846- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
35847+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
35848 #define ipmi_get_stat(intf, stat) \
35849- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
35850+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
35851
35852 static int is_lan_addr(struct ipmi_addr *addr)
35853 {
35854@@ -2883,7 +2883,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
35855 INIT_LIST_HEAD(&intf->cmd_rcvrs);
35856 init_waitqueue_head(&intf->waitq);
35857 for (i = 0; i < IPMI_NUM_STATS; i++)
35858- atomic_set(&intf->stats[i], 0);
35859+ atomic_set_unchecked(&intf->stats[i], 0);
35860
35861 intf->proc_dir = NULL;
35862
35863diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
35864index af4b23f..79806fc 100644
35865--- a/drivers/char/ipmi/ipmi_si_intf.c
35866+++ b/drivers/char/ipmi/ipmi_si_intf.c
35867@@ -275,7 +275,7 @@ struct smi_info {
35868 unsigned char slave_addr;
35869
35870 /* Counters and things for the proc filesystem. */
35871- atomic_t stats[SI_NUM_STATS];
35872+ atomic_unchecked_t stats[SI_NUM_STATS];
35873
35874 struct task_struct *thread;
35875
35876@@ -284,9 +284,9 @@ struct smi_info {
35877 };
35878
35879 #define smi_inc_stat(smi, stat) \
35880- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
35881+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
35882 #define smi_get_stat(smi, stat) \
35883- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
35884+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
35885
35886 #define SI_MAX_PARMS 4
35887
35888@@ -3258,7 +3258,7 @@ static int try_smi_init(struct smi_info *new_smi)
35889 atomic_set(&new_smi->req_events, 0);
35890 new_smi->run_to_completion = 0;
35891 for (i = 0; i < SI_NUM_STATS; i++)
35892- atomic_set(&new_smi->stats[i], 0);
35893+ atomic_set_unchecked(&new_smi->stats[i], 0);
35894
35895 new_smi->interrupt_disabled = 1;
35896 atomic_set(&new_smi->stop_operation, 0);
35897diff --git a/drivers/char/mem.c b/drivers/char/mem.c
35898index 1ccbe94..6ad651a 100644
35899--- a/drivers/char/mem.c
35900+++ b/drivers/char/mem.c
35901@@ -18,6 +18,7 @@
35902 #include <linux/raw.h>
35903 #include <linux/tty.h>
35904 #include <linux/capability.h>
35905+#include <linux/security.h>
35906 #include <linux/ptrace.h>
35907 #include <linux/device.h>
35908 #include <linux/highmem.h>
35909@@ -38,6 +39,10 @@
35910
35911 #define DEVPORT_MINOR 4
35912
35913+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
35914+extern const struct file_operations grsec_fops;
35915+#endif
35916+
35917 static inline unsigned long size_inside_page(unsigned long start,
35918 unsigned long size)
35919 {
35920@@ -69,9 +74,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
35921
35922 while (cursor < to) {
35923 if (!devmem_is_allowed(pfn)) {
35924+#ifdef CONFIG_GRKERNSEC_KMEM
35925+ gr_handle_mem_readwrite(from, to);
35926+#else
35927 printk(KERN_INFO
35928 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
35929 current->comm, from, to);
35930+#endif
35931 return 0;
35932 }
35933 cursor += PAGE_SIZE;
35934@@ -79,6 +88,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
35935 }
35936 return 1;
35937 }
35938+#elif defined(CONFIG_GRKERNSEC_KMEM)
35939+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
35940+{
35941+ return 0;
35942+}
35943 #else
35944 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
35945 {
35946@@ -121,6 +135,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
35947
35948 while (count > 0) {
35949 unsigned long remaining;
35950+ char *temp;
35951
35952 sz = size_inside_page(p, count);
35953
35954@@ -136,7 +151,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
35955 if (!ptr)
35956 return -EFAULT;
35957
35958- remaining = copy_to_user(buf, ptr, sz);
35959+#ifdef CONFIG_PAX_USERCOPY
35960+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
35961+ if (!temp) {
35962+ unxlate_dev_mem_ptr(p, ptr);
35963+ return -ENOMEM;
35964+ }
35965+ memcpy(temp, ptr, sz);
35966+#else
35967+ temp = ptr;
35968+#endif
35969+
35970+ remaining = copy_to_user(buf, temp, sz);
35971+
35972+#ifdef CONFIG_PAX_USERCOPY
35973+ kfree(temp);
35974+#endif
35975+
35976 unxlate_dev_mem_ptr(p, ptr);
35977 if (remaining)
35978 return -EFAULT;
35979@@ -379,7 +410,7 @@ static ssize_t read_oldmem(struct file *file, char __user *buf,
35980 else
35981 csize = count;
35982
35983- rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
35984+ rc = copy_oldmem_page(pfn, (char __force_kernel *)buf, csize, offset, 1);
35985 if (rc < 0)
35986 return rc;
35987 buf += csize;
35988@@ -399,9 +430,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
35989 size_t count, loff_t *ppos)
35990 {
35991 unsigned long p = *ppos;
35992- ssize_t low_count, read, sz;
35993+ ssize_t low_count, read, sz, err = 0;
35994 char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
35995- int err = 0;
35996
35997 read = 0;
35998 if (p < (unsigned long) high_memory) {
35999@@ -423,6 +453,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
36000 }
36001 #endif
36002 while (low_count > 0) {
36003+ char *temp;
36004+
36005 sz = size_inside_page(p, low_count);
36006
36007 /*
36008@@ -432,7 +464,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
36009 */
36010 kbuf = xlate_dev_kmem_ptr((char *)p);
36011
36012- if (copy_to_user(buf, kbuf, sz))
36013+#ifdef CONFIG_PAX_USERCOPY
36014+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
36015+ if (!temp)
36016+ return -ENOMEM;
36017+ memcpy(temp, kbuf, sz);
36018+#else
36019+ temp = kbuf;
36020+#endif
36021+
36022+ err = copy_to_user(buf, temp, sz);
36023+
36024+#ifdef CONFIG_PAX_USERCOPY
36025+ kfree(temp);
36026+#endif
36027+
36028+ if (err)
36029 return -EFAULT;
36030 buf += sz;
36031 p += sz;
36032@@ -869,6 +916,9 @@ static const struct memdev {
36033 #ifdef CONFIG_CRASH_DUMP
36034 [12] = { "oldmem", 0, &oldmem_fops, NULL },
36035 #endif
36036+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
36037+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
36038+#endif
36039 };
36040
36041 static int memory_open(struct inode *inode, struct file *filp)
36042@@ -940,7 +990,7 @@ static int __init chr_dev_init(void)
36043 continue;
36044
36045 device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
36046- NULL, devlist[minor].name);
36047+ NULL, "%s", devlist[minor].name);
36048 }
36049
36050 return tty_init();
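
PAX_USERCOPY validates copy_to_user() sources against the bounds of the slab object they point into, and /dev/mem's ptr comes from xlate_dev_mem_ptr()/ioremap with no slab object behind it, so the patch bounces the data through a kmalloc(sz, GFP_KERNEL|GFP_USERCOPY) buffer the checker can vet; GFP_USERCOPY marks the allocation as a sanctioned usercopy source. The read_mem()/read_kmem() additions, reduced to one hypothetical helper:

/* Sketch of the USERCOPY bounce-buffer idiom added to read_mem()/read_kmem(). */
static ssize_t copy_region_to_user(char __user *buf, const void *ptr, size_t sz)
{
	char *temp;
	unsigned long left;

#ifdef CONFIG_PAX_USERCOPY
	temp = kmalloc(sz, GFP_KERNEL | GFP_USERCOPY);
	if (!temp)
		return -ENOMEM;
	memcpy(temp, ptr, sz);		/* stage into a checkable object */
#else
	temp = (char *)ptr;		/* no bounce needed without the checker */
#endif

	left = copy_to_user(buf, temp, sz);

#ifdef CONFIG_PAX_USERCOPY
	kfree(temp);
#endif
	return left ? -EFAULT : 0;
}
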
36051diff --git a/drivers/char/mwave/tp3780i.c b/drivers/char/mwave/tp3780i.c
36052index c689697..04e6d6a2 100644
36053--- a/drivers/char/mwave/tp3780i.c
36054+++ b/drivers/char/mwave/tp3780i.c
36055@@ -479,6 +479,7 @@ int tp3780I_QueryAbilities(THINKPAD_BD_DATA * pBDData, MW_ABILITIES * pAbilities
36056 PRINTK_2(TRACE_TP3780I,
36057 "tp3780i::tp3780I_QueryAbilities entry pBDData %p\n", pBDData);
36058
36059+ memset(pAbilities, 0, sizeof(*pAbilities));
36060 /* fill out standard constant fields */
36061 pAbilities->instr_per_sec = pBDData->rDspSettings.uIps;
36062 pAbilities->data_size = pBDData->rDspSettings.uDStoreSize;
36063diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
36064index 9df78e2..01ba9ae 100644
36065--- a/drivers/char/nvram.c
36066+++ b/drivers/char/nvram.c
36067@@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
36068
36069 spin_unlock_irq(&rtc_lock);
36070
36071- if (copy_to_user(buf, contents, tmp - contents))
36072+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
36073 return -EFAULT;
36074
36075 *ppos = i;
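
The extra length test in front of copy_to_user() here, like the i > sizeof(tmp) test added in random.c below, is a fail-closed guard: if earlier pointer arithmetic ever produced an oversized count, the read returns -EFAULT instead of disclosing memory past the stack buffer. Reduced form (contents mirrors nvram_read()'s local buffer; fill_contents is hypothetical):

/* Sketch: distrust a computed length even when it "cannot" exceed
 * the source buffer; fail closed instead of leaking. */
char contents[NVRAM_BYTES];
char *tmp = fill_contents(contents);	/* hypothetical producer */
size_t n = tmp - contents;

if (n > sizeof(contents) || copy_to_user(buf, contents, n))
	return -EFAULT;
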
36076diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
36077index 5c5cc00..ac9edb7 100644
36078--- a/drivers/char/pcmcia/synclink_cs.c
36079+++ b/drivers/char/pcmcia/synclink_cs.c
36080@@ -2345,9 +2345,9 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
36081
36082 if (debug_level >= DEBUG_LEVEL_INFO)
36083 printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
36084- __FILE__, __LINE__, info->device_name, port->count);
36085+ __FILE__, __LINE__, info->device_name, atomic_read(&port->count));
36086
36087- WARN_ON(!port->count);
36088+ WARN_ON(!atomic_read(&port->count));
36089
36090 if (tty_port_close_start(port, tty, filp) == 0)
36091 goto cleanup;
36092@@ -2365,7 +2365,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
36093 cleanup:
36094 if (debug_level >= DEBUG_LEVEL_INFO)
36095 printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__, __LINE__,
36096- tty->driver->name, port->count);
36097+ tty->driver->name, atomic_read(&port->count));
36098 }
36099
36100 /* Wait until the transmitter is empty.
36101@@ -2507,7 +2507,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
36102
36103 if (debug_level >= DEBUG_LEVEL_INFO)
36104 printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
36105- __FILE__, __LINE__, tty->driver->name, port->count);
36106+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
36107
36108 /* If port is closing, signal caller to try again */
36109 if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING){
36110@@ -2527,11 +2527,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
36111 goto cleanup;
36112 }
36113 spin_lock(&port->lock);
36114- port->count++;
36115+ atomic_inc(&port->count);
36116 spin_unlock(&port->lock);
36117 spin_unlock_irqrestore(&info->netlock, flags);
36118
36119- if (port->count == 1) {
36120+ if (atomic_read(&port->count) == 1) {
36121 /* 1st open on this device, init hardware */
36122 retval = startup(info, tty);
36123 if (retval < 0)
36124@@ -3920,7 +3920,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
36125 unsigned short new_crctype;
36126
36127 /* return error if TTY interface open */
36128- if (info->port.count)
36129+ if (atomic_read(&info->port.count))
36130 return -EBUSY;
36131
36132 switch (encoding)
36133@@ -4024,7 +4024,7 @@ static int hdlcdev_open(struct net_device *dev)
36134
36135 /* arbitrate between network and tty opens */
36136 spin_lock_irqsave(&info->netlock, flags);
36137- if (info->port.count != 0 || info->netcount != 0) {
36138+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
36139 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
36140 spin_unlock_irqrestore(&info->netlock, flags);
36141 return -EBUSY;
36142@@ -4114,7 +4114,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
36143 printk("%s:hdlcdev_ioctl(%s)\n", __FILE__, dev->name);
36144
36145 /* return error if TTY interface open */
36146- if (info->port.count)
36147+ if (atomic_read(&info->port.count))
36148 return -EBUSY;
36149
36150 if (cmd != SIOCWANDEV)
36151diff --git a/drivers/char/random.c b/drivers/char/random.c
36152index 35487e8..dac8bd1 100644
36153--- a/drivers/char/random.c
36154+++ b/drivers/char/random.c
36155@@ -272,8 +272,13 @@
36156 /*
36157 * Configuration information
36158 */
36159+#ifdef CONFIG_GRKERNSEC_RANDNET
36160+#define INPUT_POOL_WORDS 512
36161+#define OUTPUT_POOL_WORDS 128
36162+#else
36163 #define INPUT_POOL_WORDS 128
36164 #define OUTPUT_POOL_WORDS 32
36165+#endif
36166 #define SEC_XFER_SIZE 512
36167 #define EXTRACT_SIZE 10
36168
36169@@ -313,10 +318,17 @@ static struct poolinfo {
36170 int poolwords;
36171 int tap1, tap2, tap3, tap4, tap5;
36172 } poolinfo_table[] = {
36173+#ifdef CONFIG_GRKERNSEC_RANDNET
36174+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
36175+ { 512, 411, 308, 208, 104, 1 },
36176+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
36177+ { 128, 103, 76, 51, 25, 1 },
36178+#else
36179 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
36180 { 128, 103, 76, 51, 25, 1 },
36181 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
36182 { 32, 26, 20, 14, 7, 1 },
36183+#endif
36184 #if 0
36185 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
36186 { 2048, 1638, 1231, 819, 411, 1 },
36187@@ -524,8 +536,8 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
36188 input_rotate += i ? 7 : 14;
36189 }
36190
36191- ACCESS_ONCE(r->input_rotate) = input_rotate;
36192- ACCESS_ONCE(r->add_ptr) = i;
36193+ ACCESS_ONCE_RW(r->input_rotate) = input_rotate;
36194+ ACCESS_ONCE_RW(r->add_ptr) = i;
36195 smp_wmb();
36196
36197 if (out)
36198@@ -1032,7 +1044,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
36199
36200 extract_buf(r, tmp);
36201 i = min_t(int, nbytes, EXTRACT_SIZE);
36202- if (copy_to_user(buf, tmp, i)) {
36203+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
36204 ret = -EFAULT;
36205 break;
36206 }
36207@@ -1368,7 +1380,7 @@ EXPORT_SYMBOL(generate_random_uuid);
36208 #include <linux/sysctl.h>
36209
36210 static int min_read_thresh = 8, min_write_thresh;
36211-static int max_read_thresh = INPUT_POOL_WORDS * 32;
36212+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
36213 static int max_write_thresh = INPUT_POOL_WORDS * 32;
36214 static char sysctl_bootid[16];
36215
36216@@ -1384,7 +1396,7 @@ static char sysctl_bootid[16];
36217 static int proc_do_uuid(ctl_table *table, int write,
36218 void __user *buffer, size_t *lenp, loff_t *ppos)
36219 {
36220- ctl_table fake_table;
36221+ ctl_table_no_const fake_table;
36222 unsigned char buf[64], tmp_uuid[16], *uuid;
36223
36224 uuid = table->data;
36225diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
36226index bf2349db..5456d53 100644
36227--- a/drivers/char/sonypi.c
36228+++ b/drivers/char/sonypi.c
36229@@ -54,6 +54,7 @@
36230
36231 #include <asm/uaccess.h>
36232 #include <asm/io.h>
36233+#include <asm/local.h>
36234
36235 #include <linux/sonypi.h>
36236
36237@@ -490,7 +491,7 @@ static struct sonypi_device {
36238 spinlock_t fifo_lock;
36239 wait_queue_head_t fifo_proc_list;
36240 struct fasync_struct *fifo_async;
36241- int open_count;
36242+ local_t open_count;
36243 int model;
36244 struct input_dev *input_jog_dev;
36245 struct input_dev *input_key_dev;
36246@@ -897,7 +898,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
36247 static int sonypi_misc_release(struct inode *inode, struct file *file)
36248 {
36249 mutex_lock(&sonypi_device.lock);
36250- sonypi_device.open_count--;
36251+ local_dec(&sonypi_device.open_count);
36252 mutex_unlock(&sonypi_device.lock);
36253 return 0;
36254 }
36255@@ -906,9 +907,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
36256 {
36257 mutex_lock(&sonypi_device.lock);
36258 /* Flush input queue on first open */
36259- if (!sonypi_device.open_count)
36260+ if (!local_read(&sonypi_device.open_count))
36261 kfifo_reset(&sonypi_device.fifo);
36262- sonypi_device.open_count++;
36263+ local_inc(&sonypi_device.open_count);
36264 mutex_unlock(&sonypi_device.lock);
36265
36266 return 0;
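
sonypi's open_count is only ever modified under sonypi_device.lock, so moving it from int to local_t is not about cross-CPU races; like the atomic_unchecked_t conversions earlier, it routes updates through local_inc()/local_dec(), which the overflow-detection instrumentation leaves alone. Usage in miniature (first_open_setup is hypothetical):

#include <asm/local.h>

static local_t open_count = LOCAL_INIT(0);

static void example_open(void)		/* caller holds the device mutex */
{
	local_inc(&open_count);
	if (local_read(&open_count) == 1)
		first_open_setup();	/* e.g. flush queues on first open */
}

static void example_release(void)	/* caller holds the device mutex */
{
	local_dec(&open_count);
}
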
36267diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
36268index 64420b3..5c40b56 100644
36269--- a/drivers/char/tpm/tpm_acpi.c
36270+++ b/drivers/char/tpm/tpm_acpi.c
36271@@ -98,11 +98,12 @@ int read_log(struct tpm_bios_log *log)
36272 virt = acpi_os_map_memory(start, len);
36273 if (!virt) {
36274 kfree(log->bios_event_log);
36275+ log->bios_event_log = NULL;
36276 printk("%s: ERROR - Unable to map memory\n", __func__);
36277 return -EIO;
36278 }
36279
36280- memcpy_fromio(log->bios_event_log, virt, len);
36281+ memcpy_fromio(log->bios_event_log, (const char __force_kernel *)virt, len);
36282
36283 acpi_os_unmap_memory(virt, len);
36284 return 0;
36285diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
36286index 84ddc55..1d32f1e 100644
36287--- a/drivers/char/tpm/tpm_eventlog.c
36288+++ b/drivers/char/tpm/tpm_eventlog.c
36289@@ -95,7 +95,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
36290 event = addr;
36291
36292 if ((event->event_type == 0 && event->event_size == 0) ||
36293- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
36294+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
36295 return NULL;
36296
36297 return addr;
36298@@ -120,7 +120,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
36299 return NULL;
36300
36301 if ((event->event_type == 0 && event->event_size == 0) ||
36302- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
36303+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
36304 return NULL;
36305
36306 (*pos)++;
36307@@ -213,7 +213,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
36308 int i;
36309
36310 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
36311- seq_putc(m, data[i]);
36312+ if (!seq_putc(m, data[i]))
36313+ return -EFAULT;
36314
36315 return 0;
36316 }
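
The rewritten checks in tpm_eventlog.c are the standard overflow-safe form of a bounds test: addr + sizeof(struct tcpa_event) + event_size can wrap when a corrupt log supplies a huge event_size, making the bogus event compare as in-bounds, whereas event_size >= limit - addr - sizeof(...) keeps all arithmetic on quantities already known to be small. The transformation in isolation:

/* Sketch: the same bounds test before and after the overflow-safe
 * rewrite; 'addr' points at a parsed event inside [base, limit). */
int unsafe_check(char *addr, char *limit, size_t hdr, size_t event_size)
{
	/* addr + hdr + event_size may wrap and compare as "small" */
	return (addr + hdr + event_size) >= limit;
}

int safe_check(char *addr, char *limit, size_t hdr, size_t event_size)
{
	/* limit - addr - hdr cannot wrap: addr + hdr is known in-bounds */
	return event_size >= (size_t)(limit - addr - hdr);
}
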
36317diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
36318index fc45567..fa2a590 100644
36319--- a/drivers/char/virtio_console.c
36320+++ b/drivers/char/virtio_console.c
36321@@ -682,7 +682,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
36322 if (to_user) {
36323 ssize_t ret;
36324
36325- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
36326+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
36327 if (ret)
36328 return -EFAULT;
36329 } else {
36330@@ -785,7 +785,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
36331 if (!port_has_data(port) && !port->host_connected)
36332 return 0;
36333
36334- return fill_readbuf(port, ubuf, count, true);
36335+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
36336 }
36337
36338 static int wait_port_writable(struct port *port, bool nonblock)
36339diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
36340index a33f46f..a720eed 100644
36341--- a/drivers/clk/clk-composite.c
36342+++ b/drivers/clk/clk-composite.c
36343@@ -122,7 +122,7 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
36344 struct clk *clk;
36345 struct clk_init_data init;
36346 struct clk_composite *composite;
36347- struct clk_ops *clk_composite_ops;
36348+ clk_ops_no_const *clk_composite_ops;
36349
36350 composite = kzalloc(sizeof(*composite), GFP_KERNEL);
36351 if (!composite) {
36352diff --git a/drivers/clk/socfpga/clk.c b/drivers/clk/socfpga/clk.c
36353index bd11315..7f87098 100644
36354--- a/drivers/clk/socfpga/clk.c
36355+++ b/drivers/clk/socfpga/clk.c
36356@@ -22,6 +22,7 @@
36357 #include <linux/clk-provider.h>
36358 #include <linux/io.h>
36359 #include <linux/of.h>
36360+#include <asm/pgtable.h>
36361
36362 /* Clock Manager offsets */
36363 #define CLKMGR_CTRL 0x0
36364@@ -135,8 +136,10 @@ static __init struct clk *socfpga_clk_init(struct device_node *node,
36365 if (strcmp(clk_name, "main_pll") || strcmp(clk_name, "periph_pll") ||
36366 strcmp(clk_name, "sdram_pll")) {
36367 socfpga_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA;
36368- clk_pll_ops.enable = clk_gate_ops.enable;
36369- clk_pll_ops.disable = clk_gate_ops.disable;
36370+ pax_open_kernel();
36371+ *(void **)&clk_pll_ops.enable = clk_gate_ops.enable;
36372+ *(void **)&clk_pll_ops.disable = clk_gate_ops.disable;
36373+ pax_close_kernel();
36374 }
36375
36376 clk = clk_register(NULL, &socfpga_clk->hw.hw);
36377diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
36378index a2b2541..bc1e7ff 100644
36379--- a/drivers/clocksource/arm_arch_timer.c
36380+++ b/drivers/clocksource/arm_arch_timer.c
36381@@ -264,7 +264,7 @@ static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
36382 return NOTIFY_OK;
36383 }
36384
36385-static struct notifier_block arch_timer_cpu_nb __cpuinitdata = {
36386+static struct notifier_block arch_timer_cpu_nb = {
36387 .notifier_call = arch_timer_cpu_notify,
36388 };
36389
36390diff --git a/drivers/clocksource/bcm_kona_timer.c b/drivers/clocksource/bcm_kona_timer.c
36391index 350f493..489479e 100644
36392--- a/drivers/clocksource/bcm_kona_timer.c
36393+++ b/drivers/clocksource/bcm_kona_timer.c
36394@@ -199,7 +199,7 @@ static struct irqaction kona_timer_irq = {
36395 .handler = kona_timer_interrupt,
36396 };
36397
36398-static void __init kona_timer_init(void)
36399+static void __init kona_timer_init(struct device_node *np)
36400 {
36401 kona_timers_init();
36402 kona_timer_clockevents_init();
36403diff --git a/drivers/clocksource/metag_generic.c b/drivers/clocksource/metag_generic.c
36404index ade7513..069445f 100644
36405--- a/drivers/clocksource/metag_generic.c
36406+++ b/drivers/clocksource/metag_generic.c
36407@@ -169,7 +169,7 @@ static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
36408 return NOTIFY_OK;
36409 }
36410
36411-static struct notifier_block __cpuinitdata arch_timer_cpu_nb = {
36412+static struct notifier_block arch_timer_cpu_nb = {
36413 .notifier_call = arch_timer_cpu_notify,
36414 };
36415
36416diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
36417index edc089e..bc7c0bc 100644
36418--- a/drivers/cpufreq/acpi-cpufreq.c
36419+++ b/drivers/cpufreq/acpi-cpufreq.c
36420@@ -172,7 +172,7 @@ static ssize_t show_global_boost(struct kobject *kobj,
36421 return sprintf(buf, "%u\n", boost_enabled);
36422 }
36423
36424-static struct global_attr global_boost = __ATTR(boost, 0644,
36425+static global_attr_no_const global_boost = __ATTR(boost, 0644,
36426 show_global_boost,
36427 store_global_boost);
36428
36429@@ -705,8 +705,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
36430 data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
36431 per_cpu(acfreq_data, cpu) = data;
36432
36433- if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
36434- acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
36435+ if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
36436+ pax_open_kernel();
36437+ *(u8 *)&acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
36438+ pax_close_kernel();
36439+ }
36440
36441 result = acpi_processor_register_performance(data->acpi_data, cpu);
36442 if (result)
36443@@ -832,7 +835,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
36444 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
36445 break;
36446 case ACPI_ADR_SPACE_FIXED_HARDWARE:
36447- acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
36448+ pax_open_kernel();
36449+ *(void **)&acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
36450+ pax_close_kernel();
36451 policy->cur = get_cur_freq_on_cpu(cpu);
36452 break;
36453 default:
36454@@ -843,8 +848,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
36455 acpi_processor_notify_smm(THIS_MODULE);
36456
36457 /* Check for APERF/MPERF support in hardware */
36458- if (boot_cpu_has(X86_FEATURE_APERFMPERF))
36459- acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;
36460+ if (boot_cpu_has(X86_FEATURE_APERFMPERF)) {
36461+ pax_open_kernel();
36462+ *(void **)&acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;
36463+ pax_close_kernel();
36464+ }
36465
36466 pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
36467 for (i = 0; i < perf->state_count; i++)
36468diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
36469index 6485547..477033e 100644
36470--- a/drivers/cpufreq/cpufreq.c
36471+++ b/drivers/cpufreq/cpufreq.c
36472@@ -1854,7 +1854,7 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
36473 return NOTIFY_OK;
36474 }
36475
36476-static struct notifier_block __refdata cpufreq_cpu_notifier = {
36477+static struct notifier_block cpufreq_cpu_notifier = {
36478 .notifier_call = cpufreq_cpu_callback,
36479 };
36480
36481@@ -1886,8 +1886,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
36482
36483 pr_debug("trying to register driver %s\n", driver_data->name);
36484
36485- if (driver_data->setpolicy)
36486- driver_data->flags |= CPUFREQ_CONST_LOOPS;
36487+ if (driver_data->setpolicy) {
36488+ pax_open_kernel();
36489+ *(u8 *)&driver_data->flags |= CPUFREQ_CONST_LOOPS;
36490+ pax_close_kernel();
36491+ }
36492
36493 write_lock_irqsave(&cpufreq_driver_lock, flags);
36494 if (cpufreq_driver) {
36495diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
36496index a86ff72..aad2b03 100644
36497--- a/drivers/cpufreq/cpufreq_governor.c
36498+++ b/drivers/cpufreq/cpufreq_governor.c
36499@@ -235,7 +235,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
36500 struct dbs_data *dbs_data;
36501 struct od_cpu_dbs_info_s *od_dbs_info = NULL;
36502 struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
36503- struct od_ops *od_ops = NULL;
36504+ const struct od_ops *od_ops = NULL;
36505 struct od_dbs_tuners *od_tuners = NULL;
36506 struct cs_dbs_tuners *cs_tuners = NULL;
36507 struct cpu_dbs_common_info *cpu_cdbs;
36508@@ -298,7 +298,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
36509
36510 if ((cdata->governor == GOV_CONSERVATIVE) &&
36511 (!policy->governor->initialized)) {
36512- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
36513+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
36514
36515 cpufreq_register_notifier(cs_ops->notifier_block,
36516 CPUFREQ_TRANSITION_NOTIFIER);
36517@@ -315,7 +315,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
36518
36519 if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
36520 (policy->governor->initialized == 1)) {
36521- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
36522+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
36523
36524 cpufreq_unregister_notifier(cs_ops->notifier_block,
36525 CPUFREQ_TRANSITION_NOTIFIER);
36526diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
36527index 0d9e6be..461fd3b 100644
36528--- a/drivers/cpufreq/cpufreq_governor.h
36529+++ b/drivers/cpufreq/cpufreq_governor.h
36530@@ -204,7 +204,7 @@ struct common_dbs_data {
36531 void (*exit)(struct dbs_data *dbs_data);
36532
36533 /* Governor specific ops, see below */
36534- void *gov_ops;
36535+ const void *gov_ops;
36536 };
36537
36538 /* Governer Per policy data */
36539diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
36540index c087347..dad6268 100644
36541--- a/drivers/cpufreq/cpufreq_ondemand.c
36542+++ b/drivers/cpufreq/cpufreq_ondemand.c
36543@@ -615,14 +615,18 @@ void od_register_powersave_bias_handler(unsigned int (*f)
36544 (struct cpufreq_policy *, unsigned int, unsigned int),
36545 unsigned int powersave_bias)
36546 {
36547- od_ops.powersave_bias_target = f;
36548+ pax_open_kernel();
36549+ *(void **)&od_ops.powersave_bias_target = f;
36550+ pax_close_kernel();
36551 od_set_powersave_bias(powersave_bias);
36552 }
36553 EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);
36554
36555 void od_unregister_powersave_bias_handler(void)
36556 {
36557- od_ops.powersave_bias_target = generic_powersave_bias_target;
36558+ pax_open_kernel();
36559+ *(void **)&od_ops.powersave_bias_target = generic_powersave_bias_target;
36560+ pax_close_kernel();
36561 od_set_powersave_bias(0);
36562 }
36563 EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
36564diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
36565index bfd6273..e39dd63 100644
36566--- a/drivers/cpufreq/cpufreq_stats.c
36567+++ b/drivers/cpufreq/cpufreq_stats.c
36568@@ -365,7 +365,7 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
36569 }
36570
36571 /* priority=1 so this will get called before cpufreq_remove_dev */
36572-static struct notifier_block cpufreq_stat_cpu_notifier __refdata = {
36573+static struct notifier_block cpufreq_stat_cpu_notifier = {
36574 .notifier_call = cpufreq_stat_cpu_callback,
36575 .priority = 1,
36576 };
36577diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
36578index 421ef37..e708530c 100644
36579--- a/drivers/cpufreq/p4-clockmod.c
36580+++ b/drivers/cpufreq/p4-clockmod.c
36581@@ -160,10 +160,14 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
36582 case 0x0F: /* Core Duo */
36583 case 0x16: /* Celeron Core */
36584 case 0x1C: /* Atom */
36585- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
36586+ pax_open_kernel();
36587+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
36588+ pax_close_kernel();
36589 return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
36590 case 0x0D: /* Pentium M (Dothan) */
36591- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
36592+ pax_open_kernel();
36593+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
36594+ pax_close_kernel();
36595 /* fall through */
36596 case 0x09: /* Pentium M (Banias) */
36597 return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
36598@@ -175,7 +179,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
36599
36600 /* on P-4s, the TSC runs with constant frequency independent whether
36601 * throttling is active or not. */
36602- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
36603+ pax_open_kernel();
36604+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
36605+ pax_close_kernel();
36606
36607 if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
36608 printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
36609diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
36610index c71ee14..7c2e183 100644
36611--- a/drivers/cpufreq/sparc-us3-cpufreq.c
36612+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
36613@@ -18,14 +18,12 @@
36614 #include <asm/head.h>
36615 #include <asm/timer.h>
36616
36617-static struct cpufreq_driver *cpufreq_us3_driver;
36618-
36619 struct us3_freq_percpu_info {
36620 struct cpufreq_frequency_table table[4];
36621 };
36622
36623 /* Indexed by cpu number. */
36624-static struct us3_freq_percpu_info *us3_freq_table;
36625+static struct us3_freq_percpu_info us3_freq_table[NR_CPUS];
36626
36627 /* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled
36628 * in the Safari config register.
36629@@ -186,12 +184,25 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
36630
36631 static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
36632 {
36633- if (cpufreq_us3_driver)
36634- us3_set_cpu_divider_index(policy, 0);
36635+ us3_set_cpu_divider_index(policy->cpu, 0);
36636
36637 return 0;
36638 }
36639
36640+static int __init us3_freq_init(void);
36641+static void __exit us3_freq_exit(void);
36642+
36643+static struct cpufreq_driver cpufreq_us3_driver = {
36644+ .init = us3_freq_cpu_init,
36645+ .verify = us3_freq_verify,
36646+ .target = us3_freq_target,
36647+ .get = us3_freq_get,
36648+ .exit = us3_freq_cpu_exit,
36649+ .owner = THIS_MODULE,
36650+ .name = "UltraSPARC-III",
36651+
36652+};
36653+
36654 static int __init us3_freq_init(void)
36655 {
36656 unsigned long manuf, impl, ver;
36657@@ -208,57 +219,15 @@ static int __init us3_freq_init(void)
36658 (impl == CHEETAH_IMPL ||
36659 impl == CHEETAH_PLUS_IMPL ||
36660 impl == JAGUAR_IMPL ||
36661- impl == PANTHER_IMPL)) {
36662- struct cpufreq_driver *driver;
36663-
36664- ret = -ENOMEM;
36665- driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL);
36666- if (!driver)
36667- goto err_out;
36668-
36669- us3_freq_table = kzalloc(
36670- (NR_CPUS * sizeof(struct us3_freq_percpu_info)),
36671- GFP_KERNEL);
36672- if (!us3_freq_table)
36673- goto err_out;
36674-
36675- driver->init = us3_freq_cpu_init;
36676- driver->verify = us3_freq_verify;
36677- driver->target = us3_freq_target;
36678- driver->get = us3_freq_get;
36679- driver->exit = us3_freq_cpu_exit;
36680- driver->owner = THIS_MODULE,
36681- strcpy(driver->name, "UltraSPARC-III");
36682-
36683- cpufreq_us3_driver = driver;
36684- ret = cpufreq_register_driver(driver);
36685- if (ret)
36686- goto err_out;
36687-
36688- return 0;
36689-
36690-err_out:
36691- if (driver) {
36692- kfree(driver);
36693- cpufreq_us3_driver = NULL;
36694- }
36695- kfree(us3_freq_table);
36696- us3_freq_table = NULL;
36697- return ret;
36698- }
36699+ impl == PANTHER_IMPL))
36700+ return cpufreq_register_driver(&cpufreq_us3_driver);
36701
36702 return -ENODEV;
36703 }
36704
36705 static void __exit us3_freq_exit(void)
36706 {
36707- if (cpufreq_us3_driver) {
36708- cpufreq_unregister_driver(cpufreq_us3_driver);
36709- kfree(cpufreq_us3_driver);
36710- cpufreq_us3_driver = NULL;
36711- kfree(us3_freq_table);
36712- us3_freq_table = NULL;
36713- }
36714+ cpufreq_unregister_driver(&cpufreq_us3_driver);
36715 }
36716
36717 MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
36718diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
36719index 618e6f4..e89d915 100644
36720--- a/drivers/cpufreq/speedstep-centrino.c
36721+++ b/drivers/cpufreq/speedstep-centrino.c
36722@@ -353,8 +353,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
36723 !cpu_has(cpu, X86_FEATURE_EST))
36724 return -ENODEV;
36725
36726- if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
36727- centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
36728+ if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) {
36729+ pax_open_kernel();
36730+ *(u8 *)&centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
36731+ pax_close_kernel();
36732+ }
36733
36734 if (policy->cpu != 0)
36735 return -ENODEV;
36736diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
36737index c3a93fe..e808f24 100644
36738--- a/drivers/cpuidle/cpuidle.c
36739+++ b/drivers/cpuidle/cpuidle.c
36740@@ -254,7 +254,7 @@ static int poll_idle(struct cpuidle_device *dev,
36741
36742 static void poll_idle_init(struct cpuidle_driver *drv)
36743 {
36744- struct cpuidle_state *state = &drv->states[0];
36745+ cpuidle_state_no_const *state = &drv->states[0];
36746
36747 snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
36748 snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
36749diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
36750index ea2f8e7..70ac501 100644
36751--- a/drivers/cpuidle/governor.c
36752+++ b/drivers/cpuidle/governor.c
36753@@ -87,7 +87,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
36754 mutex_lock(&cpuidle_lock);
36755 if (__cpuidle_find_governor(gov->name) == NULL) {
36756 ret = 0;
36757- list_add_tail(&gov->governor_list, &cpuidle_governors);
36758+ pax_list_add_tail((struct list_head *)&gov->governor_list, &cpuidle_governors);
36759 if (!cpuidle_curr_governor ||
36760 cpuidle_curr_governor->rating < gov->rating)
36761 cpuidle_switch_governor(gov);
36762@@ -135,7 +135,7 @@ void cpuidle_unregister_governor(struct cpuidle_governor *gov)
36763 new_gov = cpuidle_replace_governor(gov->rating);
36764 cpuidle_switch_governor(new_gov);
36765 }
36766- list_del(&gov->governor_list);
36767+ pax_list_del((struct list_head *)&gov->governor_list);
36768 mutex_unlock(&cpuidle_lock);
36769 }
36770
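
cpuidle governors (and the devfreq governors below) are constified, including their embedded list_head, so a plain list_add_tail() would fault writing the node's prev/next links; pax_list_add_tail()/pax_list_del() are the grsecurity wrappers that open the kernel around the link update. A plausible shape for one wrapper, assuming it mirrors list_add_tail() exactly (the patch defines the real ones elsewhere):

/* Sketch: same linkage as list_add_tail(), bracketed by the rodata
 * open/close helpers; not the patch's literal definition. */
void pax_list_add_tail(struct list_head *new, struct list_head *head)
{
	pax_open_kernel();
	__list_add(new, head->prev, head);
	pax_close_kernel();
}
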
36771diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
36772index 428754a..8bdf9cc 100644
36773--- a/drivers/cpuidle/sysfs.c
36774+++ b/drivers/cpuidle/sysfs.c
36775@@ -131,7 +131,7 @@ static struct attribute *cpuidle_switch_attrs[] = {
36776 NULL
36777 };
36778
36779-static struct attribute_group cpuidle_attr_group = {
36780+static attribute_group_no_const cpuidle_attr_group = {
36781 .attrs = cpuidle_default_attrs,
36782 .name = "cpuidle",
36783 };
36784diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
36785index 3b36797..db0b0c0 100644
36786--- a/drivers/devfreq/devfreq.c
36787+++ b/drivers/devfreq/devfreq.c
36788@@ -477,7 +477,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
36789 GFP_KERNEL);
36790 devfreq->last_stat_updated = jiffies;
36791
36792- dev_set_name(&devfreq->dev, dev_name(dev));
36793+ dev_set_name(&devfreq->dev, "%s", dev_name(dev));
36794 err = device_register(&devfreq->dev);
36795 if (err) {
36796 put_device(&devfreq->dev);
36797@@ -588,7 +588,7 @@ int devfreq_add_governor(struct devfreq_governor *governor)
36798 goto err_out;
36799 }
36800
36801- list_add(&governor->node, &devfreq_governor_list);
36802+ pax_list_add((struct list_head *)&governor->node, &devfreq_governor_list);
36803
36804 list_for_each_entry(devfreq, &devfreq_list, node) {
36805 int ret = 0;
36806@@ -676,7 +676,7 @@ int devfreq_remove_governor(struct devfreq_governor *governor)
36807 }
36808 }
36809
36810- list_del(&governor->node);
36811+ pax_list_del((struct list_head *)&governor->node);
36812 err_out:
36813 mutex_unlock(&devfreq_list_lock);
36814
36815diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdma.c
36816index b70709b..1d8d02a 100644
36817--- a/drivers/dma/sh/shdma.c
36818+++ b/drivers/dma/sh/shdma.c
36819@@ -476,7 +476,7 @@ static int sh_dmae_nmi_handler(struct notifier_block *self,
36820 return ret;
36821 }
36822
36823-static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
36824+static struct notifier_block sh_dmae_nmi_notifier = {
36825 .notifier_call = sh_dmae_nmi_handler,
36826
36827 /* Run before NMI debug handler and KGDB */
36828diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
36829index c4d700a..0b57abd 100644
36830--- a/drivers/edac/edac_mc_sysfs.c
36831+++ b/drivers/edac/edac_mc_sysfs.c
36832@@ -148,7 +148,7 @@ static const char * const edac_caps[] = {
36833 struct dev_ch_attribute {
36834 struct device_attribute attr;
36835 int channel;
36836-};
36837+} __do_const;
36838
36839 #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
36840 struct dev_ch_attribute dev_attr_legacy_##_name = \
36841@@ -1005,14 +1005,16 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
36842 }
36843
36844 if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) {
36845+ pax_open_kernel();
36846 if (mci->get_sdram_scrub_rate) {
36847- dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
36848- dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
36849+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
36850+ *(void **)&dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
36851 }
36852 if (mci->set_sdram_scrub_rate) {
36853- dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
36854- dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
36855+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
36856+ *(void **)&dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
36857 }
36858+ pax_close_kernel();
36859 err = device_create_file(&mci->dev,
36860 &dev_attr_sdram_scrub_rate);
36861 if (err) {
36862diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
36863index e8658e4..22746d6 100644
36864--- a/drivers/edac/edac_pci_sysfs.c
36865+++ b/drivers/edac/edac_pci_sysfs.c
36866@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
36867 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
36868 static int edac_pci_poll_msec = 1000; /* one second workq period */
36869
36870-static atomic_t pci_parity_count = ATOMIC_INIT(0);
36871-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
36872+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
36873+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
36874
36875 static struct kobject *edac_pci_top_main_kobj;
36876 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
36877@@ -235,7 +235,7 @@ struct edac_pci_dev_attribute {
36878 void *value;
36879 ssize_t(*show) (void *, char *);
36880 ssize_t(*store) (void *, const char *, size_t);
36881-};
36882+} __do_const;
36883
36884 /* Set of show/store abstract level functions for PCI Parity object */
36885 static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
36886@@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
36887 edac_printk(KERN_CRIT, EDAC_PCI,
36888 "Signaled System Error on %s\n",
36889 pci_name(dev));
36890- atomic_inc(&pci_nonparity_count);
36891+ atomic_inc_unchecked(&pci_nonparity_count);
36892 }
36893
36894 if (status & (PCI_STATUS_PARITY)) {
36895@@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
36896 "Master Data Parity Error on %s\n",
36897 pci_name(dev));
36898
36899- atomic_inc(&pci_parity_count);
36900+ atomic_inc_unchecked(&pci_parity_count);
36901 }
36902
36903 if (status & (PCI_STATUS_DETECTED_PARITY)) {
36904@@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
36905 "Detected Parity Error on %s\n",
36906 pci_name(dev));
36907
36908- atomic_inc(&pci_parity_count);
36909+ atomic_inc_unchecked(&pci_parity_count);
36910 }
36911 }
36912
36913@@ -618,7 +618,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
36914 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
36915 "Signaled System Error on %s\n",
36916 pci_name(dev));
36917- atomic_inc(&pci_nonparity_count);
36918+ atomic_inc_unchecked(&pci_nonparity_count);
36919 }
36920
36921 if (status & (PCI_STATUS_PARITY)) {
36922@@ -626,7 +626,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
36923 "Master Data Parity Error on "
36924 "%s\n", pci_name(dev));
36925
36926- atomic_inc(&pci_parity_count);
36927+ atomic_inc_unchecked(&pci_parity_count);
36928 }
36929
36930 if (status & (PCI_STATUS_DETECTED_PARITY)) {
36931@@ -634,7 +634,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
36932 "Detected Parity Error on %s\n",
36933 pci_name(dev));
36934
36935- atomic_inc(&pci_parity_count);
36936+ atomic_inc_unchecked(&pci_parity_count);
36937 }
36938 }
36939 }
36940@@ -672,7 +672,7 @@ void edac_pci_do_parity_check(void)
36941 if (!check_pci_errors)
36942 return;
36943
36944- before_count = atomic_read(&pci_parity_count);
36945+ before_count = atomic_read_unchecked(&pci_parity_count);
36946
36947 /* scan all PCI devices looking for a Parity Error on devices and
36948 * bridges.
36949@@ -684,7 +684,7 @@ void edac_pci_do_parity_check(void)
36950 /* Only if operator has selected panic on PCI Error */
36951 if (edac_pci_get_panic_on_pe()) {
36952 /* If the count is different 'after' from 'before' */
36953- if (before_count != atomic_read(&pci_parity_count))
36954+ if (before_count != atomic_read_unchecked(&pci_parity_count))
36955 panic("EDAC: PCI Parity Error");
36956 }
36957 }
36958diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
36959index 51b7e3a..aa8a3e8 100644
36960--- a/drivers/edac/mce_amd.h
36961+++ b/drivers/edac/mce_amd.h
36962@@ -77,7 +77,7 @@ struct amd_decoder_ops {
36963 bool (*mc0_mce)(u16, u8);
36964 bool (*mc1_mce)(u16, u8);
36965 bool (*mc2_mce)(u16, u8);
36966-};
36967+} __no_const;
36968
36969 void amd_report_gart_errors(bool);
36970 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
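
__do_const and __no_const steer the constify gcc plugin seen throughout this patch: __do_const forces a mixed data/function-pointer struct (dev_ch_attribute, the drbd command tables, edac_pci_dev_attribute) into the const treatment, while __no_const exempts a struct such as amd_decoder_ops whose members are legitimately assigned at runtime. When the plugin is not active, both must vanish; a sketch of the expected plumbing (the attribute names are an assumption):

/* Sketch: plugin-visible annotations with no-op fallbacks. */
#ifdef CONSTIFY_PLUGIN
# define __do_const	__attribute__((do_const))
# define __no_const	__attribute__((no_const))
#else
# define __do_const
# define __no_const
#endif
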
36971diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
36972index 57ea7f4..789e3c3 100644
36973--- a/drivers/firewire/core-card.c
36974+++ b/drivers/firewire/core-card.c
36975@@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
36976
36977 void fw_core_remove_card(struct fw_card *card)
36978 {
36979- struct fw_card_driver dummy_driver = dummy_driver_template;
36980+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
36981
36982 card->driver->update_phy_reg(card, 4,
36983 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
36984diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
36985index 664a6ff..af13580 100644
36986--- a/drivers/firewire/core-device.c
36987+++ b/drivers/firewire/core-device.c
36988@@ -232,7 +232,7 @@ EXPORT_SYMBOL(fw_device_enable_phys_dma);
36989 struct config_rom_attribute {
36990 struct device_attribute attr;
36991 u32 key;
36992-};
36993+} __do_const;
36994
36995 static ssize_t show_immediate(struct device *dev,
36996 struct device_attribute *dattr, char *buf)
36997diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
36998index 28a94c7..58da63a 100644
36999--- a/drivers/firewire/core-transaction.c
37000+++ b/drivers/firewire/core-transaction.c
37001@@ -38,6 +38,7 @@
37002 #include <linux/timer.h>
37003 #include <linux/types.h>
37004 #include <linux/workqueue.h>
37005+#include <linux/sched.h>
37006
37007 #include <asm/byteorder.h>
37008
37009diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
37010index 515a42c..5ecf3ba 100644
37011--- a/drivers/firewire/core.h
37012+++ b/drivers/firewire/core.h
37013@@ -111,6 +111,7 @@ struct fw_card_driver {
37014
37015 int (*stop_iso)(struct fw_iso_context *ctx);
37016 };
37017+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
37018
37019 void fw_card_initialize(struct fw_card *card,
37020 const struct fw_card_driver *driver, struct device *device);
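[editor's note] fw_core_remove_card() needs a mutable stack copy of a driver template, but the plugin would constify struct fw_card_driver itself, so the patch introduces a __no_const typedef as a writable alias of the same layout. The same pattern as a compilable toy (names illustrative):

/* Toy version of the no_const-typedef pattern: the canonical struct
 * stays constified, while call sites that need a stack copy get a
 * writable alias of the same layout. */
#ifndef __no_const
#define __no_const
#endif

struct card_driver {
        void (*update)(int reg, int value);
};

typedef struct card_driver __no_const card_driver_no_const;

static void dummy_update(int reg, int value) { (void)reg; (void)value; }

static const struct card_driver dummy_template = { .update = dummy_update };

void remove_card(void)
{
        /* writable copy on the stack; the template stays read-only */
        card_driver_no_const dummy = dummy_template;
        dummy.update(4, 0);
}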
37021diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
37022index 94a58a0..f5eba42 100644
37023--- a/drivers/firmware/dmi-id.c
37024+++ b/drivers/firmware/dmi-id.c
37025@@ -16,7 +16,7 @@
37026 struct dmi_device_attribute{
37027 struct device_attribute dev_attr;
37028 int field;
37029-};
37030+} __do_const;
37031 #define to_dmi_dev_attr(_dev_attr) \
37032 container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
37033
37034diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
37035index b95159b..841ae55 100644
37036--- a/drivers/firmware/dmi_scan.c
37037+++ b/drivers/firmware/dmi_scan.c
37038@@ -497,11 +497,6 @@ void __init dmi_scan_machine(void)
37039 }
37040 }
37041 else {
37042- /*
37043- * no iounmap() for that ioremap(); it would be a no-op, but
37044- * it's so early in setup that sucker gets confused into doing
37045- * what it shouldn't if we actually call it.
37046- */
37047 p = dmi_ioremap(0xF0000, 0x10000);
37048 if (p == NULL)
37049 goto error;
37050@@ -786,7 +781,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
37051 if (buf == NULL)
37052 return -1;
37053
37054- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
37055+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
37056
37057 iounmap(buf);
37058 return 0;
37059diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
37060index 5145fa3..0d3babd 100644
37061--- a/drivers/firmware/efi/efi.c
37062+++ b/drivers/firmware/efi/efi.c
37063@@ -65,14 +65,16 @@ static struct attribute_group efi_subsys_attr_group = {
37064 };
37065
37066 static struct efivars generic_efivars;
37067-static struct efivar_operations generic_ops;
37068+static efivar_operations_no_const generic_ops __read_only;
37069
37070 static int generic_ops_register(void)
37071 {
37072- generic_ops.get_variable = efi.get_variable;
37073- generic_ops.set_variable = efi.set_variable;
37074- generic_ops.get_next_variable = efi.get_next_variable;
37075- generic_ops.query_variable_store = efi_query_variable_store;
37076+ pax_open_kernel();
37077+ *(void **)&generic_ops.get_variable = efi.get_variable;
37078+ *(void **)&generic_ops.set_variable = efi.set_variable;
37079+ *(void **)&generic_ops.get_next_variable = efi.get_next_variable;
37080+ *(void **)&generic_ops.query_variable_store = efi_query_variable_store;
37081+ pax_close_kernel();
37082
37083 return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
37084 }
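[editor's note] generic_ops moves into __read_only storage, so its function pointers can no longer be stored through directly; every deliberate write is bracketed by pax_open_kernel()/pax_close_kernel(), which briefly lift the write protection (on x86 by toggling CR0.WP). A userspace analogy of that open/write/close discipline, with mprotect() standing in for the kernel primitive (the analogy is ours, not grsecurity's):

/* Userspace analogy for pax_open_kernel()/pax_close_kernel():
 * data is read-only by default and made briefly writable around
 * an intentional update. */
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

static long pagesz;
static void *ops_page;

static void open_kernel(void)  { mprotect(ops_page, pagesz, PROT_READ | PROT_WRITE); }
static void close_kernel(void) { mprotect(ops_page, pagesz, PROT_READ); }

struct efivar_ops { int (*get_variable)(void); };
static int real_get_variable(void) { return 42; }

int main(void)
{
        pagesz = sysconf(_SC_PAGESIZE);
        ops_page = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        struct efivar_ops *generic_ops = ops_page;
        close_kernel();                 /* default state: read-only */

        open_kernel();                  /* pax_open_kernel()  */
        generic_ops->get_variable = real_get_variable;
        close_kernel();                 /* pax_close_kernel() */

        printf("%d\n", generic_ops->get_variable());
        return 0;
}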
37085diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
37086index 8bd1bb6..c48b0c6 100644
37087--- a/drivers/firmware/efi/efivars.c
37088+++ b/drivers/firmware/efi/efivars.c
37089@@ -452,7 +452,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
37090 static int
37091 create_efivars_bin_attributes(void)
37092 {
37093- struct bin_attribute *attr;
37094+ bin_attribute_no_const *attr;
37095 int error;
37096
37097 /* new_var */
37098diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
37099index 2a90ba6..07f3733 100644
37100--- a/drivers/firmware/google/memconsole.c
37101+++ b/drivers/firmware/google/memconsole.c
37102@@ -147,7 +147,9 @@ static int __init memconsole_init(void)
37103 if (!found_memconsole())
37104 return -ENODEV;
37105
37106- memconsole_bin_attr.size = memconsole_length;
37107+ pax_open_kernel();
37108+ *(size_t *)&memconsole_bin_attr.size = memconsole_length;
37109+ pax_close_kernel();
37110
37111 ret = sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
37112
37113diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
37114index e16d932..f0206ef 100644
37115--- a/drivers/gpio/gpio-ich.c
37116+++ b/drivers/gpio/gpio-ich.c
37117@@ -69,7 +69,7 @@ struct ichx_desc {
37118 /* Some chipsets have quirks, let these use their own request/get */
37119 int (*request)(struct gpio_chip *chip, unsigned offset);
37120 int (*get)(struct gpio_chip *chip, unsigned offset);
37121-};
37122+} __do_const;
37123
37124 static struct {
37125 spinlock_t lock;
37126diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
37127index 9902732..64b62dd 100644
37128--- a/drivers/gpio/gpio-vr41xx.c
37129+++ b/drivers/gpio/gpio-vr41xx.c
37130@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
37131 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
37132 maskl, pendl, maskh, pendh);
37133
37134- atomic_inc(&irq_err_count);
37135+ atomic_inc_unchecked(&irq_err_count);
37136
37137 return -EINVAL;
37138 }
37139diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
37140index ed1334e..ee0dd42 100644
37141--- a/drivers/gpu/drm/drm_crtc_helper.c
37142+++ b/drivers/gpu/drm/drm_crtc_helper.c
37143@@ -321,7 +321,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
37144 struct drm_crtc *tmp;
37145 int crtc_mask = 1;
37146
37147- WARN(!crtc, "checking null crtc?\n");
37148+ BUG_ON(!crtc);
37149
37150 dev = crtc->dev;
37151
37152diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
37153index 9cc247f..36aa285 100644
37154--- a/drivers/gpu/drm/drm_drv.c
37155+++ b/drivers/gpu/drm/drm_drv.c
37156@@ -306,7 +306,7 @@ module_exit(drm_core_exit);
37157 /**
37158 * Copy and IOCTL return string to user space
37159 */
37160-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
37161+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
37162 {
37163 int len;
37164
37165@@ -376,7 +376,7 @@ long drm_ioctl(struct file *filp,
37166 struct drm_file *file_priv = filp->private_data;
37167 struct drm_device *dev;
37168 const struct drm_ioctl_desc *ioctl = NULL;
37169- drm_ioctl_t *func;
37170+ drm_ioctl_no_const_t func;
37171 unsigned int nr = DRM_IOCTL_NR(cmd);
37172 int retcode = -EINVAL;
37173 char stack_kdata[128];
37174@@ -389,7 +389,7 @@ long drm_ioctl(struct file *filp,
37175 return -ENODEV;
37176
37177 atomic_inc(&dev->ioctl_count);
37178- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
37179+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
37180 ++file_priv->ioctl_count;
37181
37182 if ((nr >= DRM_CORE_IOCTL_COUNT) &&
37183diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
37184index 429e07d..e681a2c 100644
37185--- a/drivers/gpu/drm/drm_fops.c
37186+++ b/drivers/gpu/drm/drm_fops.c
37187@@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
37188 }
37189
37190 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
37191- atomic_set(&dev->counts[i], 0);
37192+ atomic_set_unchecked(&dev->counts[i], 0);
37193
37194 dev->sigdata.lock = NULL;
37195
37196@@ -135,7 +135,7 @@ int drm_open(struct inode *inode, struct file *filp)
37197 if (drm_device_is_unplugged(dev))
37198 return -ENODEV;
37199
37200- if (!dev->open_count++)
37201+ if (local_inc_return(&dev->open_count) == 1)
37202 need_setup = 1;
37203 mutex_lock(&dev->struct_mutex);
37204 old_imapping = inode->i_mapping;
37205@@ -151,7 +151,7 @@ int drm_open(struct inode *inode, struct file *filp)
37206 retcode = drm_open_helper(inode, filp, dev);
37207 if (retcode)
37208 goto err_undo;
37209- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
37210+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
37211 if (need_setup) {
37212 retcode = drm_setup(dev);
37213 if (retcode)
37214@@ -166,7 +166,7 @@ err_undo:
37215 iput(container_of(dev->dev_mapping, struct inode, i_data));
37216 dev->dev_mapping = old_mapping;
37217 mutex_unlock(&dev->struct_mutex);
37218- dev->open_count--;
37219+ local_dec(&dev->open_count);
37220 return retcode;
37221 }
37222 EXPORT_SYMBOL(drm_open);
37223@@ -441,7 +441,7 @@ int drm_release(struct inode *inode, struct file *filp)
37224
37225 mutex_lock(&drm_global_mutex);
37226
37227- DRM_DEBUG("open_count = %d\n", dev->open_count);
37228+ DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
37229
37230 if (dev->driver->preclose)
37231 dev->driver->preclose(dev, file_priv);
37232@@ -450,10 +450,10 @@ int drm_release(struct inode *inode, struct file *filp)
37233 * Begin inline drm_release
37234 */
37235
37236- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
37237+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
37238 task_pid_nr(current),
37239 (long)old_encode_dev(file_priv->minor->device),
37240- dev->open_count);
37241+ local_read(&dev->open_count));
37242
37243 /* Release any auth tokens that might point to this file_priv,
37244 (do that under the drm_global_mutex) */
37245@@ -550,8 +550,8 @@ int drm_release(struct inode *inode, struct file *filp)
37246 * End inline drm_release
37247 */
37248
37249- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
37250- if (!--dev->open_count) {
37251+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
37252+ if (local_dec_and_test(&dev->open_count)) {
37253 if (atomic_read(&dev->ioctl_count)) {
37254 DRM_ERROR("Device busy: %d\n",
37255 atomic_read(&dev->ioctl_count));
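[editor's note] dev->open_count changes from a plain int to a local_t so the first-open and last-close transitions are single atomic operations: !dev->open_count++ becomes local_inc_return() == 1, !--dev->open_count becomes local_dec_and_test(), and the %d format specifiers widen to %ld to match. An approximation of the API shape with GCC atomics (local_t proper is a per-CPU kernel type):

/* Approximation of the local_t open-count pattern: the open path
 * detects the first opener, the release path the last closer, with
 * no window for a torn read-modify-write. */
#include <stdio.h>

typedef struct { volatile long v; } local_t;

static long local_inc_return(local_t *l)  { return __sync_add_and_fetch(&l->v, 1); }
static int  local_dec_and_test(local_t *l) { return __sync_sub_and_fetch(&l->v, 1) == 0; }
static long local_read(local_t *l)         { return l->v; }

static local_t open_count = { 0 };

void drm_open_sketch(void)
{
        if (local_inc_return(&open_count) == 1)
                printf("first open: run setup\n");
}

void drm_release_sketch(void)
{
        if (local_dec_and_test(&open_count))
                printf("last close: tear down\n");
        printf("open_count = %ld\n", local_read(&open_count));
}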
37256diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
37257index f731116..629842c 100644
37258--- a/drivers/gpu/drm/drm_global.c
37259+++ b/drivers/gpu/drm/drm_global.c
37260@@ -36,7 +36,7 @@
37261 struct drm_global_item {
37262 struct mutex mutex;
37263 void *object;
37264- int refcount;
37265+ atomic_t refcount;
37266 };
37267
37268 static struct drm_global_item glob[DRM_GLOBAL_NUM];
37269@@ -49,7 +49,7 @@ void drm_global_init(void)
37270 struct drm_global_item *item = &glob[i];
37271 mutex_init(&item->mutex);
37272 item->object = NULL;
37273- item->refcount = 0;
37274+ atomic_set(&item->refcount, 0);
37275 }
37276 }
37277
37278@@ -59,7 +59,7 @@ void drm_global_release(void)
37279 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
37280 struct drm_global_item *item = &glob[i];
37281 BUG_ON(item->object != NULL);
37282- BUG_ON(item->refcount != 0);
37283+ BUG_ON(atomic_read(&item->refcount) != 0);
37284 }
37285 }
37286
37287@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
37288 void *object;
37289
37290 mutex_lock(&item->mutex);
37291- if (item->refcount == 0) {
37292+ if (atomic_read(&item->refcount) == 0) {
37293 item->object = kzalloc(ref->size, GFP_KERNEL);
37294 if (unlikely(item->object == NULL)) {
37295 ret = -ENOMEM;
37296@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
37297 goto out_err;
37298
37299 }
37300- ++item->refcount;
37301+ atomic_inc(&item->refcount);
37302 ref->object = item->object;
37303 object = item->object;
37304 mutex_unlock(&item->mutex);
37305@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
37306 struct drm_global_item *item = &glob[ref->global_type];
37307
37308 mutex_lock(&item->mutex);
37309- BUG_ON(item->refcount == 0);
37310+ BUG_ON(atomic_read(&item->refcount) == 0);
37311 BUG_ON(ref->object != item->object);
37312- if (--item->refcount == 0) {
37313+ if (atomic_dec_and_test(&item->refcount)) {
37314 ref->release(ref);
37315 item->object = NULL;
37316 }
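[editor's note] drm_global_item.refcount is a true reference count, so here the conversion goes the other way, to a checked atomic_t, with --item->refcount == 0 becoming atomic_dec_and_test(). The invariant, reduced to a skeleton (the surrounding mutex still serializes the allocate/free edges in the real code):

/* Refcount skeleton matching the drm_global change: create on
 * 0 -> 1, destroy on 1 -> 0, all transitions atomic. */
#include <stdlib.h>

typedef struct { volatile int counter; } atomic_t;

static int  atomic_read(atomic_t *v)         { return v->counter; }
static void atomic_inc(atomic_t *v)          { __sync_fetch_and_add(&v->counter, 1); }
static int  atomic_dec_and_test(atomic_t *v) { return __sync_sub_and_fetch(&v->counter, 1) == 0; }

struct item { atomic_t refcount; void *object; };

void item_ref(struct item *it, size_t size)
{
        if (atomic_read(&it->refcount) == 0)   /* first user allocates */
                it->object = calloc(1, size);
        atomic_inc(&it->refcount);
}

void item_unref(struct item *it)
{
        if (atomic_dec_and_test(&it->refcount)) { /* last user frees */
                free(it->object);
                it->object = NULL;
        }
}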
37317diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
37318index d4b20ce..77a8d41 100644
37319--- a/drivers/gpu/drm/drm_info.c
37320+++ b/drivers/gpu/drm/drm_info.c
37321@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
37322 struct drm_local_map *map;
37323 struct drm_map_list *r_list;
37324
37325- /* Hardcoded from _DRM_FRAME_BUFFER,
37326- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
37327- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
37328- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
37329+ static const char * const types[] = {
37330+ [_DRM_FRAME_BUFFER] = "FB",
37331+ [_DRM_REGISTERS] = "REG",
37332+ [_DRM_SHM] = "SHM",
37333+ [_DRM_AGP] = "AGP",
37334+ [_DRM_SCATTER_GATHER] = "SG",
37335+ [_DRM_CONSISTENT] = "PCI",
37336+ [_DRM_GEM] = "GEM" };
37337 const char *type;
37338 int i;
37339
37340@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
37341 map = r_list->map;
37342 if (!map)
37343 continue;
37344- if (map->type < 0 || map->type > 5)
37345+ if (map->type >= ARRAY_SIZE(types))
37346 type = "??";
37347 else
37348 type = types[map->type];
37349@@ -253,7 +257,11 @@ int drm_vma_info(struct seq_file *m, void *data)
37350 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
37351 vma->vm_flags & VM_LOCKED ? 'l' : '-',
37352 vma->vm_flags & VM_IO ? 'i' : '-',
37353+#ifdef CONFIG_GRKERNSEC_HIDESYM
37354+ 0);
37355+#else
37356 vma->vm_pgoff);
37357+#endif
37358
37359 #if defined(__i386__)
37360 pgprot = pgprot_val(vma->vm_page_prot);
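[editor's note] Two hardening idioms share the drm_info.c hunk: the map-type name table gains designated initializers keyed by the enum, so the addition of _DRM_GEM cannot silently misalign the strings, and the magic "type > 5" bound becomes ARRAY_SIZE(types); the same file also zeroes vm_pgoff in its output under GRKERNSEC_HIDESYM to avoid leaking addresses. A minimal sketch of the table idiom:

/* Designated-initializer lookup table with an ARRAY_SIZE bound,
 * the same shape as the drm_vm_info fix. */
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

enum map_type { MAP_FB, MAP_REG, MAP_SHM, MAP_AGP, MAP_SG, MAP_PCI, MAP_GEM };

static const char * const types[] = {
        [MAP_FB]  = "FB",
        [MAP_REG] = "REG",
        [MAP_SHM] = "SHM",
        [MAP_AGP] = "AGP",
        [MAP_SG]  = "SG",
        [MAP_PCI] = "PCI",
        [MAP_GEM] = "GEM",
};

const char *type_name(unsigned int t)
{
        /* unsigned index plus ARRAY_SIZE bound: no magic "> 5" to forget */
        return t >= ARRAY_SIZE(types) ? "??" : types[t];
}

int main(void) { printf("%s %s\n", type_name(MAP_GEM), type_name(99)); return 0; }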
37361diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
37362index 2f4c434..dd12cd2 100644
37363--- a/drivers/gpu/drm/drm_ioc32.c
37364+++ b/drivers/gpu/drm/drm_ioc32.c
37365@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
37366 request = compat_alloc_user_space(nbytes);
37367 if (!access_ok(VERIFY_WRITE, request, nbytes))
37368 return -EFAULT;
37369- list = (struct drm_buf_desc *) (request + 1);
37370+ list = (struct drm_buf_desc __user *) (request + 1);
37371
37372 if (__put_user(count, &request->count)
37373 || __put_user(list, &request->list))
37374@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
37375 request = compat_alloc_user_space(nbytes);
37376 if (!access_ok(VERIFY_WRITE, request, nbytes))
37377 return -EFAULT;
37378- list = (struct drm_buf_pub *) (request + 1);
37379+ list = (struct drm_buf_pub __user *) (request + 1);
37380
37381 if (__put_user(count, &request->count)
37382 || __put_user(list, &request->list))
37383@@ -1016,7 +1016,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
37384 return 0;
37385 }
37386
37387-drm_ioctl_compat_t *drm_compat_ioctls[] = {
37388+drm_ioctl_compat_t drm_compat_ioctls[] = {
37389 [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
37390 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
37391 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
37392@@ -1062,7 +1062,6 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = {
37393 long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
37394 {
37395 unsigned int nr = DRM_IOCTL_NR(cmd);
37396- drm_ioctl_compat_t *fn;
37397 int ret;
37398
37399 /* Assume that ioctls without an explicit compat routine will just
37400@@ -1072,10 +1071,8 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
37401 if (nr >= ARRAY_SIZE(drm_compat_ioctls))
37402 return drm_ioctl(filp, cmd, arg);
37403
37404- fn = drm_compat_ioctls[nr];
37405-
37406- if (fn != NULL)
37407- ret = (*fn) (filp, cmd, arg);
37408+ if (drm_compat_ioctls[nr] != NULL)
37409+ ret = (*drm_compat_ioctls[nr]) (filp, cmd, arg);
37410 else
37411 ret = drm_ioctl(filp, cmd, arg);
37412
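[editor's note] The compat tables drop one level of indirection: drm_ioctl_compat_t *table[] becomes drm_ioctl_compat_t table[], which only type-checks because the typedef itself is turned into a function-pointer type in a drmP.h hunk elsewhere in this patch; the payoff is that the dispatch tables become plain pointer arrays the constify plugin can move to read-only memory. The resulting shape, sketched with simplified signatures:

/* Sketch of the dispatch-table shape after the change: the typedef
 * is itself a function-pointer type, so the table is an array of
 * pointers that can live in read-only memory. Signatures simplified
 * from the DRM originals. */
typedef long (*ioctl_compat_t)(unsigned int cmd, unsigned long arg);

static long compat_version(unsigned int cmd, unsigned long arg)
{ (void)cmd; (void)arg; return 0; }

static const ioctl_compat_t compat_ioctls[] = {
        [0] = compat_version,
};

long dispatch(unsigned int nr, unsigned int cmd, unsigned long arg)
{
        if (nr >= sizeof(compat_ioctls) / sizeof(compat_ioctls[0]))
                return -1;
        /* index the table directly instead of copying into a local,
         * mirroring how the patched drm_compat_ioctl() reads it */
        if (compat_ioctls[nr] != NULL)
                return (*compat_ioctls[nr])(cmd, arg);
        return -1;
}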
37413diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
37414index e77bd8b..1571b85 100644
37415--- a/drivers/gpu/drm/drm_ioctl.c
37416+++ b/drivers/gpu/drm/drm_ioctl.c
37417@@ -252,7 +252,7 @@ int drm_getstats(struct drm_device *dev, void *data,
37418 stats->data[i].value =
37419 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
37420 else
37421- stats->data[i].value = atomic_read(&dev->counts[i]);
37422+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
37423 stats->data[i].type = dev->types[i];
37424 }
37425
37426diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
37427index d752c96..fe08455 100644
37428--- a/drivers/gpu/drm/drm_lock.c
37429+++ b/drivers/gpu/drm/drm_lock.c
37430@@ -86,7 +86,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
37431 if (drm_lock_take(&master->lock, lock->context)) {
37432 master->lock.file_priv = file_priv;
37433 master->lock.lock_time = jiffies;
37434- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
37435+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
37436 break; /* Got lock */
37437 }
37438
37439@@ -157,7 +157,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
37440 return -EINVAL;
37441 }
37442
37443- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
37444+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
37445
37446 if (drm_lock_free(&master->lock, lock->context)) {
37447 /* FIXME: Should really bail out here. */
37448diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
37449index 16f3ec5..b28f9ca 100644
37450--- a/drivers/gpu/drm/drm_stub.c
37451+++ b/drivers/gpu/drm/drm_stub.c
37452@@ -501,7 +501,7 @@ void drm_unplug_dev(struct drm_device *dev)
37453
37454 drm_device_set_unplugged(dev);
37455
37456- if (dev->open_count == 0) {
37457+ if (local_read(&dev->open_count) == 0) {
37458 drm_put_dev(dev);
37459 }
37460 mutex_unlock(&drm_global_mutex);
37461diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
37462index 0229665..f61329c 100644
37463--- a/drivers/gpu/drm/drm_sysfs.c
37464+++ b/drivers/gpu/drm/drm_sysfs.c
37465@@ -499,7 +499,7 @@ EXPORT_SYMBOL(drm_sysfs_hotplug_event);
37466 int drm_sysfs_device_add(struct drm_minor *minor)
37467 {
37468 int err;
37469- char *minor_str;
37470+ const char *minor_str;
37471
37472 minor->kdev.parent = minor->dev->dev;
37473
37474diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
37475index 004ecdf..db1f6e0 100644
37476--- a/drivers/gpu/drm/i810/i810_dma.c
37477+++ b/drivers/gpu/drm/i810/i810_dma.c
37478@@ -945,8 +945,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
37479 dma->buflist[vertex->idx],
37480 vertex->discard, vertex->used);
37481
37482- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
37483- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
37484+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
37485+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
37486 sarea_priv->last_enqueue = dev_priv->counter - 1;
37487 sarea_priv->last_dispatch = (int)hw_status[5];
37488
37489@@ -1106,8 +1106,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
37490 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
37491 mc->last_render);
37492
37493- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
37494- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
37495+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
37496+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
37497 sarea_priv->last_enqueue = dev_priv->counter - 1;
37498 sarea_priv->last_dispatch = (int)hw_status[5];
37499
37500diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
37501index 6e0acad..93c8289 100644
37502--- a/drivers/gpu/drm/i810/i810_drv.h
37503+++ b/drivers/gpu/drm/i810/i810_drv.h
37504@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
37505 int page_flipping;
37506
37507 wait_queue_head_t irq_queue;
37508- atomic_t irq_received;
37509- atomic_t irq_emitted;
37510+ atomic_unchecked_t irq_received;
37511+ atomic_unchecked_t irq_emitted;
37512
37513 int front_offset;
37514 } drm_i810_private_t;
37515diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
37516index e913d32..4d9b351 100644
37517--- a/drivers/gpu/drm/i915/i915_debugfs.c
37518+++ b/drivers/gpu/drm/i915/i915_debugfs.c
37519@@ -499,7 +499,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
37520 I915_READ(GTIMR));
37521 }
37522 seq_printf(m, "Interrupts received: %d\n",
37523- atomic_read(&dev_priv->irq_received));
37524+ atomic_read_unchecked(&dev_priv->irq_received));
37525 for_each_ring(ring, dev_priv, i) {
37526 if (IS_GEN6(dev) || IS_GEN7(dev)) {
37527 seq_printf(m,
37528diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
37529index 17d9b0b..860e6d9 100644
37530--- a/drivers/gpu/drm/i915/i915_dma.c
37531+++ b/drivers/gpu/drm/i915/i915_dma.c
37532@@ -1259,7 +1259,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
37533 bool can_switch;
37534
37535 spin_lock(&dev->count_lock);
37536- can_switch = (dev->open_count == 0);
37537+ can_switch = (local_read(&dev->open_count) == 0);
37538 spin_unlock(&dev->count_lock);
37539 return can_switch;
37540 }
37541diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
37542index 47d8b68..52f5d8d 100644
37543--- a/drivers/gpu/drm/i915/i915_drv.h
37544+++ b/drivers/gpu/drm/i915/i915_drv.h
37545@@ -916,7 +916,7 @@ typedef struct drm_i915_private {
37546 drm_dma_handle_t *status_page_dmah;
37547 struct resource mch_res;
37548
37549- atomic_t irq_received;
37550+ atomic_unchecked_t irq_received;
37551
37552 /* protects the irq masks */
37553 spinlock_t irq_lock;
37554@@ -1813,7 +1813,7 @@ extern struct i2c_adapter *intel_gmbus_get_adapter(
37555 struct drm_i915_private *dev_priv, unsigned port);
37556 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
37557 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
37558-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
37559+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
37560 {
37561 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
37562 }
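[editor's note] Besides the irq_received conversion, the i915_drv.h hunk fixes a genuine C portability bug: "extern inline" means different things under GNU89 and C99, and in a header either meaning can leave the linker without (or with duplicate) out-of-line definitions; "static inline" is the idiom that is safe in both dialects:

/* Header-inline idiom: static inline gives every translation unit
 * a private fallback definition, sidestepping the GNU89/C99
 * divergence of extern inline. */
static inline int gmbus_is_forced_bit(const int *force_bit)
{
        return *force_bit;
}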
37563diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
37564index 117ce38..eefd237 100644
37565--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
37566+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
37567@@ -727,9 +727,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
37568
37569 static int
37570 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
37571- int count)
37572+ unsigned int count)
37573 {
37574- int i;
37575+ unsigned int i;
37576 int relocs_total = 0;
37577 int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
37578
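[editor's note] validate_exec_list() receives a count derived from a userspace-supplied execbuffer, and both the parameter and the loop index become unsigned: as a signed int, a negative count compares less-than any positive bound and skips per-entry validation entirely, yet can later reappear as a huge size in unsigned arithmetic. The failure mode in miniature (hypothetical helpers, not the i915 code):

/* Why the signedness change matters: a negative user-supplied count
 * skips a signed validation loop but becomes huge once it reaches
 * size_t arithmetic elsewhere. */
#include <stdio.h>

static int validate_signed(int count)
{
        for (int i = 0; i < count; i++)   /* count = -1: loop skipped */
                ;                         /* ...no per-entry checks run */
        return 0;                         /* yet -1 may later become SIZE_MAX */
}

static int validate_unsigned(unsigned int count)
{
        for (unsigned int i = 0; i < count; i++)
                ;                         /* huge counts visit every entry */
        return 0;
}

int main(void)
{
        printf("%d %d\n", validate_signed(-1), validate_unsigned(3));
        return 0;
}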
37579diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
37580index 3c59584..500f2e9 100644
37581--- a/drivers/gpu/drm/i915/i915_ioc32.c
37582+++ b/drivers/gpu/drm/i915/i915_ioc32.c
37583@@ -181,7 +181,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
37584 (unsigned long)request);
37585 }
37586
37587-static drm_ioctl_compat_t *i915_compat_ioctls[] = {
37588+static drm_ioctl_compat_t i915_compat_ioctls[] = {
37589 [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
37590 [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
37591 [DRM_I915_GETPARAM] = compat_i915_getparam,
37592@@ -202,18 +202,15 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
37593 long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
37594 {
37595 unsigned int nr = DRM_IOCTL_NR(cmd);
37596- drm_ioctl_compat_t *fn = NULL;
37597 int ret;
37598
37599 if (nr < DRM_COMMAND_BASE)
37600 return drm_compat_ioctl(filp, cmd, arg);
37601
37602- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls))
37603- fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
37604-
37605- if (fn != NULL)
37606+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls)) {
37607+ drm_ioctl_compat_t fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
37608 ret = (*fn) (filp, cmd, arg);
37609- else
37610+ } else
37611 ret = drm_ioctl(filp, cmd, arg);
37612
37613 return ret;
37614diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
37615index e5e32869..1678f36 100644
37616--- a/drivers/gpu/drm/i915/i915_irq.c
37617+++ b/drivers/gpu/drm/i915/i915_irq.c
37618@@ -670,7 +670,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
37619 int pipe;
37620 u32 pipe_stats[I915_MAX_PIPES];
37621
37622- atomic_inc(&dev_priv->irq_received);
37623+ atomic_inc_unchecked(&dev_priv->irq_received);
37624
37625 while (true) {
37626 iir = I915_READ(VLV_IIR);
37627@@ -835,7 +835,7 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
37628 irqreturn_t ret = IRQ_NONE;
37629 int i;
37630
37631- atomic_inc(&dev_priv->irq_received);
37632+ atomic_inc_unchecked(&dev_priv->irq_received);
37633
37634 /* disable master interrupt before clearing iir */
37635 de_ier = I915_READ(DEIER);
37636@@ -925,7 +925,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
37637 int ret = IRQ_NONE;
37638 u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
37639
37640- atomic_inc(&dev_priv->irq_received);
37641+ atomic_inc_unchecked(&dev_priv->irq_received);
37642
37643 /* disable master interrupt before clearing iir */
37644 de_ier = I915_READ(DEIER);
37645@@ -2089,7 +2089,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
37646 {
37647 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
37648
37649- atomic_set(&dev_priv->irq_received, 0);
37650+ atomic_set_unchecked(&dev_priv->irq_received, 0);
37651
37652 I915_WRITE(HWSTAM, 0xeffe);
37653
37654@@ -2124,7 +2124,7 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
37655 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
37656 int pipe;
37657
37658- atomic_set(&dev_priv->irq_received, 0);
37659+ atomic_set_unchecked(&dev_priv->irq_received, 0);
37660
37661 /* VLV magic */
37662 I915_WRITE(VLV_IMR, 0);
37663@@ -2411,7 +2411,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
37664 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
37665 int pipe;
37666
37667- atomic_set(&dev_priv->irq_received, 0);
37668+ atomic_set_unchecked(&dev_priv->irq_received, 0);
37669
37670 for_each_pipe(pipe)
37671 I915_WRITE(PIPESTAT(pipe), 0);
37672@@ -2490,7 +2490,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
37673 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
37674 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
37675
37676- atomic_inc(&dev_priv->irq_received);
37677+ atomic_inc_unchecked(&dev_priv->irq_received);
37678
37679 iir = I915_READ16(IIR);
37680 if (iir == 0)
37681@@ -2565,7 +2565,7 @@ static void i915_irq_preinstall(struct drm_device * dev)
37682 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
37683 int pipe;
37684
37685- atomic_set(&dev_priv->irq_received, 0);
37686+ atomic_set_unchecked(&dev_priv->irq_received, 0);
37687
37688 if (I915_HAS_HOTPLUG(dev)) {
37689 I915_WRITE(PORT_HOTPLUG_EN, 0);
37690@@ -2664,7 +2664,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
37691 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
37692 int pipe, ret = IRQ_NONE;
37693
37694- atomic_inc(&dev_priv->irq_received);
37695+ atomic_inc_unchecked(&dev_priv->irq_received);
37696
37697 iir = I915_READ(IIR);
37698 do {
37699@@ -2791,7 +2791,7 @@ static void i965_irq_preinstall(struct drm_device * dev)
37700 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
37701 int pipe;
37702
37703- atomic_set(&dev_priv->irq_received, 0);
37704+ atomic_set_unchecked(&dev_priv->irq_received, 0);
37705
37706 I915_WRITE(PORT_HOTPLUG_EN, 0);
37707 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
37708@@ -2898,7 +2898,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
37709 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
37710 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
37711
37712- atomic_inc(&dev_priv->irq_received);
37713+ atomic_inc_unchecked(&dev_priv->irq_received);
37714
37715 iir = I915_READ(IIR);
37716
37717diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
37718index eea5982..eeef407 100644
37719--- a/drivers/gpu/drm/i915/intel_display.c
37720+++ b/drivers/gpu/drm/i915/intel_display.c
37721@@ -8935,13 +8935,13 @@ struct intel_quirk {
37722 int subsystem_vendor;
37723 int subsystem_device;
37724 void (*hook)(struct drm_device *dev);
37725-};
37726+} __do_const;
37727
37728 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
37729 struct intel_dmi_quirk {
37730 void (*hook)(struct drm_device *dev);
37731 const struct dmi_system_id (*dmi_id_list)[];
37732-};
37733+} __do_const;
37734
37735 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
37736 {
37737@@ -8949,18 +8949,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
37738 return 1;
37739 }
37740
37741-static const struct intel_dmi_quirk intel_dmi_quirks[] = {
37742+static const struct dmi_system_id intel_dmi_quirks_table[] = {
37743 {
37744- .dmi_id_list = &(const struct dmi_system_id[]) {
37745- {
37746- .callback = intel_dmi_reverse_brightness,
37747- .ident = "NCR Corporation",
37748- .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
37749- DMI_MATCH(DMI_PRODUCT_NAME, ""),
37750- },
37751- },
37752- { } /* terminating entry */
37753+ .callback = intel_dmi_reverse_brightness,
37754+ .ident = "NCR Corporation",
37755+ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
37756+ DMI_MATCH(DMI_PRODUCT_NAME, ""),
37757 },
37758+ },
37759+ { } /* terminating entry */
37760+};
37761+
37762+static const struct intel_dmi_quirk intel_dmi_quirks[] = {
37763+ {
37764+ .dmi_id_list = &intel_dmi_quirks_table,
37765 .hook = quirk_invert_brightness,
37766 },
37767 };
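[editor's note] The intel_display.c quirk rework hoists the dmi_system_id list out of an anonymous compound literal into a named static const array; presumably the constify plugin cannot pin down a compound literal, whereas a named file-scope array lands squarely in read-only data, and the two quirk structs gain __do_const besides. The before/after shape as a toy:

/* Toy version of the quirk-table change: a named static const array
 * replaces an anonymous compound literal, giving the data a definite
 * read-only home. */
struct dmi_id { const char *ident; };
struct quirk  { const struct dmi_id (*list)[]; };

static const struct dmi_id quirks_table[] = {
        { .ident = "NCR Corporation" },
        { 0 } /* terminating entry */
};

static const struct quirk quirks[] = {
        { .list = &quirks_table },
};

const char *first_quirk_ident(void)
{
        return (*quirks[0].list)[0].ident;
}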
37768diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
37769index 54558a0..2d97005 100644
37770--- a/drivers/gpu/drm/mga/mga_drv.h
37771+++ b/drivers/gpu/drm/mga/mga_drv.h
37772@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
37773 u32 clear_cmd;
37774 u32 maccess;
37775
37776- atomic_t vbl_received; /**< Number of vblanks received. */
37777+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
37778 wait_queue_head_t fence_queue;
37779- atomic_t last_fence_retired;
37780+ atomic_unchecked_t last_fence_retired;
37781 u32 next_fence_to_post;
37782
37783 unsigned int fb_cpp;
37784diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
37785index 709e90d..89a1c0d 100644
37786--- a/drivers/gpu/drm/mga/mga_ioc32.c
37787+++ b/drivers/gpu/drm/mga/mga_ioc32.c
37788@@ -189,7 +189,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
37789 return 0;
37790 }
37791
37792-drm_ioctl_compat_t *mga_compat_ioctls[] = {
37793+drm_ioctl_compat_t mga_compat_ioctls[] = {
37794 [DRM_MGA_INIT] = compat_mga_init,
37795 [DRM_MGA_GETPARAM] = compat_mga_getparam,
37796 [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
37797@@ -207,18 +207,15 @@ drm_ioctl_compat_t *mga_compat_ioctls[] = {
37798 long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
37799 {
37800 unsigned int nr = DRM_IOCTL_NR(cmd);
37801- drm_ioctl_compat_t *fn = NULL;
37802 int ret;
37803
37804 if (nr < DRM_COMMAND_BASE)
37805 return drm_compat_ioctl(filp, cmd, arg);
37806
37807- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
37808- fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
37809-
37810- if (fn != NULL)
37811+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls)) {
37812+ drm_ioctl_compat_t fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
37813 ret = (*fn) (filp, cmd, arg);
37814- else
37815+ } else
37816 ret = drm_ioctl(filp, cmd, arg);
37817
37818 return ret;
37819diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
37820index 598c281..60d590e 100644
37821--- a/drivers/gpu/drm/mga/mga_irq.c
37822+++ b/drivers/gpu/drm/mga/mga_irq.c
37823@@ -43,7 +43,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
37824 if (crtc != 0)
37825 return 0;
37826
37827- return atomic_read(&dev_priv->vbl_received);
37828+ return atomic_read_unchecked(&dev_priv->vbl_received);
37829 }
37830
37831
37832@@ -59,7 +59,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
37833 /* VBLANK interrupt */
37834 if (status & MGA_VLINEPEN) {
37835 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
37836- atomic_inc(&dev_priv->vbl_received);
37837+ atomic_inc_unchecked(&dev_priv->vbl_received);
37838 drm_handle_vblank(dev, 0);
37839 handled = 1;
37840 }
37841@@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
37842 if ((prim_start & ~0x03) != (prim_end & ~0x03))
37843 MGA_WRITE(MGA_PRIMEND, prim_end);
37844
37845- atomic_inc(&dev_priv->last_fence_retired);
37846+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
37847 DRM_WAKEUP(&dev_priv->fence_queue);
37848 handled = 1;
37849 }
37850@@ -129,7 +129,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
37851 * using fences.
37852 */
37853 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
37854- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
37855+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
37856 - *sequence) <= (1 << 23)));
37857
37858 *sequence = cur_fence;
37859diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
37860index 6aa2137..fe8dc55 100644
37861--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
37862+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
37863@@ -965,7 +965,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
37864 struct bit_table {
37865 const char id;
37866 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
37867-};
37868+} __no_const;
37869
37870 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
37871
37872diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
37873index f2b30f8..d0f9a95 100644
37874--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
37875+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
37876@@ -92,7 +92,7 @@ struct nouveau_drm {
37877 struct drm_global_reference mem_global_ref;
37878 struct ttm_bo_global_ref bo_global_ref;
37879 struct ttm_bo_device bdev;
37880- atomic_t validate_sequence;
37881+ atomic_unchecked_t validate_sequence;
37882 int (*move)(struct nouveau_channel *,
37883 struct ttm_buffer_object *,
37884 struct ttm_mem_reg *, struct ttm_mem_reg *);
37885diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
37886index b4b4d0c..b7edc15 100644
37887--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
37888+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
37889@@ -322,7 +322,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
37890 int ret, i;
37891 struct nouveau_bo *res_bo = NULL;
37892
37893- sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
37894+ sequence = atomic_add_return_unchecked(1, &drm->ttm.validate_sequence);
37895 retry:
37896 if (++trycnt > 100000) {
37897 NV_ERROR(cli, "%s failed and gave up.\n", __func__);
37898@@ -359,7 +359,7 @@ retry:
37899 if (ret) {
37900 validate_fini(op, NULL);
37901 if (unlikely(ret == -EAGAIN)) {
37902- sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
37903+ sequence = atomic_add_return_unchecked(1, &drm->ttm.validate_sequence);
37904 ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
37905 sequence);
37906 if (!ret)
37907diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
37908index 08214bc..9208577 100644
37909--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
37910+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
37911@@ -50,7 +50,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
37912 unsigned long arg)
37913 {
37914 unsigned int nr = DRM_IOCTL_NR(cmd);
37915- drm_ioctl_compat_t *fn = NULL;
37916+ drm_ioctl_compat_t fn = NULL;
37917 int ret;
37918
37919 if (nr < DRM_COMMAND_BASE)
37920diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
37921index 25d3495..d81aaf6 100644
37922--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
37923+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
37924@@ -62,7 +62,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
37925 bool can_switch;
37926
37927 spin_lock(&dev->count_lock);
37928- can_switch = (dev->open_count == 0);
37929+ can_switch = (local_read(&dev->open_count) == 0);
37930 spin_unlock(&dev->count_lock);
37931 return can_switch;
37932 }
37933diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
37934index 489cb8c..0b8d0d3 100644
37935--- a/drivers/gpu/drm/qxl/qxl_ttm.c
37936+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
37937@@ -103,7 +103,7 @@ static void qxl_ttm_global_fini(struct qxl_device *qdev)
37938 }
37939 }
37940
37941-static struct vm_operations_struct qxl_ttm_vm_ops;
37942+static vm_operations_struct_no_const qxl_ttm_vm_ops __read_only;
37943 static const struct vm_operations_struct *ttm_vm_ops;
37944
37945 static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
37946@@ -147,8 +147,10 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
37947 return r;
37948 if (unlikely(ttm_vm_ops == NULL)) {
37949 ttm_vm_ops = vma->vm_ops;
37950+ pax_open_kernel();
37951 qxl_ttm_vm_ops = *ttm_vm_ops;
37952 qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
37953+ pax_close_kernel();
37954 }
37955 vma->vm_ops = &qxl_ttm_vm_ops;
37956 return 0;
37957@@ -556,25 +558,23 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data)
37958 static int qxl_ttm_debugfs_init(struct qxl_device *qdev)
37959 {
37960 #if defined(CONFIG_DEBUG_FS)
37961- static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
37962- static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
37963- unsigned i;
37964+ static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES] = {
37965+ {
37966+ .name = "qxl_mem_mm",
37967+ .show = &qxl_mm_dump_table,
37968+ },
37969+ {
37970+ .name = "qxl_surf_mm",
37971+ .show = &qxl_mm_dump_table,
37972+ }
37973+ };
37974
37975- for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
37976- if (i == 0)
37977- sprintf(qxl_mem_types_names[i], "qxl_mem_mm");
37978- else
37979- sprintf(qxl_mem_types_names[i], "qxl_surf_mm");
37980- qxl_mem_types_list[i].name = qxl_mem_types_names[i];
37981- qxl_mem_types_list[i].show = &qxl_mm_dump_table;
37982- qxl_mem_types_list[i].driver_features = 0;
37983- if (i == 0)
37984- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
37985- else
37986- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
37987+ pax_open_kernel();
37988+ *(void **)&qxl_mem_types_list[0].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
37989+ *(void **)&qxl_mem_types_list[1].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
37990+ pax_close_kernel();
37991
37992- }
37993- return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
37994+ return qxl_debugfs_add_files(qdev, qxl_mem_types_list, QXL_DEBUGFS_MEM_TYPES);
37995 #else
37996 return 0;
37997 #endif
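[editor's note] The qxl debugfs conversion deletes the runtime loop that sprintf()ed entry names into writable static buffers: with designated initializers the table and its strings are fixed at compile time, and only the two device-dependent .data pointers are written late, under pax_open_kernel(). The idiom, reduced (names illustrative):

/* Static-table idiom from the qxl hunk: everything knowable at
 * compile time is a const initializer; only device-dependent
 * fields are patched in at init time. */
struct info_entry {
        const char *name;
        int (*show)(void *data);
        void *data;            /* filled in once the device exists */
};

static int mm_dump(void *data) { (void)data; return 0; }

static struct info_entry mem_types[] = {
        { .name = "qxl_mem_mm",  .show = mm_dump },
        { .name = "qxl_surf_mm", .show = mm_dump },
};

void init_tables(void *vram_priv, void *surf_priv)
{
        /* in the kernel patch this pair of stores sits between
         * pax_open_kernel()/pax_close_kernel() */
        mem_types[0].data = vram_priv;
        mem_types[1].data = surf_priv;
}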
37998diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
37999index d4660cf..70dbe65 100644
38000--- a/drivers/gpu/drm/r128/r128_cce.c
38001+++ b/drivers/gpu/drm/r128/r128_cce.c
38002@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
38003
38004 /* GH: Simple idle check.
38005 */
38006- atomic_set(&dev_priv->idle_count, 0);
38007+ atomic_set_unchecked(&dev_priv->idle_count, 0);
38008
38009 /* We don't support anything other than bus-mastering ring mode,
38010 * but the ring can be in either AGP or PCI space for the ring
38011diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
38012index 930c71b..499aded 100644
38013--- a/drivers/gpu/drm/r128/r128_drv.h
38014+++ b/drivers/gpu/drm/r128/r128_drv.h
38015@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
38016 int is_pci;
38017 unsigned long cce_buffers_offset;
38018
38019- atomic_t idle_count;
38020+ atomic_unchecked_t idle_count;
38021
38022 int page_flipping;
38023 int current_page;
38024 u32 crtc_offset;
38025 u32 crtc_offset_cntl;
38026
38027- atomic_t vbl_received;
38028+ atomic_unchecked_t vbl_received;
38029
38030 u32 color_fmt;
38031 unsigned int front_offset;
38032diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
38033index a954c54..9cc595c 100644
38034--- a/drivers/gpu/drm/r128/r128_ioc32.c
38035+++ b/drivers/gpu/drm/r128/r128_ioc32.c
38036@@ -177,7 +177,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
38037 return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
38038 }
38039
38040-drm_ioctl_compat_t *r128_compat_ioctls[] = {
38041+drm_ioctl_compat_t r128_compat_ioctls[] = {
38042 [DRM_R128_INIT] = compat_r128_init,
38043 [DRM_R128_DEPTH] = compat_r128_depth,
38044 [DRM_R128_STIPPLE] = compat_r128_stipple,
38045@@ -196,18 +196,15 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = {
38046 long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
38047 {
38048 unsigned int nr = DRM_IOCTL_NR(cmd);
38049- drm_ioctl_compat_t *fn = NULL;
38050 int ret;
38051
38052 if (nr < DRM_COMMAND_BASE)
38053 return drm_compat_ioctl(filp, cmd, arg);
38054
38055- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls))
38056- fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
38057-
38058- if (fn != NULL)
38059+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls)) {
38060+ drm_ioctl_compat_t fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
38061 ret = (*fn) (filp, cmd, arg);
38062- else
38063+ } else
38064 ret = drm_ioctl(filp, cmd, arg);
38065
38066 return ret;
38067diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
38068index 2ea4f09..d391371 100644
38069--- a/drivers/gpu/drm/r128/r128_irq.c
38070+++ b/drivers/gpu/drm/r128/r128_irq.c
38071@@ -41,7 +41,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
38072 if (crtc != 0)
38073 return 0;
38074
38075- return atomic_read(&dev_priv->vbl_received);
38076+ return atomic_read_unchecked(&dev_priv->vbl_received);
38077 }
38078
38079 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
38080@@ -55,7 +55,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
38081 /* VBLANK interrupt */
38082 if (status & R128_CRTC_VBLANK_INT) {
38083 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
38084- atomic_inc(&dev_priv->vbl_received);
38085+ atomic_inc_unchecked(&dev_priv->vbl_received);
38086 drm_handle_vblank(dev, 0);
38087 return IRQ_HANDLED;
38088 }
38089diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
38090index 19bb7e6..de7e2a2 100644
38091--- a/drivers/gpu/drm/r128/r128_state.c
38092+++ b/drivers/gpu/drm/r128/r128_state.c
38093@@ -320,10 +320,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
38094
38095 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
38096 {
38097- if (atomic_read(&dev_priv->idle_count) == 0)
38098+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
38099 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
38100 else
38101- atomic_set(&dev_priv->idle_count, 0);
38102+ atomic_set_unchecked(&dev_priv->idle_count, 0);
38103 }
38104
38105 #endif
38106diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
38107index 5a82b6b..9e69c73 100644
38108--- a/drivers/gpu/drm/radeon/mkregtable.c
38109+++ b/drivers/gpu/drm/radeon/mkregtable.c
38110@@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
38111 regex_t mask_rex;
38112 regmatch_t match[4];
38113 char buf[1024];
38114- size_t end;
38115+ long end;
38116 int len;
38117 int done = 0;
38118 int r;
38119 unsigned o;
38120 struct offset *offset;
38121 char last_reg_s[10];
38122- int last_reg;
38123+ unsigned long last_reg;
38124
38125 if (regcomp
38126 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
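[editor's note] mkregtable is a host-side build tool, and the widening here reads as signedness hygiene: a size_t that participates in subtraction wraps huge instead of going negative, and a hex register offset parsed from the table can exceed INT_MAX, so end becomes a signed long and last_reg an unsigned long. Both hazards demonstrated with hypothetical values (this is our reading of the change; the patch carries no rationale):

/* Two signedness hazards the mkregtable widening avoids. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        /* 1. size_t subtraction underflows instead of going negative */
        size_t end_unsigned = 3;
        long   end_signed   = 3;
        printf("%zu vs %ld\n", end_unsigned - 5, end_signed - 5);

        /* 2. a 32-bit hex register offset can exceed INT_MAX */
        int           narrow = strtoul("0x80000000", NULL, 16); /* impl-defined */
        unsigned long wide   = strtoul("0x80000000", NULL, 16);
        printf("%d vs %lu\n", narrow, wide);
        return 0;
}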
38127diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
38128index b0dc0b6..a9bfe9c 100644
38129--- a/drivers/gpu/drm/radeon/radeon_device.c
38130+++ b/drivers/gpu/drm/radeon/radeon_device.c
38131@@ -1014,7 +1014,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
38132 bool can_switch;
38133
38134 spin_lock(&dev->count_lock);
38135- can_switch = (dev->open_count == 0);
38136+ can_switch = (local_read(&dev->open_count) == 0);
38137 spin_unlock(&dev->count_lock);
38138 return can_switch;
38139 }
38140diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
38141index b369d42..8dd04eb 100644
38142--- a/drivers/gpu/drm/radeon/radeon_drv.h
38143+++ b/drivers/gpu/drm/radeon/radeon_drv.h
38144@@ -258,7 +258,7 @@ typedef struct drm_radeon_private {
38145
38146 /* SW interrupt */
38147 wait_queue_head_t swi_queue;
38148- atomic_t swi_emitted;
38149+ atomic_unchecked_t swi_emitted;
38150 int vblank_crtc;
38151 uint32_t irq_enable_reg;
38152 uint32_t r500_disp_irq_reg;
38153diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
38154index c180df8..5fd8186 100644
38155--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
38156+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
38157@@ -358,7 +358,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
38158 request = compat_alloc_user_space(sizeof(*request));
38159 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
38160 || __put_user(req32.param, &request->param)
38161- || __put_user((void __user *)(unsigned long)req32.value,
38162+ || __put_user((unsigned long)req32.value,
38163 &request->value))
38164 return -EFAULT;
38165
38166@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
38167 #define compat_radeon_cp_setparam NULL
38168 #endif /* X86_64 || IA64 */
38169
38170-static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
38171+static drm_ioctl_compat_t radeon_compat_ioctls[] = {
38172 [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
38173 [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
38174 [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
38175@@ -393,18 +393,15 @@ static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
38176 long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
38177 {
38178 unsigned int nr = DRM_IOCTL_NR(cmd);
38179- drm_ioctl_compat_t *fn = NULL;
38180 int ret;
38181
38182 if (nr < DRM_COMMAND_BASE)
38183 return drm_compat_ioctl(filp, cmd, arg);
38184
38185- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls))
38186- fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
38187-
38188- if (fn != NULL)
38189+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls)) {
38190+ drm_ioctl_compat_t fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
38191 ret = (*fn) (filp, cmd, arg);
38192- else
38193+ } else
38194 ret = drm_ioctl(filp, cmd, arg);
38195
38196 return ret;
38197diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
38198index 8d68e97..9dcfed8 100644
38199--- a/drivers/gpu/drm/radeon/radeon_irq.c
38200+++ b/drivers/gpu/drm/radeon/radeon_irq.c
38201@@ -226,8 +226,8 @@ static int radeon_emit_irq(struct drm_device * dev)
38202 unsigned int ret;
38203 RING_LOCALS;
38204
38205- atomic_inc(&dev_priv->swi_emitted);
38206- ret = atomic_read(&dev_priv->swi_emitted);
38207+ atomic_inc_unchecked(&dev_priv->swi_emitted);
38208+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
38209
38210 BEGIN_RING(4);
38211 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
38212@@ -353,7 +353,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
38213 drm_radeon_private_t *dev_priv =
38214 (drm_radeon_private_t *) dev->dev_private;
38215
38216- atomic_set(&dev_priv->swi_emitted, 0);
38217+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
38218 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
38219
38220 dev->max_vblank_count = 0x001fffff;
38221diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
38222index 4d20910..6726b6d 100644
38223--- a/drivers/gpu/drm/radeon/radeon_state.c
38224+++ b/drivers/gpu/drm/radeon/radeon_state.c
38225@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
38226 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
38227 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
38228
38229- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
38230+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
38231 sarea_priv->nbox * sizeof(depth_boxes[0])))
38232 return -EFAULT;
38233
38234@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
38235 {
38236 drm_radeon_private_t *dev_priv = dev->dev_private;
38237 drm_radeon_getparam_t *param = data;
38238- int value;
38239+ int value = 0;
38240
38241 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
38242
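[editor's note] The radeon_cp_clear() change guards a double fetch: sarea_priv lives in a mapping shared with userspace, so nbox can change between the clamp a few lines earlier and the copy, and the added condition re-checks the bound in the same statement that consumes it; the second hunk zero-initializes getparam's value so an unhandled parameter cannot leak stack contents. The double-fetch problem in miniature (illustrative names, not the radeon originals):

/* Double-fetch sketch: "shared" can be rewritten by another party
 * between the clamp and the use, so the bound is re-checked at the
 * point of use. */
#include <string.h>

#define NR_CLIPRECTS 12

struct sarea { volatile int nbox; };   /* mapped into userspace too */

int clear_boxes(struct sarea *shared, const char *src, char *dst)
{
        if (shared->nbox > NR_CLIPRECTS)
                shared->nbox = NR_CLIPRECTS;     /* first fetch + clamp */

        int n = shared->nbox;                    /* may have changed!   */
        if (n > NR_CLIPRECTS || n < 0)           /* re-check at use     */
                return -1;
        memcpy(dst, src, (size_t)n * 16);
        return 0;
}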
38243diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
38244index 6c0ce89..57a2529 100644
38245--- a/drivers/gpu/drm/radeon/radeon_ttm.c
38246+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
38247@@ -782,7 +782,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
38248 man->size = size >> PAGE_SHIFT;
38249 }
38250
38251-static struct vm_operations_struct radeon_ttm_vm_ops;
38252+static vm_operations_struct_no_const radeon_ttm_vm_ops __read_only;
38253 static const struct vm_operations_struct *ttm_vm_ops = NULL;
38254
38255 static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
38256@@ -823,8 +823,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
38257 }
38258 if (unlikely(ttm_vm_ops == NULL)) {
38259 ttm_vm_ops = vma->vm_ops;
38260+ pax_open_kernel();
38261 radeon_ttm_vm_ops = *ttm_vm_ops;
38262 radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
38263+ pax_close_kernel();
38264 }
38265 vma->vm_ops = &radeon_ttm_vm_ops;
38266 return 0;
38267@@ -853,38 +855,33 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data)
38268 static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
38269 {
38270 #if defined(CONFIG_DEBUG_FS)
38271- static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2];
38272- static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+2][32];
38273+ static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2] = {
38274+ {
38275+ .name = "radeon_vram_mm",
38276+ .show = &radeon_mm_dump_table,
38277+ },
38278+ {
38279+ .name = "radeon_gtt_mm",
38280+ .show = &radeon_mm_dump_table,
38281+ },
38282+ {
38283+ .name = "ttm_page_pool",
38284+ .show = &ttm_page_alloc_debugfs,
38285+ },
38286+ {
38287+ .name = "ttm_dma_page_pool",
38288+ .show = &ttm_dma_page_alloc_debugfs,
38289+ },
38290+ };
38291 unsigned i;
38292
38293- for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
38294- if (i == 0)
38295- sprintf(radeon_mem_types_names[i], "radeon_vram_mm");
38296- else
38297- sprintf(radeon_mem_types_names[i], "radeon_gtt_mm");
38298- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
38299- radeon_mem_types_list[i].show = &radeon_mm_dump_table;
38300- radeon_mem_types_list[i].driver_features = 0;
38301- if (i == 0)
38302- radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
38303- else
38304- radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
38305-
38306- }
38307- /* Add ttm page pool to debugfs */
38308- sprintf(radeon_mem_types_names[i], "ttm_page_pool");
38309- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
38310- radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
38311- radeon_mem_types_list[i].driver_features = 0;
38312- radeon_mem_types_list[i++].data = NULL;
38313+ pax_open_kernel();
38314+ *(void **)&radeon_mem_types_list[0].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
38315+ *(void **)&radeon_mem_types_list[1].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
38316+ pax_close_kernel();
38317 #ifdef CONFIG_SWIOTLB
38318- if (swiotlb_nr_tbl()) {
38319- sprintf(radeon_mem_types_names[i], "ttm_dma_page_pool");
38320- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
38321- radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs;
38322- radeon_mem_types_list[i].driver_features = 0;
38323- radeon_mem_types_list[i++].data = NULL;
38324- }
38325+ if (swiotlb_nr_tbl())
38326+ i++;
38327 #endif
38328 return radeon_debugfs_add_files(rdev, radeon_mem_types_list, i);
38329
38330diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
38331index 55880d5..9e95342 100644
38332--- a/drivers/gpu/drm/radeon/rs690.c
38333+++ b/drivers/gpu/drm/radeon/rs690.c
38334@@ -327,9 +327,11 @@ static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
38335 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
38336 rdev->pm.sideport_bandwidth.full)
38337 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
38338- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
38339+ read_delay_latency.full = dfixed_const(800 * 1000);
38340 read_delay_latency.full = dfixed_div(read_delay_latency,
38341 rdev->pm.igp_sideport_mclk);
38342+ a.full = dfixed_const(370);
38343+ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
38344 } else {
38345 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
38346 rdev->pm.k8_bandwidth.full)
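[editor's note] The rs690 change fixes plain arithmetic overflow: dfixed_const() shifts its argument left 12 bits for 20.12 fixed point, and 370 * 800 * 1000 = 296,000,000 needs about 40 bits once shifted, so it wraps a u32; staging the computation as (800 * 1000) << 12, then dividing, then multiplying by 370 keeps every intermediate in range. Demonstrated:

/* The rs690 fix in miniature: 370*800*1000 << 12 wraps a 32-bit
 * value, while (800*1000) << 12 still fits, so the 370 factor is
 * multiplied in after the shift (and, in the driver, after the
 * division by the memory clock). */
#include <stdio.h>
#include <stdint.h>

#define DFIXED_SHIFT 12
static uint32_t dfixed_const(uint32_t a) { return a << DFIXED_SHIFT; }

int main(void)
{
        uint64_t exact = (uint64_t)370 * 800 * 1000 << DFIXED_SHIFT;
        uint32_t wrapped = dfixed_const(370 * 800 * 1000);  /* overflows */
        uint32_t staged  = dfixed_const(800 * 1000);        /* fits      */

        printf("exact=%llu wrapped=%u staged=%u\n",
               (unsigned long long)exact, wrapped, staged);
        return 0;
}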
38347diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
38348index dbc2def..0a9f710 100644
38349--- a/drivers/gpu/drm/ttm/ttm_memory.c
38350+++ b/drivers/gpu/drm/ttm/ttm_memory.c
38351@@ -264,7 +264,7 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
38352 zone->glob = glob;
38353 glob->zone_kernel = zone;
38354 ret = kobject_init_and_add(
38355- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
38356+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
38357 if (unlikely(ret != 0)) {
38358 kobject_put(&zone->kobj);
38359 return ret;
38360@@ -347,7 +347,7 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
38361 zone->glob = glob;
38362 glob->zone_dma32 = zone;
38363 ret = kobject_init_and_add(
38364- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
38365+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
38366 if (unlikely(ret != 0)) {
38367 kobject_put(&zone->kobj);
38368 return ret;
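[editor's note] Passing zone->name directly in the printf-style slot of kobject_init_and_add() is a classic format-string bug waiting for a name containing '%'; routing it through "%s" prints it verbatim. The rule in one line, shown with printf():

/* Format-string rule illustrated with printf(): never pass
 * runtime data where a format is expected. */
#include <stdio.h>

int main(void)
{
        const char *zone_name = "kernel %n zone";  /* hostile name */

        /* printf(zone_name);          BAD:  name parsed as a format */
        printf("%s\n", zone_name); /*  GOOD: name printed verbatim  */
        return 0;
}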
38369diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
38370index bd2a3b4..122d9ad 100644
38371--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
38372+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
38373@@ -394,9 +394,9 @@ static int ttm_pool_get_num_unused_pages(void)
38374 static int ttm_pool_mm_shrink(struct shrinker *shrink,
38375 struct shrink_control *sc)
38376 {
38377- static atomic_t start_pool = ATOMIC_INIT(0);
38378+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
38379 unsigned i;
38380- unsigned pool_offset = atomic_add_return(1, &start_pool);
38381+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
38382 struct ttm_page_pool *pool;
38383 int shrink_pages = sc->nr_to_scan;
38384
38385diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
38386index dc0c065..58a0782 100644
38387--- a/drivers/gpu/drm/udl/udl_fb.c
38388+++ b/drivers/gpu/drm/udl/udl_fb.c
38389@@ -367,7 +367,6 @@ static int udl_fb_release(struct fb_info *info, int user)
38390 fb_deferred_io_cleanup(info);
38391 kfree(info->fbdefio);
38392 info->fbdefio = NULL;
38393- info->fbops->fb_mmap = udl_fb_mmap;
38394 }
38395
38396 pr_warn("released /dev/fb%d user=%d count=%d\n",
38397diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
38398index 893a650..6190d3b 100644
38399--- a/drivers/gpu/drm/via/via_drv.h
38400+++ b/drivers/gpu/drm/via/via_drv.h
38401@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
38402 typedef uint32_t maskarray_t[5];
38403
38404 typedef struct drm_via_irq {
38405- atomic_t irq_received;
38406+ atomic_unchecked_t irq_received;
38407 uint32_t pending_mask;
38408 uint32_t enable_mask;
38409 wait_queue_head_t irq_queue;
38410@@ -75,7 +75,7 @@ typedef struct drm_via_private {
38411 struct timeval last_vblank;
38412 int last_vblank_valid;
38413 unsigned usec_per_vblank;
38414- atomic_t vbl_received;
38415+ atomic_unchecked_t vbl_received;
38416 drm_via_state_t hc_state;
38417 char pci_buf[VIA_PCI_BUF_SIZE];
38418 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
38419diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
38420index ac98964..5dbf512 100644
38421--- a/drivers/gpu/drm/via/via_irq.c
38422+++ b/drivers/gpu/drm/via/via_irq.c
38423@@ -101,7 +101,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
38424 if (crtc != 0)
38425 return 0;
38426
38427- return atomic_read(&dev_priv->vbl_received);
38428+ return atomic_read_unchecked(&dev_priv->vbl_received);
38429 }
38430
38431 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
38432@@ -116,8 +116,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
38433
38434 status = VIA_READ(VIA_REG_INTERRUPT);
38435 if (status & VIA_IRQ_VBLANK_PENDING) {
38436- atomic_inc(&dev_priv->vbl_received);
38437- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
38438+ atomic_inc_unchecked(&dev_priv->vbl_received);
38439+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
38440 do_gettimeofday(&cur_vblank);
38441 if (dev_priv->last_vblank_valid) {
38442 dev_priv->usec_per_vblank =
38443@@ -127,7 +127,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
38444 dev_priv->last_vblank = cur_vblank;
38445 dev_priv->last_vblank_valid = 1;
38446 }
38447- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
38448+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
38449 DRM_DEBUG("US per vblank is: %u\n",
38450 dev_priv->usec_per_vblank);
38451 }
38452@@ -137,7 +137,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
38453
38454 for (i = 0; i < dev_priv->num_irqs; ++i) {
38455 if (status & cur_irq->pending_mask) {
38456- atomic_inc(&cur_irq->irq_received);
38457+ atomic_inc_unchecked(&cur_irq->irq_received);
38458 DRM_WAKEUP(&cur_irq->irq_queue);
38459 handled = 1;
38460 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
38461@@ -242,11 +242,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
38462 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
38463 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
38464 masks[irq][4]));
38465- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
38466+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
38467 } else {
38468 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
38469 (((cur_irq_sequence =
38470- atomic_read(&cur_irq->irq_received)) -
38471+ atomic_read_unchecked(&cur_irq->irq_received)) -
38472 *sequence) <= (1 << 23)));
38473 }
38474 *sequence = cur_irq_sequence;
38475@@ -284,7 +284,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
38476 }
38477
38478 for (i = 0; i < dev_priv->num_irqs; ++i) {
38479- atomic_set(&cur_irq->irq_received, 0);
38480+ atomic_set_unchecked(&cur_irq->irq_received, 0);
38481 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
38482 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
38483 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
38484@@ -366,7 +366,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
38485 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
38486 case VIA_IRQ_RELATIVE:
38487 irqwait->request.sequence +=
38488- atomic_read(&cur_irq->irq_received);
38489+ atomic_read_unchecked(&cur_irq->irq_received);
38490 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
38491 case VIA_IRQ_ABSOLUTE:
38492 break;
38493diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
38494index 13aeda7..4a952d1 100644
38495--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
38496+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
38497@@ -290,7 +290,7 @@ struct vmw_private {
38498 * Fencing and IRQs.
38499 */
38500
38501- atomic_t marker_seq;
38502+ atomic_unchecked_t marker_seq;
38503 wait_queue_head_t fence_queue;
38504 wait_queue_head_t fifo_queue;
38505 int fence_queue_waiters; /* Protected by hw_mutex */
38506diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
38507index 3eb1486..0a47ee9 100644
38508--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
38509+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
38510@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
38511 (unsigned int) min,
38512 (unsigned int) fifo->capabilities);
38513
38514- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
38515+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
38516 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
38517 vmw_marker_queue_init(&fifo->marker_queue);
38518 return vmw_fifo_send_fence(dev_priv, &dummy);
38519@@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
38520 if (reserveable)
38521 iowrite32(bytes, fifo_mem +
38522 SVGA_FIFO_RESERVED);
38523- return fifo_mem + (next_cmd >> 2);
38524+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
38525 } else {
38526 need_bounce = true;
38527 }
38528@@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
38529
38530 fm = vmw_fifo_reserve(dev_priv, bytes);
38531 if (unlikely(fm == NULL)) {
38532- *seqno = atomic_read(&dev_priv->marker_seq);
38533+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
38534 ret = -ENOMEM;
38535 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
38536 false, 3*HZ);
38537@@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
38538 }
38539
38540 do {
38541- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
38542+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
38543 } while (*seqno == 0);
38544
38545 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
38546diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
38547index c509d40..3b640c3 100644
38548--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
38549+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
38550@@ -138,7 +138,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
38551 int ret;
38552
38553 num_clips = arg->num_clips;
38554- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
38555+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
38556
38557 if (unlikely(num_clips == 0))
38558 return 0;
38559@@ -222,7 +222,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
38560 int ret;
38561
38562 num_clips = arg->num_clips;
38563- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
38564+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
38565
38566 if (unlikely(num_clips == 0))
38567 return 0;
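In the two vmwgfx ioctl hunks, the clip-rect pointer arrives from userspace as a u64, and unpacking it must reapply the __user address-space annotation so sparse can verify the pointer only ever flows through copy_from_user()/copy_to_user(); the same idea appears as __force_user in the i2c-dev.c hunk further down. A hedged kernel-style sketch, with copy_clips() as a hypothetical helper:

    #include <linux/errno.h>
    #include <linux/types.h>
    #include <linux/uaccess.h>

    struct drm_vmw_rect;   /* UAPI type; only used through a pointer here */

    /* hypothetical helper showing the annotation-preserving unpack */
    static int copy_clips(u64 clips_ptr_u64, void *dst, size_t bytes)
    {
        /* keep the __user marking so sparse flags any direct dereference */
        struct drm_vmw_rect __user *clips =
            (struct drm_vmw_rect __user *)(unsigned long)clips_ptr_u64;

        return copy_from_user(dst, clips, bytes) ? -EFAULT : 0;
    }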
38568diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
38569index 4640adb..e1384ed 100644
38570--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
38571+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
38572@@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
38573 * emitted. Then the fence is stale and signaled.
38574 */
38575
38576- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
38577+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
38578 > VMW_FENCE_WRAP);
38579
38580 return ret;
38581@@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
38582
38583 if (fifo_idle)
38584 down_read(&fifo_state->rwsem);
38585- signal_seq = atomic_read(&dev_priv->marker_seq);
38586+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
38587 ret = 0;
38588
38589 for (;;) {
38590diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
38591index 8a8725c2..afed796 100644
38592--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
38593+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
38594@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
38595 while (!vmw_lag_lt(queue, us)) {
38596 spin_lock(&queue->lock);
38597 if (list_empty(&queue->head))
38598- seqno = atomic_read(&dev_priv->marker_seq);
38599+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
38600 else {
38601 marker = list_first_entry(&queue->head,
38602 struct vmw_marker, head);
38603diff --git a/drivers/gpu/host1x/drm/dc.c b/drivers/gpu/host1x/drm/dc.c
38604index 8c04943..4370ed9 100644
38605--- a/drivers/gpu/host1x/drm/dc.c
38606+++ b/drivers/gpu/host1x/drm/dc.c
38607@@ -999,7 +999,7 @@ static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor)
38608 }
38609
38610 for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
38611- dc->debugfs_files[i].data = dc;
38612+ *(void **)&dc->debugfs_files[i].data = dc;
38613
38614 err = drm_debugfs_create_files(dc->debugfs_files,
38615 ARRAY_SIZE(debugfs_files),
38616diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
38617index 402f486..f862d7e 100644
38618--- a/drivers/hid/hid-core.c
38619+++ b/drivers/hid/hid-core.c
38620@@ -2275,7 +2275,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
38621
38622 int hid_add_device(struct hid_device *hdev)
38623 {
38624- static atomic_t id = ATOMIC_INIT(0);
38625+ static atomic_unchecked_t id = ATOMIC_INIT(0);
38626 int ret;
38627
38628 if (WARN_ON(hdev->status & HID_STAT_ADDED))
38629@@ -2309,7 +2309,7 @@ int hid_add_device(struct hid_device *hdev)
38630 /* XXX hack, any other cleaner solution after the driver core
38631 * is converted to allow more than 20 bytes as the device name? */
38632 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
38633- hdev->vendor, hdev->product, atomic_inc_return(&id));
38634+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
38635
38636 hid_debug_register(hdev, dev_name(&hdev->dev));
38637 ret = device_add(&hdev->dev);
38638diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
38639index 90124ff..3761764 100644
38640--- a/drivers/hid/hid-wiimote-debug.c
38641+++ b/drivers/hid/hid-wiimote-debug.c
38642@@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
38643 else if (size == 0)
38644 return -EIO;
38645
38646- if (copy_to_user(u, buf, size))
38647+ if (size > sizeof(buf) || copy_to_user(u, buf, size))
38648 return -EFAULT;
38649
38650 *off += size;
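The wiimote-debug guard (repeated in mousedev.c near the end of this section) re-checks a computed length against the actual on-stack buffer before copy_to_user(), so a miscalculated size can never leak adjacent stack memory. The general shape, with fill_buffer() as a hypothetical producer:

    #include <linux/kernel.h>
    #include <linux/uaccess.h>

    static ssize_t fill_buffer(char *buf, size_t len, loff_t pos); /* hypothetical */

    /* shape of the hardened read path above */
    static ssize_t demo_read(char __user *u, size_t count, loff_t *off)
    {
        char buf[16];
        ssize_t size = fill_buffer(buf, min_t(size_t, count, sizeof(buf)), *off);

        if (size <= 0)
            return size;

        /* re-check against the real object before copying out: a
         * miscomputed 'size' must never read past the stack buffer */
        if (size > sizeof(buf) || copy_to_user(u, buf, size))
            return -EFAULT;

        *off += size;
        return size;
    }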
38651diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
38652index 0b122f8..b1d8160 100644
38653--- a/drivers/hv/channel.c
38654+++ b/drivers/hv/channel.c
38655@@ -394,8 +394,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
38656 int ret = 0;
38657 int t;
38658
38659- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
38660- atomic_inc(&vmbus_connection.next_gpadl_handle);
38661+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
38662+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
38663
38664 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
38665 if (ret)
38666diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
38667index ae49237..380d4c9 100644
38668--- a/drivers/hv/hv.c
38669+++ b/drivers/hv/hv.c
38670@@ -112,7 +112,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
38671 u64 output_address = (output) ? virt_to_phys(output) : 0;
38672 u32 output_address_hi = output_address >> 32;
38673 u32 output_address_lo = output_address & 0xFFFFFFFF;
38674- void *hypercall_page = hv_context.hypercall_page;
38675+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
38676
38677 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
38678 "=a"(hv_status_lo) : "d" (control_hi),
38679diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
38680index 12f2f9e..679603c 100644
38681--- a/drivers/hv/hyperv_vmbus.h
38682+++ b/drivers/hv/hyperv_vmbus.h
38683@@ -591,7 +591,7 @@ enum vmbus_connect_state {
38684 struct vmbus_connection {
38685 enum vmbus_connect_state conn_state;
38686
38687- atomic_t next_gpadl_handle;
38688+ atomic_unchecked_t next_gpadl_handle;
38689
38690 /*
38691 * Represents channel interrupts. Each bit position represents a
38692diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
38693index 4004e54..c2de226 100644
38694--- a/drivers/hv/vmbus_drv.c
38695+++ b/drivers/hv/vmbus_drv.c
38696@@ -668,10 +668,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
38697 {
38698 int ret = 0;
38699
38700- static atomic_t device_num = ATOMIC_INIT(0);
38701+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
38702
38703 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
38704- atomic_inc_return(&device_num));
38705+ atomic_inc_return_unchecked(&device_num));
38706
38707 child_device_obj->device.bus = &hv_bus;
38708 child_device_obj->device.parent = &hv_acpi_dev->dev;
38709diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
38710index 6351aba..dc4aaf4 100644
38711--- a/drivers/hwmon/acpi_power_meter.c
38712+++ b/drivers/hwmon/acpi_power_meter.c
38713@@ -117,7 +117,7 @@ struct sensor_template {
38714 struct device_attribute *devattr,
38715 const char *buf, size_t count);
38716 int index;
38717-};
38718+} __do_const;
38719
38720 /* Averaging interval */
38721 static int update_avg_interval(struct acpi_power_meter_resource *resource)
38722@@ -629,7 +629,7 @@ static int register_attrs(struct acpi_power_meter_resource *resource,
38723 struct sensor_template *attrs)
38724 {
38725 struct device *dev = &resource->acpi_dev->dev;
38726- struct sensor_device_attribute *sensors =
38727+ sensor_device_attribute_no_const *sensors =
38728 &resource->sensors[resource->num_sensors];
38729 int res = 0;
38730
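Here the patch introduces the other big annotation family: __do_const forces the constify gcc plugin to treat every instance of a mixed data/function-pointer struct as const, and the *_no_const typedefs (sensor_device_attribute_no_const here, device_attribute_no_const, attribute_no_const and i2c_algorithm_no_const in the following hunks) opt individual runtime-built instances back out. A paraphrase of how those annotations are wired up; the attribute names follow the patch's compiler-gcc*.h changes, but the struct body below is illustrative:

    #include <linux/types.h>

    /* inert unless the constify plugin is loaded */
    #ifdef CONSTIFY_PLUGIN
    #define __do_const __attribute__((do_const))
    #define __no_const __attribute__((no_const))
    #else
    #define __do_const
    #define __no_const
    #endif

    struct device;

    struct sensor_template {
        ssize_t (*show)(struct device *dev, char *buf);
        int index;
    } __do_const;          /* plugin now treats every instance as const */

    /* escape hatch for tables filled in at runtime, as
     * register_attrs() does above */
    typedef struct sensor_device_attribute __no_const
            sensor_device_attribute_no_const;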
38731diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
38732index 62c2e32..8f2859a 100644
38733--- a/drivers/hwmon/applesmc.c
38734+++ b/drivers/hwmon/applesmc.c
38735@@ -1084,7 +1084,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
38736 {
38737 struct applesmc_node_group *grp;
38738 struct applesmc_dev_attr *node;
38739- struct attribute *attr;
38740+ attribute_no_const *attr;
38741 int ret, i;
38742
38743 for (grp = groups; grp->format; grp++) {
38744diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
38745index b25c643..a13460d 100644
38746--- a/drivers/hwmon/asus_atk0110.c
38747+++ b/drivers/hwmon/asus_atk0110.c
38748@@ -152,10 +152,10 @@ MODULE_DEVICE_TABLE(acpi, atk_ids);
38749 struct atk_sensor_data {
38750 struct list_head list;
38751 struct atk_data *data;
38752- struct device_attribute label_attr;
38753- struct device_attribute input_attr;
38754- struct device_attribute limit1_attr;
38755- struct device_attribute limit2_attr;
38756+ device_attribute_no_const label_attr;
38757+ device_attribute_no_const input_attr;
38758+ device_attribute_no_const limit1_attr;
38759+ device_attribute_no_const limit2_attr;
38760 char label_attr_name[ATTR_NAME_SIZE];
38761 char input_attr_name[ATTR_NAME_SIZE];
38762 char limit1_attr_name[ATTR_NAME_SIZE];
38763@@ -275,7 +275,7 @@ static ssize_t atk_name_show(struct device *dev,
38764 static struct device_attribute atk_name_attr =
38765 __ATTR(name, 0444, atk_name_show, NULL);
38766
38767-static void atk_init_attribute(struct device_attribute *attr, char *name,
38768+static void atk_init_attribute(device_attribute_no_const *attr, char *name,
38769 sysfs_show_func show)
38770 {
38771 sysfs_attr_init(&attr->attr);
38772diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
38773index 658ce3a..0d0c2f3 100644
38774--- a/drivers/hwmon/coretemp.c
38775+++ b/drivers/hwmon/coretemp.c
38776@@ -790,7 +790,7 @@ static int __cpuinit coretemp_cpu_callback(struct notifier_block *nfb,
38777 return NOTIFY_OK;
38778 }
38779
38780-static struct notifier_block coretemp_cpu_notifier __refdata = {
38781+static struct notifier_block coretemp_cpu_notifier = {
38782 .notifier_call = coretemp_cpu_callback,
38783 };
38784
38785diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
38786index 1429f6e..ee03d59 100644
38787--- a/drivers/hwmon/ibmaem.c
38788+++ b/drivers/hwmon/ibmaem.c
38789@@ -926,7 +926,7 @@ static int aem_register_sensors(struct aem_data *data,
38790 struct aem_rw_sensor_template *rw)
38791 {
38792 struct device *dev = &data->pdev->dev;
38793- struct sensor_device_attribute *sensors = data->sensors;
38794+ sensor_device_attribute_no_const *sensors = data->sensors;
38795 int err;
38796
38797 /* Set up read-only sensors */
38798diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
38799index 52b77af..aed1ddf 100644
38800--- a/drivers/hwmon/iio_hwmon.c
38801+++ b/drivers/hwmon/iio_hwmon.c
38802@@ -73,7 +73,7 @@ static int iio_hwmon_probe(struct platform_device *pdev)
38803 {
38804 struct device *dev = &pdev->dev;
38805 struct iio_hwmon_state *st;
38806- struct sensor_device_attribute *a;
38807+ sensor_device_attribute_no_const *a;
38808 int ret, i;
38809 int in_i = 1, temp_i = 1, curr_i = 1;
38810 enum iio_chan_type type;
38811diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
38812index 9add6092..ee7ba3f 100644
38813--- a/drivers/hwmon/pmbus/pmbus_core.c
38814+++ b/drivers/hwmon/pmbus/pmbus_core.c
38815@@ -781,7 +781,7 @@ static int pmbus_add_attribute(struct pmbus_data *data, struct attribute *attr)
38816 return 0;
38817 }
38818
38819-static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
38820+static void pmbus_dev_attr_init(device_attribute_no_const *dev_attr,
38821 const char *name,
38822 umode_t mode,
38823 ssize_t (*show)(struct device *dev,
38824@@ -798,7 +798,7 @@ static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
38825 dev_attr->store = store;
38826 }
38827
38828-static void pmbus_attr_init(struct sensor_device_attribute *a,
38829+static void pmbus_attr_init(sensor_device_attribute_no_const *a,
38830 const char *name,
38831 umode_t mode,
38832 ssize_t (*show)(struct device *dev,
38833@@ -820,7 +820,7 @@ static int pmbus_add_boolean(struct pmbus_data *data,
38834 u16 reg, u8 mask)
38835 {
38836 struct pmbus_boolean *boolean;
38837- struct sensor_device_attribute *a;
38838+ sensor_device_attribute_no_const *a;
38839
38840 boolean = devm_kzalloc(data->dev, sizeof(*boolean), GFP_KERNEL);
38841 if (!boolean)
38842@@ -845,7 +845,7 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
38843 bool update, bool readonly)
38844 {
38845 struct pmbus_sensor *sensor;
38846- struct device_attribute *a;
38847+ device_attribute_no_const *a;
38848
38849 sensor = devm_kzalloc(data->dev, sizeof(*sensor), GFP_KERNEL);
38850 if (!sensor)
38851@@ -876,7 +876,7 @@ static int pmbus_add_label(struct pmbus_data *data,
38852 const char *lstring, int index)
38853 {
38854 struct pmbus_label *label;
38855- struct device_attribute *a;
38856+ device_attribute_no_const *a;
38857
38858 label = devm_kzalloc(data->dev, sizeof(*label), GFP_KERNEL);
38859 if (!label)
38860diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
38861index 2507f90..1645765 100644
38862--- a/drivers/hwmon/sht15.c
38863+++ b/drivers/hwmon/sht15.c
38864@@ -169,7 +169,7 @@ struct sht15_data {
38865 int supply_uv;
38866 bool supply_uv_valid;
38867 struct work_struct update_supply_work;
38868- atomic_t interrupt_handled;
38869+ atomic_unchecked_t interrupt_handled;
38870 };
38871
38872 /**
38873@@ -542,13 +542,13 @@ static int sht15_measurement(struct sht15_data *data,
38874 ret = gpio_direction_input(data->pdata->gpio_data);
38875 if (ret)
38876 return ret;
38877- atomic_set(&data->interrupt_handled, 0);
38878+ atomic_set_unchecked(&data->interrupt_handled, 0);
38879
38880 enable_irq(gpio_to_irq(data->pdata->gpio_data));
38881 if (gpio_get_value(data->pdata->gpio_data) == 0) {
38882 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
38883 /* Only relevant if the interrupt hasn't occurred. */
38884- if (!atomic_read(&data->interrupt_handled))
38885+ if (!atomic_read_unchecked(&data->interrupt_handled))
38886 schedule_work(&data->read_work);
38887 }
38888 ret = wait_event_timeout(data->wait_queue,
38889@@ -820,7 +820,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
38890
38891 /* First disable the interrupt */
38892 disable_irq_nosync(irq);
38893- atomic_inc(&data->interrupt_handled);
38894+ atomic_inc_unchecked(&data->interrupt_handled);
38895 /* Then schedule a reading work struct */
38896 if (data->state != SHT15_READING_NOTHING)
38897 schedule_work(&data->read_work);
38898@@ -842,11 +842,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
38899 * If not, then start the interrupt again - care here as could
38900 * have gone low in meantime so verify it hasn't!
38901 */
38902- atomic_set(&data->interrupt_handled, 0);
38903+ atomic_set_unchecked(&data->interrupt_handled, 0);
38904 enable_irq(gpio_to_irq(data->pdata->gpio_data));
38905 /* If still not occurred or another handler was scheduled */
38906 if (gpio_get_value(data->pdata->gpio_data)
38907- || atomic_read(&data->interrupt_handled))
38908+ || atomic_read_unchecked(&data->interrupt_handled))
38909 return;
38910 }
38911
38912diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
38913index 76f157b..9c0db1b 100644
38914--- a/drivers/hwmon/via-cputemp.c
38915+++ b/drivers/hwmon/via-cputemp.c
38916@@ -296,7 +296,7 @@ static int __cpuinit via_cputemp_cpu_callback(struct notifier_block *nfb,
38917 return NOTIFY_OK;
38918 }
38919
38920-static struct notifier_block via_cputemp_cpu_notifier __refdata = {
38921+static struct notifier_block via_cputemp_cpu_notifier = {
38922 .notifier_call = via_cputemp_cpu_callback,
38923 };
38924
38925diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
38926index 07f01ac..d79ad3d 100644
38927--- a/drivers/i2c/busses/i2c-amd756-s4882.c
38928+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
38929@@ -43,7 +43,7 @@
38930 extern struct i2c_adapter amd756_smbus;
38931
38932 static struct i2c_adapter *s4882_adapter;
38933-static struct i2c_algorithm *s4882_algo;
38934+static i2c_algorithm_no_const *s4882_algo;
38935
38936 /* Wrapper access functions for multiplexed SMBus */
38937 static DEFINE_MUTEX(amd756_lock);
38938diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
38939index 2ca268d..c6acbdf 100644
38940--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
38941+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
38942@@ -41,7 +41,7 @@
38943 extern struct i2c_adapter *nforce2_smbus;
38944
38945 static struct i2c_adapter *s4985_adapter;
38946-static struct i2c_algorithm *s4985_algo;
38947+static i2c_algorithm_no_const *s4985_algo;
38948
38949 /* Wrapper access functions for multiplexed SMBus */
38950 static DEFINE_MUTEX(nforce2_lock);
38951diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
38952index c3ccdea..5b3dc1a 100644
38953--- a/drivers/i2c/i2c-dev.c
38954+++ b/drivers/i2c/i2c-dev.c
38955@@ -271,7 +271,7 @@ static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client,
38956 break;
38957 }
38958
38959- data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
38960+ data_ptrs[i] = (u8 __force_user *)rdwr_pa[i].buf;
38961 rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len);
38962 if (IS_ERR(rdwr_pa[i].buf)) {
38963 res = PTR_ERR(rdwr_pa[i].buf);
38964diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
38965index 2ff6204..218c16e 100644
38966--- a/drivers/ide/ide-cd.c
38967+++ b/drivers/ide/ide-cd.c
38968@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
38969 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
38970 if ((unsigned long)buf & alignment
38971 || blk_rq_bytes(rq) & q->dma_pad_mask
38972- || object_is_on_stack(buf))
38973+ || object_starts_on_stack(buf))
38974 drive->dma = 0;
38975 }
38976 }
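The ide-cd hunk swaps in object_starts_on_stack() before deciding a request buffer is safe for DMA: buffers living on the kernel stack force the driver back to PIO. The renamed grsecurity helper presumably reduces to a bounds test against the current task's stack, along these lines (a sketch, not the patch's exact definition):

    #include <linux/sched.h>
    #include <linux/thread_info.h>

    /* true when the object's first byte lies inside the current
     * task's stack */
    static inline int object_starts_on_stack(const void *obj)
    {
        const void *stack = task_stack_page(current);

        return obj >= stack && obj < stack + THREAD_SIZE;
    }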
38977diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
38978index e145931..08bfc59 100644
38979--- a/drivers/iio/industrialio-core.c
38980+++ b/drivers/iio/industrialio-core.c
38981@@ -506,7 +506,7 @@ static ssize_t iio_write_channel_info(struct device *dev,
38982 }
38983
38984 static
38985-int __iio_device_attr_init(struct device_attribute *dev_attr,
38986+int __iio_device_attr_init(device_attribute_no_const *dev_attr,
38987 const char *postfix,
38988 struct iio_chan_spec const *chan,
38989 ssize_t (*readfunc)(struct device *dev,
38990diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
38991index 784b97c..c9ceadf 100644
38992--- a/drivers/infiniband/core/cm.c
38993+++ b/drivers/infiniband/core/cm.c
38994@@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
38995
38996 struct cm_counter_group {
38997 struct kobject obj;
38998- atomic_long_t counter[CM_ATTR_COUNT];
38999+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
39000 };
39001
39002 struct cm_counter_attribute {
39003@@ -1395,7 +1395,7 @@ static void cm_dup_req_handler(struct cm_work *work,
39004 struct ib_mad_send_buf *msg = NULL;
39005 int ret;
39006
39007- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
39008+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
39009 counter[CM_REQ_COUNTER]);
39010
39011 /* Quick state check to discard duplicate REQs. */
39012@@ -1779,7 +1779,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
39013 if (!cm_id_priv)
39014 return;
39015
39016- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
39017+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
39018 counter[CM_REP_COUNTER]);
39019 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
39020 if (ret)
39021@@ -1946,7 +1946,7 @@ static int cm_rtu_handler(struct cm_work *work)
39022 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
39023 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
39024 spin_unlock_irq(&cm_id_priv->lock);
39025- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
39026+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
39027 counter[CM_RTU_COUNTER]);
39028 goto out;
39029 }
39030@@ -2129,7 +2129,7 @@ static int cm_dreq_handler(struct cm_work *work)
39031 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
39032 dreq_msg->local_comm_id);
39033 if (!cm_id_priv) {
39034- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
39035+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
39036 counter[CM_DREQ_COUNTER]);
39037 cm_issue_drep(work->port, work->mad_recv_wc);
39038 return -EINVAL;
39039@@ -2154,7 +2154,7 @@ static int cm_dreq_handler(struct cm_work *work)
39040 case IB_CM_MRA_REP_RCVD:
39041 break;
39042 case IB_CM_TIMEWAIT:
39043- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
39044+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
39045 counter[CM_DREQ_COUNTER]);
39046 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
39047 goto unlock;
39048@@ -2168,7 +2168,7 @@ static int cm_dreq_handler(struct cm_work *work)
39049 cm_free_msg(msg);
39050 goto deref;
39051 case IB_CM_DREQ_RCVD:
39052- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
39053+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
39054 counter[CM_DREQ_COUNTER]);
39055 goto unlock;
39056 default:
39057@@ -2535,7 +2535,7 @@ static int cm_mra_handler(struct cm_work *work)
39058 ib_modify_mad(cm_id_priv->av.port->mad_agent,
39059 cm_id_priv->msg, timeout)) {
39060 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
39061- atomic_long_inc(&work->port->
39062+ atomic_long_inc_unchecked(&work->port->
39063 counter_group[CM_RECV_DUPLICATES].
39064 counter[CM_MRA_COUNTER]);
39065 goto out;
39066@@ -2544,7 +2544,7 @@ static int cm_mra_handler(struct cm_work *work)
39067 break;
39068 case IB_CM_MRA_REQ_RCVD:
39069 case IB_CM_MRA_REP_RCVD:
39070- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
39071+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
39072 counter[CM_MRA_COUNTER]);
39073 /* fall through */
39074 default:
39075@@ -2706,7 +2706,7 @@ static int cm_lap_handler(struct cm_work *work)
39076 case IB_CM_LAP_IDLE:
39077 break;
39078 case IB_CM_MRA_LAP_SENT:
39079- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
39080+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
39081 counter[CM_LAP_COUNTER]);
39082 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
39083 goto unlock;
39084@@ -2722,7 +2722,7 @@ static int cm_lap_handler(struct cm_work *work)
39085 cm_free_msg(msg);
39086 goto deref;
39087 case IB_CM_LAP_RCVD:
39088- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
39089+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
39090 counter[CM_LAP_COUNTER]);
39091 goto unlock;
39092 default:
39093@@ -3006,7 +3006,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
39094 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
39095 if (cur_cm_id_priv) {
39096 spin_unlock_irq(&cm.lock);
39097- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
39098+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
39099 counter[CM_SIDR_REQ_COUNTER]);
39100 goto out; /* Duplicate message. */
39101 }
39102@@ -3218,10 +3218,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
39103 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
39104 msg->retries = 1;
39105
39106- atomic_long_add(1 + msg->retries,
39107+ atomic_long_add_unchecked(1 + msg->retries,
39108 &port->counter_group[CM_XMIT].counter[attr_index]);
39109 if (msg->retries)
39110- atomic_long_add(msg->retries,
39111+ atomic_long_add_unchecked(msg->retries,
39112 &port->counter_group[CM_XMIT_RETRIES].
39113 counter[attr_index]);
39114
39115@@ -3431,7 +3431,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
39116 }
39117
39118 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
39119- atomic_long_inc(&port->counter_group[CM_RECV].
39120+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
39121 counter[attr_id - CM_ATTR_ID_OFFSET]);
39122
39123 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
39124@@ -3636,7 +3636,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
39125 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
39126
39127 return sprintf(buf, "%ld\n",
39128- atomic_long_read(&group->counter[cm_attr->index]));
39129+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
39130 }
39131
39132 static const struct sysfs_ops cm_counter_ops = {
39133diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
39134index 9f5ad7c..588cd84 100644
39135--- a/drivers/infiniband/core/fmr_pool.c
39136+++ b/drivers/infiniband/core/fmr_pool.c
39137@@ -98,8 +98,8 @@ struct ib_fmr_pool {
39138
39139 struct task_struct *thread;
39140
39141- atomic_t req_ser;
39142- atomic_t flush_ser;
39143+ atomic_unchecked_t req_ser;
39144+ atomic_unchecked_t flush_ser;
39145
39146 wait_queue_head_t force_wait;
39147 };
39148@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
39149 struct ib_fmr_pool *pool = pool_ptr;
39150
39151 do {
39152- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
39153+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
39154 ib_fmr_batch_release(pool);
39155
39156- atomic_inc(&pool->flush_ser);
39157+ atomic_inc_unchecked(&pool->flush_ser);
39158 wake_up_interruptible(&pool->force_wait);
39159
39160 if (pool->flush_function)
39161@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
39162 }
39163
39164 set_current_state(TASK_INTERRUPTIBLE);
39165- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
39166+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
39167 !kthread_should_stop())
39168 schedule();
39169 __set_current_state(TASK_RUNNING);
39170@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
39171 pool->dirty_watermark = params->dirty_watermark;
39172 pool->dirty_len = 0;
39173 spin_lock_init(&pool->pool_lock);
39174- atomic_set(&pool->req_ser, 0);
39175- atomic_set(&pool->flush_ser, 0);
39176+ atomic_set_unchecked(&pool->req_ser, 0);
39177+ atomic_set_unchecked(&pool->flush_ser, 0);
39178 init_waitqueue_head(&pool->force_wait);
39179
39180 pool->thread = kthread_run(ib_fmr_cleanup_thread,
39181@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
39182 }
39183 spin_unlock_irq(&pool->pool_lock);
39184
39185- serial = atomic_inc_return(&pool->req_ser);
39186+ serial = atomic_inc_return_unchecked(&pool->req_ser);
39187 wake_up_process(pool->thread);
39188
39189 if (wait_event_interruptible(pool->force_wait,
39190- atomic_read(&pool->flush_ser) - serial >= 0))
39191+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
39192 return -EINTR;
39193
39194 return 0;
39195@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
39196 } else {
39197 list_add_tail(&fmr->list, &pool->dirty_list);
39198 if (++pool->dirty_len >= pool->dirty_watermark) {
39199- atomic_inc(&pool->req_ser);
39200+ atomic_inc_unchecked(&pool->req_ser);
39201 wake_up_process(pool->thread);
39202 }
39203 }
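The fmr_pool counters are a good illustration of why these conversions pick the unchecked type: req_ser and flush_ser are ordered by signed difference, an idiom that stays correct across 2^32 wraparound, so saturating them on overflow would break the pool. The comparison in isolation:

    #include <stdbool.h>
    #include <stdint.h>

    /* wrap-safe "a precedes b" for 32-bit serials, valid while the
     * two values are less than 2^31 apart */
    static bool seq_before(uint32_t a, uint32_t b)
    {
        return (int32_t)(a - b) < 0;
    }

    int main(void)
    {
        /* flush has wrapped past zero while req is just behind it */
        uint32_t req = 0xfffffffe, flush = 2;
        return seq_before(req, flush) ? 0 : 1;   /* req is older: true */
    }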
39204diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
39205index 4cb8eb2..146bf60 100644
39206--- a/drivers/infiniband/hw/cxgb4/mem.c
39207+++ b/drivers/infiniband/hw/cxgb4/mem.c
39208@@ -249,7 +249,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
39209 int err;
39210 struct fw_ri_tpte tpt;
39211 u32 stag_idx;
39212- static atomic_t key;
39213+ static atomic_unchecked_t key;
39214
39215 if (c4iw_fatal_error(rdev))
39216 return -EIO;
39217@@ -266,7 +266,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
39218 if (rdev->stats.stag.cur > rdev->stats.stag.max)
39219 rdev->stats.stag.max = rdev->stats.stag.cur;
39220 mutex_unlock(&rdev->stats.lock);
39221- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
39222+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
39223 }
39224 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
39225 __func__, stag_state, type, pdid, stag_idx);
39226diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
39227index 79b3dbc..96e5fcc 100644
39228--- a/drivers/infiniband/hw/ipath/ipath_rc.c
39229+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
39230@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
39231 struct ib_atomic_eth *ateth;
39232 struct ipath_ack_entry *e;
39233 u64 vaddr;
39234- atomic64_t *maddr;
39235+ atomic64_unchecked_t *maddr;
39236 u64 sdata;
39237 u32 rkey;
39238 u8 next;
39239@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
39240 IB_ACCESS_REMOTE_ATOMIC)))
39241 goto nack_acc_unlck;
39242 /* Perform atomic OP and save result. */
39243- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
39244+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
39245 sdata = be64_to_cpu(ateth->swap_data);
39246 e = &qp->s_ack_queue[qp->r_head_ack_queue];
39247 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
39248- (u64) atomic64_add_return(sdata, maddr) - sdata :
39249+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
39250 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
39251 be64_to_cpu(ateth->compare_data),
39252 sdata);
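Both ipath hunks implement InfiniBand FETCH_ADD by calling the add-and-return primitive and subtracting the addend again, since add_return hands back the new value while the remote peer must receive the old one; that subtraction is itself wrapping arithmetic, hence the unchecked atomics. The same recovery step in standalone C11:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t fetch_add_via_add_return(_Atomic uint64_t *p, uint64_t v)
    {
        /* new = old + v, performed atomically ... */
        uint64_t new = atomic_fetch_add(p, v) + v;
        /* ... so the old value the peer expects is new - v, mirroring
         * atomic64_add_return(sdata, maddr) - sdata above */
        return new - v;
    }

    int main(void)
    {
        _Atomic uint64_t target = 40;

        printf("old=%llu new=%llu\n",
               (unsigned long long)fetch_add_via_add_return(&target, 2),
               (unsigned long long)atomic_load(&target));
        return 0;
    }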
39253diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
39254index 1f95bba..9530f87 100644
39255--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
39256+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
39257@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
39258 unsigned long flags;
39259 struct ib_wc wc;
39260 u64 sdata;
39261- atomic64_t *maddr;
39262+ atomic64_unchecked_t *maddr;
39263 enum ib_wc_status send_status;
39264
39265 /*
39266@@ -382,11 +382,11 @@ again:
39267 IB_ACCESS_REMOTE_ATOMIC)))
39268 goto acc_err;
39269 /* Perform atomic OP and save result. */
39270- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
39271+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
39272 sdata = wqe->wr.wr.atomic.compare_add;
39273 *(u64 *) sqp->s_sge.sge.vaddr =
39274 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
39275- (u64) atomic64_add_return(sdata, maddr) - sdata :
39276+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
39277 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
39278 sdata, wqe->wr.wr.atomic.swap);
39279 goto send_comp;
39280diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
39281index 9d3e5c1..d9afe4a 100644
39282--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
39283+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
39284@@ -772,7 +772,7 @@ static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
39285 mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n");
39286 }
39287
39288-int mthca_QUERY_FW(struct mthca_dev *dev)
39289+int __intentional_overflow(-1) mthca_QUERY_FW(struct mthca_dev *dev)
39290 {
39291 struct mthca_mailbox *mailbox;
39292 u32 *outbox;
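mthca_QUERY_FW() here (and mthca_mr_alloc() just below) gain an __intentional_overflow(-1) marker for the PaX size_overflow gcc plugin, telling it not to flag arithmetic in these functions as unintended overflow. The wiring is paraphrased below from the patch's compiler header changes; the exact meaning of the -1 argument is an assumption here, read as "applies to the function as a whole":

    /* inert unless the size_overflow plugin is loaded */
    #ifdef SIZE_OVERFLOW_PLUGIN
    #define __intentional_overflow(...) \
            __attribute__((intentional_overflow(__VA_ARGS__)))
    #else
    #define __intentional_overflow(...)
    #endif

    struct mthca_dev;

    /* annotated declaration, as in the hunk above */
    int __intentional_overflow(-1) mthca_QUERY_FW(struct mthca_dev *dev);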
39293diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
39294index ed9a989..e0c5871 100644
39295--- a/drivers/infiniband/hw/mthca/mthca_mr.c
39296+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
39297@@ -426,7 +426,7 @@ static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
39298 return key;
39299 }
39300
39301-int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
39302+int __intentional_overflow(-1) mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
39303 u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
39304 {
39305 struct mthca_mailbox *mailbox;
39306diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
39307index 4291410..d2ab1fb 100644
39308--- a/drivers/infiniband/hw/nes/nes.c
39309+++ b/drivers/infiniband/hw/nes/nes.c
39310@@ -98,7 +98,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
39311 LIST_HEAD(nes_adapter_list);
39312 static LIST_HEAD(nes_dev_list);
39313
39314-atomic_t qps_destroyed;
39315+atomic_unchecked_t qps_destroyed;
39316
39317 static unsigned int ee_flsh_adapter;
39318 static unsigned int sysfs_nonidx_addr;
39319@@ -269,7 +269,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
39320 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
39321 struct nes_adapter *nesadapter = nesdev->nesadapter;
39322
39323- atomic_inc(&qps_destroyed);
39324+ atomic_inc_unchecked(&qps_destroyed);
39325
39326 /* Free the control structures */
39327
39328diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
39329index 33cc589..3bd6538 100644
39330--- a/drivers/infiniband/hw/nes/nes.h
39331+++ b/drivers/infiniband/hw/nes/nes.h
39332@@ -177,17 +177,17 @@ extern unsigned int nes_debug_level;
39333 extern unsigned int wqm_quanta;
39334 extern struct list_head nes_adapter_list;
39335
39336-extern atomic_t cm_connects;
39337-extern atomic_t cm_accepts;
39338-extern atomic_t cm_disconnects;
39339-extern atomic_t cm_closes;
39340-extern atomic_t cm_connecteds;
39341-extern atomic_t cm_connect_reqs;
39342-extern atomic_t cm_rejects;
39343-extern atomic_t mod_qp_timouts;
39344-extern atomic_t qps_created;
39345-extern atomic_t qps_destroyed;
39346-extern atomic_t sw_qps_destroyed;
39347+extern atomic_unchecked_t cm_connects;
39348+extern atomic_unchecked_t cm_accepts;
39349+extern atomic_unchecked_t cm_disconnects;
39350+extern atomic_unchecked_t cm_closes;
39351+extern atomic_unchecked_t cm_connecteds;
39352+extern atomic_unchecked_t cm_connect_reqs;
39353+extern atomic_unchecked_t cm_rejects;
39354+extern atomic_unchecked_t mod_qp_timouts;
39355+extern atomic_unchecked_t qps_created;
39356+extern atomic_unchecked_t qps_destroyed;
39357+extern atomic_unchecked_t sw_qps_destroyed;
39358 extern u32 mh_detected;
39359 extern u32 mh_pauses_sent;
39360 extern u32 cm_packets_sent;
39361@@ -196,16 +196,16 @@ extern u32 cm_packets_created;
39362 extern u32 cm_packets_received;
39363 extern u32 cm_packets_dropped;
39364 extern u32 cm_packets_retrans;
39365-extern atomic_t cm_listens_created;
39366-extern atomic_t cm_listens_destroyed;
39367+extern atomic_unchecked_t cm_listens_created;
39368+extern atomic_unchecked_t cm_listens_destroyed;
39369 extern u32 cm_backlog_drops;
39370-extern atomic_t cm_loopbacks;
39371-extern atomic_t cm_nodes_created;
39372-extern atomic_t cm_nodes_destroyed;
39373-extern atomic_t cm_accel_dropped_pkts;
39374-extern atomic_t cm_resets_recvd;
39375-extern atomic_t pau_qps_created;
39376-extern atomic_t pau_qps_destroyed;
39377+extern atomic_unchecked_t cm_loopbacks;
39378+extern atomic_unchecked_t cm_nodes_created;
39379+extern atomic_unchecked_t cm_nodes_destroyed;
39380+extern atomic_unchecked_t cm_accel_dropped_pkts;
39381+extern atomic_unchecked_t cm_resets_recvd;
39382+extern atomic_unchecked_t pau_qps_created;
39383+extern atomic_unchecked_t pau_qps_destroyed;
39384
39385 extern u32 int_mod_timer_init;
39386 extern u32 int_mod_cq_depth_256;
39387diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
39388index 24b9f1a..00fd004 100644
39389--- a/drivers/infiniband/hw/nes/nes_cm.c
39390+++ b/drivers/infiniband/hw/nes/nes_cm.c
39391@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
39392 u32 cm_packets_retrans;
39393 u32 cm_packets_created;
39394 u32 cm_packets_received;
39395-atomic_t cm_listens_created;
39396-atomic_t cm_listens_destroyed;
39397+atomic_unchecked_t cm_listens_created;
39398+atomic_unchecked_t cm_listens_destroyed;
39399 u32 cm_backlog_drops;
39400-atomic_t cm_loopbacks;
39401-atomic_t cm_nodes_created;
39402-atomic_t cm_nodes_destroyed;
39403-atomic_t cm_accel_dropped_pkts;
39404-atomic_t cm_resets_recvd;
39405+atomic_unchecked_t cm_loopbacks;
39406+atomic_unchecked_t cm_nodes_created;
39407+atomic_unchecked_t cm_nodes_destroyed;
39408+atomic_unchecked_t cm_accel_dropped_pkts;
39409+atomic_unchecked_t cm_resets_recvd;
39410
39411 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
39412 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
39413@@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
39414
39415 static struct nes_cm_core *g_cm_core;
39416
39417-atomic_t cm_connects;
39418-atomic_t cm_accepts;
39419-atomic_t cm_disconnects;
39420-atomic_t cm_closes;
39421-atomic_t cm_connecteds;
39422-atomic_t cm_connect_reqs;
39423-atomic_t cm_rejects;
39424+atomic_unchecked_t cm_connects;
39425+atomic_unchecked_t cm_accepts;
39426+atomic_unchecked_t cm_disconnects;
39427+atomic_unchecked_t cm_closes;
39428+atomic_unchecked_t cm_connecteds;
39429+atomic_unchecked_t cm_connect_reqs;
39430+atomic_unchecked_t cm_rejects;
39431
39432 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
39433 {
39434@@ -1272,7 +1272,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
39435 kfree(listener);
39436 listener = NULL;
39437 ret = 0;
39438- atomic_inc(&cm_listens_destroyed);
39439+ atomic_inc_unchecked(&cm_listens_destroyed);
39440 } else {
39441 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
39442 }
39443@@ -1466,7 +1466,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
39444 cm_node->rem_mac);
39445
39446 add_hte_node(cm_core, cm_node);
39447- atomic_inc(&cm_nodes_created);
39448+ atomic_inc_unchecked(&cm_nodes_created);
39449
39450 return cm_node;
39451 }
39452@@ -1524,7 +1524,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
39453 }
39454
39455 atomic_dec(&cm_core->node_cnt);
39456- atomic_inc(&cm_nodes_destroyed);
39457+ atomic_inc_unchecked(&cm_nodes_destroyed);
39458 nesqp = cm_node->nesqp;
39459 if (nesqp) {
39460 nesqp->cm_node = NULL;
39461@@ -1588,7 +1588,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
39462
39463 static void drop_packet(struct sk_buff *skb)
39464 {
39465- atomic_inc(&cm_accel_dropped_pkts);
39466+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
39467 dev_kfree_skb_any(skb);
39468 }
39469
39470@@ -1651,7 +1651,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
39471 {
39472
39473 int reset = 0; /* whether to send reset in case of err.. */
39474- atomic_inc(&cm_resets_recvd);
39475+ atomic_inc_unchecked(&cm_resets_recvd);
39476 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
39477 " refcnt=%d\n", cm_node, cm_node->state,
39478 atomic_read(&cm_node->ref_count));
39479@@ -2292,7 +2292,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
39480 rem_ref_cm_node(cm_node->cm_core, cm_node);
39481 return NULL;
39482 }
39483- atomic_inc(&cm_loopbacks);
39484+ atomic_inc_unchecked(&cm_loopbacks);
39485 loopbackremotenode->loopbackpartner = cm_node;
39486 loopbackremotenode->tcp_cntxt.rcv_wscale =
39487 NES_CM_DEFAULT_RCV_WND_SCALE;
39488@@ -2567,7 +2567,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
39489 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
39490 else {
39491 rem_ref_cm_node(cm_core, cm_node);
39492- atomic_inc(&cm_accel_dropped_pkts);
39493+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
39494 dev_kfree_skb_any(skb);
39495 }
39496 break;
39497@@ -2875,7 +2875,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
39498
39499 if ((cm_id) && (cm_id->event_handler)) {
39500 if (issue_disconn) {
39501- atomic_inc(&cm_disconnects);
39502+ atomic_inc_unchecked(&cm_disconnects);
39503 cm_event.event = IW_CM_EVENT_DISCONNECT;
39504 cm_event.status = disconn_status;
39505 cm_event.local_addr = cm_id->local_addr;
39506@@ -2897,7 +2897,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
39507 }
39508
39509 if (issue_close) {
39510- atomic_inc(&cm_closes);
39511+ atomic_inc_unchecked(&cm_closes);
39512 nes_disconnect(nesqp, 1);
39513
39514 cm_id->provider_data = nesqp;
39515@@ -3033,7 +3033,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
39516
39517 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
39518 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
39519- atomic_inc(&cm_accepts);
39520+ atomic_inc_unchecked(&cm_accepts);
39521
39522 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
39523 netdev_refcnt_read(nesvnic->netdev));
39524@@ -3228,7 +3228,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
39525 struct nes_cm_core *cm_core;
39526 u8 *start_buff;
39527
39528- atomic_inc(&cm_rejects);
39529+ atomic_inc_unchecked(&cm_rejects);
39530 cm_node = (struct nes_cm_node *)cm_id->provider_data;
39531 loopback = cm_node->loopbackpartner;
39532 cm_core = cm_node->cm_core;
39533@@ -3288,7 +3288,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
39534 ntohl(cm_id->local_addr.sin_addr.s_addr),
39535 ntohs(cm_id->local_addr.sin_port));
39536
39537- atomic_inc(&cm_connects);
39538+ atomic_inc_unchecked(&cm_connects);
39539 nesqp->active_conn = 1;
39540
39541 /* cache the cm_id in the qp */
39542@@ -3398,7 +3398,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
39543 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
39544 return err;
39545 }
39546- atomic_inc(&cm_listens_created);
39547+ atomic_inc_unchecked(&cm_listens_created);
39548 }
39549
39550 cm_id->add_ref(cm_id);
39551@@ -3499,7 +3499,7 @@ static void cm_event_connected(struct nes_cm_event *event)
39552
39553 if (nesqp->destroyed)
39554 return;
39555- atomic_inc(&cm_connecteds);
39556+ atomic_inc_unchecked(&cm_connecteds);
39557 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
39558 " local port 0x%04X. jiffies = %lu.\n",
39559 nesqp->hwqp.qp_id,
39560@@ -3679,7 +3679,7 @@ static void cm_event_reset(struct nes_cm_event *event)
39561
39562 cm_id->add_ref(cm_id);
39563 ret = cm_id->event_handler(cm_id, &cm_event);
39564- atomic_inc(&cm_closes);
39565+ atomic_inc_unchecked(&cm_closes);
39566 cm_event.event = IW_CM_EVENT_CLOSE;
39567 cm_event.status = 0;
39568 cm_event.provider_data = cm_id->provider_data;
39569@@ -3715,7 +3715,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
39570 return;
39571 cm_id = cm_node->cm_id;
39572
39573- atomic_inc(&cm_connect_reqs);
39574+ atomic_inc_unchecked(&cm_connect_reqs);
39575 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
39576 cm_node, cm_id, jiffies);
39577
39578@@ -3755,7 +3755,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
39579 return;
39580 cm_id = cm_node->cm_id;
39581
39582- atomic_inc(&cm_connect_reqs);
39583+ atomic_inc_unchecked(&cm_connect_reqs);
39584 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
39585 cm_node, cm_id, jiffies);
39586
39587diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
39588index 4166452..fc952c3 100644
39589--- a/drivers/infiniband/hw/nes/nes_mgt.c
39590+++ b/drivers/infiniband/hw/nes/nes_mgt.c
39591@@ -40,8 +40,8 @@
39592 #include "nes.h"
39593 #include "nes_mgt.h"
39594
39595-atomic_t pau_qps_created;
39596-atomic_t pau_qps_destroyed;
39597+atomic_unchecked_t pau_qps_created;
39598+atomic_unchecked_t pau_qps_destroyed;
39599
39600 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
39601 {
39602@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
39603 {
39604 struct sk_buff *skb;
39605 unsigned long flags;
39606- atomic_inc(&pau_qps_destroyed);
39607+ atomic_inc_unchecked(&pau_qps_destroyed);
39608
39609 /* Free packets that have not yet been forwarded */
39610 /* Lock is acquired by skb_dequeue when removing the skb */
39611@@ -810,7 +810,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
39612 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
39613 skb_queue_head_init(&nesqp->pau_list);
39614 spin_lock_init(&nesqp->pau_lock);
39615- atomic_inc(&pau_qps_created);
39616+ atomic_inc_unchecked(&pau_qps_created);
39617 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
39618 }
39619
39620diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
39621index 49eb511..a774366 100644
39622--- a/drivers/infiniband/hw/nes/nes_nic.c
39623+++ b/drivers/infiniband/hw/nes/nes_nic.c
39624@@ -1273,39 +1273,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
39625 target_stat_values[++index] = mh_detected;
39626 target_stat_values[++index] = mh_pauses_sent;
39627 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
39628- target_stat_values[++index] = atomic_read(&cm_connects);
39629- target_stat_values[++index] = atomic_read(&cm_accepts);
39630- target_stat_values[++index] = atomic_read(&cm_disconnects);
39631- target_stat_values[++index] = atomic_read(&cm_connecteds);
39632- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
39633- target_stat_values[++index] = atomic_read(&cm_rejects);
39634- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
39635- target_stat_values[++index] = atomic_read(&qps_created);
39636- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
39637- target_stat_values[++index] = atomic_read(&qps_destroyed);
39638- target_stat_values[++index] = atomic_read(&cm_closes);
39639+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
39640+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
39641+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
39642+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
39643+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
39644+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
39645+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
39646+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
39647+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
39648+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
39649+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
39650 target_stat_values[++index] = cm_packets_sent;
39651 target_stat_values[++index] = cm_packets_bounced;
39652 target_stat_values[++index] = cm_packets_created;
39653 target_stat_values[++index] = cm_packets_received;
39654 target_stat_values[++index] = cm_packets_dropped;
39655 target_stat_values[++index] = cm_packets_retrans;
39656- target_stat_values[++index] = atomic_read(&cm_listens_created);
39657- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
39658+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
39659+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
39660 target_stat_values[++index] = cm_backlog_drops;
39661- target_stat_values[++index] = atomic_read(&cm_loopbacks);
39662- target_stat_values[++index] = atomic_read(&cm_nodes_created);
39663- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
39664- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
39665- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
39666+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
39667+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
39668+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
39669+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
39670+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
39671 target_stat_values[++index] = nesadapter->free_4kpbl;
39672 target_stat_values[++index] = nesadapter->free_256pbl;
39673 target_stat_values[++index] = int_mod_timer_init;
39674 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
39675 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
39676 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
39677- target_stat_values[++index] = atomic_read(&pau_qps_created);
39678- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
39679+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
39680+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
39681 }
39682
39683 /**
39684diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
39685index 8f67fe2..8960859 100644
39686--- a/drivers/infiniband/hw/nes/nes_verbs.c
39687+++ b/drivers/infiniband/hw/nes/nes_verbs.c
39688@@ -46,9 +46,9 @@
39689
39690 #include <rdma/ib_umem.h>
39691
39692-atomic_t mod_qp_timouts;
39693-atomic_t qps_created;
39694-atomic_t sw_qps_destroyed;
39695+atomic_unchecked_t mod_qp_timouts;
39696+atomic_unchecked_t qps_created;
39697+atomic_unchecked_t sw_qps_destroyed;
39698
39699 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
39700
39701@@ -1134,7 +1134,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
39702 if (init_attr->create_flags)
39703 return ERR_PTR(-EINVAL);
39704
39705- atomic_inc(&qps_created);
39706+ atomic_inc_unchecked(&qps_created);
39707 switch (init_attr->qp_type) {
39708 case IB_QPT_RC:
39709 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
39710@@ -1465,7 +1465,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
39711 struct iw_cm_event cm_event;
39712 int ret = 0;
39713
39714- atomic_inc(&sw_qps_destroyed);
39715+ atomic_inc_unchecked(&sw_qps_destroyed);
39716 nesqp->destroyed = 1;
39717
39718 /* Blow away the connection if it exists. */
39719diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
39720index 4d11575..3e890e5 100644
39721--- a/drivers/infiniband/hw/qib/qib.h
39722+++ b/drivers/infiniband/hw/qib/qib.h
39723@@ -51,6 +51,7 @@
39724 #include <linux/completion.h>
39725 #include <linux/kref.h>
39726 #include <linux/sched.h>
39727+#include <linux/slab.h>
39728
39729 #include "qib_common.h"
39730 #include "qib_verbs.h"
39731diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
39732index da739d9..da1c7f4 100644
39733--- a/drivers/input/gameport/gameport.c
39734+++ b/drivers/input/gameport/gameport.c
39735@@ -487,14 +487,14 @@ EXPORT_SYMBOL(gameport_set_phys);
39736 */
39737 static void gameport_init_port(struct gameport *gameport)
39738 {
39739- static atomic_t gameport_no = ATOMIC_INIT(0);
39740+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
39741
39742 __module_get(THIS_MODULE);
39743
39744 mutex_init(&gameport->drv_mutex);
39745 device_initialize(&gameport->dev);
39746 dev_set_name(&gameport->dev, "gameport%lu",
39747- (unsigned long)atomic_inc_return(&gameport_no) - 1);
39748+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
39749 gameport->dev.bus = &gameport_bus;
39750 gameport->dev.release = gameport_release_port;
39751 if (gameport->parent)
39752diff --git a/drivers/input/input.c b/drivers/input/input.c
39753index c044699..174d71a 100644
39754--- a/drivers/input/input.c
39755+++ b/drivers/input/input.c
39756@@ -2019,7 +2019,7 @@ static void devm_input_device_unregister(struct device *dev, void *res)
39757 */
39758 int input_register_device(struct input_dev *dev)
39759 {
39760- static atomic_t input_no = ATOMIC_INIT(0);
39761+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
39762 struct input_devres *devres = NULL;
39763 struct input_handler *handler;
39764 unsigned int packet_size;
39765@@ -2074,7 +2074,7 @@ int input_register_device(struct input_dev *dev)
39766 dev->setkeycode = input_default_setkeycode;
39767
39768 dev_set_name(&dev->dev, "input%ld",
39769- (unsigned long) atomic_inc_return(&input_no) - 1);
39770+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
39771
39772 error = device_add(&dev->dev);
39773 if (error)
39774diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
39775index 04c69af..5f92d00 100644
39776--- a/drivers/input/joystick/sidewinder.c
39777+++ b/drivers/input/joystick/sidewinder.c
39778@@ -30,6 +30,7 @@
39779 #include <linux/kernel.h>
39780 #include <linux/module.h>
39781 #include <linux/slab.h>
39782+#include <linux/sched.h>
39783 #include <linux/init.h>
39784 #include <linux/input.h>
39785 #include <linux/gameport.h>
39786diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
39787index fa061d4..4a6957c 100644
39788--- a/drivers/input/joystick/xpad.c
39789+++ b/drivers/input/joystick/xpad.c
39790@@ -735,7 +735,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
39791
39792 static int xpad_led_probe(struct usb_xpad *xpad)
39793 {
39794- static atomic_t led_seq = ATOMIC_INIT(0);
39795+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
39796 long led_no;
39797 struct xpad_led *led;
39798 struct led_classdev *led_cdev;
39799@@ -748,7 +748,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
39800 if (!led)
39801 return -ENOMEM;
39802
39803- led_no = (long)atomic_inc_return(&led_seq) - 1;
39804+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
39805
39806 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
39807 led->xpad = xpad;
39808diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
39809index 2f0b39d..7370f13 100644
39810--- a/drivers/input/mouse/psmouse.h
39811+++ b/drivers/input/mouse/psmouse.h
39812@@ -116,7 +116,7 @@ struct psmouse_attribute {
39813 ssize_t (*set)(struct psmouse *psmouse, void *data,
39814 const char *buf, size_t count);
39815 bool protect;
39816-};
39817+} __do_const;
39818 #define to_psmouse_attr(a) container_of((a), struct psmouse_attribute, dattr)
39819
39820 ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *attr,
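
[annotation] Tagging an ops-style structure with __do_const, as above, asks the constify GCC plugin to treat every instance as const, so its function pointers land in read-only memory. Where one writable instance is still needed, the patch pairs the tag with a __no_const typedef. A hedged sketch of the pairing (struct demo_ops is invented):

	/* illustrative only: constified ops with a writable escape hatch */
	struct demo_ops {
		ssize_t (*show)(char *buf);
	} __do_const;
	typedef struct demo_ops __no_const demo_ops_no_const;

	static ssize_t demo_show(char *buf)
	{
		return 0;
	}

	static struct demo_ops default_ops = {	/* constified: lands read-only */
		.show = demo_show,
	};

	static demo_ops_no_const runtime_ops;	/* stays writable */
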
39821diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
39822index 4c842c3..590b0bf 100644
39823--- a/drivers/input/mousedev.c
39824+++ b/drivers/input/mousedev.c
39825@@ -738,7 +738,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
39826
39827 spin_unlock_irq(&client->packet_lock);
39828
39829- if (copy_to_user(buffer, data, count))
39830+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
39831 return -EFAULT;
39832
39833 return count;
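
[annotation] This is one instance of a user-copy hardening idiom the patch applies across many drivers: a user-controlled length is checked against the size of the kernel buffer before copy_to_user()/copy_from_user(), so an oversized request cannot read or write past the buffer. The shape of the check, sketched with invented names:

	/* illustrative only: bound a user-supplied length before copying */
	#include <linux/uaccess.h>

	static ssize_t demo_read(char __user *ubuf, size_t count)
	{
		char kbuf[64] = { 0 };

		/* reject lengths larger than the kernel buffer */
		if (count > sizeof(kbuf) || copy_to_user(ubuf, kbuf, count))
			return -EFAULT;
		return count;
	}
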
39834diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
39835index 25fc597..558bf3b3 100644
39836--- a/drivers/input/serio/serio.c
39837+++ b/drivers/input/serio/serio.c
39838@@ -496,7 +496,7 @@ static void serio_release_port(struct device *dev)
39839 */
39840 static void serio_init_port(struct serio *serio)
39841 {
39842- static atomic_t serio_no = ATOMIC_INIT(0);
39843+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
39844
39845 __module_get(THIS_MODULE);
39846
39847@@ -507,7 +507,7 @@ static void serio_init_port(struct serio *serio)
39848 mutex_init(&serio->drv_mutex);
39849 device_initialize(&serio->dev);
39850 dev_set_name(&serio->dev, "serio%ld",
39851- (long)atomic_inc_return(&serio_no) - 1);
39852+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
39853 serio->dev.bus = &serio_bus;
39854 serio->dev.release = serio_release_port;
39855 serio->dev.groups = serio_device_attr_groups;
39856diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
39857index d8f98b1..f62a640 100644
39858--- a/drivers/iommu/iommu.c
39859+++ b/drivers/iommu/iommu.c
39860@@ -583,7 +583,7 @@ static struct notifier_block iommu_bus_nb = {
39861 static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
39862 {
39863 bus_register_notifier(bus, &iommu_bus_nb);
39864- bus_for_each_dev(bus, NULL, ops, add_iommu_group);
39865+ bus_for_each_dev(bus, NULL, (void *)ops, add_iommu_group);
39866 }
39867
39868 /**
39869diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
39870index dcfea4e..f4226b2 100644
39871--- a/drivers/iommu/irq_remapping.c
39872+++ b/drivers/iommu/irq_remapping.c
39873@@ -354,7 +354,7 @@ int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
39874 void panic_if_irq_remap(const char *msg)
39875 {
39876 if (irq_remapping_enabled)
39877- panic(msg);
39878+ panic("%s", msg);
39879 }
39880
39881 static void ir_ack_apic_edge(struct irq_data *data)
39882@@ -375,10 +375,12 @@ static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
39883
39884 void irq_remap_modify_chip_defaults(struct irq_chip *chip)
39885 {
39886- chip->irq_print_chip = ir_print_prefix;
39887- chip->irq_ack = ir_ack_apic_edge;
39888- chip->irq_eoi = ir_ack_apic_level;
39889- chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
39890+ pax_open_kernel();
39891+ *(void **)&chip->irq_print_chip = ir_print_prefix;
39892+ *(void **)&chip->irq_ack = ir_ack_apic_edge;
39893+ *(void **)&chip->irq_eoi = ir_ack_apic_level;
39894+ *(void **)&chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
39895+ pax_close_kernel();
39896 }
39897
39898 bool setup_remapped_irq(int irq, struct irq_cfg *cfg, struct irq_chip *chip)
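
[annotation] Two hardening idioms meet in this file. panic("%s", msg) stops a caller-supplied string from being interpreted as a format string. And because KERNEXEC/constification can leave an irq_chip in read-only memory, its callbacks are patched through pax_open_kernel()/pax_close_kernel(), which briefly permit kernel writes (on x86, by toggling CR0.WP). Sketched with an invented callback:

	/* illustrative only: updating function pointers in read-only data */
	static void demo_ack(struct irq_data *data)
	{
	}

	static void demo_patch_chip(struct irq_chip *chip)
	{
		pax_open_kernel();	/* briefly allow writes (x86: clear CR0.WP) */
		*(void **)&chip->irq_ack = demo_ack;
		pax_close_kernel();	/* restore write protection */
	}
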
39899diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
39900index 19ceaa6..3625818 100644
39901--- a/drivers/irqchip/irq-gic.c
39902+++ b/drivers/irqchip/irq-gic.c
39903@@ -84,7 +84,7 @@ static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
39904 * Supported arch specific GIC irq extension.
39905 * Default make them NULL.
39906 */
39907-struct irq_chip gic_arch_extn = {
39908+irq_chip_no_const gic_arch_extn = {
39909 .irq_eoi = NULL,
39910 .irq_mask = NULL,
39911 .irq_unmask = NULL,
39912@@ -333,7 +333,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
39913 chained_irq_exit(chip, desc);
39914 }
39915
39916-static struct irq_chip gic_chip = {
39917+static irq_chip_no_const gic_chip __read_only = {
39918 .name = "GIC",
39919 .irq_mask = gic_mask_irq,
39920 .irq_unmask = gic_unmask_irq,
39921diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
39922index ac6f72b..81150f2 100644
39923--- a/drivers/isdn/capi/capi.c
39924+++ b/drivers/isdn/capi/capi.c
39925@@ -81,8 +81,8 @@ struct capiminor {
39926
39927 struct capi20_appl *ap;
39928 u32 ncci;
39929- atomic_t datahandle;
39930- atomic_t msgid;
39931+ atomic_unchecked_t datahandle;
39932+ atomic_unchecked_t msgid;
39933
39934 struct tty_port port;
39935 int ttyinstop;
39936@@ -391,7 +391,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
39937 capimsg_setu16(s, 2, mp->ap->applid);
39938 capimsg_setu8 (s, 4, CAPI_DATA_B3);
39939 capimsg_setu8 (s, 5, CAPI_RESP);
39940- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
39941+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
39942 capimsg_setu32(s, 8, mp->ncci);
39943 capimsg_setu16(s, 12, datahandle);
39944 }
39945@@ -512,14 +512,14 @@ static void handle_minor_send(struct capiminor *mp)
39946 mp->outbytes -= len;
39947 spin_unlock_bh(&mp->outlock);
39948
39949- datahandle = atomic_inc_return(&mp->datahandle);
39950+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
39951 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
39952 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
39953 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
39954 capimsg_setu16(skb->data, 2, mp->ap->applid);
39955 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
39956 capimsg_setu8 (skb->data, 5, CAPI_REQ);
39957- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
39958+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
39959 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
39960 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
39961 capimsg_setu16(skb->data, 16, len); /* Data length */
39962diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
39963index 600c79b..3752bab 100644
39964--- a/drivers/isdn/gigaset/interface.c
39965+++ b/drivers/isdn/gigaset/interface.c
39966@@ -130,9 +130,9 @@ static int if_open(struct tty_struct *tty, struct file *filp)
39967 }
39968 tty->driver_data = cs;
39969
39970- ++cs->port.count;
39971+ atomic_inc(&cs->port.count);
39972
39973- if (cs->port.count == 1) {
39974+ if (atomic_read(&cs->port.count) == 1) {
39975 tty_port_tty_set(&cs->port, tty);
39976 cs->port.low_latency = 1;
39977 }
39978@@ -156,9 +156,9 @@ static void if_close(struct tty_struct *tty, struct file *filp)
39979
39980 if (!cs->connected)
39981 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
39982- else if (!cs->port.count)
39983+ else if (!atomic_read(&cs->port.count))
39984 dev_warn(cs->dev, "%s: device not opened\n", __func__);
39985- else if (!--cs->port.count)
39986+ else if (!atomic_dec_return(&cs->port.count))
39987 tty_port_tty_set(&cs->port, NULL);
39988
39989 mutex_unlock(&cs->mutex);
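
[annotation] The tty port open count is a reference count in all but name, so this patch converts the plain int to an atomic and replaces the ++/-- arithmetic with atomic operations, giving it the same race and overflow protection as other refcounts. The mapping is mechanical; a sketch assuming the patched tty_port with an atomic count field:

	/* illustrative only: plain-int refcount converted to atomics */
	static void demo_port_open(struct tty_port *port)
	{
		atomic_inc(&port->count);		/* was: ++port->count; */
	}

	static void demo_port_close(struct tty_port *port)
	{
		if (atomic_dec_return(&port->count) == 0)	/* was: !--port->count */
			tty_port_tty_set(port, NULL);
	}
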
39990diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
39991index 4d9b195..455075c 100644
39992--- a/drivers/isdn/hardware/avm/b1.c
39993+++ b/drivers/isdn/hardware/avm/b1.c
39994@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
39995 }
39996 if (left) {
39997 if (t4file->user) {
39998- if (copy_from_user(buf, dp, left))
39999+ if (left > sizeof buf || copy_from_user(buf, dp, left))
40000 return -EFAULT;
40001 } else {
40002 memcpy(buf, dp, left);
40003@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
40004 }
40005 if (left) {
40006 if (config->user) {
40007- if (copy_from_user(buf, dp, left))
40008+ if (left > sizeof buf || copy_from_user(buf, dp, left))
40009 return -EFAULT;
40010 } else {
40011 memcpy(buf, dp, left);
40012diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
40013index 3c5f249..5fac4d0 100644
40014--- a/drivers/isdn/i4l/isdn_tty.c
40015+++ b/drivers/isdn/i4l/isdn_tty.c
40016@@ -1508,9 +1508,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
40017
40018 #ifdef ISDN_DEBUG_MODEM_OPEN
40019 printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
40020- port->count);
40021+ atomic_read(&port->count));
40022 #endif
40023- port->count++;
40024+ atomic_inc(&port->count);
40025 port->tty = tty;
40026 /*
40027 * Start up serial port
40028@@ -1554,7 +1554,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
40029 #endif
40030 return;
40031 }
40032- if ((tty->count == 1) && (port->count != 1)) {
40033+ if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
40034 /*
40035 * Uh, oh. tty->count is 1, which means that the tty
40036 * structure will be freed. Info->count should always
40037@@ -1563,15 +1563,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
40038 * serial port won't be shutdown.
40039 */
40040 printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
40041- "info->count is %d\n", port->count);
40042- port->count = 1;
40043+ "info->count is %d\n", atomic_read(&port->count));
40044+ atomic_set(&port->count, 1);
40045 }
40046- if (--port->count < 0) {
40047+ if (atomic_dec_return(&port->count) < 0) {
40048 printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
40049- info->line, port->count);
40050- port->count = 0;
40051+ info->line, atomic_read(&port->count));
40052+ atomic_set(&port->count, 0);
40053 }
40054- if (port->count) {
40055+ if (atomic_read(&port->count)) {
40056 #ifdef ISDN_DEBUG_MODEM_OPEN
40057 printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
40058 #endif
40059@@ -1625,7 +1625,7 @@ isdn_tty_hangup(struct tty_struct *tty)
40060 if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
40061 return;
40062 isdn_tty_shutdown(info);
40063- port->count = 0;
40064+ atomic_set(&port->count, 0);
40065 port->flags &= ~ASYNC_NORMAL_ACTIVE;
40066 port->tty = NULL;
40067 wake_up_interruptible(&port->open_wait);
40068@@ -1970,7 +1970,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
40069 for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
40070 modem_info *info = &dev->mdm.info[i];
40071
40072- if (info->port.count == 0)
40073+ if (atomic_read(&info->port.count) == 0)
40074 continue;
40075 if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) && /* SI1 is matching */
40076 (info->emu.mdmreg[REG_SI2] == si2)) { /* SI2 is matching */
40077diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
40078index e74df7c..03a03ba 100644
40079--- a/drivers/isdn/icn/icn.c
40080+++ b/drivers/isdn/icn/icn.c
40081@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
40082 if (count > len)
40083 count = len;
40084 if (user) {
40085- if (copy_from_user(msg, buf, count))
40086+ if (count > sizeof msg || copy_from_user(msg, buf, count))
40087 return -EFAULT;
40088 } else
40089 memcpy(msg, buf, count);
40090diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
40091index 6a8405d..0bd1c7e 100644
40092--- a/drivers/leds/leds-clevo-mail.c
40093+++ b/drivers/leds/leds-clevo-mail.c
40094@@ -40,7 +40,7 @@ static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id)
40095 * detected as working, but in reality it is not) as low as
40096 * possible.
40097 */
40098-static struct dmi_system_id __initdata clevo_mail_led_dmi_table[] = {
40099+static const struct dmi_system_id __initconst clevo_mail_led_dmi_table[] = {
40100 {
40101 .callback = clevo_mail_led_dmi_callback,
40102 .ident = "Clevo D410J",
40103diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
40104index 64e204e..c6bf189 100644
40105--- a/drivers/leds/leds-ss4200.c
40106+++ b/drivers/leds/leds-ss4200.c
40107@@ -91,7 +91,7 @@ MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
40108 * detected as working, but in reality it is not) as low as
40109 * possible.
40110 */
40111-static struct dmi_system_id __initdata nas_led_whitelist[] = {
40112+static const struct dmi_system_id __initconst nas_led_whitelist[] = {
40113 {
40114 .callback = ss4200_led_dmi_callback,
40115 .ident = "Intel SS4200-E",
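
[annotation] Both LED drivers get the same treatment: a DMI match table is only read, and only during init, so __initdata becomes const ... __initconst, moving the table out of writable memory while still discarding it after boot. A sketch with an invented table:

	/* illustrative only: a read-only, init-discarded DMI table */
	static int __init demo_dmi_callback(const struct dmi_system_id *id)
	{
		return 1;
	}

	static const struct dmi_system_id __initconst demo_dmi_table[] = {
		{
			.callback = demo_dmi_callback,
			.ident = "Example Board",
		},
		{ }	/* terminator */
	};
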
40116diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
40117index 0bf1e4e..b4bf44e 100644
40118--- a/drivers/lguest/core.c
40119+++ b/drivers/lguest/core.c
40120@@ -97,9 +97,17 @@ static __init int map_switcher(void)
40121 * The end address needs +1 because __get_vm_area allocates an
40122 * extra guard page, so we need space for that.
40123 */
40124+
40125+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
40126+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
40127+ VM_ALLOC | VM_KERNEXEC, switcher_addr, switcher_addr
40128+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
40129+#else
40130 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
40131 VM_ALLOC, switcher_addr, switcher_addr
40132 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
40133+#endif
40134+
40135 if (!switcher_vma) {
40136 err = -ENOMEM;
40137 printk("lguest: could not map switcher pages high\n");
40138@@ -124,7 +132,7 @@ static __init int map_switcher(void)
40139 * Now the Switcher is mapped at the right address, we can't fail!
40140 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
40141 */
40142- memcpy(switcher_vma->addr, start_switcher_text,
40143+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
40144 end_switcher_text - start_switcher_text);
40145
40146 printk(KERN_INFO "lguest: mapped switcher at %p\n",
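
[annotation] Under KERNEXEC on i386 the kernel text is rebased, so a text symbol's address as seen by C code is not the address where the bytes are actually accessible; ktla_ktva() performs that translation, and the switcher mapping itself is created with VM_KERNEXEC so it remains executable under the stricter page permissions. A hedged sketch of the copy pattern (demo_copy_text is invented):

	/* illustrative only: translate a text address before reading it */
	static void demo_copy_text(void *dst, char *start, char *end)
	{
		/* 'start' is the link-time text address; ktla_ktva() yields
		 * the address the bytes live at under KERNEXEC */
		memcpy(dst, ktla_ktva(start), end - start);
	}
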
40147diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
40148index 5b9ac32..2ef4f26 100644
40149--- a/drivers/lguest/page_tables.c
40150+++ b/drivers/lguest/page_tables.c
40151@@ -559,7 +559,7 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
40152 /*:*/
40153
40154 #ifdef CONFIG_X86_PAE
40155-static void release_pmd(pmd_t *spmd)
40156+static void __intentional_overflow(-1) release_pmd(pmd_t *spmd)
40157 {
40158 /* If the entry's not present, there's nothing to release. */
40159 if (pmd_flags(*spmd) & _PAGE_PRESENT) {
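
[annotation] __intentional_overflow is an annotation consumed by the size_overflow GCC plugin, which otherwise instruments integer expressions and traps on unexpected wrap; the marker (here with -1 as its argument) exempts the function so deliberate or provably harmless arithmetic is left alone. A hedged sketch, with an invented hash function:

	/* illustrative only: exempting deliberate wrap-around arithmetic */
	static u32 __intentional_overflow(-1) demo_hash(u32 x)
	{
		return x * 2654435761u;	/* multiplicative hash; wraps by design */
	}
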
40160diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
40161index f0a3347..f6608b2 100644
40162--- a/drivers/lguest/x86/core.c
40163+++ b/drivers/lguest/x86/core.c
40164@@ -59,7 +59,7 @@ static struct {
40165 /* Offset from where switcher.S was compiled to where we've copied it */
40166 static unsigned long switcher_offset(void)
40167 {
40168- return switcher_addr - (unsigned long)start_switcher_text;
40169+ return switcher_addr - (unsigned long)ktla_ktva(start_switcher_text);
40170 }
40171
40172 /* This cpu's struct lguest_pages (after the Switcher text page) */
40173@@ -99,7 +99,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
40174 * These copies are pretty cheap, so we do them unconditionally: */
40175 /* Save the current Host top-level page directory.
40176 */
40177+
40178+#ifdef CONFIG_PAX_PER_CPU_PGD
40179+ pages->state.host_cr3 = read_cr3();
40180+#else
40181 pages->state.host_cr3 = __pa(current->mm->pgd);
40182+#endif
40183+
40184 /*
40185 * Set up the Guest's page tables to see this CPU's pages (and no
40186 * other CPU's pages).
40187@@ -475,7 +481,7 @@ void __init lguest_arch_host_init(void)
40188 * compiled-in switcher code and the high-mapped copy we just made.
40189 */
40190 for (i = 0; i < IDT_ENTRIES; i++)
40191- default_idt_entries[i] += switcher_offset();
40192+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
40193
40194 /*
40195 * Set up the Switcher's per-cpu areas.
40196@@ -558,7 +564,7 @@ void __init lguest_arch_host_init(void)
40197 * it will be undisturbed when we switch. To change %cs and jump we
40198 * need this structure to feed to Intel's "lcall" instruction.
40199 */
40200- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
40201+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
40202 lguest_entry.segment = LGUEST_CS;
40203
40204 /*
40205diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
40206index 40634b0..4f5855e 100644
40207--- a/drivers/lguest/x86/switcher_32.S
40208+++ b/drivers/lguest/x86/switcher_32.S
40209@@ -87,6 +87,7 @@
40210 #include <asm/page.h>
40211 #include <asm/segment.h>
40212 #include <asm/lguest.h>
40213+#include <asm/processor-flags.h>
40214
40215 // We mark the start of the code to copy
40216 // It's placed in .text tho it's never run here
40217@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
40218 // Changes type when we load it: damn Intel!
40219 // For after we switch over our page tables
40220 // That entry will be read-only: we'd crash.
40221+
40222+#ifdef CONFIG_PAX_KERNEXEC
40223+ mov %cr0, %edx
40224+ xor $X86_CR0_WP, %edx
40225+ mov %edx, %cr0
40226+#endif
40227+
40228 movl $(GDT_ENTRY_TSS*8), %edx
40229 ltr %dx
40230
40231@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
40232 // Let's clear it again for our return.
40233 // The GDT descriptor of the Host
40234 // Points to the table after two "size" bytes
40235- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
40236+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
40237 // Clear "used" from type field (byte 5, bit 2)
40238- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
40239+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
40240+
40241+#ifdef CONFIG_PAX_KERNEXEC
40242+ mov %cr0, %eax
40243+ xor $X86_CR0_WP, %eax
40244+ mov %eax, %cr0
40245+#endif
40246
40247 // Once our page table's switched, the Guest is live!
40248 // The Host fades as we run this final step.
40249@@ -295,13 +309,12 @@ deliver_to_host:
40250 // I consulted gcc, and it gave
40251 // These instructions, which I gladly credit:
40252 leal (%edx,%ebx,8), %eax
40253- movzwl (%eax),%edx
40254- movl 4(%eax), %eax
40255- xorw %ax, %ax
40256- orl %eax, %edx
40257+ movl 4(%eax), %edx
40258+ movw (%eax), %dx
40259 // Now the address of the handler's in %edx
40260 // We call it now: its "iret" drops us home.
40261- jmp *%edx
40262+ ljmp $__KERNEL_CS, $1f
40263+1: jmp *%edx
40264
40265 // Every interrupt can come to us here
40266 // But we must truly tell each apart.
40267diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
40268index 0003992..854bbce 100644
40269--- a/drivers/md/bcache/closure.h
40270+++ b/drivers/md/bcache/closure.h
40271@@ -622,7 +622,7 @@ static inline void closure_wake_up(struct closure_waitlist *list)
40272 static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
40273 struct workqueue_struct *wq)
40274 {
40275- BUG_ON(object_is_on_stack(cl));
40276+ BUG_ON(object_starts_on_stack(cl));
40277 closure_set_ip(cl);
40278 cl->fn = fn;
40279 cl->wq = wq;
40280diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
40281index 5a2c754..0fa55db 100644
40282--- a/drivers/md/bitmap.c
40283+++ b/drivers/md/bitmap.c
40284@@ -1779,7 +1779,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
40285 chunk_kb ? "KB" : "B");
40286 if (bitmap->storage.file) {
40287 seq_printf(seq, ", file: ");
40288- seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
40289+ seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
40290 }
40291
40292 seq_printf(seq, "\n");
40293diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
40294index 81a79b7..87a0f73 100644
40295--- a/drivers/md/dm-ioctl.c
40296+++ b/drivers/md/dm-ioctl.c
40297@@ -1697,7 +1697,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
40298 cmd == DM_LIST_VERSIONS_CMD)
40299 return 0;
40300
40301- if ((cmd == DM_DEV_CREATE_CMD)) {
40302+ if (cmd == DM_DEV_CREATE_CMD) {
40303 if (!*param->name) {
40304 DMWARN("name not supplied when creating device");
40305 return -EINVAL;
40306diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
40307index 699b5be..eac0a15 100644
40308--- a/drivers/md/dm-raid1.c
40309+++ b/drivers/md/dm-raid1.c
40310@@ -40,7 +40,7 @@ enum dm_raid1_error {
40311
40312 struct mirror {
40313 struct mirror_set *ms;
40314- atomic_t error_count;
40315+ atomic_unchecked_t error_count;
40316 unsigned long error_type;
40317 struct dm_dev *dev;
40318 sector_t offset;
40319@@ -186,7 +186,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
40320 struct mirror *m;
40321
40322 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
40323- if (!atomic_read(&m->error_count))
40324+ if (!atomic_read_unchecked(&m->error_count))
40325 return m;
40326
40327 return NULL;
40328@@ -218,7 +218,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
40329 * simple way to tell if a device has encountered
40330 * errors.
40331 */
40332- atomic_inc(&m->error_count);
40333+ atomic_inc_unchecked(&m->error_count);
40334
40335 if (test_and_set_bit(error_type, &m->error_type))
40336 return;
40337@@ -409,7 +409,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
40338 struct mirror *m = get_default_mirror(ms);
40339
40340 do {
40341- if (likely(!atomic_read(&m->error_count)))
40342+ if (likely(!atomic_read_unchecked(&m->error_count)))
40343 return m;
40344
40345 if (m-- == ms->mirror)
40346@@ -423,7 +423,7 @@ static int default_ok(struct mirror *m)
40347 {
40348 struct mirror *default_mirror = get_default_mirror(m->ms);
40349
40350- return !atomic_read(&default_mirror->error_count);
40351+ return !atomic_read_unchecked(&default_mirror->error_count);
40352 }
40353
40354 static int mirror_available(struct mirror_set *ms, struct bio *bio)
40355@@ -560,7 +560,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
40356 */
40357 if (likely(region_in_sync(ms, region, 1)))
40358 m = choose_mirror(ms, bio->bi_sector);
40359- else if (m && atomic_read(&m->error_count))
40360+ else if (m && atomic_read_unchecked(&m->error_count))
40361 m = NULL;
40362
40363 if (likely(m))
40364@@ -927,7 +927,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
40365 }
40366
40367 ms->mirror[mirror].ms = ms;
40368- atomic_set(&(ms->mirror[mirror].error_count), 0);
40369+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
40370 ms->mirror[mirror].error_type = 0;
40371 ms->mirror[mirror].offset = offset;
40372
40373@@ -1340,7 +1340,7 @@ static void mirror_resume(struct dm_target *ti)
40374 */
40375 static char device_status_char(struct mirror *m)
40376 {
40377- if (!atomic_read(&(m->error_count)))
40378+ if (!atomic_read_unchecked(&(m->error_count)))
40379 return 'A';
40380
40381 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
40382diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
40383index d907ca6..cfb8384 100644
40384--- a/drivers/md/dm-stripe.c
40385+++ b/drivers/md/dm-stripe.c
40386@@ -20,7 +20,7 @@ struct stripe {
40387 struct dm_dev *dev;
40388 sector_t physical_start;
40389
40390- atomic_t error_count;
40391+ atomic_unchecked_t error_count;
40392 };
40393
40394 struct stripe_c {
40395@@ -185,7 +185,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
40396 kfree(sc);
40397 return r;
40398 }
40399- atomic_set(&(sc->stripe[i].error_count), 0);
40400+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
40401 }
40402
40403 ti->private = sc;
40404@@ -326,7 +326,7 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
40405 DMEMIT("%d ", sc->stripes);
40406 for (i = 0; i < sc->stripes; i++) {
40407 DMEMIT("%s ", sc->stripe[i].dev->name);
40408- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
40409+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
40410 'D' : 'A';
40411 }
40412 buffer[i] = '\0';
40413@@ -371,8 +371,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
40414 */
40415 for (i = 0; i < sc->stripes; i++)
40416 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
40417- atomic_inc(&(sc->stripe[i].error_count));
40418- if (atomic_read(&(sc->stripe[i].error_count)) <
40419+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
40420+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
40421 DM_IO_ERROR_THRESHOLD)
40422 schedule_work(&sc->trigger_event);
40423 }
40424diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
40425index 1ff252a..ee384c1 100644
40426--- a/drivers/md/dm-table.c
40427+++ b/drivers/md/dm-table.c
40428@@ -389,7 +389,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
40429 if (!dev_size)
40430 return 0;
40431
40432- if ((start >= dev_size) || (start + len > dev_size)) {
40433+ if ((start >= dev_size) || (len > dev_size - start)) {
40434 DMWARN("%s: %s too small for target: "
40435 "start=%llu, len=%llu, dev_size=%llu",
40436 dm_device_name(ti->table->md), bdevname(bdev, b),
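
[annotation] The rewritten bound check is the overflow-safe form: with 64-bit sector counts, start + len can wrap past zero and slip under dev_size, whereas the subtraction cannot once start < dev_size is known. Spelled out (demo_range_invalid is invented):

	/* illustrative only: range check that cannot wrap */
	static bool demo_range_invalid(sector_t start, sector_t len,
				       sector_t dev_size)
	{
		/* unsafe form: 'start + len > dev_size' may wrap; the
		 * subtraction cannot once 'start < dev_size' holds */
		return start >= dev_size || len > dev_size - start;
	}
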
40437diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
40438index 60bce43..9b997d0 100644
40439--- a/drivers/md/dm-thin-metadata.c
40440+++ b/drivers/md/dm-thin-metadata.c
40441@@ -397,7 +397,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
40442 {
40443 pmd->info.tm = pmd->tm;
40444 pmd->info.levels = 2;
40445- pmd->info.value_type.context = pmd->data_sm;
40446+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
40447 pmd->info.value_type.size = sizeof(__le64);
40448 pmd->info.value_type.inc = data_block_inc;
40449 pmd->info.value_type.dec = data_block_dec;
40450@@ -416,7 +416,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
40451
40452 pmd->bl_info.tm = pmd->tm;
40453 pmd->bl_info.levels = 1;
40454- pmd->bl_info.value_type.context = pmd->data_sm;
40455+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
40456 pmd->bl_info.value_type.size = sizeof(__le64);
40457 pmd->bl_info.value_type.inc = data_block_inc;
40458 pmd->bl_info.value_type.dec = data_block_dec;
40459diff --git a/drivers/md/dm.c b/drivers/md/dm.c
40460index 33f2010..23fb84c 100644
40461--- a/drivers/md/dm.c
40462+++ b/drivers/md/dm.c
40463@@ -169,9 +169,9 @@ struct mapped_device {
40464 /*
40465 * Event handling.
40466 */
40467- atomic_t event_nr;
40468+ atomic_unchecked_t event_nr;
40469 wait_queue_head_t eventq;
40470- atomic_t uevent_seq;
40471+ atomic_unchecked_t uevent_seq;
40472 struct list_head uevent_list;
40473 spinlock_t uevent_lock; /* Protect access to uevent_list */
40474
40475@@ -1884,8 +1884,8 @@ static struct mapped_device *alloc_dev(int minor)
40476 rwlock_init(&md->map_lock);
40477 atomic_set(&md->holders, 1);
40478 atomic_set(&md->open_count, 0);
40479- atomic_set(&md->event_nr, 0);
40480- atomic_set(&md->uevent_seq, 0);
40481+ atomic_set_unchecked(&md->event_nr, 0);
40482+ atomic_set_unchecked(&md->uevent_seq, 0);
40483 INIT_LIST_HEAD(&md->uevent_list);
40484 spin_lock_init(&md->uevent_lock);
40485
40486@@ -2033,7 +2033,7 @@ static void event_callback(void *context)
40487
40488 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
40489
40490- atomic_inc(&md->event_nr);
40491+ atomic_inc_unchecked(&md->event_nr);
40492 wake_up(&md->eventq);
40493 }
40494
40495@@ -2690,18 +2690,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
40496
40497 uint32_t dm_next_uevent_seq(struct mapped_device *md)
40498 {
40499- return atomic_add_return(1, &md->uevent_seq);
40500+ return atomic_add_return_unchecked(1, &md->uevent_seq);
40501 }
40502
40503 uint32_t dm_get_event_nr(struct mapped_device *md)
40504 {
40505- return atomic_read(&md->event_nr);
40506+ return atomic_read_unchecked(&md->event_nr);
40507 }
40508
40509 int dm_wait_event(struct mapped_device *md, int event_nr)
40510 {
40511 return wait_event_interruptible(md->eventq,
40512- (event_nr != atomic_read(&md->event_nr)));
40513+ (event_nr != atomic_read_unchecked(&md->event_nr)));
40514 }
40515
40516 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
40517diff --git a/drivers/md/md.c b/drivers/md/md.c
40518index 51f0345..c77810e 100644
40519--- a/drivers/md/md.c
40520+++ b/drivers/md/md.c
40521@@ -234,10 +234,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
40522 * start build, activate spare
40523 */
40524 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
40525-static atomic_t md_event_count;
40526+static atomic_unchecked_t md_event_count;
40527 void md_new_event(struct mddev *mddev)
40528 {
40529- atomic_inc(&md_event_count);
40530+ atomic_inc_unchecked(&md_event_count);
40531 wake_up(&md_event_waiters);
40532 }
40533 EXPORT_SYMBOL_GPL(md_new_event);
40534@@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
40535 */
40536 static void md_new_event_inintr(struct mddev *mddev)
40537 {
40538- atomic_inc(&md_event_count);
40539+ atomic_inc_unchecked(&md_event_count);
40540 wake_up(&md_event_waiters);
40541 }
40542
40543@@ -1501,7 +1501,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
40544 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
40545 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
40546 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
40547- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
40548+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
40549
40550 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
40551 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
40552@@ -1745,7 +1745,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
40553 else
40554 sb->resync_offset = cpu_to_le64(0);
40555
40556- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
40557+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
40558
40559 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
40560 sb->size = cpu_to_le64(mddev->dev_sectors);
40561@@ -2750,7 +2750,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
40562 static ssize_t
40563 errors_show(struct md_rdev *rdev, char *page)
40564 {
40565- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
40566+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
40567 }
40568
40569 static ssize_t
40570@@ -2759,7 +2759,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
40571 char *e;
40572 unsigned long n = simple_strtoul(buf, &e, 10);
40573 if (*buf && (*e == 0 || *e == '\n')) {
40574- atomic_set(&rdev->corrected_errors, n);
40575+ atomic_set_unchecked(&rdev->corrected_errors, n);
40576 return len;
40577 }
40578 return -EINVAL;
40579@@ -3207,8 +3207,8 @@ int md_rdev_init(struct md_rdev *rdev)
40580 rdev->sb_loaded = 0;
40581 rdev->bb_page = NULL;
40582 atomic_set(&rdev->nr_pending, 0);
40583- atomic_set(&rdev->read_errors, 0);
40584- atomic_set(&rdev->corrected_errors, 0);
40585+ atomic_set_unchecked(&rdev->read_errors, 0);
40586+ atomic_set_unchecked(&rdev->corrected_errors, 0);
40587
40588 INIT_LIST_HEAD(&rdev->same_set);
40589 init_waitqueue_head(&rdev->blocked_wait);
40590@@ -7009,7 +7009,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
40591
40592 spin_unlock(&pers_lock);
40593 seq_printf(seq, "\n");
40594- seq->poll_event = atomic_read(&md_event_count);
40595+ seq->poll_event = atomic_read_unchecked(&md_event_count);
40596 return 0;
40597 }
40598 if (v == (void*)2) {
40599@@ -7112,7 +7112,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
40600 return error;
40601
40602 seq = file->private_data;
40603- seq->poll_event = atomic_read(&md_event_count);
40604+ seq->poll_event = atomic_read_unchecked(&md_event_count);
40605 return error;
40606 }
40607
40608@@ -7126,7 +7126,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
40609 /* always allow read */
40610 mask = POLLIN | POLLRDNORM;
40611
40612- if (seq->poll_event != atomic_read(&md_event_count))
40613+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
40614 mask |= POLLERR | POLLPRI;
40615 return mask;
40616 }
40617@@ -7170,7 +7170,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
40618 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
40619 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
40620 (int)part_stat_read(&disk->part0, sectors[1]) -
40621- atomic_read(&disk->sync_io);
40622+ atomic_read_unchecked(&disk->sync_io);
40623 /* sync IO will cause sync_io to increase before the disk_stats
40624 * as sync_io is counted when a request starts, and
40625 * disk_stats is counted when it completes.
40626diff --git a/drivers/md/md.h b/drivers/md/md.h
40627index 653f992b6..6af6c40 100644
40628--- a/drivers/md/md.h
40629+++ b/drivers/md/md.h
40630@@ -94,13 +94,13 @@ struct md_rdev {
40631 * only maintained for arrays that
40632 * support hot removal
40633 */
40634- atomic_t read_errors; /* number of consecutive read errors that
40635+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
40636 * we have tried to ignore.
40637 */
40638 struct timespec last_read_error; /* monotonic time since our
40639 * last read error
40640 */
40641- atomic_t corrected_errors; /* number of corrected read errors,
40642+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
40643 * for reporting to userspace and storing
40644 * in superblock.
40645 */
40646@@ -434,7 +434,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
40647
40648 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
40649 {
40650- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
40651+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
40652 }
40653
40654 struct md_personality
40655diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
40656index 3e6d115..ffecdeb 100644
40657--- a/drivers/md/persistent-data/dm-space-map.h
40658+++ b/drivers/md/persistent-data/dm-space-map.h
40659@@ -71,6 +71,7 @@ struct dm_space_map {
40660 dm_sm_threshold_fn fn,
40661 void *context);
40662 };
40663+typedef struct dm_space_map __no_const dm_space_map_no_const;
40664
40665 /*----------------------------------------------------------------*/
40666
40667diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
40668index 6f48244..7d29145 100644
40669--- a/drivers/md/raid1.c
40670+++ b/drivers/md/raid1.c
40671@@ -1822,7 +1822,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
40672 if (r1_sync_page_io(rdev, sect, s,
40673 bio->bi_io_vec[idx].bv_page,
40674 READ) != 0)
40675- atomic_add(s, &rdev->corrected_errors);
40676+ atomic_add_unchecked(s, &rdev->corrected_errors);
40677 }
40678 sectors -= s;
40679 sect += s;
40680@@ -2049,7 +2049,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
40681 test_bit(In_sync, &rdev->flags)) {
40682 if (r1_sync_page_io(rdev, sect, s,
40683 conf->tmppage, READ)) {
40684- atomic_add(s, &rdev->corrected_errors);
40685+ atomic_add_unchecked(s, &rdev->corrected_errors);
40686 printk(KERN_INFO
40687 "md/raid1:%s: read error corrected "
40688 "(%d sectors at %llu on %s)\n",
40689diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
40690index 081bb33..3c4b287 100644
40691--- a/drivers/md/raid10.c
40692+++ b/drivers/md/raid10.c
40693@@ -1940,7 +1940,7 @@ static void end_sync_read(struct bio *bio, int error)
40694 /* The write handler will notice the lack of
40695 * R10BIO_Uptodate and record any errors etc
40696 */
40697- atomic_add(r10_bio->sectors,
40698+ atomic_add_unchecked(r10_bio->sectors,
40699 &conf->mirrors[d].rdev->corrected_errors);
40700
40701 /* for reconstruct, we always reschedule after a read.
40702@@ -2298,7 +2298,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
40703 {
40704 struct timespec cur_time_mon;
40705 unsigned long hours_since_last;
40706- unsigned int read_errors = atomic_read(&rdev->read_errors);
40707+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
40708
40709 ktime_get_ts(&cur_time_mon);
40710
40711@@ -2320,9 +2320,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
40712 * overflowing the shift of read_errors by hours_since_last.
40713 */
40714 if (hours_since_last >= 8 * sizeof(read_errors))
40715- atomic_set(&rdev->read_errors, 0);
40716+ atomic_set_unchecked(&rdev->read_errors, 0);
40717 else
40718- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
40719+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
40720 }
40721
40722 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
40723@@ -2376,8 +2376,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
40724 return;
40725
40726 check_decay_read_errors(mddev, rdev);
40727- atomic_inc(&rdev->read_errors);
40728- if (atomic_read(&rdev->read_errors) > max_read_errors) {
40729+ atomic_inc_unchecked(&rdev->read_errors);
40730+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
40731 char b[BDEVNAME_SIZE];
40732 bdevname(rdev->bdev, b);
40733
40734@@ -2385,7 +2385,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
40735 "md/raid10:%s: %s: Raid device exceeded "
40736 "read_error threshold [cur %d:max %d]\n",
40737 mdname(mddev), b,
40738- atomic_read(&rdev->read_errors), max_read_errors);
40739+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
40740 printk(KERN_NOTICE
40741 "md/raid10:%s: %s: Failing raid device\n",
40742 mdname(mddev), b);
40743@@ -2540,7 +2540,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
40744 sect +
40745 choose_data_offset(r10_bio, rdev)),
40746 bdevname(rdev->bdev, b));
40747- atomic_add(s, &rdev->corrected_errors);
40748+ atomic_add_unchecked(s, &rdev->corrected_errors);
40749 }
40750
40751 rdev_dec_pending(rdev, mddev);
40752diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
40753index a35b846..e295c6d 100644
40754--- a/drivers/md/raid5.c
40755+++ b/drivers/md/raid5.c
40756@@ -1764,21 +1764,21 @@ static void raid5_end_read_request(struct bio * bi, int error)
40757 mdname(conf->mddev), STRIPE_SECTORS,
40758 (unsigned long long)s,
40759 bdevname(rdev->bdev, b));
40760- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
40761+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
40762 clear_bit(R5_ReadError, &sh->dev[i].flags);
40763 clear_bit(R5_ReWrite, &sh->dev[i].flags);
40764 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
40765 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
40766
40767- if (atomic_read(&rdev->read_errors))
40768- atomic_set(&rdev->read_errors, 0);
40769+ if (atomic_read_unchecked(&rdev->read_errors))
40770+ atomic_set_unchecked(&rdev->read_errors, 0);
40771 } else {
40772 const char *bdn = bdevname(rdev->bdev, b);
40773 int retry = 0;
40774 int set_bad = 0;
40775
40776 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
40777- atomic_inc(&rdev->read_errors);
40778+ atomic_inc_unchecked(&rdev->read_errors);
40779 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
40780 printk_ratelimited(
40781 KERN_WARNING
40782@@ -1806,7 +1806,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
40783 mdname(conf->mddev),
40784 (unsigned long long)s,
40785 bdn);
40786- } else if (atomic_read(&rdev->read_errors)
40787+ } else if (atomic_read_unchecked(&rdev->read_errors)
40788 > conf->max_nr_stripes)
40789 printk(KERN_WARNING
40790 "md/raid:%s: Too many read errors, failing device %s.\n",
40791diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
40792index 401ef64..836e563 100644
40793--- a/drivers/media/dvb-core/dvbdev.c
40794+++ b/drivers/media/dvb-core/dvbdev.c
40795@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
40796 const struct dvb_device *template, void *priv, int type)
40797 {
40798 struct dvb_device *dvbdev;
40799- struct file_operations *dvbdevfops;
40800+ file_operations_no_const *dvbdevfops;
40801 struct device *clsdev;
40802 int minor;
40803 int id;
40804diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
40805index 9b6c3bb..baeb5c7 100644
40806--- a/drivers/media/dvb-frontends/dib3000.h
40807+++ b/drivers/media/dvb-frontends/dib3000.h
40808@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
40809 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
40810 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
40811 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
40812-};
40813+} __no_const;
40814
40815 #if IS_ENABLED(CONFIG_DVB_DIB3000MB)
40816 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
40817diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
40818index c7a9be1..683f6f8 100644
40819--- a/drivers/media/pci/cx88/cx88-video.c
40820+++ b/drivers/media/pci/cx88/cx88-video.c
40821@@ -50,9 +50,9 @@ MODULE_VERSION(CX88_VERSION);
40822
40823 /* ------------------------------------------------------------------ */
40824
40825-static unsigned int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
40826-static unsigned int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
40827-static unsigned int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
40828+static int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
40829+static int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
40830+static int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
40831
40832 module_param_array(video_nr, int, NULL, 0444);
40833 module_param_array(vbi_nr, int, NULL, 0444);
40834diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
40835index d338b19..aae4f9e 100644
40836--- a/drivers/media/platform/omap/omap_vout.c
40837+++ b/drivers/media/platform/omap/omap_vout.c
40838@@ -63,7 +63,6 @@ enum omap_vout_channels {
40839 OMAP_VIDEO2,
40840 };
40841
40842-static struct videobuf_queue_ops video_vbq_ops;
40843 /* Variables configurable through module params*/
40844 static u32 video1_numbuffers = 3;
40845 static u32 video2_numbuffers = 3;
40846@@ -1015,6 +1014,12 @@ static int omap_vout_open(struct file *file)
40847 {
40848 struct videobuf_queue *q;
40849 struct omap_vout_device *vout = NULL;
40850+ static struct videobuf_queue_ops video_vbq_ops = {
40851+ .buf_setup = omap_vout_buffer_setup,
40852+ .buf_prepare = omap_vout_buffer_prepare,
40853+ .buf_release = omap_vout_buffer_release,
40854+ .buf_queue = omap_vout_buffer_queue,
40855+ };
40856
40857 vout = video_drvdata(file);
40858 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
40859@@ -1032,10 +1037,6 @@ static int omap_vout_open(struct file *file)
40860 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
40861
40862 q = &vout->vbq;
40863- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
40864- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
40865- video_vbq_ops.buf_release = omap_vout_buffer_release;
40866- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
40867 spin_lock_init(&vout->vbq_lock);
40868
40869 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
40870diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
40871index 04e6490..2df65bf 100644
40872--- a/drivers/media/platform/s5p-tv/mixer.h
40873+++ b/drivers/media/platform/s5p-tv/mixer.h
40874@@ -156,7 +156,7 @@ struct mxr_layer {
40875 /** layer index (unique identifier) */
40876 int idx;
40877 /** callbacks for layer methods */
40878- struct mxr_layer_ops ops;
40879+ struct mxr_layer_ops *ops;
40880 /** format array */
40881 const struct mxr_format **fmt_array;
40882 /** size of format array */
40883diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
40884index b93a21f..2535195 100644
40885--- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
40886+++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
40887@@ -235,7 +235,7 @@ struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
40888 {
40889 struct mxr_layer *layer;
40890 int ret;
40891- struct mxr_layer_ops ops = {
40892+ static struct mxr_layer_ops ops = {
40893 .release = mxr_graph_layer_release,
40894 .buffer_set = mxr_graph_buffer_set,
40895 .stream_set = mxr_graph_stream_set,
40896diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
40897index b713403..53cb5ad 100644
40898--- a/drivers/media/platform/s5p-tv/mixer_reg.c
40899+++ b/drivers/media/platform/s5p-tv/mixer_reg.c
40900@@ -276,7 +276,7 @@ static void mxr_irq_layer_handle(struct mxr_layer *layer)
40901 layer->update_buf = next;
40902 }
40903
40904- layer->ops.buffer_set(layer, layer->update_buf);
40905+ layer->ops->buffer_set(layer, layer->update_buf);
40906
40907 if (done && done != layer->shadow_buf)
40908 vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
40909diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
40910index ef0efdf..8c78eb6 100644
40911--- a/drivers/media/platform/s5p-tv/mixer_video.c
40912+++ b/drivers/media/platform/s5p-tv/mixer_video.c
40913@@ -209,7 +209,7 @@ static void mxr_layer_default_geo(struct mxr_layer *layer)
40914 layer->geo.src.height = layer->geo.src.full_height;
40915
40916 mxr_geometry_dump(mdev, &layer->geo);
40917- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
40918+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
40919 mxr_geometry_dump(mdev, &layer->geo);
40920 }
40921
40922@@ -227,7 +227,7 @@ static void mxr_layer_update_output(struct mxr_layer *layer)
40923 layer->geo.dst.full_width = mbus_fmt.width;
40924 layer->geo.dst.full_height = mbus_fmt.height;
40925 layer->geo.dst.field = mbus_fmt.field;
40926- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
40927+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
40928
40929 mxr_geometry_dump(mdev, &layer->geo);
40930 }
40931@@ -333,7 +333,7 @@ static int mxr_s_fmt(struct file *file, void *priv,
40932 /* set source size to highest accepted value */
40933 geo->src.full_width = max(geo->dst.full_width, pix->width);
40934 geo->src.full_height = max(geo->dst.full_height, pix->height);
40935- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
40936+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
40937 mxr_geometry_dump(mdev, &layer->geo);
40938 /* set cropping to total visible screen */
40939 geo->src.width = pix->width;
40940@@ -341,12 +341,12 @@ static int mxr_s_fmt(struct file *file, void *priv,
40941 geo->src.x_offset = 0;
40942 geo->src.y_offset = 0;
40943 /* assure consistency of geometry */
40944- layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
40945+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
40946 mxr_geometry_dump(mdev, &layer->geo);
40947 /* set full size to lowest possible value */
40948 geo->src.full_width = 0;
40949 geo->src.full_height = 0;
40950- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
40951+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
40952 mxr_geometry_dump(mdev, &layer->geo);
40953
40954 /* returning results */
40955@@ -473,7 +473,7 @@ static int mxr_s_selection(struct file *file, void *fh,
40956 target->width = s->r.width;
40957 target->height = s->r.height;
40958
40959- layer->ops.fix_geometry(layer, stage, s->flags);
40960+ layer->ops->fix_geometry(layer, stage, s->flags);
40961
40962 /* retrieve update selection rectangle */
40963 res.left = target->x_offset;
40964@@ -954,13 +954,13 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
40965 mxr_output_get(mdev);
40966
40967 mxr_layer_update_output(layer);
40968- layer->ops.format_set(layer);
40969+ layer->ops->format_set(layer);
40970 /* enabling layer in hardware */
40971 spin_lock_irqsave(&layer->enq_slock, flags);
40972 layer->state = MXR_LAYER_STREAMING;
40973 spin_unlock_irqrestore(&layer->enq_slock, flags);
40974
40975- layer->ops.stream_set(layer, MXR_ENABLE);
40976+ layer->ops->stream_set(layer, MXR_ENABLE);
40977 mxr_streamer_get(mdev);
40978
40979 return 0;
40980@@ -1030,7 +1030,7 @@ static int stop_streaming(struct vb2_queue *vq)
40981 spin_unlock_irqrestore(&layer->enq_slock, flags);
40982
40983 /* disabling layer in hardware */
40984- layer->ops.stream_set(layer, MXR_DISABLE);
40985+ layer->ops->stream_set(layer, MXR_DISABLE);
40986 /* remove one streamer */
40987 mxr_streamer_put(mdev);
40988 /* allow changes in output configuration */
40989@@ -1069,8 +1069,8 @@ void mxr_base_layer_unregister(struct mxr_layer *layer)
40990
40991 void mxr_layer_release(struct mxr_layer *layer)
40992 {
40993- if (layer->ops.release)
40994- layer->ops.release(layer);
40995+ if (layer->ops->release)
40996+ layer->ops->release(layer);
40997 }
40998
40999 void mxr_base_layer_release(struct mxr_layer *layer)
41000@@ -1096,7 +1096,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
41001
41002 layer->mdev = mdev;
41003 layer->idx = idx;
41004- layer->ops = *ops;
41005+ layer->ops = ops;
41006
41007 spin_lock_init(&layer->enq_slock);
41008 INIT_LIST_HEAD(&layer->enq_list);
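
[annotation] Instead of copying the caller's ops table into every layer, the layer now keeps a pointer to it, and the per-driver tables (in mixer_grp_layer.c and mixer_vp_layer.c) become static. One shared table per driver means it can be constified, at the cost of one extra dereference at each call site. A sketch using callbacks named in this patch:

	/* illustrative only: shared static ops table, referenced not copied */
	static struct mxr_layer_ops demo_ops = {
		.release = mxr_graph_layer_release,
		.stream_set = mxr_graph_stream_set,
	};

	static void demo_layer_init(struct mxr_layer *layer)
	{
		layer->ops = &demo_ops;			/* was: layer->ops = *ops; */
		layer->ops->stream_set(layer, MXR_ENABLE);	/* extra deref */
	}
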
41009diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
41010index 3d13a63..da31bf1 100644
41011--- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
41012+++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
41013@@ -206,7 +206,7 @@ struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
41014 {
41015 struct mxr_layer *layer;
41016 int ret;
41017- struct mxr_layer_ops ops = {
41018+ static struct mxr_layer_ops ops = {
41019 .release = mxr_vp_layer_release,
41020 .buffer_set = mxr_vp_buffer_set,
41021 .stream_set = mxr_vp_stream_set,
41022diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
41023index 545c04c..a14bded 100644
41024--- a/drivers/media/radio/radio-cadet.c
41025+++ b/drivers/media/radio/radio-cadet.c
41026@@ -324,6 +324,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
41027 unsigned char readbuf[RDS_BUFFER];
41028 int i = 0;
41029
41030+ if (count > RDS_BUFFER)
41031+ return -EFAULT;
41032 mutex_lock(&dev->lock);
41033 if (dev->rdsstat == 0)
41034 cadet_start_rds(dev);
41035@@ -339,7 +341,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
41036 while (i < count && dev->rdsin != dev->rdsout)
41037 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
41038
41039- if (i && copy_to_user(data, readbuf, i))
41040+ if (i > sizeof(readbuf) || copy_to_user(data, readbuf, i))
41041 i = -EFAULT;
41042 unlock:
41043 mutex_unlock(&dev->lock);
41044diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
41045index 3940bb0..fb3952a 100644
41046--- a/drivers/media/usb/dvb-usb/cxusb.c
41047+++ b/drivers/media/usb/dvb-usb/cxusb.c
41048@@ -1068,7 +1068,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
41049
41050 struct dib0700_adapter_state {
41051 int (*set_param_save) (struct dvb_frontend *);
41052-};
41053+} __no_const;
41054
41055 static int dib7070_set_param_override(struct dvb_frontend *fe)
41056 {
41057diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
41058index 6e237b6..dc25556 100644
41059--- a/drivers/media/usb/dvb-usb/dw2102.c
41060+++ b/drivers/media/usb/dvb-usb/dw2102.c
41061@@ -118,7 +118,7 @@ struct su3000_state {
41062
41063 struct s6x0_state {
41064 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
41065-};
41066+} __no_const;
41067
41068 /* debug */
41069 static int dvb_usb_dw2102_debug;
41070diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
41071index f129551..ecf6514 100644
41072--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
41073+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
41074@@ -326,7 +326,7 @@ struct v4l2_buffer32 {
41075 __u32 reserved;
41076 };
41077
41078-static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
41079+static int get_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
41080 enum v4l2_memory memory)
41081 {
41082 void __user *up_pln;
41083@@ -355,7 +355,7 @@ static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
41084 return 0;
41085 }
41086
41087-static int put_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
41088+static int put_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
41089 enum v4l2_memory memory)
41090 {
41091 if (copy_in_user(up32, up, 2 * sizeof(__u32)) ||
41092@@ -772,7 +772,7 @@ static int put_v4l2_subdev_edid32(struct v4l2_subdev_edid *kp, struct v4l2_subde
41093 put_user(kp->start_block, &up->start_block) ||
41094 put_user(kp->blocks, &up->blocks) ||
41095 put_user(tmp, &up->edid) ||
41096- copy_to_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
41097+ copy_to_user(up->reserved, kp->reserved, sizeof(kp->reserved)))
41098 return -EFAULT;
41099 return 0;
41100 }
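
[annotation] The reserved-field change above is a plain bug fix rather than hardening: copy_to_user() takes the user-space destination first and the kernel source second, and the original call had them reversed, so the compat handler copied in the wrong direction. The corrected shape:

	/* copy_to_user(user destination, kernel source, size) */
	if (copy_to_user(up->reserved, kp->reserved, sizeof(kp->reserved)))
		return -EFAULT;
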
41101diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
41102index 7658586..1079260 100644
41103--- a/drivers/media/v4l2-core/v4l2-ioctl.c
41104+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
41105@@ -1995,7 +1995,8 @@ struct v4l2_ioctl_info {
41106 struct file *file, void *fh, void *p);
41107 } u;
41108 void (*debug)(const void *arg, bool write_only);
41109-};
41110+} __do_const;
41111+typedef struct v4l2_ioctl_info __no_const v4l2_ioctl_info_no_const;
41112
41113 /* This control needs a priority check */
41114 #define INFO_FL_PRIO (1 << 0)
41115@@ -2177,7 +2178,7 @@ static long __video_do_ioctl(struct file *file,
41116 struct video_device *vfd = video_devdata(file);
41117 const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
41118 bool write_only = false;
41119- struct v4l2_ioctl_info default_info;
41120+ v4l2_ioctl_info_no_const default_info;
41121 const struct v4l2_ioctl_info *info;
41122 void *fh = file->private_data;
41123 struct v4l2_fh *vfh = NULL;
41124@@ -2251,7 +2252,7 @@ done:
41125 }
41126
41127 static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
41128- void * __user *user_ptr, void ***kernel_ptr)
41129+ void __user **user_ptr, void ***kernel_ptr)
41130 {
41131 int ret = 0;
41132
41133@@ -2267,7 +2268,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
41134 ret = -EINVAL;
41135 break;
41136 }
41137- *user_ptr = (void __user *)buf->m.planes;
41138+ *user_ptr = (void __force_user *)buf->m.planes;
41139 *kernel_ptr = (void *)&buf->m.planes;
41140 *array_size = sizeof(struct v4l2_plane) * buf->length;
41141 ret = 1;
41142@@ -2302,7 +2303,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
41143 ret = -EINVAL;
41144 break;
41145 }
41146- *user_ptr = (void __user *)ctrls->controls;
41147+ *user_ptr = (void __force_user *)ctrls->controls;
41148 *kernel_ptr = (void *)&ctrls->controls;
41149 *array_size = sizeof(struct v4l2_ext_control)
41150 * ctrls->count;
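
__do_const and __no_const above come from this patch's constify gcc plugin: the ioctl dispatch table is made read-only, and the single call site that needs a modified entry gets an explicitly writable typedef for its stack copy. A userspace sketch of the same split, with made-up names:

    #include <stdio.h>

    struct ioctl_info {
            unsigned int ioctl;
            const char *name;
    };

    /* Read-only table, playing the role of the __do_const structure. */
    static const struct ioctl_info table[] = {
            { 0x5601, "VIDIOC_DEMO" },          /* illustrative entry */
    };

    int main(void)
    {
            /* Plain writable stack copy, like v4l2_ioctl_info_no_const. */
            struct ioctl_info default_info = table[0];

            default_info.name = "adjusted-at-runtime";
            printf("%#x -> %s\n", default_info.ioctl, default_info.name);
            return 0;
    }
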
41151diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
41152index 767ff4d..c69d259 100644
41153--- a/drivers/message/fusion/mptbase.c
41154+++ b/drivers/message/fusion/mptbase.c
41155@@ -6755,8 +6755,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
41156 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
41157 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
41158
41159+#ifdef CONFIG_GRKERNSEC_HIDESYM
41160+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
41161+#else
41162 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
41163 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
41164+#endif
41165+
41166 /*
41167 * Rounding UP to nearest 4-kB boundary here...
41168 */
41169@@ -6769,7 +6774,11 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
41170 ioc->facts.GlobalCredits);
41171
41172 seq_printf(m, " Frames @ 0x%p (Dma @ 0x%p)\n",
41173+#ifdef CONFIG_GRKERNSEC_HIDESYM
41174+ NULL, NULL);
41175+#else
41176 (void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma);
41177+#endif
41178 sz = (ioc->reply_sz * ioc->reply_depth) + 128;
41179 seq_printf(m, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n",
41180 ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz);
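
CONFIG_GRKERNSEC_HIDESYM, tested above, keeps the /proc output format stable while refusing to print real kernel addresses that would help an attacker locate objects. A tiny userspace illustration of the idea; only the config name comes from the patch, the HIDESYM macro below is a stand-in:

    #include <stdio.h>

    #ifdef HIDESYM                  /* stand-in for CONFIG_GRKERNSEC_HIDESYM */
    #define shown(p) ((void *)0)    /* same format string, no real address */
    #else
    #define shown(p) ((void *)(p))
    #endif

    int main(void)
    {
            int frames;

            printf("RequestFrames @ %p\n", shown(&frames));
            return 0;
    }
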
41181diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
41182index dd239bd..689c4f7 100644
41183--- a/drivers/message/fusion/mptsas.c
41184+++ b/drivers/message/fusion/mptsas.c
41185@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
41186 return 0;
41187 }
41188
41189+static inline void
41190+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
41191+{
41192+ if (phy_info->port_details) {
41193+ phy_info->port_details->rphy = rphy;
41194+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
41195+ ioc->name, rphy));
41196+ }
41197+
41198+ if (rphy) {
41199+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
41200+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
41201+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
41202+ ioc->name, rphy, rphy->dev.release));
41203+ }
41204+}
41205+
41206 /* no mutex */
41207 static void
41208 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
41209@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
41210 return NULL;
41211 }
41212
41213-static inline void
41214-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
41215-{
41216- if (phy_info->port_details) {
41217- phy_info->port_details->rphy = rphy;
41218- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
41219- ioc->name, rphy));
41220- }
41221-
41222- if (rphy) {
41223- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
41224- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
41225- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
41226- ioc->name, rphy, rphy->dev.release));
41227- }
41228-}
41229-
41230 static inline struct sas_port *
41231 mptsas_get_port(struct mptsas_phyinfo *phy_info)
41232 {
41233diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
41234index 727819c..ad74694 100644
41235--- a/drivers/message/fusion/mptscsih.c
41236+++ b/drivers/message/fusion/mptscsih.c
41237@@ -1271,15 +1271,16 @@ mptscsih_info(struct Scsi_Host *SChost)
41238
41239 h = shost_priv(SChost);
41240
41241- if (h) {
41242- if (h->info_kbuf == NULL)
41243- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
41244- return h->info_kbuf;
41245- h->info_kbuf[0] = '\0';
41246+ if (!h)
41247+ return NULL;
41248
41249- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
41250- h->info_kbuf[size-1] = '\0';
41251- }
41252+ if (h->info_kbuf == NULL)
41253+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
41254+ return h->info_kbuf;
41255+ h->info_kbuf[0] = '\0';
41256+
41257+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
41258+ h->info_kbuf[size-1] = '\0';
41259
41260 return h->info_kbuf;
41261 }
41262diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
41263index b7d87cd..9890039 100644
41264--- a/drivers/message/i2o/i2o_proc.c
41265+++ b/drivers/message/i2o/i2o_proc.c
41266@@ -255,12 +255,6 @@ static char *scsi_devices[] = {
41267 "Array Controller Device"
41268 };
41269
41270-static char *chtostr(char *tmp, u8 *chars, int n)
41271-{
41272- tmp[0] = 0;
41273- return strncat(tmp, (char *)chars, n);
41274-}
41275-
41276 static int i2o_report_query_status(struct seq_file *seq, int block_status,
41277 char *group)
41278 {
41279@@ -790,7 +784,6 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
41280 } *result;
41281
41282 i2o_exec_execute_ddm_table ddm_table;
41283- char tmp[28 + 1];
41284
41285 result = kmalloc(sizeof(*result), GFP_KERNEL);
41286 if (!result)
41287@@ -825,8 +818,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
41288
41289 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
41290 seq_printf(seq, "%-#8x", ddm_table.module_id);
41291- seq_printf(seq, "%-29s",
41292- chtostr(tmp, ddm_table.module_name_version, 28));
41293+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
41294 seq_printf(seq, "%9d ", ddm_table.data_size);
41295 seq_printf(seq, "%8d", ddm_table.code_size);
41296
41297@@ -893,7 +885,6 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
41298
41299 i2o_driver_result_table *result;
41300 i2o_driver_store_table *dst;
41301- char tmp[28 + 1];
41302
41303 result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
41304 if (result == NULL)
41305@@ -928,9 +919,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
41306
41307 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
41308 seq_printf(seq, "%-#8x", dst->module_id);
41309- seq_printf(seq, "%-29s",
41310- chtostr(tmp, dst->module_name_version, 28));
41311- seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8));
41312+ seq_printf(seq, "%-.28s", dst->module_name_version);
41313+ seq_printf(seq, "%-.8s", dst->date);
41314 seq_printf(seq, "%8d ", dst->module_size);
41315 seq_printf(seq, "%8d ", dst->mpb_size);
41316 seq_printf(seq, "0x%04x", dst->module_flags);
41317@@ -1250,7 +1240,6 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
41318 // == (allow) 512d bytes (max)
41319 static u16 *work16 = (u16 *) work32;
41320 int token;
41321- char tmp[16 + 1];
41322
41323 token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
41324
41325@@ -1262,14 +1251,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
41326 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
41327 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
41328 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
41329- seq_printf(seq, "Vendor info : %s\n",
41330- chtostr(tmp, (u8 *) (work32 + 2), 16));
41331- seq_printf(seq, "Product info : %s\n",
41332- chtostr(tmp, (u8 *) (work32 + 6), 16));
41333- seq_printf(seq, "Description : %s\n",
41334- chtostr(tmp, (u8 *) (work32 + 10), 16));
41335- seq_printf(seq, "Product rev. : %s\n",
41336- chtostr(tmp, (u8 *) (work32 + 14), 8));
41337+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
41338+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
41339+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
41340+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
41341
41342 seq_printf(seq, "Serial number : ");
41343 print_serial_number(seq, (u8 *) (work32 + 16),
41344@@ -1306,8 +1291,6 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
41345 u8 pad[256]; // allow up to 256 byte (max) serial number
41346 } result;
41347
41348- char tmp[24 + 1];
41349-
41350 token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
41351
41352 if (token < 0) {
41353@@ -1316,10 +1299,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
41354 }
41355
41356 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
41357- seq_printf(seq, "Module name : %s\n",
41358- chtostr(tmp, result.module_name, 24));
41359- seq_printf(seq, "Module revision : %s\n",
41360- chtostr(tmp, result.module_rev, 8));
41361+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
41362+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
41363
41364 seq_printf(seq, "Serial number : ");
41365 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
41366@@ -1343,8 +1324,6 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
41367 u8 instance_number[4];
41368 } result;
41369
41370- char tmp[64 + 1];
41371-
41372 token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
41373
41374 if (token < 0) {
41375@@ -1352,14 +1331,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
41376 return 0;
41377 }
41378
41379- seq_printf(seq, "Device name : %s\n",
41380- chtostr(tmp, result.device_name, 64));
41381- seq_printf(seq, "Service name : %s\n",
41382- chtostr(tmp, result.service_name, 64));
41383- seq_printf(seq, "Physical name : %s\n",
41384- chtostr(tmp, result.physical_location, 64));
41385- seq_printf(seq, "Instance number : %s\n",
41386- chtostr(tmp, result.instance_number, 4));
41387+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
41388+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
41389+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
41390+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
41391
41392 return 0;
41393 }
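
The chtostr() removal above leans on a printf guarantee: when an explicit precision is given, "%s" reads at most that many bytes and the argument need not be NUL-terminated, so the temporary bounce buffer and the strncat() dance were dead weight. A runnable demonstration:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char name[28];                          /* fixed-width firmware field */

            memset(name, 'A', sizeof(name));        /* deliberately unterminated */

            /* "%.28s" stops after 28 bytes even with no '\0' present. */
            printf("%-.28s|\n", name);
            return 0;
    }
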
41394diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
41395index a8c08f3..155fe3d 100644
41396--- a/drivers/message/i2o/iop.c
41397+++ b/drivers/message/i2o/iop.c
41398@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
41399
41400 spin_lock_irqsave(&c->context_list_lock, flags);
41401
41402- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
41403- atomic_inc(&c->context_list_counter);
41404+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
41405+ atomic_inc_unchecked(&c->context_list_counter);
41406
41407- entry->context = atomic_read(&c->context_list_counter);
41408+ entry->context = atomic_read_unchecked(&c->context_list_counter);
41409
41410 list_add(&entry->list, &c->context_list);
41411
41412@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
41413
41414 #if BITS_PER_LONG == 64
41415 spin_lock_init(&c->context_list_lock);
41416- atomic_set(&c->context_list_counter, 0);
41417+ atomic_set_unchecked(&c->context_list_counter, 0);
41418 INIT_LIST_HEAD(&c->context_list);
41419 #endif
41420
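
context_list_counter is an ID generator, not a reference count, so wrap-around is harmless; the *_unchecked atomic variants this patch introduces exempt it from the PaX REFCOUNT overflow check that plain atomic_t operations get. A kernel-style sketch of the split, assuming the patch's unchecked API:

    #include <linux/atomic.h>
    #include <linux/types.h>

    static atomic_t object_refs = ATOMIC_INIT(1);   /* real refcount: stays checked */
    static atomic_unchecked_t next_ctx;             /* cookie source: allowed to wrap */

    static u32 alloc_context_id(void)
    {
            /* Wrapping past INT_MAX is fine for an opaque cookie. */
            return atomic_inc_return_unchecked(&next_ctx);
    }
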
41421diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
41422index 45ece11..8efa218 100644
41423--- a/drivers/mfd/janz-cmodio.c
41424+++ b/drivers/mfd/janz-cmodio.c
41425@@ -13,6 +13,7 @@
41426
41427 #include <linux/kernel.h>
41428 #include <linux/module.h>
41429+#include <linux/slab.h>
41430 #include <linux/init.h>
41431 #include <linux/pci.h>
41432 #include <linux/interrupt.h>
41433diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
41434index a5f9888..1c0ed56 100644
41435--- a/drivers/mfd/twl4030-irq.c
41436+++ b/drivers/mfd/twl4030-irq.c
41437@@ -35,6 +35,7 @@
41438 #include <linux/of.h>
41439 #include <linux/irqdomain.h>
41440 #include <linux/i2c/twl.h>
41441+#include <asm/pgtable.h>
41442
41443 #include "twl-core.h"
41444
41445@@ -728,10 +729,12 @@ int twl4030_init_irq(struct device *dev, int irq_num)
41446 * Install an irq handler for each of the SIH modules;
41447 * clone dummy irq_chip since PIH can't *do* anything
41448 */
41449- twl4030_irq_chip = dummy_irq_chip;
41450- twl4030_irq_chip.name = "twl4030";
41451+ pax_open_kernel();
41452+ memcpy((void *)&twl4030_irq_chip, &dummy_irq_chip, sizeof twl4030_irq_chip);
41453+ *(const char **)&twl4030_irq_chip.name = "twl4030";
41454
41455- twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
41456+ *(void **)&twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
41457+ pax_close_kernel();
41458
41459 for (i = irq_base; i < irq_end; i++) {
41460 irq_set_chip_and_handler(i, &twl4030_irq_chip,
41461diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c
41462index 277a8db..0e0b754 100644
41463--- a/drivers/mfd/twl6030-irq.c
41464+++ b/drivers/mfd/twl6030-irq.c
41465@@ -387,10 +387,12 @@ int twl6030_init_irq(struct device *dev, int irq_num)
41466 * install an irq handler for each of the modules;
41467 * clone dummy irq_chip since PIH can't *do* anything
41468 */
41469- twl6030_irq_chip = dummy_irq_chip;
41470- twl6030_irq_chip.name = "twl6030";
41471- twl6030_irq_chip.irq_set_type = NULL;
41472- twl6030_irq_chip.irq_set_wake = twl6030_irq_set_wake;
41473+ pax_open_kernel();
41474+ memcpy((void *)&twl6030_irq_chip, &dummy_irq_chip, sizeof twl6030_irq_chip);
41475+ *(const char **)&twl6030_irq_chip.name = "twl6030";
41476+ *(void **)&twl6030_irq_chip.irq_set_type = NULL;
41477+ *(void **)&twl6030_irq_chip.irq_set_wake = twl6030_irq_set_wake;
41478+ pax_close_kernel();
41479
41480 for (i = irq_base; i < irq_end; i++) {
41481 irq_set_chip_and_handler(i, &twl6030_irq_chip,
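
Under the patch's KERNEXEC/constify model, irq_chip instances are read-only after boot, so the rare legitimate writer has to open a write window first. pax_open_kernel() and pax_close_kernel() are defined by this patch, not by mainline; the sketch below shows only the calling pattern:

    #include <linux/irq.h>
    #include <linux/string.h>

    static void clone_and_rename_chip(struct irq_chip *chip, const char *name)
    {
            pax_open_kernel();                      /* make const data writable */
            memcpy((void *)chip, &dummy_irq_chip, sizeof(*chip));
            *(const char **)&chip->name = name;     /* write through a cast, as above */
            pax_close_kernel();                     /* restore read-only mapping */
    }
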
41482diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
41483index f32550a..e3e52a2 100644
41484--- a/drivers/misc/c2port/core.c
41485+++ b/drivers/misc/c2port/core.c
41486@@ -920,7 +920,9 @@ struct c2port_device *c2port_device_register(char *name,
41487 mutex_init(&c2dev->mutex);
41488
41489 /* Create binary file */
41490- c2port_bin_attrs.size = ops->blocks_num * ops->block_size;
41491+ pax_open_kernel();
41492+ *(size_t *)&c2port_bin_attrs.size = ops->blocks_num * ops->block_size;
41493+ pax_close_kernel();
41494 ret = device_create_bin_file(c2dev->dev, &c2port_bin_attrs);
41495 if (unlikely(ret))
41496 goto error_device_create_bin_file;
41497diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
41498index 36f5d52..32311c3 100644
41499--- a/drivers/misc/kgdbts.c
41500+++ b/drivers/misc/kgdbts.c
41501@@ -834,7 +834,7 @@ static void run_plant_and_detach_test(int is_early)
41502 char before[BREAK_INSTR_SIZE];
41503 char after[BREAK_INSTR_SIZE];
41504
41505- probe_kernel_read(before, (char *)kgdbts_break_test,
41506+ probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test),
41507 BREAK_INSTR_SIZE);
41508 init_simple_test();
41509 ts.tst = plant_and_detach_test;
41510@@ -842,7 +842,7 @@ static void run_plant_and_detach_test(int is_early)
41511 /* Activate test with initial breakpoint */
41512 if (!is_early)
41513 kgdb_breakpoint();
41514- probe_kernel_read(after, (char *)kgdbts_break_test,
41515+ probe_kernel_read(after, ktla_ktva((char *)kgdbts_break_test),
41516 BREAK_INSTR_SIZE);
41517 if (memcmp(before, after, BREAK_INSTR_SIZE)) {
41518 printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
41519diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
41520index 4cd4a3d..b48cbc7 100644
41521--- a/drivers/misc/lis3lv02d/lis3lv02d.c
41522+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
41523@@ -498,7 +498,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
41524 * the lid is closed. This leads to interrupts as soon as a little move
41525 * is done.
41526 */
41527- atomic_inc(&lis3->count);
41528+ atomic_inc_unchecked(&lis3->count);
41529
41530 wake_up_interruptible(&lis3->misc_wait);
41531 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
41532@@ -584,7 +584,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
41533 if (lis3->pm_dev)
41534 pm_runtime_get_sync(lis3->pm_dev);
41535
41536- atomic_set(&lis3->count, 0);
41537+ atomic_set_unchecked(&lis3->count, 0);
41538 return 0;
41539 }
41540
41541@@ -616,7 +616,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
41542 add_wait_queue(&lis3->misc_wait, &wait);
41543 while (true) {
41544 set_current_state(TASK_INTERRUPTIBLE);
41545- data = atomic_xchg(&lis3->count, 0);
41546+ data = atomic_xchg_unchecked(&lis3->count, 0);
41547 if (data)
41548 break;
41549
41550@@ -657,7 +657,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
41551 struct lis3lv02d, miscdev);
41552
41553 poll_wait(file, &lis3->misc_wait, wait);
41554- if (atomic_read(&lis3->count))
41555+ if (atomic_read_unchecked(&lis3->count))
41556 return POLLIN | POLLRDNORM;
41557 return 0;
41558 }
41559diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
41560index c439c82..1f20f57 100644
41561--- a/drivers/misc/lis3lv02d/lis3lv02d.h
41562+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
41563@@ -297,7 +297,7 @@ struct lis3lv02d {
41564 struct input_polled_dev *idev; /* input device */
41565 struct platform_device *pdev; /* platform device */
41566 struct regulator_bulk_data regulators[2];
41567- atomic_t count; /* interrupt count after last read */
41568+ atomic_unchecked_t count; /* interrupt count after last read */
41569 union axis_conversion ac; /* hw -> logical axis */
41570 int mapped_btns[3];
41571
41572diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
41573index 2f30bad..c4c13d0 100644
41574--- a/drivers/misc/sgi-gru/gruhandles.c
41575+++ b/drivers/misc/sgi-gru/gruhandles.c
41576@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
41577 unsigned long nsec;
41578
41579 nsec = CLKS2NSEC(clks);
41580- atomic_long_inc(&mcs_op_statistics[op].count);
41581- atomic_long_add(nsec, &mcs_op_statistics[op].total);
41582+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
41583+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
41584 if (mcs_op_statistics[op].max < nsec)
41585 mcs_op_statistics[op].max = nsec;
41586 }
41587diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
41588index 797d796..ae8f01e 100644
41589--- a/drivers/misc/sgi-gru/gruprocfs.c
41590+++ b/drivers/misc/sgi-gru/gruprocfs.c
41591@@ -32,9 +32,9 @@
41592
41593 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
41594
41595-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
41596+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
41597 {
41598- unsigned long val = atomic_long_read(v);
41599+ unsigned long val = atomic_long_read_unchecked(v);
41600
41601 seq_printf(s, "%16lu %s\n", val, id);
41602 }
41603@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
41604
41605 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
41606 for (op = 0; op < mcsop_last; op++) {
41607- count = atomic_long_read(&mcs_op_statistics[op].count);
41608- total = atomic_long_read(&mcs_op_statistics[op].total);
41609+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
41610+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
41611 max = mcs_op_statistics[op].max;
41612 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
41613 count ? total / count : 0, max);
41614diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
41615index 5c3ce24..4915ccb 100644
41616--- a/drivers/misc/sgi-gru/grutables.h
41617+++ b/drivers/misc/sgi-gru/grutables.h
41618@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
41619 * GRU statistics.
41620 */
41621 struct gru_stats_s {
41622- atomic_long_t vdata_alloc;
41623- atomic_long_t vdata_free;
41624- atomic_long_t gts_alloc;
41625- atomic_long_t gts_free;
41626- atomic_long_t gms_alloc;
41627- atomic_long_t gms_free;
41628- atomic_long_t gts_double_allocate;
41629- atomic_long_t assign_context;
41630- atomic_long_t assign_context_failed;
41631- atomic_long_t free_context;
41632- atomic_long_t load_user_context;
41633- atomic_long_t load_kernel_context;
41634- atomic_long_t lock_kernel_context;
41635- atomic_long_t unlock_kernel_context;
41636- atomic_long_t steal_user_context;
41637- atomic_long_t steal_kernel_context;
41638- atomic_long_t steal_context_failed;
41639- atomic_long_t nopfn;
41640- atomic_long_t asid_new;
41641- atomic_long_t asid_next;
41642- atomic_long_t asid_wrap;
41643- atomic_long_t asid_reuse;
41644- atomic_long_t intr;
41645- atomic_long_t intr_cbr;
41646- atomic_long_t intr_tfh;
41647- atomic_long_t intr_spurious;
41648- atomic_long_t intr_mm_lock_failed;
41649- atomic_long_t call_os;
41650- atomic_long_t call_os_wait_queue;
41651- atomic_long_t user_flush_tlb;
41652- atomic_long_t user_unload_context;
41653- atomic_long_t user_exception;
41654- atomic_long_t set_context_option;
41655- atomic_long_t check_context_retarget_intr;
41656- atomic_long_t check_context_unload;
41657- atomic_long_t tlb_dropin;
41658- atomic_long_t tlb_preload_page;
41659- atomic_long_t tlb_dropin_fail_no_asid;
41660- atomic_long_t tlb_dropin_fail_upm;
41661- atomic_long_t tlb_dropin_fail_invalid;
41662- atomic_long_t tlb_dropin_fail_range_active;
41663- atomic_long_t tlb_dropin_fail_idle;
41664- atomic_long_t tlb_dropin_fail_fmm;
41665- atomic_long_t tlb_dropin_fail_no_exception;
41666- atomic_long_t tfh_stale_on_fault;
41667- atomic_long_t mmu_invalidate_range;
41668- atomic_long_t mmu_invalidate_page;
41669- atomic_long_t flush_tlb;
41670- atomic_long_t flush_tlb_gru;
41671- atomic_long_t flush_tlb_gru_tgh;
41672- atomic_long_t flush_tlb_gru_zero_asid;
41673+ atomic_long_unchecked_t vdata_alloc;
41674+ atomic_long_unchecked_t vdata_free;
41675+ atomic_long_unchecked_t gts_alloc;
41676+ atomic_long_unchecked_t gts_free;
41677+ atomic_long_unchecked_t gms_alloc;
41678+ atomic_long_unchecked_t gms_free;
41679+ atomic_long_unchecked_t gts_double_allocate;
41680+ atomic_long_unchecked_t assign_context;
41681+ atomic_long_unchecked_t assign_context_failed;
41682+ atomic_long_unchecked_t free_context;
41683+ atomic_long_unchecked_t load_user_context;
41684+ atomic_long_unchecked_t load_kernel_context;
41685+ atomic_long_unchecked_t lock_kernel_context;
41686+ atomic_long_unchecked_t unlock_kernel_context;
41687+ atomic_long_unchecked_t steal_user_context;
41688+ atomic_long_unchecked_t steal_kernel_context;
41689+ atomic_long_unchecked_t steal_context_failed;
41690+ atomic_long_unchecked_t nopfn;
41691+ atomic_long_unchecked_t asid_new;
41692+ atomic_long_unchecked_t asid_next;
41693+ atomic_long_unchecked_t asid_wrap;
41694+ atomic_long_unchecked_t asid_reuse;
41695+ atomic_long_unchecked_t intr;
41696+ atomic_long_unchecked_t intr_cbr;
41697+ atomic_long_unchecked_t intr_tfh;
41698+ atomic_long_unchecked_t intr_spurious;
41699+ atomic_long_unchecked_t intr_mm_lock_failed;
41700+ atomic_long_unchecked_t call_os;
41701+ atomic_long_unchecked_t call_os_wait_queue;
41702+ atomic_long_unchecked_t user_flush_tlb;
41703+ atomic_long_unchecked_t user_unload_context;
41704+ atomic_long_unchecked_t user_exception;
41705+ atomic_long_unchecked_t set_context_option;
41706+ atomic_long_unchecked_t check_context_retarget_intr;
41707+ atomic_long_unchecked_t check_context_unload;
41708+ atomic_long_unchecked_t tlb_dropin;
41709+ atomic_long_unchecked_t tlb_preload_page;
41710+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
41711+ atomic_long_unchecked_t tlb_dropin_fail_upm;
41712+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
41713+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
41714+ atomic_long_unchecked_t tlb_dropin_fail_idle;
41715+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
41716+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
41717+ atomic_long_unchecked_t tfh_stale_on_fault;
41718+ atomic_long_unchecked_t mmu_invalidate_range;
41719+ atomic_long_unchecked_t mmu_invalidate_page;
41720+ atomic_long_unchecked_t flush_tlb;
41721+ atomic_long_unchecked_t flush_tlb_gru;
41722+ atomic_long_unchecked_t flush_tlb_gru_tgh;
41723+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
41724
41725- atomic_long_t copy_gpa;
41726- atomic_long_t read_gpa;
41727+ atomic_long_unchecked_t copy_gpa;
41728+ atomic_long_unchecked_t read_gpa;
41729
41730- atomic_long_t mesq_receive;
41731- atomic_long_t mesq_receive_none;
41732- atomic_long_t mesq_send;
41733- atomic_long_t mesq_send_failed;
41734- atomic_long_t mesq_noop;
41735- atomic_long_t mesq_send_unexpected_error;
41736- atomic_long_t mesq_send_lb_overflow;
41737- atomic_long_t mesq_send_qlimit_reached;
41738- atomic_long_t mesq_send_amo_nacked;
41739- atomic_long_t mesq_send_put_nacked;
41740- atomic_long_t mesq_page_overflow;
41741- atomic_long_t mesq_qf_locked;
41742- atomic_long_t mesq_qf_noop_not_full;
41743- atomic_long_t mesq_qf_switch_head_failed;
41744- atomic_long_t mesq_qf_unexpected_error;
41745- atomic_long_t mesq_noop_unexpected_error;
41746- atomic_long_t mesq_noop_lb_overflow;
41747- atomic_long_t mesq_noop_qlimit_reached;
41748- atomic_long_t mesq_noop_amo_nacked;
41749- atomic_long_t mesq_noop_put_nacked;
41750- atomic_long_t mesq_noop_page_overflow;
41751+ atomic_long_unchecked_t mesq_receive;
41752+ atomic_long_unchecked_t mesq_receive_none;
41753+ atomic_long_unchecked_t mesq_send;
41754+ atomic_long_unchecked_t mesq_send_failed;
41755+ atomic_long_unchecked_t mesq_noop;
41756+ atomic_long_unchecked_t mesq_send_unexpected_error;
41757+ atomic_long_unchecked_t mesq_send_lb_overflow;
41758+ atomic_long_unchecked_t mesq_send_qlimit_reached;
41759+ atomic_long_unchecked_t mesq_send_amo_nacked;
41760+ atomic_long_unchecked_t mesq_send_put_nacked;
41761+ atomic_long_unchecked_t mesq_page_overflow;
41762+ atomic_long_unchecked_t mesq_qf_locked;
41763+ atomic_long_unchecked_t mesq_qf_noop_not_full;
41764+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
41765+ atomic_long_unchecked_t mesq_qf_unexpected_error;
41766+ atomic_long_unchecked_t mesq_noop_unexpected_error;
41767+ atomic_long_unchecked_t mesq_noop_lb_overflow;
41768+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
41769+ atomic_long_unchecked_t mesq_noop_amo_nacked;
41770+ atomic_long_unchecked_t mesq_noop_put_nacked;
41771+ atomic_long_unchecked_t mesq_noop_page_overflow;
41772
41773 };
41774
41775@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
41776 tghop_invalidate, mcsop_last};
41777
41778 struct mcs_op_statistic {
41779- atomic_long_t count;
41780- atomic_long_t total;
41781+ atomic_long_unchecked_t count;
41782+ atomic_long_unchecked_t total;
41783 unsigned long max;
41784 };
41785
41786@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
41787
41788 #define STAT(id) do { \
41789 if (gru_options & OPT_STATS) \
41790- atomic_long_inc(&gru_stats.id); \
41791+ atomic_long_inc_unchecked(&gru_stats.id); \
41792 } while (0)
41793
41794 #ifdef CONFIG_SGI_GRU_DEBUG
41795diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
41796index c862cd4..0d176fe 100644
41797--- a/drivers/misc/sgi-xp/xp.h
41798+++ b/drivers/misc/sgi-xp/xp.h
41799@@ -288,7 +288,7 @@ struct xpc_interface {
41800 xpc_notify_func, void *);
41801 void (*received) (short, int, void *);
41802 enum xp_retval (*partid_to_nasids) (short, void *);
41803-};
41804+} __no_const;
41805
41806 extern struct xpc_interface xpc_interface;
41807
41808diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
41809index b94d5f7..7f494c5 100644
41810--- a/drivers/misc/sgi-xp/xpc.h
41811+++ b/drivers/misc/sgi-xp/xpc.h
41812@@ -835,6 +835,7 @@ struct xpc_arch_operations {
41813 void (*received_payload) (struct xpc_channel *, void *);
41814 void (*notify_senders_of_disconnect) (struct xpc_channel *);
41815 };
41816+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
41817
41818 /* struct xpc_partition act_state values (for XPC HB) */
41819
41820@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
41821 /* found in xpc_main.c */
41822 extern struct device *xpc_part;
41823 extern struct device *xpc_chan;
41824-extern struct xpc_arch_operations xpc_arch_ops;
41825+extern xpc_arch_operations_no_const xpc_arch_ops;
41826 extern int xpc_disengage_timelimit;
41827 extern int xpc_disengage_timedout;
41828 extern int xpc_activate_IRQ_rcvd;
41829diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
41830index d971817..33bdca5 100644
41831--- a/drivers/misc/sgi-xp/xpc_main.c
41832+++ b/drivers/misc/sgi-xp/xpc_main.c
41833@@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = {
41834 .notifier_call = xpc_system_die,
41835 };
41836
41837-struct xpc_arch_operations xpc_arch_ops;
41838+xpc_arch_operations_no_const xpc_arch_ops;
41839
41840 /*
41841 * Timer function to enforce the timelimit on the partition disengage.
41842@@ -1210,7 +1210,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
41843
41844 if (((die_args->trapnr == X86_TRAP_MF) ||
41845 (die_args->trapnr == X86_TRAP_XF)) &&
41846- !user_mode_vm(die_args->regs))
41847+ !user_mode(die_args->regs))
41848 xpc_die_deactivate();
41849
41850 break;
41851diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
41852index 49f04bc..65660c2 100644
41853--- a/drivers/mmc/core/mmc_ops.c
41854+++ b/drivers/mmc/core/mmc_ops.c
41855@@ -247,7 +247,7 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
41856 void *data_buf;
41857 int is_on_stack;
41858
41859- is_on_stack = object_is_on_stack(buf);
41860+ is_on_stack = object_starts_on_stack(buf);
41861 if (is_on_stack) {
41862 /*
41863 * dma onto stack is unsafe/nonportable, but callers to this
41864diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
41865index 0b74189..818358f 100644
41866--- a/drivers/mmc/host/dw_mmc.h
41867+++ b/drivers/mmc/host/dw_mmc.h
41868@@ -202,5 +202,5 @@ struct dw_mci_drv_data {
41869 void (*prepare_command)(struct dw_mci *host, u32 *cmdr);
41870 void (*set_ios)(struct dw_mci *host, struct mmc_ios *ios);
41871 int (*parse_dt)(struct dw_mci *host);
41872-};
41873+} __do_const;
41874 #endif /* _DW_MMC_H_ */
41875diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
41876index c6f6246..60760a8 100644
41877--- a/drivers/mmc/host/sdhci-s3c.c
41878+++ b/drivers/mmc/host/sdhci-s3c.c
41879@@ -664,9 +664,11 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
41880 * we can use overriding functions instead of default.
41881 */
41882 if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) {
41883- sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
41884- sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
41885- sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
41886+ pax_open_kernel();
41887+ *(void **)&sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
41888+ *(void **)&sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
41889+ *(void **)&sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
41890+ pax_close_kernel();
41891 }
41892
41893 /* It supports additional host capabilities if needed */
41894diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
41895index 0c8bb6b..6f35deb 100644
41896--- a/drivers/mtd/nand/denali.c
41897+++ b/drivers/mtd/nand/denali.c
41898@@ -24,6 +24,7 @@
41899 #include <linux/slab.h>
41900 #include <linux/mtd/mtd.h>
41901 #include <linux/module.h>
41902+#include <linux/slab.h>
41903
41904 #include "denali.h"
41905
41906diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
41907index 51b9d6a..52af9a7 100644
41908--- a/drivers/mtd/nftlmount.c
41909+++ b/drivers/mtd/nftlmount.c
41910@@ -24,6 +24,7 @@
41911 #include <asm/errno.h>
41912 #include <linux/delay.h>
41913 #include <linux/slab.h>
41914+#include <linux/sched.h>
41915 #include <linux/mtd/mtd.h>
41916 #include <linux/mtd/nand.h>
41917 #include <linux/mtd/nftl.h>
41918diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
41919index f9d5615..99dd95f 100644
41920--- a/drivers/mtd/sm_ftl.c
41921+++ b/drivers/mtd/sm_ftl.c
41922@@ -56,7 +56,7 @@ ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
41923 #define SM_CIS_VENDOR_OFFSET 0x59
41924 struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
41925 {
41926- struct attribute_group *attr_group;
41927+ attribute_group_no_const *attr_group;
41928 struct attribute **attributes;
41929 struct sm_sysfs_attribute *vendor_attribute;
41930
41931diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
41932index f975696..4597e21 100644
41933--- a/drivers/net/bonding/bond_main.c
41934+++ b/drivers/net/bonding/bond_main.c
41935@@ -4870,7 +4870,7 @@ static unsigned int bond_get_num_tx_queues(void)
41936 return tx_queues;
41937 }
41938
41939-static struct rtnl_link_ops bond_link_ops __read_mostly = {
41940+static struct rtnl_link_ops bond_link_ops = {
41941 .kind = "bond",
41942 .priv_size = sizeof(struct bonding),
41943 .setup = bond_setup,
41944@@ -4995,8 +4995,8 @@ static void __exit bonding_exit(void)
41945
41946 bond_destroy_debugfs();
41947
41948- rtnl_link_unregister(&bond_link_ops);
41949 unregister_pernet_subsys(&bond_net_ops);
41950+ rtnl_link_unregister(&bond_link_ops);
41951
41952 #ifdef CONFIG_NET_POLL_CONTROLLER
41953 /*
41954diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
41955index 25723d8..925ab8e 100644
41956--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
41957+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
41958@@ -649,7 +649,7 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
41959 if ((mc->ptr + rec_len) > mc->end)
41960 goto decode_failed;
41961
41962- memcpy(cf->data, mc->ptr, rec_len);
41963+ memcpy(cf->data, mc->ptr, cf->can_dlc);
41964 mc->ptr += rec_len;
41965 }
41966
41967diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
41968index e1d2643..7f4133b 100644
41969--- a/drivers/net/ethernet/8390/ax88796.c
41970+++ b/drivers/net/ethernet/8390/ax88796.c
41971@@ -872,9 +872,11 @@ static int ax_probe(struct platform_device *pdev)
41972 if (ax->plat->reg_offsets)
41973 ei_local->reg_offset = ax->plat->reg_offsets;
41974 else {
41975+ resource_size_t _mem_size = mem_size;
41976+ do_div(_mem_size, 0x18);
41977 ei_local->reg_offset = ax->reg_offsets;
41978 for (ret = 0; ret < 0x18; ret++)
41979- ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
41980+ ax->reg_offsets[ret] = _mem_size * ret;
41981 }
41982
41983 if (!request_mem_region(mem->start, mem_size, pdev->name)) {
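
mem_size above is resource_size_t, which is 64-bit on many 32-bit configurations; a plain '/' would make gcc emit a call to a libgcc 64-bit-division helper that the kernel does not link, so the hunk routes the division through do_div(), which divides in place and returns the remainder. A sketch mirroring the hunk:

    #include <asm/div64.h>
    #include <linux/types.h>

    static resource_size_t reg_stride(resource_size_t mem_size)
    {
            resource_size_t stride = mem_size;

            do_div(stride, 0x18);   /* stride = mem_size / 0x18 */
            return stride;
    }
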
41984diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
41985index 151675d..0139a9d 100644
41986--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
41987+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
41988@@ -1112,7 +1112,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
41989 static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
41990 {
41991 /* RX_MODE controlling object */
41992- bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
41993+ bnx2x_init_rx_mode_obj(bp);
41994
41995 /* multicast configuration controlling object */
41996 bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
41997diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
41998index ce1a916..10b52b0 100644
41999--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
42000+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
42001@@ -960,6 +960,9 @@ static int bnx2x_set_dump(struct net_device *dev, struct ethtool_dump *val)
42002 struct bnx2x *bp = netdev_priv(dev);
42003
42004 /* Use the ethtool_dump "flag" field as the dump preset index */
42005+ if (val->flag < 1 || val->flag > DUMP_MAX_PRESETS)
42006+ return -EINVAL;
42007+
42008 bp->dump_preset_idx = val->flag;
42009 return 0;
42010 }
42011@@ -986,8 +989,6 @@ static int bnx2x_get_dump_data(struct net_device *dev,
42012 struct bnx2x *bp = netdev_priv(dev);
42013 struct dump_header dump_hdr = {0};
42014
42015- memset(p, 0, dump->len);
42016-
42017 /* Disable parity attentions as long as following dump may
42018 * cause false alarms by reading never written registers. We
42019 * will re-enable parity attentions right after the dump.
42020diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
42021index b4c9dea..2a9927f 100644
42022--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
42023+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
42024@@ -11497,6 +11497,8 @@ static int bnx2x_init_bp(struct bnx2x *bp)
42025 bp->min_msix_vec_cnt = 2;
42026 BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d", bp->min_msix_vec_cnt);
42027
42028+ bp->dump_preset_idx = 1;
42029+
42030 return rc;
42031 }
42032
42033diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
42034index 32a9609..0b1c53a 100644
42035--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
42036+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
42037@@ -2387,15 +2387,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp,
42038 return rc;
42039 }
42040
42041-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
42042- struct bnx2x_rx_mode_obj *o)
42043+void bnx2x_init_rx_mode_obj(struct bnx2x *bp)
42044 {
42045 if (CHIP_IS_E1x(bp)) {
42046- o->wait_comp = bnx2x_empty_rx_mode_wait;
42047- o->config_rx_mode = bnx2x_set_rx_mode_e1x;
42048+ bp->rx_mode_obj.wait_comp = bnx2x_empty_rx_mode_wait;
42049+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x;
42050 } else {
42051- o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
42052- o->config_rx_mode = bnx2x_set_rx_mode_e2;
42053+ bp->rx_mode_obj.wait_comp = bnx2x_wait_rx_mode_comp_e2;
42054+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2;
42055 }
42056 }
42057
42058diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
42059index 43c00bc..dd1d03d 100644
42060--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
42061+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
42062@@ -1321,8 +1321,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp,
42063
42064 /********************* RX MODE ****************/
42065
42066-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
42067- struct bnx2x_rx_mode_obj *o);
42068+void bnx2x_init_rx_mode_obj(struct bnx2x *bp);
42069
42070 /**
42071 * bnx2x_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters.
42072diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
42073index ff6e30e..87e8452 100644
42074--- a/drivers/net/ethernet/broadcom/tg3.h
42075+++ b/drivers/net/ethernet/broadcom/tg3.h
42076@@ -147,6 +147,7 @@
42077 #define CHIPREV_ID_5750_A0 0x4000
42078 #define CHIPREV_ID_5750_A1 0x4001
42079 #define CHIPREV_ID_5750_A3 0x4003
42080+#define CHIPREV_ID_5750_C1 0x4201
42081 #define CHIPREV_ID_5750_C2 0x4202
42082 #define CHIPREV_ID_5752_A0_HW 0x5000
42083 #define CHIPREV_ID_5752_A0 0x6000
42084diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
42085index 71497e8..b650951 100644
42086--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
42087+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
42088@@ -3037,7 +3037,9 @@ static void t3_io_resume(struct pci_dev *pdev)
42089 CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
42090 t3_read_reg(adapter, A_PCIE_PEX_ERR));
42091
42092+ rtnl_lock();
42093 t3_resume_ports(adapter);
42094+ rtnl_unlock();
42095 }
42096
42097 static const struct pci_error_handlers t3_err_handler = {
42098diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
42099index 8cffcdf..aadf043 100644
42100--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
42101+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
42102@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
42103 */
42104 struct l2t_skb_cb {
42105 arp_failure_handler_func arp_failure_handler;
42106-};
42107+} __no_const;
42108
42109 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
42110
42111diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
42112index 4c83003..2a2a5b9 100644
42113--- a/drivers/net/ethernet/dec/tulip/de4x5.c
42114+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
42115@@ -5388,7 +5388,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
42116 for (i=0; i<ETH_ALEN; i++) {
42117 tmp.addr[i] = dev->dev_addr[i];
42118 }
42119- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
42120+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
42121 break;
42122
42123 case DE4X5_SET_HWADDR: /* Set the hardware address */
42124@@ -5428,7 +5428,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
42125 spin_lock_irqsave(&lp->lock, flags);
42126 memcpy(&statbuf, &lp->pktStats, ioc->len);
42127 spin_unlock_irqrestore(&lp->lock, flags);
42128- if (copy_to_user(ioc->data, &statbuf, ioc->len))
42129+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
42130 return -EFAULT;
42131 break;
42132 }
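
ioc->len in the de4x5 ioctl above is user-controlled; without the added bound, copy_to_user() would read past tmp.addr or statbuf and leak adjacent kernel stack memory. A kernel-style sketch of the fixed shape, with illustrative names:

    #include <linux/errno.h>
    #include <linux/uaccess.h>

    static int copy_stats_out(void __user *dst, size_t user_len)
    {
            char statbuf[64] = { 0 };       /* fixed-size kernel buffer */

            /* Reject oversize requests before copying, as the hunk does. */
            if (user_len > sizeof(statbuf) ||
                copy_to_user(dst, statbuf, user_len))
                    return -EFAULT;
            return 0;
    }
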
42133diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
42134index 6e43426..1bd8365 100644
42135--- a/drivers/net/ethernet/emulex/benet/be_main.c
42136+++ b/drivers/net/ethernet/emulex/benet/be_main.c
42137@@ -469,7 +469,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
42138
42139 if (wrapped)
42140 newacc += 65536;
42141- ACCESS_ONCE(*acc) = newacc;
42142+ ACCESS_ONCE_RW(*acc) = newacc;
42143 }
42144
42145 void populate_erx_stats(struct be_adapter *adapter,
42146diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
42147index 21b85fb..b49e5fc 100644
42148--- a/drivers/net/ethernet/faraday/ftgmac100.c
42149+++ b/drivers/net/ethernet/faraday/ftgmac100.c
42150@@ -31,6 +31,8 @@
42151 #include <linux/netdevice.h>
42152 #include <linux/phy.h>
42153 #include <linux/platform_device.h>
42154+#include <linux/interrupt.h>
42155+#include <linux/irqreturn.h>
42156 #include <net/ip.h>
42157
42158 #include "ftgmac100.h"
42159diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
42160index a6eda8d..935d273 100644
42161--- a/drivers/net/ethernet/faraday/ftmac100.c
42162+++ b/drivers/net/ethernet/faraday/ftmac100.c
42163@@ -31,6 +31,8 @@
42164 #include <linux/module.h>
42165 #include <linux/netdevice.h>
42166 #include <linux/platform_device.h>
42167+#include <linux/interrupt.h>
42168+#include <linux/irqreturn.h>
42169
42170 #include "ftmac100.h"
42171
42172diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
42173index 331987d..3be1135 100644
42174--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
42175+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
42176@@ -776,7 +776,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
42177 }
42178
42179 /* update the base incval used to calculate frequency adjustment */
42180- ACCESS_ONCE(adapter->base_incval) = incval;
42181+ ACCESS_ONCE_RW(adapter->base_incval) = incval;
42182 smp_mb();
42183
42184 /* need lock to prevent incorrect read while modifying cyclecounter */
42185diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
42186index fbe5363..266b4e3 100644
42187--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
42188+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
42189@@ -3461,7 +3461,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
42190 struct __vxge_hw_fifo *fifo;
42191 struct vxge_hw_fifo_config *config;
42192 u32 txdl_size, txdl_per_memblock;
42193- struct vxge_hw_mempool_cbs fifo_mp_callback;
42194+ static struct vxge_hw_mempool_cbs fifo_mp_callback = {
42195+ .item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
42196+ };
42197+
42198 struct __vxge_hw_virtualpath *vpath;
42199
42200 if ((vp == NULL) || (attr == NULL)) {
42201@@ -3544,8 +3547,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
42202 goto exit;
42203 }
42204
42205- fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
42206-
42207 fifo->mempool =
42208 __vxge_hw_mempool_create(vpath->hldev,
42209 fifo->config->memblock_size,
42210diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
42211index 5e7fb1d..f8d1810 100644
42212--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
42213+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
42214@@ -1948,7 +1948,9 @@ int qlcnic_83xx_config_default_opmode(struct qlcnic_adapter *adapter)
42215 op_mode = QLC_83XX_DEFAULT_OPMODE;
42216
42217 if (op_mode == QLC_83XX_DEFAULT_OPMODE) {
42218- adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
42219+ pax_open_kernel();
42220+ *(void **)&adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
42221+ pax_close_kernel();
42222 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
42223 } else {
42224 return -EIO;
42225diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
42226index b0c3de9..fc5857e 100644
42227--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
42228+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
42229@@ -200,15 +200,21 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
42230 if (priv_level == QLCNIC_NON_PRIV_FUNC) {
42231 ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
42232 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
42233- nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
42234+ pax_open_kernel();
42235+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
42236+ pax_close_kernel();
42237 } else if (priv_level == QLCNIC_PRIV_FUNC) {
42238 ahw->op_mode = QLCNIC_PRIV_FUNC;
42239 ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry;
42240- nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
42241+ pax_open_kernel();
42242+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
42243+ pax_close_kernel();
42244 } else if (priv_level == QLCNIC_MGMT_FUNC) {
42245 ahw->op_mode = QLCNIC_MGMT_FUNC;
42246 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
42247- nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
42248+ pax_open_kernel();
42249+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
42250+ pax_close_kernel();
42251 } else {
42252 return -EIO;
42253 }
42254diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
42255index 6acf82b..14b097e 100644
42256--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
42257+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
42258@@ -206,10 +206,10 @@ int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *adapter)
42259 if (err) {
42260 dev_info(&adapter->pdev->dev,
42261 "Failed to set driver version in firmware\n");
42262- return -EIO;
42263+ err = -EIO;
42264 }
42265-
42266- return 0;
42267+ qlcnic_free_mbx_args(&cmd);
42268+ return err;
42269 }
42270
42271 int
42272diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
42273index d3f8797..82a03d3 100644
42274--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
42275+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
42276@@ -262,7 +262,7 @@ void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
42277
42278 mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
42279 mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
42280- memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN);
42281+ memcpy(mac_req->mac_addr, uaddr, ETH_ALEN);
42282
42283 vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
42284 vlan_req->vlan_id = cpu_to_le16(vlan_id);
42285diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
42286index 887aebe..9095ff9 100644
42287--- a/drivers/net/ethernet/realtek/8139cp.c
42288+++ b/drivers/net/ethernet/realtek/8139cp.c
42289@@ -524,6 +524,7 @@ rx_status_loop:
42290 PCI_DMA_FROMDEVICE);
42291 if (dma_mapping_error(&cp->pdev->dev, new_mapping)) {
42292 dev->stats.rx_dropped++;
42293+ kfree_skb(new_skb);
42294 goto rx_next;
42295 }
42296
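
The one-line 8139cp change above fixes a memory leak rather than adding hardening: when dma_mapping_error() fires, the freshly allocated replacement skb used to be abandoned. The ownership rule it enforces, sketched as a helper with a made-up shape (the skb and DMA calls themselves are real kernel APIs):

    #include <linux/dma-mapping.h>
    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    static struct sk_buff *alloc_mapped_skb(struct net_device *dev,
                                            struct device *d, unsigned int len,
                                            dma_addr_t *mapping)
    {
            struct sk_buff *skb = netdev_alloc_skb_ip_align(dev, len);

            if (!skb)
                    return NULL;            /* nothing allocated, nothing to free */
            *mapping = dma_map_single(d, skb->data, len, DMA_FROM_DEVICE);
            if (dma_mapping_error(d, *mapping)) {
                    kfree_skb(skb);         /* the fix: free our own allocation */
                    return NULL;
            }
            return skb;
    }
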
42297diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
42298index 393f961..d343034 100644
42299--- a/drivers/net/ethernet/realtek/r8169.c
42300+++ b/drivers/net/ethernet/realtek/r8169.c
42301@@ -753,22 +753,22 @@ struct rtl8169_private {
42302 struct mdio_ops {
42303 void (*write)(struct rtl8169_private *, int, int);
42304 int (*read)(struct rtl8169_private *, int);
42305- } mdio_ops;
42306+ } __no_const mdio_ops;
42307
42308 struct pll_power_ops {
42309 void (*down)(struct rtl8169_private *);
42310 void (*up)(struct rtl8169_private *);
42311- } pll_power_ops;
42312+ } __no_const pll_power_ops;
42313
42314 struct jumbo_ops {
42315 void (*enable)(struct rtl8169_private *);
42316 void (*disable)(struct rtl8169_private *);
42317- } jumbo_ops;
42318+ } __no_const jumbo_ops;
42319
42320 struct csi_ops {
42321 void (*write)(struct rtl8169_private *, int, int);
42322 u32 (*read)(struct rtl8169_private *, int);
42323- } csi_ops;
42324+ } __no_const csi_ops;
42325
42326 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
42327 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
42328diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
42329index 9a95abf..36df7f9 100644
42330--- a/drivers/net/ethernet/sfc/ptp.c
42331+++ b/drivers/net/ethernet/sfc/ptp.c
42332@@ -535,7 +535,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
42333 (u32)((u64)ptp->start.dma_addr >> 32));
42334
42335 /* Clear flag that signals MC ready */
42336- ACCESS_ONCE(*start) = 0;
42337+ ACCESS_ONCE_RW(*start) = 0;
42338 efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
42339 MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
42340
42341diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
42342index 50617c5..b13724c 100644
42343--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
42344+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
42345@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
42346
42347 writel(value, ioaddr + MMC_CNTRL);
42348
42349- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
42350- MMC_CNTRL, value);
42351+// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
42352+// MMC_CNTRL, value);
42353 }
42354
42355 /* To mask all all interrupts.*/
42356diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
42357index e6fe0d8..2b7d752 100644
42358--- a/drivers/net/hyperv/hyperv_net.h
42359+++ b/drivers/net/hyperv/hyperv_net.h
42360@@ -101,7 +101,7 @@ struct rndis_device {
42361
42362 enum rndis_device_state state;
42363 bool link_state;
42364- atomic_t new_req_id;
42365+ atomic_unchecked_t new_req_id;
42366
42367 spinlock_t request_lock;
42368 struct list_head req_list;
42369diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
42370index 0775f0a..d4fb316 100644
42371--- a/drivers/net/hyperv/rndis_filter.c
42372+++ b/drivers/net/hyperv/rndis_filter.c
42373@@ -104,7 +104,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
42374 * template
42375 */
42376 set = &rndis_msg->msg.set_req;
42377- set->req_id = atomic_inc_return(&dev->new_req_id);
42378+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
42379
42380 /* Add to the request list */
42381 spin_lock_irqsave(&dev->request_lock, flags);
42382@@ -752,7 +752,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
42383
42384 /* Setup the rndis set */
42385 halt = &request->request_msg.msg.halt_req;
42386- halt->req_id = atomic_inc_return(&dev->new_req_id);
42387+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
42388
42389 /* Ignore return since this msg is optional. */
42390 rndis_filter_send_request(dev, request);
42391diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c
42392index bf0d55e..82bcfbd1 100644
42393--- a/drivers/net/ieee802154/fakehard.c
42394+++ b/drivers/net/ieee802154/fakehard.c
42395@@ -364,7 +364,7 @@ static int ieee802154fake_probe(struct platform_device *pdev)
42396 phy->transmit_power = 0xbf;
42397
42398 dev->netdev_ops = &fake_ops;
42399- dev->ml_priv = &fake_mlme;
42400+ dev->ml_priv = (void *)&fake_mlme;
42401
42402 priv = netdev_priv(dev);
42403 priv->phy = phy;
42404diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
42405index 6e91931..2b0ebe7 100644
42406--- a/drivers/net/macvlan.c
42407+++ b/drivers/net/macvlan.c
42408@@ -905,13 +905,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
42409 int macvlan_link_register(struct rtnl_link_ops *ops)
42410 {
42411 /* common fields */
42412- ops->priv_size = sizeof(struct macvlan_dev);
42413- ops->validate = macvlan_validate;
42414- ops->maxtype = IFLA_MACVLAN_MAX;
42415- ops->policy = macvlan_policy;
42416- ops->changelink = macvlan_changelink;
42417- ops->get_size = macvlan_get_size;
42418- ops->fill_info = macvlan_fill_info;
42419+ pax_open_kernel();
42420+ *(size_t *)&ops->priv_size = sizeof(struct macvlan_dev);
42421+ *(void **)&ops->validate = macvlan_validate;
42422+ *(int *)&ops->maxtype = IFLA_MACVLAN_MAX;
42423+ *(const void **)&ops->policy = macvlan_policy;
42424+ *(void **)&ops->changelink = macvlan_changelink;
42425+ *(void **)&ops->get_size = macvlan_get_size;
42426+ *(void **)&ops->fill_info = macvlan_fill_info;
42427+ pax_close_kernel();
42428
42429 return rtnl_link_register(ops);
42430 };
42431@@ -967,7 +969,7 @@ static int macvlan_device_event(struct notifier_block *unused,
42432 return NOTIFY_DONE;
42433 }
42434
42435-static struct notifier_block macvlan_notifier_block __read_mostly = {
42436+static struct notifier_block macvlan_notifier_block = {
42437 .notifier_call = macvlan_device_event,
42438 };
42439
42440diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
42441index 523d6b2..5e16aa1 100644
42442--- a/drivers/net/macvtap.c
42443+++ b/drivers/net/macvtap.c
42444@@ -1110,7 +1110,7 @@ static int macvtap_device_event(struct notifier_block *unused,
42445 return NOTIFY_DONE;
42446 }
42447
42448-static struct notifier_block macvtap_notifier_block __read_mostly = {
42449+static struct notifier_block macvtap_notifier_block = {
42450 .notifier_call = macvtap_device_event,
42451 };
42452
42453diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c
42454index daec9b0..6428fcb 100644
42455--- a/drivers/net/phy/mdio-bitbang.c
42456+++ b/drivers/net/phy/mdio-bitbang.c
42457@@ -234,6 +234,7 @@ void free_mdio_bitbang(struct mii_bus *bus)
42458 struct mdiobb_ctrl *ctrl = bus->priv;
42459
42460 module_put(ctrl->ops->owner);
42461+ mdiobus_unregister(bus);
42462 mdiobus_free(bus);
42463 }
42464 EXPORT_SYMBOL(free_mdio_bitbang);
42465diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
42466index 72ff14b..11d442d 100644
42467--- a/drivers/net/ppp/ppp_generic.c
42468+++ b/drivers/net/ppp/ppp_generic.c
42469@@ -999,7 +999,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
42470 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
42471 struct ppp_stats stats;
42472 struct ppp_comp_stats cstats;
42473- char *vers;
42474
42475 switch (cmd) {
42476 case SIOCGPPPSTATS:
42477@@ -1021,8 +1020,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
42478 break;
42479
42480 case SIOCGPPPVER:
42481- vers = PPP_VERSION;
42482- if (copy_to_user(addr, vers, strlen(vers) + 1))
42483+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
42484 break;
42485 err = 0;
42486 break;
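
PPP_VERSION is a string literal, so sizeof(PPP_VERSION) is a compile-time constant that already includes the trailing NUL; the hunk above drops both the intermediate pointer and a runtime strlen() over a constant. Demonstrated in plain C (the macro value matches the real ppp_defs.h):

    #include <stdio.h>
    #include <string.h>

    #define PPP_VERSION "2.4.2"

    int main(void)
    {
            /* sizeof counts the '\0': 5 characters + terminator = 6. */
            printf("strlen+1 = %zu, sizeof = %zu\n",
                   strlen(PPP_VERSION) + 1, sizeof(PPP_VERSION));
            return 0;
    }
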
42487diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
42488index 1252d9c..80e660b 100644
42489--- a/drivers/net/slip/slhc.c
42490+++ b/drivers/net/slip/slhc.c
42491@@ -488,7 +488,7 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
42492 register struct tcphdr *thp;
42493 register struct iphdr *ip;
42494 register struct cstate *cs;
42495- int len, hdrlen;
42496+ long len, hdrlen;
42497 unsigned char *cp = icp;
42498
42499 /* We've got a compressed packet; read the change byte */
42500diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
42501index b305105..8ead6df 100644
42502--- a/drivers/net/team/team.c
42503+++ b/drivers/net/team/team.c
42504@@ -2682,7 +2682,7 @@ static int team_device_event(struct notifier_block *unused,
42505 return NOTIFY_DONE;
42506 }
42507
42508-static struct notifier_block team_notifier_block __read_mostly = {
42509+static struct notifier_block team_notifier_block = {
42510 .notifier_call = team_device_event,
42511 };
42512
42513diff --git a/drivers/net/tun.c b/drivers/net/tun.c
42514index 2491eb2..1a453eb 100644
42515--- a/drivers/net/tun.c
42516+++ b/drivers/net/tun.c
42517@@ -1076,8 +1076,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
42518 u32 rxhash;
42519
42520 if (!(tun->flags & TUN_NO_PI)) {
42521- if ((len -= sizeof(pi)) > total_len)
42522+ if (len < sizeof(pi))
42523 return -EINVAL;
42524+ len -= sizeof(pi);
42525
42526 if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi)))
42527 return -EFAULT;
42528@@ -1085,8 +1086,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
42529 }
42530
42531 if (tun->flags & TUN_VNET_HDR) {
42532- if ((len -= tun->vnet_hdr_sz) > total_len)
42533+ if (len < tun->vnet_hdr_sz)
42534 return -EINVAL;
42535+ len -= tun->vnet_hdr_sz;
42536
42537 if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso)))
42538 return -EFAULT;
42539@@ -1869,7 +1871,7 @@ unlock:
42540 }
42541
42542 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
42543- unsigned long arg, int ifreq_len)
42544+ unsigned long arg, size_t ifreq_len)
42545 {
42546 struct tun_file *tfile = file->private_data;
42547 struct tun_struct *tun;
42548@@ -1881,6 +1883,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
42549 int vnet_hdr_sz;
42550 int ret;
42551
42552+ if (ifreq_len > sizeof ifr)
42553+ return -EFAULT;
42554+
42555 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
42556 if (copy_from_user(&ifr, argp, ifreq_len))
42557 return -EFAULT;
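
The tun_get_user hunks above replace `(len -= sizeof(pi)) > total_len` with a lower-bound check before the subtraction. The old form relied on the unsigned subtraction wrapping to a huge value in order to fail the comparison; the rewritten form states the invariant directly and never leaves a wrapped length in `len`. A standalone illustration of the wraparound (plain C, made-up sizes):

    #include <stdio.h>
    #include <stddef.h>

    int main(void)
    {
        size_t len = 2, total_len = 2;
        const size_t pi_len = 4;    /* pretend sizeof(struct tun_pi) */

        /* Old pattern: mutate first, compare after. The unsigned
         * subtraction wraps, and correctness depends on the wrapped
         * value comparing greater than total_len. */
        size_t wrapped = len - pi_len;
        printf("len - pi_len wraps to %zu (> %zu, rejected by luck)\n",
               wrapped, total_len);

        /* Fixed pattern: validate before subtracting; len stays sane. */
        if (len < pi_len) {
            puts("short packet rejected cleanly");
            return 1;
        }
        len -= pi_len;
        return 0;
    }
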
42558diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
42559index cba1d46..f703766 100644
42560--- a/drivers/net/usb/hso.c
42561+++ b/drivers/net/usb/hso.c
42562@@ -71,7 +71,7 @@
42563 #include <asm/byteorder.h>
42564 #include <linux/serial_core.h>
42565 #include <linux/serial.h>
42566-
42567+#include <asm/local.h>
42568
42569 #define MOD_AUTHOR "Option Wireless"
42570 #define MOD_DESCRIPTION "USB High Speed Option driver"
42571@@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
42572 struct urb *urb;
42573
42574 urb = serial->rx_urb[0];
42575- if (serial->port.count > 0) {
42576+ if (atomic_read(&serial->port.count) > 0) {
42577 count = put_rxbuf_data(urb, serial);
42578 if (count == -1)
42579 return;
42580@@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
42581 DUMP1(urb->transfer_buffer, urb->actual_length);
42582
42583 /* Anyone listening? */
42584- if (serial->port.count == 0)
42585+ if (atomic_read(&serial->port.count) == 0)
42586 return;
42587
42588 if (status == 0) {
42589@@ -1298,8 +1298,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
42590 tty_port_tty_set(&serial->port, tty);
42591
42592 /* check for port already opened, if not set the termios */
42593- serial->port.count++;
42594- if (serial->port.count == 1) {
42595+ if (atomic_inc_return(&serial->port.count) == 1) {
42596 serial->rx_state = RX_IDLE;
42597 /* Force default termio settings */
42598 _hso_serial_set_termios(tty, NULL);
42599@@ -1311,7 +1310,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
42600 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
42601 if (result) {
42602 hso_stop_serial_device(serial->parent);
42603- serial->port.count--;
42604+ atomic_dec(&serial->port.count);
42605 kref_put(&serial->parent->ref, hso_serial_ref_free);
42606 }
42607 } else {
42608@@ -1348,10 +1347,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
42609
42610 /* reset the rts and dtr */
42611 /* do the actual close */
42612- serial->port.count--;
42613+ atomic_dec(&serial->port.count);
42614
42615- if (serial->port.count <= 0) {
42616- serial->port.count = 0;
42617+ if (atomic_read(&serial->port.count) <= 0) {
42618+ atomic_set(&serial->port.count, 0);
42619 tty_port_tty_set(&serial->port, NULL);
42620 if (!usb_gone)
42621 hso_stop_serial_device(serial->parent);
42622@@ -1427,7 +1426,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
42623
42624 /* the actual setup */
42625 spin_lock_irqsave(&serial->serial_lock, flags);
42626- if (serial->port.count)
42627+ if (atomic_read(&serial->port.count))
42628 _hso_serial_set_termios(tty, old);
42629 else
42630 tty->termios = *old;
42631@@ -1886,7 +1885,7 @@ static void intr_callback(struct urb *urb)
42632 D1("Pending read interrupt on port %d\n", i);
42633 spin_lock(&serial->serial_lock);
42634 if (serial->rx_state == RX_IDLE &&
42635- serial->port.count > 0) {
42636+ atomic_read(&serial->port.count) > 0) {
42637 /* Setup and send a ctrl req read on
42638 * port i */
42639 if (!serial->rx_urb_filled[0]) {
42640@@ -3057,7 +3056,7 @@ static int hso_resume(struct usb_interface *iface)
42641 /* Start all serial ports */
42642 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
42643 if (serial_table[i] && (serial_table[i]->interface == iface)) {
42644- if (dev2ser(serial_table[i])->port.count) {
42645+ if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
42646 result =
42647 hso_start_serial_device(serial_table[i], GFP_NOIO);
42648 hso_kick_transmit(dev2ser(serial_table[i]));
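
Every hso change above is the same conversion: the plain integer `port.count` becomes an atomic, so the open/close refcount cannot be lost to a race between the tty paths and the USB callbacks, and `atomic_inc_return(...) == 1` tests "first opener" without a lock. A compact userspace model of that idiom using C11 atomics (names are stand-ins, not the kernel API):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int port_count;   /* stands in for serial->port.count */

    static void serial_open(void)
    {
        /* Increment and test in one step: only the caller that takes
         * the count from 0 to 1 performs the one-time device setup. */
        if (atomic_fetch_add(&port_count, 1) + 1 == 1)
            puts("first open: set termios, start device");
    }

    static void serial_close(void)
    {
        if (atomic_fetch_sub(&port_count, 1) - 1 <= 0) {
            atomic_store(&port_count, 0);
            puts("last close: stop device");
        }
    }

    int main(void)
    {
        serial_open();
        serial_open();   /* second opener skips the setup */
        serial_close();
        serial_close();
        return 0;
    }
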
42649diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
42650index 57325f3..36b181f 100644
42651--- a/drivers/net/vxlan.c
42652+++ b/drivers/net/vxlan.c
42653@@ -1579,7 +1579,7 @@ nla_put_failure:
42654 return -EMSGSIZE;
42655 }
42656
42657-static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
42658+static struct rtnl_link_ops vxlan_link_ops = {
42659 .kind = "vxlan",
42660 .maxtype = IFLA_VXLAN_MAX,
42661 .policy = vxlan_policy,
42662diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
42663index 34c8a33..3261fdc 100644
42664--- a/drivers/net/wireless/at76c50x-usb.c
42665+++ b/drivers/net/wireless/at76c50x-usb.c
42666@@ -353,7 +353,7 @@ static int at76_dfu_get_state(struct usb_device *udev, u8 *state)
42667 }
42668
42669 /* Convert timeout from the DFU status to jiffies */
42670-static inline unsigned long at76_get_timeout(struct dfu_status *s)
42671+static inline unsigned long __intentional_overflow(-1) at76_get_timeout(struct dfu_status *s)
42672 {
42673 return msecs_to_jiffies((s->poll_timeout[2] << 16)
42674 | (s->poll_timeout[1] << 8)
42675diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
42676index 8d78253..bebbb68 100644
42677--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
42678+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
42679@@ -184,8 +184,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
42680 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
42681 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
42682
42683- ACCESS_ONCE(ads->ds_link) = i->link;
42684- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
42685+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
42686+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
42687
42688 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
42689 ctl6 = SM(i->keytype, AR_EncrType);
42690@@ -199,26 +199,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
42691
42692 if ((i->is_first || i->is_last) &&
42693 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
42694- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
42695+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
42696 | set11nTries(i->rates, 1)
42697 | set11nTries(i->rates, 2)
42698 | set11nTries(i->rates, 3)
42699 | (i->dur_update ? AR_DurUpdateEna : 0)
42700 | SM(0, AR_BurstDur);
42701
42702- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
42703+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
42704 | set11nRate(i->rates, 1)
42705 | set11nRate(i->rates, 2)
42706 | set11nRate(i->rates, 3);
42707 } else {
42708- ACCESS_ONCE(ads->ds_ctl2) = 0;
42709- ACCESS_ONCE(ads->ds_ctl3) = 0;
42710+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
42711+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
42712 }
42713
42714 if (!i->is_first) {
42715- ACCESS_ONCE(ads->ds_ctl0) = 0;
42716- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
42717- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
42718+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
42719+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
42720+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
42721 return;
42722 }
42723
42724@@ -243,7 +243,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
42725 break;
42726 }
42727
42728- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
42729+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
42730 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
42731 | SM(i->txpower, AR_XmitPower)
42732 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
42733@@ -253,19 +253,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
42734 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
42735 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
42736
42737- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
42738- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
42739+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
42740+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
42741
42742 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
42743 return;
42744
42745- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
42746+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
42747 | set11nPktDurRTSCTS(i->rates, 1);
42748
42749- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
42750+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
42751 | set11nPktDurRTSCTS(i->rates, 3);
42752
42753- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
42754+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
42755 | set11nRateFlags(i->rates, 1)
42756 | set11nRateFlags(i->rates, 2)
42757 | set11nRateFlags(i->rates, 3)
42758diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
42759index 301bf72..3f5654f 100644
42760--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
42761+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
42762@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
42763 (i->qcu << AR_TxQcuNum_S) | desc_len;
42764
42765 checksum += val;
42766- ACCESS_ONCE(ads->info) = val;
42767+ ACCESS_ONCE_RW(ads->info) = val;
42768
42769 checksum += i->link;
42770- ACCESS_ONCE(ads->link) = i->link;
42771+ ACCESS_ONCE_RW(ads->link) = i->link;
42772
42773 checksum += i->buf_addr[0];
42774- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
42775+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
42776 checksum += i->buf_addr[1];
42777- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
42778+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
42779 checksum += i->buf_addr[2];
42780- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
42781+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
42782 checksum += i->buf_addr[3];
42783- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
42784+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
42785
42786 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
42787- ACCESS_ONCE(ads->ctl3) = val;
42788+ ACCESS_ONCE_RW(ads->ctl3) = val;
42789 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
42790- ACCESS_ONCE(ads->ctl5) = val;
42791+ ACCESS_ONCE_RW(ads->ctl5) = val;
42792 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
42793- ACCESS_ONCE(ads->ctl7) = val;
42794+ ACCESS_ONCE_RW(ads->ctl7) = val;
42795 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
42796- ACCESS_ONCE(ads->ctl9) = val;
42797+ ACCESS_ONCE_RW(ads->ctl9) = val;
42798
42799 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
42800- ACCESS_ONCE(ads->ctl10) = checksum;
42801+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
42802
42803 if (i->is_first || i->is_last) {
42804- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
42805+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
42806 | set11nTries(i->rates, 1)
42807 | set11nTries(i->rates, 2)
42808 | set11nTries(i->rates, 3)
42809 | (i->dur_update ? AR_DurUpdateEna : 0)
42810 | SM(0, AR_BurstDur);
42811
42812- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
42813+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
42814 | set11nRate(i->rates, 1)
42815 | set11nRate(i->rates, 2)
42816 | set11nRate(i->rates, 3);
42817 } else {
42818- ACCESS_ONCE(ads->ctl13) = 0;
42819- ACCESS_ONCE(ads->ctl14) = 0;
42820+ ACCESS_ONCE_RW(ads->ctl13) = 0;
42821+ ACCESS_ONCE_RW(ads->ctl14) = 0;
42822 }
42823
42824 ads->ctl20 = 0;
42825@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
42826
42827 ctl17 = SM(i->keytype, AR_EncrType);
42828 if (!i->is_first) {
42829- ACCESS_ONCE(ads->ctl11) = 0;
42830- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
42831- ACCESS_ONCE(ads->ctl15) = 0;
42832- ACCESS_ONCE(ads->ctl16) = 0;
42833- ACCESS_ONCE(ads->ctl17) = ctl17;
42834- ACCESS_ONCE(ads->ctl18) = 0;
42835- ACCESS_ONCE(ads->ctl19) = 0;
42836+ ACCESS_ONCE_RW(ads->ctl11) = 0;
42837+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
42838+ ACCESS_ONCE_RW(ads->ctl15) = 0;
42839+ ACCESS_ONCE_RW(ads->ctl16) = 0;
42840+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
42841+ ACCESS_ONCE_RW(ads->ctl18) = 0;
42842+ ACCESS_ONCE_RW(ads->ctl19) = 0;
42843 return;
42844 }
42845
42846- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
42847+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
42848 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
42849 | SM(i->txpower, AR_XmitPower)
42850 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
42851@@ -135,22 +135,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
42852 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
42853 ctl12 |= SM(val, AR_PAPRDChainMask);
42854
42855- ACCESS_ONCE(ads->ctl12) = ctl12;
42856- ACCESS_ONCE(ads->ctl17) = ctl17;
42857+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
42858+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
42859
42860- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
42861+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
42862 | set11nPktDurRTSCTS(i->rates, 1);
42863
42864- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
42865+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
42866 | set11nPktDurRTSCTS(i->rates, 3);
42867
42868- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
42869+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
42870 | set11nRateFlags(i->rates, 1)
42871 | set11nRateFlags(i->rates, 2)
42872 | set11nRateFlags(i->rates, 3)
42873 | SM(i->rtscts_rate, AR_RTSCTSRate);
42874
42875- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
42876+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
42877 }
42878
42879 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
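
Both ath9k files above swap ACCESS_ONCE() for ACCESS_ONCE_RW() on every descriptor store. Assuming the usual grsecurity definitions (the authoritative ones live in the compiler.h portion of this patch), ACCESS_ONCE() gains a const qualifier so stray writes through it fail to compile, and ACCESS_ONCE_RW() is the explicit writable variant. A sketch of the distinction:

    #include <stdio.h>

    #define ACCESS_ONCE(x)    (*(volatile const __typeof__(x) *)&(x))
    #define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

    int main(void)
    {
        unsigned int reg = 0;

        ACCESS_ONCE_RW(reg) = 0xdead;       /* explicit, auditable write */
        printf("%#x\n", ACCESS_ONCE(reg));  /* reads stay as before */
        /* ACCESS_ONCE(reg) = 1;  would not compile: the lvalue is const */
        return 0;
    }
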
42880diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
42881index ae30343..a117806 100644
42882--- a/drivers/net/wireless/ath/ath9k/hw.h
42883+++ b/drivers/net/wireless/ath/ath9k/hw.h
42884@@ -652,7 +652,7 @@ struct ath_hw_private_ops {
42885
42886 /* ANI */
42887 void (*ani_cache_ini_regs)(struct ath_hw *ah);
42888-};
42889+} __no_const;
42890
42891 /**
42892 * struct ath_spec_scan - parameters for Atheros spectral scan
42893@@ -721,7 +721,7 @@ struct ath_hw_ops {
42894 struct ath_spec_scan *param);
42895 void (*spectral_scan_trigger)(struct ath_hw *ah);
42896 void (*spectral_scan_wait)(struct ath_hw *ah);
42897-};
42898+} __no_const;
42899
42900 struct ath_nf_limits {
42901 s16 max;
42902diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
42903index b37a582..680835d 100644
42904--- a/drivers/net/wireless/iwlegacy/3945-mac.c
42905+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
42906@@ -3639,7 +3639,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
42907 */
42908 if (il3945_mod_params.disable_hw_scan) {
42909 D_INFO("Disabling hw_scan\n");
42910- il3945_mac_ops.hw_scan = NULL;
42911+ pax_open_kernel();
42912+ *(void **)&il3945_mac_ops.hw_scan = NULL;
42913+ pax_close_kernel();
42914 }
42915
42916 D_INFO("*** LOAD DRIVER ***\n");
42917diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
42918index d532948..e0d8bb1 100644
42919--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
42920+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
42921@@ -203,7 +203,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
42922 {
42923 struct iwl_priv *priv = file->private_data;
42924 char buf[64];
42925- int buf_size;
42926+ size_t buf_size;
42927 u32 offset, len;
42928
42929 memset(buf, 0, sizeof(buf));
42930@@ -473,7 +473,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
42931 struct iwl_priv *priv = file->private_data;
42932
42933 char buf[8];
42934- int buf_size;
42935+ size_t buf_size;
42936 u32 reset_flag;
42937
42938 memset(buf, 0, sizeof(buf));
42939@@ -554,7 +554,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
42940 {
42941 struct iwl_priv *priv = file->private_data;
42942 char buf[8];
42943- int buf_size;
42944+ size_t buf_size;
42945 int ht40;
42946
42947 memset(buf, 0, sizeof(buf));
42948@@ -606,7 +606,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
42949 {
42950 struct iwl_priv *priv = file->private_data;
42951 char buf[8];
42952- int buf_size;
42953+ size_t buf_size;
42954 int value;
42955
42956 memset(buf, 0, sizeof(buf));
42957@@ -698,10 +698,10 @@ DEBUGFS_READ_FILE_OPS(temperature);
42958 DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
42959 DEBUGFS_READ_FILE_OPS(current_sleep_command);
42960
42961-static const char *fmt_value = " %-30s %10u\n";
42962-static const char *fmt_hex = " %-30s 0x%02X\n";
42963-static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
42964-static const char *fmt_header =
42965+static const char fmt_value[] = " %-30s %10u\n";
42966+static const char fmt_hex[] = " %-30s 0x%02X\n";
42967+static const char fmt_table[] = " %-30s %10u %10u %10u %10u\n";
42968+static const char fmt_header[] =
42969 "%-32s current cumulative delta max\n";
42970
42971 static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
42972@@ -1871,7 +1871,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
42973 {
42974 struct iwl_priv *priv = file->private_data;
42975 char buf[8];
42976- int buf_size;
42977+ size_t buf_size;
42978 int clear;
42979
42980 memset(buf, 0, sizeof(buf));
42981@@ -1916,7 +1916,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
42982 {
42983 struct iwl_priv *priv = file->private_data;
42984 char buf[8];
42985- int buf_size;
42986+ size_t buf_size;
42987 int trace;
42988
42989 memset(buf, 0, sizeof(buf));
42990@@ -1987,7 +1987,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
42991 {
42992 struct iwl_priv *priv = file->private_data;
42993 char buf[8];
42994- int buf_size;
42995+ size_t buf_size;
42996 int missed;
42997
42998 memset(buf, 0, sizeof(buf));
42999@@ -2028,7 +2028,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
43000
43001 struct iwl_priv *priv = file->private_data;
43002 char buf[8];
43003- int buf_size;
43004+ size_t buf_size;
43005 int plcp;
43006
43007 memset(buf, 0, sizeof(buf));
43008@@ -2088,7 +2088,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
43009
43010 struct iwl_priv *priv = file->private_data;
43011 char buf[8];
43012- int buf_size;
43013+ size_t buf_size;
43014 int flush;
43015
43016 memset(buf, 0, sizeof(buf));
43017@@ -2178,7 +2178,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
43018
43019 struct iwl_priv *priv = file->private_data;
43020 char buf[8];
43021- int buf_size;
43022+ size_t buf_size;
43023 int rts;
43024
43025 if (!priv->cfg->ht_params)
43026@@ -2220,7 +2220,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
43027 {
43028 struct iwl_priv *priv = file->private_data;
43029 char buf[8];
43030- int buf_size;
43031+ size_t buf_size;
43032
43033 memset(buf, 0, sizeof(buf));
43034 buf_size = min(count, sizeof(buf) - 1);
43035@@ -2254,7 +2254,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
43036 struct iwl_priv *priv = file->private_data;
43037 u32 event_log_flag;
43038 char buf[8];
43039- int buf_size;
43040+ size_t buf_size;
43041
43042 /* check that the interface is up */
43043 if (!iwl_is_ready(priv))
43044@@ -2308,7 +2308,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
43045 struct iwl_priv *priv = file->private_data;
43046 char buf[8];
43047 u32 calib_disabled;
43048- int buf_size;
43049+ size_t buf_size;
43050
43051 memset(buf, 0, sizeof(buf));
43052 buf_size = min(count, sizeof(buf) - 1);
43053diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
43054index 50ba0a4..29424e7 100644
43055--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
43056+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
43057@@ -1329,7 +1329,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
43058 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
43059
43060 char buf[8];
43061- int buf_size;
43062+ size_t buf_size;
43063 u32 reset_flag;
43064
43065 memset(buf, 0, sizeof(buf));
43066@@ -1350,7 +1350,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
43067 {
43068 struct iwl_trans *trans = file->private_data;
43069 char buf[8];
43070- int buf_size;
43071+ size_t buf_size;
43072 int csr;
43073
43074 memset(buf, 0, sizeof(buf));
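
The debugfs write handlers in the last two files all get the same one-line change: `buf_size` goes from int to size_t. `count` is already a size_t, so keeping the clamp `min(count, sizeof(buf) - 1)` unsigned end-to-end avoids any signed intermediate on a user-controlled length (and matches the kernel's type-checked min()). The shape of the pattern as a userspace sketch (memcpy standing in for copy_from_user):

    #include <stdio.h>
    #include <string.h>

    #define min(a, b) ((a) < (b) ? (a) : (b)) /* kernel min() also type-checks */

    static long dbgfs_write(const char *user_buf, size_t count)
    {
        char buf[8];
        size_t buf_size;    /* was int; now matches count's type */

        memset(buf, 0, sizeof(buf));
        buf_size = min(count, sizeof(buf) - 1);   /* unsigned on both sides */
        memcpy(buf, user_buf, buf_size);          /* copy_from_user() here */
        printf("parsed %zu byte(s): %s", buf_size, buf);
        return (long)count;
    }

    int main(void)
    {
        return dbgfs_write("42\n", 3) < 0;
    }
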
43075diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
43076index cb34c78..9fec0dc 100644
43077--- a/drivers/net/wireless/mac80211_hwsim.c
43078+++ b/drivers/net/wireless/mac80211_hwsim.c
43079@@ -2195,25 +2195,19 @@ static int __init init_mac80211_hwsim(void)
43080
43081 if (channels > 1) {
43082 hwsim_if_comb.num_different_channels = channels;
43083- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
43084- mac80211_hwsim_ops.cancel_hw_scan =
43085- mac80211_hwsim_cancel_hw_scan;
43086- mac80211_hwsim_ops.sw_scan_start = NULL;
43087- mac80211_hwsim_ops.sw_scan_complete = NULL;
43088- mac80211_hwsim_ops.remain_on_channel =
43089- mac80211_hwsim_roc;
43090- mac80211_hwsim_ops.cancel_remain_on_channel =
43091- mac80211_hwsim_croc;
43092- mac80211_hwsim_ops.add_chanctx =
43093- mac80211_hwsim_add_chanctx;
43094- mac80211_hwsim_ops.remove_chanctx =
43095- mac80211_hwsim_remove_chanctx;
43096- mac80211_hwsim_ops.change_chanctx =
43097- mac80211_hwsim_change_chanctx;
43098- mac80211_hwsim_ops.assign_vif_chanctx =
43099- mac80211_hwsim_assign_vif_chanctx;
43100- mac80211_hwsim_ops.unassign_vif_chanctx =
43101- mac80211_hwsim_unassign_vif_chanctx;
43102+ pax_open_kernel();
43103+ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
43104+ *(void **)&mac80211_hwsim_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
43105+ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
43106+ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
43107+ *(void **)&mac80211_hwsim_ops.remain_on_channel = mac80211_hwsim_roc;
43108+ *(void **)&mac80211_hwsim_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
43109+ *(void **)&mac80211_hwsim_ops.add_chanctx = mac80211_hwsim_add_chanctx;
43110+ *(void **)&mac80211_hwsim_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
43111+ *(void **)&mac80211_hwsim_ops.change_chanctx = mac80211_hwsim_change_chanctx;
43112+ *(void **)&mac80211_hwsim_ops.assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx;
43113+ *(void **)&mac80211_hwsim_ops.unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx;
43114+ pax_close_kernel();
43115 }
43116
43117 spin_lock_init(&hwsim_radio_lock);
43118diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
43119index 8169a85..7fa3b47 100644
43120--- a/drivers/net/wireless/rndis_wlan.c
43121+++ b/drivers/net/wireless/rndis_wlan.c
43122@@ -1238,7 +1238,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
43123
43124 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
43125
43126- if (rts_threshold < 0 || rts_threshold > 2347)
43127+ if (rts_threshold > 2347)
43128 rts_threshold = 2347;
43129
43130 tmp = cpu_to_le32(rts_threshold);
43131diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
43132index 7510723..5ba37f5 100644
43133--- a/drivers/net/wireless/rt2x00/rt2x00.h
43134+++ b/drivers/net/wireless/rt2x00/rt2x00.h
43135@@ -386,7 +386,7 @@ struct rt2x00_intf {
43136 * for hardware which doesn't support hardware
43137 * sequence counting.
43138 */
43139- atomic_t seqno;
43140+ atomic_unchecked_t seqno;
43141 };
43142
43143 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
43144diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
43145index d955741..8730748 100644
43146--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
43147+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
43148@@ -252,9 +252,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
43149 * sequence counter given by mac80211.
43150 */
43151 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
43152- seqno = atomic_add_return(0x10, &intf->seqno);
43153+ seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
43154 else
43155- seqno = atomic_read(&intf->seqno);
43156+ seqno = atomic_read_unchecked(&intf->seqno);
43157
43158 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
43159 hdr->seq_ctrl |= cpu_to_le16(seqno);
43160diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
43161index e2b3d9c..67a5184 100644
43162--- a/drivers/net/wireless/ti/wl1251/sdio.c
43163+++ b/drivers/net/wireless/ti/wl1251/sdio.c
43164@@ -271,13 +271,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
43165
43166 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
43167
43168- wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
43169- wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
43170+ pax_open_kernel();
43171+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
43172+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
43173+ pax_close_kernel();
43174
43175 wl1251_info("using dedicated interrupt line");
43176 } else {
43177- wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
43178- wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
43179+ pax_open_kernel();
43180+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
43181+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
43182+ pax_close_kernel();
43183
43184 wl1251_info("using SDIO interrupt");
43185 }
43186diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
43187index 1c627da..69f7d17 100644
43188--- a/drivers/net/wireless/ti/wl12xx/main.c
43189+++ b/drivers/net/wireless/ti/wl12xx/main.c
43190@@ -656,7 +656,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
43191 sizeof(wl->conf.mem));
43192
43193 /* read data preparation is only needed by wl127x */
43194- wl->ops->prepare_read = wl127x_prepare_read;
43195+ pax_open_kernel();
43196+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
43197+ pax_close_kernel();
43198
43199 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
43200 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
43201@@ -681,7 +683,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
43202 sizeof(wl->conf.mem));
43203
43204 /* read data preparation is only needed by wl127x */
43205- wl->ops->prepare_read = wl127x_prepare_read;
43206+ pax_open_kernel();
43207+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
43208+ pax_close_kernel();
43209
43210 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
43211 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
43212diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
43213index 9fa692d..b31fee0 100644
43214--- a/drivers/net/wireless/ti/wl18xx/main.c
43215+++ b/drivers/net/wireless/ti/wl18xx/main.c
43216@@ -1687,8 +1687,10 @@ static int wl18xx_setup(struct wl1271 *wl)
43217 }
43218
43219 if (!checksum_param) {
43220- wl18xx_ops.set_rx_csum = NULL;
43221- wl18xx_ops.init_vif = NULL;
43222+ pax_open_kernel();
43223+ *(void **)&wl18xx_ops.set_rx_csum = NULL;
43224+ *(void **)&wl18xx_ops.init_vif = NULL;
43225+ pax_close_kernel();
43226 }
43227
43228 /* Enable 11a Band only if we have 5G antennas */
43229diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
43230index 7ef0b4a..ff65c28 100644
43231--- a/drivers/net/wireless/zd1211rw/zd_usb.c
43232+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
43233@@ -386,7 +386,7 @@ static inline void handle_regs_int(struct urb *urb)
43234 {
43235 struct zd_usb *usb = urb->context;
43236 struct zd_usb_interrupt *intr = &usb->intr;
43237- int len;
43238+ unsigned int len;
43239 u16 int_num;
43240
43241 ZD_ASSERT(in_interrupt());
43242diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
43243index d93b2b6..ae50401 100644
43244--- a/drivers/oprofile/buffer_sync.c
43245+++ b/drivers/oprofile/buffer_sync.c
43246@@ -332,7 +332,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
43247 if (cookie == NO_COOKIE)
43248 offset = pc;
43249 if (cookie == INVALID_COOKIE) {
43250- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
43251+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
43252 offset = pc;
43253 }
43254 if (cookie != last_cookie) {
43255@@ -376,14 +376,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
43256 /* add userspace sample */
43257
43258 if (!mm) {
43259- atomic_inc(&oprofile_stats.sample_lost_no_mm);
43260+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
43261 return 0;
43262 }
43263
43264 cookie = lookup_dcookie(mm, s->eip, &offset);
43265
43266 if (cookie == INVALID_COOKIE) {
43267- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
43268+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
43269 return 0;
43270 }
43271
43272@@ -552,7 +552,7 @@ void sync_buffer(int cpu)
43273 /* ignore backtraces if failed to add a sample */
43274 if (state == sb_bt_start) {
43275 state = sb_bt_ignore;
43276- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
43277+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
43278 }
43279 }
43280 release_mm(mm);
43281diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
43282index c0cc4e7..44d4e54 100644
43283--- a/drivers/oprofile/event_buffer.c
43284+++ b/drivers/oprofile/event_buffer.c
43285@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
43286 }
43287
43288 if (buffer_pos == buffer_size) {
43289- atomic_inc(&oprofile_stats.event_lost_overflow);
43290+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
43291 return;
43292 }
43293
43294diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
43295index ed2c3ec..deda85a 100644
43296--- a/drivers/oprofile/oprof.c
43297+++ b/drivers/oprofile/oprof.c
43298@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
43299 if (oprofile_ops.switch_events())
43300 return;
43301
43302- atomic_inc(&oprofile_stats.multiplex_counter);
43303+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
43304 start_switch_worker();
43305 }
43306
43307diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
43308index 84a208d..d61b0a1 100644
43309--- a/drivers/oprofile/oprofile_files.c
43310+++ b/drivers/oprofile/oprofile_files.c
43311@@ -27,7 +27,7 @@ unsigned long oprofile_time_slice;
43312
43313 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
43314
43315-static ssize_t timeout_read(struct file *file, char __user *buf,
43316+static ssize_t __intentional_overflow(-1) timeout_read(struct file *file, char __user *buf,
43317 size_t count, loff_t *offset)
43318 {
43319 return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
43320diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
43321index 917d28e..d62d981 100644
43322--- a/drivers/oprofile/oprofile_stats.c
43323+++ b/drivers/oprofile/oprofile_stats.c
43324@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
43325 cpu_buf->sample_invalid_eip = 0;
43326 }
43327
43328- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
43329- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
43330- atomic_set(&oprofile_stats.event_lost_overflow, 0);
43331- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
43332- atomic_set(&oprofile_stats.multiplex_counter, 0);
43333+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
43334+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
43335+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
43336+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
43337+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
43338 }
43339
43340
43341diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
43342index 38b6fc0..b5cbfce 100644
43343--- a/drivers/oprofile/oprofile_stats.h
43344+++ b/drivers/oprofile/oprofile_stats.h
43345@@ -13,11 +13,11 @@
43346 #include <linux/atomic.h>
43347
43348 struct oprofile_stat_struct {
43349- atomic_t sample_lost_no_mm;
43350- atomic_t sample_lost_no_mapping;
43351- atomic_t bt_lost_no_mapping;
43352- atomic_t event_lost_overflow;
43353- atomic_t multiplex_counter;
43354+ atomic_unchecked_t sample_lost_no_mm;
43355+ atomic_unchecked_t sample_lost_no_mapping;
43356+ atomic_unchecked_t bt_lost_no_mapping;
43357+ atomic_unchecked_t event_lost_overflow;
43358+ atomic_unchecked_t multiplex_counter;
43359 };
43360
43361 extern struct oprofile_stat_struct oprofile_stats;
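
The oprofile hunks move its lost-sample counters from atomic_t to atomic_unchecked_t. Under PaX's overflow-checked refcounts, atomic_t increments trap on wrap; counters that are pure statistics, where wrap-around is harmless, are switched to the _unchecked variant to keep plain modular behaviour. A userspace model of the split (both types and both helpers are illustrative stand-ins for the PaX versions):

    #include <limits.h>
    #include <stdio.h>

    /* Userspace models of the two kernel types (not the real ones). */
    typedef struct { int counter; } atomic_t;            /* overflow traps */
    typedef struct { int counter; } atomic_unchecked_t;  /* wraps silently */

    static void atomic_inc(atomic_t *v)
    {
        if (v->counter == INT_MAX) {
            /* PaX would kill the offender here rather than let a
             * refcount wrap into a use-after-free. */
            fprintf(stderr, "refcount overflow trapped\n");
            return;
        }
        v->counter++;
    }

    static void atomic_inc_unchecked(atomic_unchecked_t *v)
    {
        /* Statistics counter: wrap-around is acceptable. The unsigned
         * round-trip keeps the wrap out of undefined behaviour in
         * this C model. */
        v->counter = (int)((unsigned int)v->counter + 1U);
    }

    int main(void)
    {
        atomic_t ref = { INT_MAX };
        atomic_unchecked_t stat = { INT_MAX };

        atomic_inc(&ref);             /* trapped, counter unchanged */
        atomic_inc_unchecked(&stat);  /* wraps to INT_MIN, by design */
        printf("ref = %d, stat = %d\n", ref.counter, stat.counter);
        return 0;
    }
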
43362diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
43363index 7c12d9c..558bf3bb 100644
43364--- a/drivers/oprofile/oprofilefs.c
43365+++ b/drivers/oprofile/oprofilefs.c
43366@@ -190,7 +190,7 @@ static const struct file_operations atomic_ro_fops = {
43367
43368
43369 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
43370- char const *name, atomic_t *val)
43371+ char const *name, atomic_unchecked_t *val)
43372 {
43373 return __oprofilefs_create_file(sb, root, name,
43374 &atomic_ro_fops, 0444, val);
43375diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
43376index 93404f7..4a313d8 100644
43377--- a/drivers/oprofile/timer_int.c
43378+++ b/drivers/oprofile/timer_int.c
43379@@ -93,7 +93,7 @@ static int __cpuinit oprofile_cpu_notify(struct notifier_block *self,
43380 return NOTIFY_OK;
43381 }
43382
43383-static struct notifier_block __refdata oprofile_cpu_notifier = {
43384+static struct notifier_block oprofile_cpu_notifier = {
43385 .notifier_call = oprofile_cpu_notify,
43386 };
43387
43388diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
43389index 92ed045..62d39bd7 100644
43390--- a/drivers/parport/procfs.c
43391+++ b/drivers/parport/procfs.c
43392@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
43393
43394 *ppos += len;
43395
43396- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
43397+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
43398 }
43399
43400 #ifdef CONFIG_PARPORT_1284
43401@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
43402
43403 *ppos += len;
43404
43405- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
43406+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
43407 }
43408 #endif /* IEEE1284.3 support. */
43409
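
Both parport procfs handlers above gain the same guard: `len` is checked against the on-stack buffer before copy_to_user(), so a miscomputed length can no longer leak adjacent stack memory. The fix in isolation, as a userspace sketch (memcpy standing in for copy_to_user, -1 for -EFAULT):

    #include <stdio.h>
    #include <string.h>

    static int copy_out(char *result, const char *buffer, size_t buf_cap,
                        size_t len)
    {
        /* Hardened form: refuse lengths beyond the source buffer
         * instead of trusting len and reading past the end of it. */
        if (len > buf_cap)
            return -1;
        memcpy(result, buffer, len);
        return 0;
    }

    int main(void)
    {
        char buffer[20] = "auto\n", result[20];

        if (copy_out(result, buffer, sizeof(buffer), 5) == 0)
            fwrite(result, 1, 5, stdout);
        return 0;
    }
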
43410diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
43411index c35e8ad..fc33beb 100644
43412--- a/drivers/pci/hotplug/acpiphp_ibm.c
43413+++ b/drivers/pci/hotplug/acpiphp_ibm.c
43414@@ -464,7 +464,9 @@ static int __init ibm_acpiphp_init(void)
43415 goto init_cleanup;
43416 }
43417
43418- ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
43419+ pax_open_kernel();
43420+ *(size_t *)&ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
43421+ pax_close_kernel();
43422 retval = sysfs_create_bin_file(sysdir, &ibm_apci_table_attr);
43423
43424 return retval;
43425diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
43426index a6a71c4..c91097b 100644
43427--- a/drivers/pci/hotplug/cpcihp_generic.c
43428+++ b/drivers/pci/hotplug/cpcihp_generic.c
43429@@ -73,7 +73,6 @@ static u16 port;
43430 static unsigned int enum_bit;
43431 static u8 enum_mask;
43432
43433-static struct cpci_hp_controller_ops generic_hpc_ops;
43434 static struct cpci_hp_controller generic_hpc;
43435
43436 static int __init validate_parameters(void)
43437@@ -139,6 +138,10 @@ static int query_enum(void)
43438 return ((value & enum_mask) == enum_mask);
43439 }
43440
43441+static struct cpci_hp_controller_ops generic_hpc_ops = {
43442+ .query_enum = query_enum,
43443+};
43444+
43445 static int __init cpcihp_generic_init(void)
43446 {
43447 int status;
43448@@ -165,7 +168,6 @@ static int __init cpcihp_generic_init(void)
43449 pci_dev_put(dev);
43450
43451 memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller));
43452- generic_hpc_ops.query_enum = query_enum;
43453 generic_hpc.ops = &generic_hpc_ops;
43454
43455 status = cpci_hp_register_controller(&generic_hpc);
43456diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
43457index 449b4bb..257e2e8 100644
43458--- a/drivers/pci/hotplug/cpcihp_zt5550.c
43459+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
43460@@ -59,7 +59,6 @@
43461 /* local variables */
43462 static bool debug;
43463 static bool poll;
43464-static struct cpci_hp_controller_ops zt5550_hpc_ops;
43465 static struct cpci_hp_controller zt5550_hpc;
43466
43467 /* Primary cPCI bus bridge device */
43468@@ -205,6 +204,10 @@ static int zt5550_hc_disable_irq(void)
43469 return 0;
43470 }
43471
43472+static struct cpci_hp_controller_ops zt5550_hpc_ops = {
43473+ .query_enum = zt5550_hc_query_enum,
43474+};
43475+
43476 static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
43477 {
43478 int status;
43479@@ -216,16 +219,17 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
43480 dbg("returned from zt5550_hc_config");
43481
43482 memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
43483- zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
43484 zt5550_hpc.ops = &zt5550_hpc_ops;
43485 if(!poll) {
43486 zt5550_hpc.irq = hc_dev->irq;
43487 zt5550_hpc.irq_flags = IRQF_SHARED;
43488 zt5550_hpc.dev_id = hc_dev;
43489
43490- zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
43491- zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
43492- zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
43493+ pax_open_kernel();
43494+ *(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
43495+ *(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
43496+ *(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
43497+ pax_close_kernel();
43498 } else {
43499 info("using ENUM# polling mode");
43500 }
43501diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
43502index 76ba8a1..20ca857 100644
43503--- a/drivers/pci/hotplug/cpqphp_nvram.c
43504+++ b/drivers/pci/hotplug/cpqphp_nvram.c
43505@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
43506
43507 void compaq_nvram_init (void __iomem *rom_start)
43508 {
43509+
43510+#ifndef CONFIG_PAX_KERNEXEC
43511 if (rom_start) {
43512 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
43513 }
43514+#endif
43515+
43516 dbg("int15 entry = %p\n", compaq_int15_entry_point);
43517
43518 /* initialize our int15 lock */
43519diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
43520index ec20f74..c1d961e 100644
43521--- a/drivers/pci/hotplug/pci_hotplug_core.c
43522+++ b/drivers/pci/hotplug/pci_hotplug_core.c
43523@@ -441,8 +441,10 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
43524 return -EINVAL;
43525 }
43526
43527- slot->ops->owner = owner;
43528- slot->ops->mod_name = mod_name;
43529+ pax_open_kernel();
43530+ *(struct module **)&slot->ops->owner = owner;
43531+ *(const char **)&slot->ops->mod_name = mod_name;
43532+ pax_close_kernel();
43533
43534 mutex_lock(&pci_hp_mutex);
43535 /*
43536diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
43537index 7d72c5e..edce02c 100644
43538--- a/drivers/pci/hotplug/pciehp_core.c
43539+++ b/drivers/pci/hotplug/pciehp_core.c
43540@@ -91,7 +91,7 @@ static int init_slot(struct controller *ctrl)
43541 struct slot *slot = ctrl->slot;
43542 struct hotplug_slot *hotplug = NULL;
43543 struct hotplug_slot_info *info = NULL;
43544- struct hotplug_slot_ops *ops = NULL;
43545+ hotplug_slot_ops_no_const *ops = NULL;
43546 char name[SLOT_NAME_SIZE];
43547 int retval = -ENOMEM;
43548
43549diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
43550index 5b4a9d9..cd5ac1f 100644
43551--- a/drivers/pci/pci-sysfs.c
43552+++ b/drivers/pci/pci-sysfs.c
43553@@ -1071,7 +1071,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
43554 {
43555 /* allocate attribute structure, piggyback attribute name */
43556 int name_len = write_combine ? 13 : 10;
43557- struct bin_attribute *res_attr;
43558+ bin_attribute_no_const *res_attr;
43559 int retval;
43560
43561 res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
43562@@ -1256,7 +1256,7 @@ static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_stor
43563 static int pci_create_capabilities_sysfs(struct pci_dev *dev)
43564 {
43565 int retval;
43566- struct bin_attribute *attr;
43567+ bin_attribute_no_const *attr;
43568
43569 /* If the device has VPD, try to expose it in sysfs. */
43570 if (dev->vpd) {
43571@@ -1303,7 +1303,7 @@ int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
43572 {
43573 int retval;
43574 int rom_size = 0;
43575- struct bin_attribute *attr;
43576+ bin_attribute_no_const *attr;
43577
43578 if (!sysfs_initialized)
43579 return -EACCES;
43580diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
43581index d1182c4..2a138ec 100644
43582--- a/drivers/pci/pci.h
43583+++ b/drivers/pci/pci.h
43584@@ -92,7 +92,7 @@ struct pci_vpd_ops {
43585 struct pci_vpd {
43586 unsigned int len;
43587 const struct pci_vpd_ops *ops;
43588- struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
43589+ bin_attribute_no_const *attr; /* descriptor for sysfs VPD entry */
43590 };
43591
43592 int pci_vpd_pci22_init(struct pci_dev *dev);
43593diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
43594index d320df6..ca9a8f6 100644
43595--- a/drivers/pci/pcie/aspm.c
43596+++ b/drivers/pci/pcie/aspm.c
43597@@ -27,9 +27,9 @@
43598 #define MODULE_PARAM_PREFIX "pcie_aspm."
43599
43600 /* Note: those are not register definitions */
43601-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
43602-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
43603-#define ASPM_STATE_L1 (4) /* L1 state */
43604+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
43605+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
43606+#define ASPM_STATE_L1 (4U) /* L1 state */
43607 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
43608 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
43609
43610diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
43611index ea37072..10e58e56 100644
43612--- a/drivers/pci/probe.c
43613+++ b/drivers/pci/probe.c
43614@@ -173,7 +173,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
43615 struct pci_bus_region region;
43616 bool bar_too_big = false, bar_disabled = false;
43617
43618- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
43619+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
43620
43621 /* No printks while decoding is disabled! */
43622 if (!dev->mmio_always_on) {
43623diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
43624index 0812608..b04018c4 100644
43625--- a/drivers/pci/proc.c
43626+++ b/drivers/pci/proc.c
43627@@ -453,7 +453,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
43628 static int __init pci_proc_init(void)
43629 {
43630 struct pci_dev *dev = NULL;
43631+
43632+#ifdef CONFIG_GRKERNSEC_PROC_ADD
43633+#ifdef CONFIG_GRKERNSEC_PROC_USER
43634+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
43635+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43636+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
43637+#endif
43638+#else
43639 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
43640+#endif
43641 proc_create("devices", 0, proc_bus_pci_dir,
43642 &proc_bus_pci_dev_operations);
43643 proc_initialized = 1;
43644diff --git a/drivers/platform/x86/chromeos_laptop.c b/drivers/platform/x86/chromeos_laptop.c
43645index 3e5b4497..dcdfb70 100644
43646--- a/drivers/platform/x86/chromeos_laptop.c
43647+++ b/drivers/platform/x86/chromeos_laptop.c
43648@@ -301,7 +301,7 @@ static int __init setup_tsl2563_als(const struct dmi_system_id *id)
43649 return 0;
43650 }
43651
43652-static struct dmi_system_id __initdata chromeos_laptop_dmi_table[] = {
43653+static struct dmi_system_id __initconst chromeos_laptop_dmi_table[] = {
43654 {
43655 .ident = "Samsung Series 5 550 - Touchpad",
43656 .matches = {
43657diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
43658index 6b22938..bc9700e 100644
43659--- a/drivers/platform/x86/msi-laptop.c
43660+++ b/drivers/platform/x86/msi-laptop.c
43661@@ -1000,12 +1000,14 @@ static int __init load_scm_model_init(struct platform_device *sdev)
43662
43663 if (!quirks->ec_read_only) {
43664 /* allow userland write sysfs file */
43665- dev_attr_bluetooth.store = store_bluetooth;
43666- dev_attr_wlan.store = store_wlan;
43667- dev_attr_threeg.store = store_threeg;
43668- dev_attr_bluetooth.attr.mode |= S_IWUSR;
43669- dev_attr_wlan.attr.mode |= S_IWUSR;
43670- dev_attr_threeg.attr.mode |= S_IWUSR;
43671+ pax_open_kernel();
43672+ *(void **)&dev_attr_bluetooth.store = store_bluetooth;
43673+ *(void **)&dev_attr_wlan.store = store_wlan;
43674+ *(void **)&dev_attr_threeg.store = store_threeg;
43675+ *(umode_t *)&dev_attr_bluetooth.attr.mode |= S_IWUSR;
43676+ *(umode_t *)&dev_attr_wlan.attr.mode |= S_IWUSR;
43677+ *(umode_t *)&dev_attr_threeg.attr.mode |= S_IWUSR;
43678+ pax_close_kernel();
43679 }
43680
43681 /* disable hardware control by fn key */
43682diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
43683index 2ac045f..39c443d 100644
43684--- a/drivers/platform/x86/sony-laptop.c
43685+++ b/drivers/platform/x86/sony-laptop.c
43686@@ -2483,7 +2483,7 @@ static void sony_nc_gfx_switch_cleanup(struct platform_device *pd)
43687 }
43688
43689 /* High speed charging function */
43690-static struct device_attribute *hsc_handle;
43691+static device_attribute_no_const *hsc_handle;
43692
43693 static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
43694 struct device_attribute *attr,
43695diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
43696index 54d31c0..3f896d3 100644
43697--- a/drivers/platform/x86/thinkpad_acpi.c
43698+++ b/drivers/platform/x86/thinkpad_acpi.c
43699@@ -2093,7 +2093,7 @@ static int hotkey_mask_get(void)
43700 return 0;
43701 }
43702
43703-void static hotkey_mask_warn_incomplete_mask(void)
43704+static void hotkey_mask_warn_incomplete_mask(void)
43705 {
43706 /* log only what the user can fix... */
43707 const u32 wantedmask = hotkey_driver_mask &
43708@@ -2324,11 +2324,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
43709 }
43710 }
43711
43712-static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
43713- struct tp_nvram_state *newn,
43714- const u32 event_mask)
43715-{
43716-
43717 #define TPACPI_COMPARE_KEY(__scancode, __member) \
43718 do { \
43719 if ((event_mask & (1 << __scancode)) && \
43720@@ -2342,36 +2337,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
43721 tpacpi_hotkey_send_key(__scancode); \
43722 } while (0)
43723
43724- void issue_volchange(const unsigned int oldvol,
43725- const unsigned int newvol)
43726- {
43727- unsigned int i = oldvol;
43728+static void issue_volchange(const unsigned int oldvol,
43729+ const unsigned int newvol,
43730+ const u32 event_mask)
43731+{
43732+ unsigned int i = oldvol;
43733
43734- while (i > newvol) {
43735- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
43736- i--;
43737- }
43738- while (i < newvol) {
43739- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
43740- i++;
43741- }
43742+ while (i > newvol) {
43743+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
43744+ i--;
43745 }
43746+ while (i < newvol) {
43747+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
43748+ i++;
43749+ }
43750+}
43751
43752- void issue_brightnesschange(const unsigned int oldbrt,
43753- const unsigned int newbrt)
43754- {
43755- unsigned int i = oldbrt;
43756+static void issue_brightnesschange(const unsigned int oldbrt,
43757+ const unsigned int newbrt,
43758+ const u32 event_mask)
43759+{
43760+ unsigned int i = oldbrt;
43761
43762- while (i > newbrt) {
43763- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
43764- i--;
43765- }
43766- while (i < newbrt) {
43767- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
43768- i++;
43769- }
43770+ while (i > newbrt) {
43771+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
43772+ i--;
43773+ }
43774+ while (i < newbrt) {
43775+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
43776+ i++;
43777 }
43778+}
43779
43780+static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
43781+ struct tp_nvram_state *newn,
43782+ const u32 event_mask)
43783+{
43784 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
43785 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
43786 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
43787@@ -2405,7 +2406,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
43788 oldn->volume_level != newn->volume_level) {
43789 /* recently muted, or repeated mute keypress, or
43790 * multiple presses ending in mute */
43791- issue_volchange(oldn->volume_level, newn->volume_level);
43792+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
43793 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
43794 }
43795 } else {
43796@@ -2415,7 +2416,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
43797 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
43798 }
43799 if (oldn->volume_level != newn->volume_level) {
43800- issue_volchange(oldn->volume_level, newn->volume_level);
43801+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
43802 } else if (oldn->volume_toggle != newn->volume_toggle) {
43803 /* repeated vol up/down keypress at end of scale ? */
43804 if (newn->volume_level == 0)
43805@@ -2428,7 +2429,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
43806 /* handle brightness */
43807 if (oldn->brightness_level != newn->brightness_level) {
43808 issue_brightnesschange(oldn->brightness_level,
43809- newn->brightness_level);
43810+ newn->brightness_level,
43811+ event_mask);
43812 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
43813 /* repeated key presses that didn't change state */
43814 if (newn->brightness_level == 0)
43815@@ -2437,10 +2439,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
43816 && !tp_features.bright_unkfw)
43817 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
43818 }
43819+}
43820
43821 #undef TPACPI_COMPARE_KEY
43822 #undef TPACPI_MAY_SEND_KEY
43823-}
43824
43825 /*
43826 * Polling driver
43827diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
43828index 769d265..a3a05ca 100644
43829--- a/drivers/pnp/pnpbios/bioscalls.c
43830+++ b/drivers/pnp/pnpbios/bioscalls.c
43831@@ -58,7 +58,7 @@ do { \
43832 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
43833 } while(0)
43834
43835-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
43836+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
43837 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
43838
43839 /*
43840@@ -95,7 +95,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
43841
43842 cpu = get_cpu();
43843 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
43844+
43845+ pax_open_kernel();
43846 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
43847+ pax_close_kernel();
43848
43849 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
43850 spin_lock_irqsave(&pnp_bios_lock, flags);
43851@@ -133,7 +136,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
43852 :"memory");
43853 spin_unlock_irqrestore(&pnp_bios_lock, flags);
43854
43855+ pax_open_kernel();
43856 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
43857+ pax_close_kernel();
43858+
43859 put_cpu();
43860
43861 /* If we get here and this is set then the PnP BIOS faulted on us. */
43862@@ -467,7 +473,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
43863 return status;
43864 }
43865
43866-void pnpbios_calls_init(union pnp_bios_install_struct *header)
43867+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
43868 {
43869 int i;
43870
43871@@ -475,6 +481,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
43872 pnp_bios_callpoint.offset = header->fields.pm16offset;
43873 pnp_bios_callpoint.segment = PNP_CS16;
43874
43875+ pax_open_kernel();
43876+
43877 for_each_possible_cpu(i) {
43878 struct desc_struct *gdt = get_cpu_gdt_table(i);
43879 if (!gdt)
43880@@ -486,4 +494,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
43881 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
43882 (unsigned long)__va(header->fields.pm16dseg));
43883 }
43884+
43885+ pax_close_kernel();
43886 }
43887diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
43888index 3e6db1c..1fbbdae 100644
43889--- a/drivers/pnp/resource.c
43890+++ b/drivers/pnp/resource.c
43891@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
43892 return 1;
43893
43894 /* check if the resource is valid */
43895- if (*irq < 0 || *irq > 15)
43896+ if (*irq > 15)
43897 return 0;
43898
43899 /* check if the resource is reserved */
43900@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
43901 return 1;
43902
43903 /* check if the resource is valid */
43904- if (*dma < 0 || *dma == 4 || *dma > 7)
43905+ if (*dma == 4 || *dma > 7)
43906 return 0;
43907
43908 /* check if the resource is reserved */
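
The two pnp/resource.c changes above drop `*irq < 0` and `*dma < 0`: both values are unsigned, so the comparison is always false and only the upper-bound test does any work. Trivial demonstration:

    #include <stdio.h>

    int main(void)
    {
        unsigned long irq = 3;

        /* With an unsigned type, irq < 0 is always false, so
         * (irq < 0 || irq > 15) reduces to the single test below. */
        if (irq > 15)
            puts("invalid IRQ");
        else
            puts("IRQ in range");
        return 0;
    }
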
43909diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
43910index 0c52e2a..3421ab7 100644
43911--- a/drivers/power/pda_power.c
43912+++ b/drivers/power/pda_power.c
43913@@ -37,7 +37,11 @@ static int polling;
43914
43915 #if IS_ENABLED(CONFIG_USB_PHY)
43916 static struct usb_phy *transceiver;
43917-static struct notifier_block otg_nb;
43918+static int otg_handle_notification(struct notifier_block *nb,
43919+ unsigned long event, void *unused);
43920+static struct notifier_block otg_nb = {
43921+ .notifier_call = otg_handle_notification
43922+};
43923 #endif
43924
43925 static struct regulator *ac_draw;
43926@@ -369,7 +373,6 @@ static int pda_power_probe(struct platform_device *pdev)
43927
43928 #if IS_ENABLED(CONFIG_USB_PHY)
43929 if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) {
43930- otg_nb.notifier_call = otg_handle_notification;
43931 ret = usb_register_notifier(transceiver, &otg_nb);
43932 if (ret) {
43933 dev_err(dev, "failure to register otg notifier\n");
43934diff --git a/drivers/power/power_supply.h b/drivers/power/power_supply.h
43935index cc439fd..8fa30df 100644
43936--- a/drivers/power/power_supply.h
43937+++ b/drivers/power/power_supply.h
43938@@ -16,12 +16,12 @@ struct power_supply;
43939
43940 #ifdef CONFIG_SYSFS
43941
43942-extern void power_supply_init_attrs(struct device_type *dev_type);
43943+extern void power_supply_init_attrs(void);
43944 extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env);
43945
43946 #else
43947
43948-static inline void power_supply_init_attrs(struct device_type *dev_type) {}
43949+static inline void power_supply_init_attrs(void) {}
43950 #define power_supply_uevent NULL
43951
43952 #endif /* CONFIG_SYSFS */
43953diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
43954index 1c517c3..ffa2f17 100644
43955--- a/drivers/power/power_supply_core.c
43956+++ b/drivers/power/power_supply_core.c
43957@@ -24,7 +24,10 @@
43958 struct class *power_supply_class;
43959 EXPORT_SYMBOL_GPL(power_supply_class);
43960
43961-static struct device_type power_supply_dev_type;
43962+extern const struct attribute_group *power_supply_attr_groups[];
43963+static struct device_type power_supply_dev_type = {
43964+ .groups = power_supply_attr_groups,
43965+};
43966
43967 static bool __power_supply_is_supplied_by(struct power_supply *supplier,
43968 struct power_supply *supply)
43969@@ -554,7 +557,7 @@ static int __init power_supply_class_init(void)
43970 return PTR_ERR(power_supply_class);
43971
43972 power_supply_class->dev_uevent = power_supply_uevent;
43973- power_supply_init_attrs(&power_supply_dev_type);
43974+ power_supply_init_attrs();
43975
43976 return 0;
43977 }
43978diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
43979index 29178f7..c65f324 100644
43980--- a/drivers/power/power_supply_sysfs.c
43981+++ b/drivers/power/power_supply_sysfs.c
43982@@ -230,17 +230,15 @@ static struct attribute_group power_supply_attr_group = {
43983 .is_visible = power_supply_attr_is_visible,
43984 };
43985
43986-static const struct attribute_group *power_supply_attr_groups[] = {
43987+const struct attribute_group *power_supply_attr_groups[] = {
43988 &power_supply_attr_group,
43989 NULL,
43990 };
43991
43992-void power_supply_init_attrs(struct device_type *dev_type)
43993+void power_supply_init_attrs(void)
43994 {
43995 int i;
43996
43997- dev_type->groups = power_supply_attr_groups;
43998-
43999 for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++)
44000 __power_supply_attrs[i] = &power_supply_attrs[i].attr;
44001 }
44002diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
44003index d428ef9..fdc0357 100644
44004--- a/drivers/regulator/max8660.c
44005+++ b/drivers/regulator/max8660.c
44006@@ -333,8 +333,10 @@ static int max8660_probe(struct i2c_client *client,
44007 max8660->shadow_regs[MAX8660_OVER1] = 5;
44008 } else {
44009 /* Otherwise devices can be toggled via software */
44010- max8660_dcdc_ops.enable = max8660_dcdc_enable;
44011- max8660_dcdc_ops.disable = max8660_dcdc_disable;
44012+ pax_open_kernel();
44013+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
44014+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
44015+ pax_close_kernel();
44016 }
44017
44018 /*
44019diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
44020index adb1414..c13e0ce 100644
44021--- a/drivers/regulator/max8973-regulator.c
44022+++ b/drivers/regulator/max8973-regulator.c
44023@@ -401,9 +401,11 @@ static int max8973_probe(struct i2c_client *client,
44024 if (!pdata->enable_ext_control) {
44025 max->desc.enable_reg = MAX8973_VOUT;
44026 max->desc.enable_mask = MAX8973_VOUT_ENABLE;
44027- max8973_dcdc_ops.enable = regulator_enable_regmap;
44028- max8973_dcdc_ops.disable = regulator_disable_regmap;
44029- max8973_dcdc_ops.is_enabled = regulator_is_enabled_regmap;
44030+ pax_open_kernel();
44031+ *(void **)&max8973_dcdc_ops.enable = regulator_enable_regmap;
44032+ *(void **)&max8973_dcdc_ops.disable = regulator_disable_regmap;
44033+ *(void **)&max8973_dcdc_ops.is_enabled = regulator_is_enabled_regmap;
44034+ pax_close_kernel();
44035 }
44036
44037 max->enable_external_control = pdata->enable_ext_control;
44038diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
44039index b716283..3cc4349 100644
44040--- a/drivers/regulator/mc13892-regulator.c
44041+++ b/drivers/regulator/mc13892-regulator.c
44042@@ -582,10 +582,12 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
44043 }
44044 mc13xxx_unlock(mc13892);
44045
44046- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
44047+ pax_open_kernel();
44048+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
44049 = mc13892_vcam_set_mode;
44050- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
44051+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
44052 = mc13892_vcam_get_mode;
44053+ pax_close_kernel();
44054
44055 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
44056 ARRAY_SIZE(mc13892_regulators));
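
The regulator hunks show the counterpart pattern: when a constified ops table genuinely must be patched once at probe time, plain member assignment no longer compiles, so the write goes through a *(void **)& cast inside a pax_open_kernel()/pax_close_kernel() window (the rtc hunks below do the same for bin_attribute sizes with *(size_t *)&). A compile-only sketch follows; the cast forms an lvalue the compiler accepts, but executing the store needs the open window, so main() deliberately never calls patch_ops():

    /* Sketch: patching a const ops table through a cast. Writing to a
     * truly const object is undefined in standard C; in the patch it is
     * made safe by the surrounding pax_open_kernel() window, which this
     * sketch does not reproduce. */
    #include <stdio.h>

    struct regulator_ops_like {
        int (*enable)(void);
    };

    static int my_enable(void) { return 1; }

    static const struct regulator_ops_like dcdc_ops;    /* in .rodata */

    static void patch_ops(void)
    {
        /* dcdc_ops.enable = my_enable;  is rejected: read-only member.
         * The cast below compiles, but the store would fault without an
         * open write window. */
        *(void **)&dcdc_ops.enable = (void *)my_enable;
    }

    int main(void)
    {
        (void)patch_ops;    /* not called: .rodata is not writable here */
        printf("enable is %s\n", dcdc_ops.enable ? "set" : "unset");
        return 0;
    }
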
44057diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
44058index f1cb706..4c7832a 100644
44059--- a/drivers/rtc/rtc-cmos.c
44060+++ b/drivers/rtc/rtc-cmos.c
44061@@ -724,7 +724,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
44062 hpet_rtc_timer_init();
44063
44064 /* export at least the first block of NVRAM */
44065- nvram.size = address_space - NVRAM_OFFSET;
44066+ pax_open_kernel();
44067+ *(size_t *)&nvram.size = address_space - NVRAM_OFFSET;
44068+ pax_close_kernel();
44069 retval = sysfs_create_bin_file(&dev->kobj, &nvram);
44070 if (retval < 0) {
44071 dev_dbg(dev, "can't create nvram file? %d\n", retval);
44072diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
44073index d049393..bb20be0 100644
44074--- a/drivers/rtc/rtc-dev.c
44075+++ b/drivers/rtc/rtc-dev.c
44076@@ -16,6 +16,7 @@
44077 #include <linux/module.h>
44078 #include <linux/rtc.h>
44079 #include <linux/sched.h>
44080+#include <linux/grsecurity.h>
44081 #include "rtc-core.h"
44082
44083 static dev_t rtc_devt;
44084@@ -347,6 +348,8 @@ static long rtc_dev_ioctl(struct file *file,
44085 if (copy_from_user(&tm, uarg, sizeof(tm)))
44086 return -EFAULT;
44087
44088+ gr_log_timechange();
44089+
44090 return rtc_set_time(rtc, &tm);
44091
44092 case RTC_PIE_ON:
44093diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
44094index b53992a..776df84 100644
44095--- a/drivers/rtc/rtc-ds1307.c
44096+++ b/drivers/rtc/rtc-ds1307.c
44097@@ -107,7 +107,7 @@ struct ds1307 {
44098 u8 offset; /* register's offset */
44099 u8 regs[11];
44100 u16 nvram_offset;
44101- struct bin_attribute *nvram;
44102+ bin_attribute_no_const *nvram;
44103 enum ds_type type;
44104 unsigned long flags;
44105 #define HAS_NVRAM 0 /* bit 0 == sysfs file active */
44106diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
44107index 130f29a..6179d03 100644
44108--- a/drivers/rtc/rtc-m48t59.c
44109+++ b/drivers/rtc/rtc-m48t59.c
44110@@ -482,7 +482,9 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
44111 goto out;
44112 }
44113
44114- m48t59_nvram_attr.size = pdata->offset;
44115+ pax_open_kernel();
44116+ *(size_t *)&m48t59_nvram_attr.size = pdata->offset;
44117+ pax_close_kernel();
44118
44119 ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
44120 if (ret) {
44121diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
44122index e693af6..2e525b6 100644
44123--- a/drivers/scsi/bfa/bfa_fcpim.h
44124+++ b/drivers/scsi/bfa/bfa_fcpim.h
44125@@ -36,7 +36,7 @@ struct bfa_iotag_s {
44126
44127 struct bfa_itn_s {
44128 bfa_isr_func_t isr;
44129-};
44130+} __no_const;
44131
44132 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
44133 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
44134diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
44135index 23a90e7..9cf04ee 100644
44136--- a/drivers/scsi/bfa/bfa_ioc.h
44137+++ b/drivers/scsi/bfa/bfa_ioc.h
44138@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
44139 bfa_ioc_disable_cbfn_t disable_cbfn;
44140 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
44141 bfa_ioc_reset_cbfn_t reset_cbfn;
44142-};
44143+} __no_const;
44144
44145 /*
44146 * IOC event notification mechanism.
44147@@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
44148 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
44149 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
44150 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
44151-};
44152+} __no_const;
44153
44154 /*
44155 * Queue element to wait for room in request queue. FIFO order is
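
The __no_const annotation added to these bfa structs is the escape hatch for the constify gcc plugin: types that consist of function pointers are otherwise constified wholesale, and __no_const marks the ones whose instances really are assigned at runtime (bfa fills in .isr per rport, for instance). The *_no_const typedefs seen later in this patch, bin_attribute_no_const, fc_function_template_no_const, net_device_ops_no_const and the tmem ones, are the same opt-out packaged as writable typedef variants of otherwise-const types. A sketch with the attribute modelled as a no-op macro; the real definition expands to a plugin-specific attribute when the constify plugin is active:

    /* Sketch: opting a function-pointer struct out of constification.
     * __no_const is a no-op here; under the constify plugin it expands to
     * an attribute that keeps the type writable. */
    #include <stdio.h>

    #define __no_const  /* placeholder for the plugin attribute */

    typedef void (*bfa_isr_func_t)(int vec);

    struct bfa_itn_like {
        bfa_isr_func_t isr;
    } __no_const;   /* instances are written after boot, keep them writable */

    static void my_isr(int vec) { printf("isr for vector %d\n", vec); }

    int main(void)
    {
        struct bfa_itn_like itn;
        itn.isr = my_isr;   /* legal because the type is opted out */
        itn.isr(3);
        return 0;
    }
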
44156diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
44157index df0c3c7..b00e1d0 100644
44158--- a/drivers/scsi/hosts.c
44159+++ b/drivers/scsi/hosts.c
44160@@ -42,7 +42,7 @@
44161 #include "scsi_logging.h"
44162
44163
44164-static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
44165+static atomic_unchecked_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
44166
44167
44168 static void scsi_host_cls_release(struct device *dev)
44169@@ -361,7 +361,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
44170 * subtract one because we increment first then return, but we need to
44171 * know what the next host number was before increment
44172 */
44173- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
44174+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
44175 shost->dma_channel = 0xff;
44176
44177 /* These three are default values which can be overridden */
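
scsi_host_next_hn is the first of many counters this patch converts to atomic_unchecked_t. The split exists because PaX REFCOUNT treats signed overflow of a plain atomic_t as an exploit attempt and kills the offender, which is the right call for reference counts but not for free-running statistics and ID generators that may legitimately wrap; those get the *_unchecked variants, which keep the old wrapping behaviour. A user-space mock of the two policies; the structs here are not actually atomic, and abort() stands in for the arch-level overflow trap:

    /* Sketch: checked versus unchecked counter semantics. The real
     * implementations use locked instructions plus, for the checked
     * flavour, an overflow trap; this mock only models the policy. */
    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct { int counter; } atomic_t;
    typedef struct { int counter; } atomic_unchecked_t;

    static int atomic_inc_return(atomic_t *v)
    {
        if (v->counter == INT_MAX)
            abort();            /* REFCOUNT: overflow is fatal */
        return ++v->counter;
    }

    static int atomic_inc_return_unchecked(atomic_unchecked_t *v)
    {
        /* wrap via unsigned arithmetic (conversion back is
         * implementation-defined; wraps on common ABIs) */
        v->counter = (int)((unsigned int)v->counter + 1);
        return v->counter;
    }

    int main(void)
    {
        atomic_t ref = { 1 };
        atomic_unchecked_t host_no = { INT_MAX };

        printf("refcount now %d\n", atomic_inc_return(&ref));
        printf("host_no wraps to %d\n",
               atomic_inc_return_unchecked(&host_no));
        return 0;
    }
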
44178diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
44179index 7f4f790..b75b92a 100644
44180--- a/drivers/scsi/hpsa.c
44181+++ b/drivers/scsi/hpsa.c
44182@@ -554,7 +554,7 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
44183 unsigned long flags;
44184
44185 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
44186- return h->access.command_completed(h, q);
44187+ return h->access->command_completed(h, q);
44188
44189 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
44190 a = rq->head[rq->current_entry];
44191@@ -3422,7 +3422,7 @@ static void start_io(struct ctlr_info *h)
44192 while (!list_empty(&h->reqQ)) {
44193 c = list_entry(h->reqQ.next, struct CommandList, list);
44194 /* can't do anything if fifo is full */
44195- if ((h->access.fifo_full(h))) {
44196+ if ((h->access->fifo_full(h))) {
44197 dev_warn(&h->pdev->dev, "fifo full\n");
44198 break;
44199 }
44200@@ -3444,7 +3444,7 @@ static void start_io(struct ctlr_info *h)
44201
44202 /* Tell the controller execute command */
44203 spin_unlock_irqrestore(&h->lock, flags);
44204- h->access.submit_command(h, c);
44205+ h->access->submit_command(h, c);
44206 spin_lock_irqsave(&h->lock, flags);
44207 }
44208 spin_unlock_irqrestore(&h->lock, flags);
44209@@ -3452,17 +3452,17 @@ static void start_io(struct ctlr_info *h)
44210
44211 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
44212 {
44213- return h->access.command_completed(h, q);
44214+ return h->access->command_completed(h, q);
44215 }
44216
44217 static inline bool interrupt_pending(struct ctlr_info *h)
44218 {
44219- return h->access.intr_pending(h);
44220+ return h->access->intr_pending(h);
44221 }
44222
44223 static inline long interrupt_not_for_us(struct ctlr_info *h)
44224 {
44225- return (h->access.intr_pending(h) == 0) ||
44226+ return (h->access->intr_pending(h) == 0) ||
44227 (h->interrupts_enabled == 0);
44228 }
44229
44230@@ -4364,7 +4364,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
44231 if (prod_index < 0)
44232 return -ENODEV;
44233 h->product_name = products[prod_index].product_name;
44234- h->access = *(products[prod_index].access);
44235+ h->access = products[prod_index].access;
44236
44237 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
44238 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
44239@@ -4646,7 +4646,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
44240
44241 assert_spin_locked(&lockup_detector_lock);
44242 remove_ctlr_from_lockup_detector_list(h);
44243- h->access.set_intr_mask(h, HPSA_INTR_OFF);
44244+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
44245 spin_lock_irqsave(&h->lock, flags);
44246 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
44247 spin_unlock_irqrestore(&h->lock, flags);
44248@@ -4823,7 +4823,7 @@ reinit_after_soft_reset:
44249 }
44250
44251 /* make sure the board interrupts are off */
44252- h->access.set_intr_mask(h, HPSA_INTR_OFF);
44253+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
44254
44255 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
44256 goto clean2;
44257@@ -4857,7 +4857,7 @@ reinit_after_soft_reset:
44258 * fake ones to scoop up any residual completions.
44259 */
44260 spin_lock_irqsave(&h->lock, flags);
44261- h->access.set_intr_mask(h, HPSA_INTR_OFF);
44262+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
44263 spin_unlock_irqrestore(&h->lock, flags);
44264 free_irqs(h);
44265 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
44266@@ -4876,9 +4876,9 @@ reinit_after_soft_reset:
44267 dev_info(&h->pdev->dev, "Board READY.\n");
44268 dev_info(&h->pdev->dev,
44269 "Waiting for stale completions to drain.\n");
44270- h->access.set_intr_mask(h, HPSA_INTR_ON);
44271+ h->access->set_intr_mask(h, HPSA_INTR_ON);
44272 msleep(10000);
44273- h->access.set_intr_mask(h, HPSA_INTR_OFF);
44274+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
44275
44276 rc = controller_reset_failed(h->cfgtable);
44277 if (rc)
44278@@ -4899,7 +4899,7 @@ reinit_after_soft_reset:
44279 }
44280
44281 /* Turn the interrupts on so we can service requests */
44282- h->access.set_intr_mask(h, HPSA_INTR_ON);
44283+ h->access->set_intr_mask(h, HPSA_INTR_ON);
44284
44285 hpsa_hba_inquiry(h);
44286 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
44287@@ -4954,7 +4954,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
44288 * To write all data in the battery backed cache to disks
44289 */
44290 hpsa_flush_cache(h);
44291- h->access.set_intr_mask(h, HPSA_INTR_OFF);
44292+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
44293 hpsa_free_irqs_and_disable_msix(h);
44294 }
44295
44296@@ -5122,7 +5122,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)
44297 return;
44298 }
44299 /* Change the access methods to the performant access methods */
44300- h->access = SA5_performant_access;
44301+ h->access = &SA5_performant_access;
44302 h->transMethod = CFGTBL_Trans_Performant;
44303 }
44304
44305diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
44306index 9816479..c5d4e97 100644
44307--- a/drivers/scsi/hpsa.h
44308+++ b/drivers/scsi/hpsa.h
44309@@ -79,7 +79,7 @@ struct ctlr_info {
44310 unsigned int msix_vector;
44311 unsigned int msi_vector;
44312 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
44313- struct access_method access;
44314+ struct access_method *access;
44315
44316 /* queue and queue Info */
44317 struct list_head reqQ;
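
The hpsa conversion is different in kind from the atomic ones around it: h->access was an embedded copy of an access_method table (h->access = *products[i].access), which forces a writable per-controller copy of function pointers; making it a pointer to the shared const table keeps those pointers in read-only memory at the cost of one extra dereference per call, which is why every h->access.foo() becomes h->access->foo(). A minimal sketch, names illustrative:

    /* Sketch: embedded ops copy versus pointer to a shared const table. */
    #include <stdio.h>

    struct access_method_like {
        void (*submit_command)(const char *what);
    };

    static void sa5_submit(const char *what) { printf("submit %s\n", what); }

    static const struct access_method_like SA5_access = {
        .submit_command = sa5_submit,
    };

    struct ctlr_like {
        const struct access_method_like *access; /* was: struct ... access */
    };

    int main(void)
    {
        struct ctlr_like h = { .access = &SA5_access };
        h.access->submit_command("cmd");   /* was: h.access.submit_command() */
        return 0;
    }
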
44318diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
44319index 8b928c6..9c76300 100644
44320--- a/drivers/scsi/libfc/fc_exch.c
44321+++ b/drivers/scsi/libfc/fc_exch.c
44322@@ -100,12 +100,12 @@ struct fc_exch_mgr {
44323 u16 pool_max_index;
44324
44325 struct {
44326- atomic_t no_free_exch;
44327- atomic_t no_free_exch_xid;
44328- atomic_t xid_not_found;
44329- atomic_t xid_busy;
44330- atomic_t seq_not_found;
44331- atomic_t non_bls_resp;
44332+ atomic_unchecked_t no_free_exch;
44333+ atomic_unchecked_t no_free_exch_xid;
44334+ atomic_unchecked_t xid_not_found;
44335+ atomic_unchecked_t xid_busy;
44336+ atomic_unchecked_t seq_not_found;
44337+ atomic_unchecked_t non_bls_resp;
44338 } stats;
44339 };
44340
44341@@ -736,7 +736,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
44342 /* allocate memory for exchange */
44343 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
44344 if (!ep) {
44345- atomic_inc(&mp->stats.no_free_exch);
44346+ atomic_inc_unchecked(&mp->stats.no_free_exch);
44347 goto out;
44348 }
44349 memset(ep, 0, sizeof(*ep));
44350@@ -797,7 +797,7 @@ out:
44351 return ep;
44352 err:
44353 spin_unlock_bh(&pool->lock);
44354- atomic_inc(&mp->stats.no_free_exch_xid);
44355+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
44356 mempool_free(ep, mp->ep_pool);
44357 return NULL;
44358 }
44359@@ -940,7 +940,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
44360 xid = ntohs(fh->fh_ox_id); /* we originated exch */
44361 ep = fc_exch_find(mp, xid);
44362 if (!ep) {
44363- atomic_inc(&mp->stats.xid_not_found);
44364+ atomic_inc_unchecked(&mp->stats.xid_not_found);
44365 reject = FC_RJT_OX_ID;
44366 goto out;
44367 }
44368@@ -970,7 +970,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
44369 ep = fc_exch_find(mp, xid);
44370 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
44371 if (ep) {
44372- atomic_inc(&mp->stats.xid_busy);
44373+ atomic_inc_unchecked(&mp->stats.xid_busy);
44374 reject = FC_RJT_RX_ID;
44375 goto rel;
44376 }
44377@@ -981,7 +981,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
44378 }
44379 xid = ep->xid; /* get our XID */
44380 } else if (!ep) {
44381- atomic_inc(&mp->stats.xid_not_found);
44382+ atomic_inc_unchecked(&mp->stats.xid_not_found);
44383 reject = FC_RJT_RX_ID; /* XID not found */
44384 goto out;
44385 }
44386@@ -998,7 +998,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
44387 } else {
44388 sp = &ep->seq;
44389 if (sp->id != fh->fh_seq_id) {
44390- atomic_inc(&mp->stats.seq_not_found);
44391+ atomic_inc_unchecked(&mp->stats.seq_not_found);
44392 if (f_ctl & FC_FC_END_SEQ) {
44393 /*
44394 * Update sequence_id based on incoming last
44395@@ -1448,22 +1448,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
44396
44397 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
44398 if (!ep) {
44399- atomic_inc(&mp->stats.xid_not_found);
44400+ atomic_inc_unchecked(&mp->stats.xid_not_found);
44401 goto out;
44402 }
44403 if (ep->esb_stat & ESB_ST_COMPLETE) {
44404- atomic_inc(&mp->stats.xid_not_found);
44405+ atomic_inc_unchecked(&mp->stats.xid_not_found);
44406 goto rel;
44407 }
44408 if (ep->rxid == FC_XID_UNKNOWN)
44409 ep->rxid = ntohs(fh->fh_rx_id);
44410 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
44411- atomic_inc(&mp->stats.xid_not_found);
44412+ atomic_inc_unchecked(&mp->stats.xid_not_found);
44413 goto rel;
44414 }
44415 if (ep->did != ntoh24(fh->fh_s_id) &&
44416 ep->did != FC_FID_FLOGI) {
44417- atomic_inc(&mp->stats.xid_not_found);
44418+ atomic_inc_unchecked(&mp->stats.xid_not_found);
44419 goto rel;
44420 }
44421 sof = fr_sof(fp);
44422@@ -1472,7 +1472,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
44423 sp->ssb_stat |= SSB_ST_RESP;
44424 sp->id = fh->fh_seq_id;
44425 } else if (sp->id != fh->fh_seq_id) {
44426- atomic_inc(&mp->stats.seq_not_found);
44427+ atomic_inc_unchecked(&mp->stats.seq_not_found);
44428 goto rel;
44429 }
44430
44431@@ -1536,9 +1536,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
44432 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
44433
44434 if (!sp)
44435- atomic_inc(&mp->stats.xid_not_found);
44436+ atomic_inc_unchecked(&mp->stats.xid_not_found);
44437 else
44438- atomic_inc(&mp->stats.non_bls_resp);
44439+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
44440
44441 fc_frame_free(fp);
44442 }
44443@@ -2185,13 +2185,13 @@ void fc_exch_update_stats(struct fc_lport *lport)
44444
44445 list_for_each_entry(ema, &lport->ema_list, ema_list) {
44446 mp = ema->mp;
44447- st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
44448+ st->fc_no_free_exch += atomic_read_unchecked(&mp->stats.no_free_exch);
44449 st->fc_no_free_exch_xid +=
44450- atomic_read(&mp->stats.no_free_exch_xid);
44451- st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
44452- st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
44453- st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
44454- st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
44455+ atomic_read_unchecked(&mp->stats.no_free_exch_xid);
44456+ st->fc_xid_not_found += atomic_read_unchecked(&mp->stats.xid_not_found);
44457+ st->fc_xid_busy += atomic_read_unchecked(&mp->stats.xid_busy);
44458+ st->fc_seq_not_found += atomic_read_unchecked(&mp->stats.seq_not_found);
44459+ st->fc_non_bls_resp += atomic_read_unchecked(&mp->stats.non_bls_resp);
44460 }
44461 }
44462 EXPORT_SYMBOL(fc_exch_update_stats);
44463diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
44464index 161c98e..6d563b3 100644
44465--- a/drivers/scsi/libsas/sas_ata.c
44466+++ b/drivers/scsi/libsas/sas_ata.c
44467@@ -554,7 +554,7 @@ static struct ata_port_operations sas_sata_ops = {
44468 .postreset = ata_std_postreset,
44469 .error_handler = ata_std_error_handler,
44470 .post_internal_cmd = sas_ata_post_internal,
44471- .qc_defer = ata_std_qc_defer,
44472+ .qc_defer = ata_std_qc_defer,
44473 .qc_prep = ata_noop_qc_prep,
44474 .qc_issue = sas_ata_qc_issue,
44475 .qc_fill_rtf = sas_ata_qc_fill_rtf,
44476diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
44477index bcc56ca..6f4174a 100644
44478--- a/drivers/scsi/lpfc/lpfc.h
44479+++ b/drivers/scsi/lpfc/lpfc.h
44480@@ -431,7 +431,7 @@ struct lpfc_vport {
44481 struct dentry *debug_nodelist;
44482 struct dentry *vport_debugfs_root;
44483 struct lpfc_debugfs_trc *disc_trc;
44484- atomic_t disc_trc_cnt;
44485+ atomic_unchecked_t disc_trc_cnt;
44486 #endif
44487 uint8_t stat_data_enabled;
44488 uint8_t stat_data_blocked;
44489@@ -865,8 +865,8 @@ struct lpfc_hba {
44490 struct timer_list fabric_block_timer;
44491 unsigned long bit_flags;
44492 #define FABRIC_COMANDS_BLOCKED 0
44493- atomic_t num_rsrc_err;
44494- atomic_t num_cmd_success;
44495+ atomic_unchecked_t num_rsrc_err;
44496+ atomic_unchecked_t num_cmd_success;
44497 unsigned long last_rsrc_error_time;
44498 unsigned long last_ramp_down_time;
44499 unsigned long last_ramp_up_time;
44500@@ -902,7 +902,7 @@ struct lpfc_hba {
44501
44502 struct dentry *debug_slow_ring_trc;
44503 struct lpfc_debugfs_trc *slow_ring_trc;
44504- atomic_t slow_ring_trc_cnt;
44505+ atomic_unchecked_t slow_ring_trc_cnt;
44506 /* iDiag debugfs sub-directory */
44507 struct dentry *idiag_root;
44508 struct dentry *idiag_pci_cfg;
44509diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
44510index f525ecb..32549a4 100644
44511--- a/drivers/scsi/lpfc/lpfc_debugfs.c
44512+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
44513@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
44514
44515 #include <linux/debugfs.h>
44516
44517-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
44518+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
44519 static unsigned long lpfc_debugfs_start_time = 0L;
44520
44521 /* iDiag */
44522@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
44523 lpfc_debugfs_enable = 0;
44524
44525 len = 0;
44526- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
44527+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
44528 (lpfc_debugfs_max_disc_trc - 1);
44529 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
44530 dtp = vport->disc_trc + i;
44531@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
44532 lpfc_debugfs_enable = 0;
44533
44534 len = 0;
44535- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
44536+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
44537 (lpfc_debugfs_max_slow_ring_trc - 1);
44538 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
44539 dtp = phba->slow_ring_trc + i;
44540@@ -646,14 +646,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
44541 !vport || !vport->disc_trc)
44542 return;
44543
44544- index = atomic_inc_return(&vport->disc_trc_cnt) &
44545+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
44546 (lpfc_debugfs_max_disc_trc - 1);
44547 dtp = vport->disc_trc + index;
44548 dtp->fmt = fmt;
44549 dtp->data1 = data1;
44550 dtp->data2 = data2;
44551 dtp->data3 = data3;
44552- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
44553+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
44554 dtp->jif = jiffies;
44555 #endif
44556 return;
44557@@ -684,14 +684,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
44558 !phba || !phba->slow_ring_trc)
44559 return;
44560
44561- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
44562+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
44563 (lpfc_debugfs_max_slow_ring_trc - 1);
44564 dtp = phba->slow_ring_trc + index;
44565 dtp->fmt = fmt;
44566 dtp->data1 = data1;
44567 dtp->data2 = data2;
44568 dtp->data3 = data3;
44569- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
44570+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
44571 dtp->jif = jiffies;
44572 #endif
44573 return;
44574@@ -4182,7 +4182,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
44575 "slow_ring buffer\n");
44576 goto debug_failed;
44577 }
44578- atomic_set(&phba->slow_ring_trc_cnt, 0);
44579+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
44580 memset(phba->slow_ring_trc, 0,
44581 (sizeof(struct lpfc_debugfs_trc) *
44582 lpfc_debugfs_max_slow_ring_trc));
44583@@ -4228,7 +4228,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
44584 "buffer\n");
44585 goto debug_failed;
44586 }
44587- atomic_set(&vport->disc_trc_cnt, 0);
44588+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
44589
44590 snprintf(name, sizeof(name), "discovery_trace");
44591 vport->debug_disc_trc =
44592diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
44593index cb465b2..2e7b25f 100644
44594--- a/drivers/scsi/lpfc/lpfc_init.c
44595+++ b/drivers/scsi/lpfc/lpfc_init.c
44596@@ -10950,8 +10950,10 @@ lpfc_init(void)
44597 "misc_register returned with status %d", error);
44598
44599 if (lpfc_enable_npiv) {
44600- lpfc_transport_functions.vport_create = lpfc_vport_create;
44601- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
44602+ pax_open_kernel();
44603+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
44604+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
44605+ pax_close_kernel();
44606 }
44607 lpfc_transport_template =
44608 fc_attach_transport(&lpfc_transport_functions);
44609diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
44610index 8523b278e..ce1d812 100644
44611--- a/drivers/scsi/lpfc/lpfc_scsi.c
44612+++ b/drivers/scsi/lpfc/lpfc_scsi.c
44613@@ -331,7 +331,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
44614 uint32_t evt_posted;
44615
44616 spin_lock_irqsave(&phba->hbalock, flags);
44617- atomic_inc(&phba->num_rsrc_err);
44618+ atomic_inc_unchecked(&phba->num_rsrc_err);
44619 phba->last_rsrc_error_time = jiffies;
44620
44621 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
44622@@ -372,7 +372,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
44623 unsigned long flags;
44624 struct lpfc_hba *phba = vport->phba;
44625 uint32_t evt_posted;
44626- atomic_inc(&phba->num_cmd_success);
44627+ atomic_inc_unchecked(&phba->num_cmd_success);
44628
44629 if (vport->cfg_lun_queue_depth <= queue_depth)
44630 return;
44631@@ -416,8 +416,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
44632 unsigned long num_rsrc_err, num_cmd_success;
44633 int i;
44634
44635- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
44636- num_cmd_success = atomic_read(&phba->num_cmd_success);
44637+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
44638+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
44639
44640 /*
44641 * The error and success command counters are global per
44642@@ -445,8 +445,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
44643 }
44644 }
44645 lpfc_destroy_vport_work_array(phba, vports);
44646- atomic_set(&phba->num_rsrc_err, 0);
44647- atomic_set(&phba->num_cmd_success, 0);
44648+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
44649+ atomic_set_unchecked(&phba->num_cmd_success, 0);
44650 }
44651
44652 /**
44653@@ -480,8 +480,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
44654 }
44655 }
44656 lpfc_destroy_vport_work_array(phba, vports);
44657- atomic_set(&phba->num_rsrc_err, 0);
44658- atomic_set(&phba->num_cmd_success, 0);
44659+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
44660+ atomic_set_unchecked(&phba->num_cmd_success, 0);
44661 }
44662
44663 /**
44664diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
44665index 8e1b737..50ff510 100644
44666--- a/drivers/scsi/pmcraid.c
44667+++ b/drivers/scsi/pmcraid.c
44668@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
44669 res->scsi_dev = scsi_dev;
44670 scsi_dev->hostdata = res;
44671 res->change_detected = 0;
44672- atomic_set(&res->read_failures, 0);
44673- atomic_set(&res->write_failures, 0);
44674+ atomic_set_unchecked(&res->read_failures, 0);
44675+ atomic_set_unchecked(&res->write_failures, 0);
44676 rc = 0;
44677 }
44678 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
44679@@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
44680
44681 /* If this was a SCSI read/write command keep count of errors */
44682 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
44683- atomic_inc(&res->read_failures);
44684+ atomic_inc_unchecked(&res->read_failures);
44685 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
44686- atomic_inc(&res->write_failures);
44687+ atomic_inc_unchecked(&res->write_failures);
44688
44689 if (!RES_IS_GSCSI(res->cfg_entry) &&
44690 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
44691@@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
44692 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
44693 * hrrq_id assigned here in queuecommand
44694 */
44695- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
44696+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
44697 pinstance->num_hrrq;
44698 cmd->cmd_done = pmcraid_io_done;
44699
44700@@ -3846,7 +3846,7 @@ static long pmcraid_ioctl_passthrough(
44701 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
44702 * hrrq_id assigned here in queuecommand
44703 */
44704- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
44705+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
44706 pinstance->num_hrrq;
44707
44708 if (request_size) {
44709@@ -4483,7 +4483,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
44710
44711 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
44712 /* add resources only after host is added into system */
44713- if (!atomic_read(&pinstance->expose_resources))
44714+ if (!atomic_read_unchecked(&pinstance->expose_resources))
44715 return;
44716
44717 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
44718@@ -5310,8 +5310,8 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
44719 init_waitqueue_head(&pinstance->reset_wait_q);
44720
44721 atomic_set(&pinstance->outstanding_cmds, 0);
44722- atomic_set(&pinstance->last_message_id, 0);
44723- atomic_set(&pinstance->expose_resources, 0);
44724+ atomic_set_unchecked(&pinstance->last_message_id, 0);
44725+ atomic_set_unchecked(&pinstance->expose_resources, 0);
44726
44727 INIT_LIST_HEAD(&pinstance->free_res_q);
44728 INIT_LIST_HEAD(&pinstance->used_res_q);
44729@@ -6024,7 +6024,7 @@ static int pmcraid_probe(struct pci_dev *pdev,
44730 /* Schedule worker thread to handle CCN and take care of adding and
44731 * removing devices to OS
44732 */
44733- atomic_set(&pinstance->expose_resources, 1);
44734+ atomic_set_unchecked(&pinstance->expose_resources, 1);
44735 schedule_work(&pinstance->worker_q);
44736 return rc;
44737
44738diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
44739index e1d150f..6c6df44 100644
44740--- a/drivers/scsi/pmcraid.h
44741+++ b/drivers/scsi/pmcraid.h
44742@@ -748,7 +748,7 @@ struct pmcraid_instance {
44743 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
44744
44745 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
44746- atomic_t last_message_id;
44747+ atomic_unchecked_t last_message_id;
44748
44749 /* configuration table */
44750 struct pmcraid_config_table *cfg_table;
44751@@ -777,7 +777,7 @@ struct pmcraid_instance {
44752 atomic_t outstanding_cmds;
44753
44754 /* should add/delete resources to mid-layer now ?*/
44755- atomic_t expose_resources;
44756+ atomic_unchecked_t expose_resources;
44757
44758
44759
44760@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
44761 struct pmcraid_config_table_entry_ext cfg_entry_ext;
44762 };
44763 struct scsi_device *scsi_dev; /* Link scsi_device structure */
44764- atomic_t read_failures; /* count of failed READ commands */
44765- atomic_t write_failures; /* count of failed WRITE commands */
44766+ atomic_unchecked_t read_failures; /* count of failed READ commands */
44767+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
44768
44769 /* To indicate add/delete/modify during CCN */
44770 u8 change_detected;
44771diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
44772index bf60c63..74d4dce 100644
44773--- a/drivers/scsi/qla2xxx/qla_attr.c
44774+++ b/drivers/scsi/qla2xxx/qla_attr.c
44775@@ -2001,7 +2001,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
44776 return 0;
44777 }
44778
44779-struct fc_function_template qla2xxx_transport_functions = {
44780+fc_function_template_no_const qla2xxx_transport_functions = {
44781
44782 .show_host_node_name = 1,
44783 .show_host_port_name = 1,
44784@@ -2048,7 +2048,7 @@ struct fc_function_template qla2xxx_transport_functions = {
44785 .bsg_timeout = qla24xx_bsg_timeout,
44786 };
44787
44788-struct fc_function_template qla2xxx_transport_vport_functions = {
44789+fc_function_template_no_const qla2xxx_transport_vport_functions = {
44790
44791 .show_host_node_name = 1,
44792 .show_host_port_name = 1,
44793diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
44794index 026bfde..90c4018 100644
44795--- a/drivers/scsi/qla2xxx/qla_gbl.h
44796+++ b/drivers/scsi/qla2xxx/qla_gbl.h
44797@@ -528,8 +528,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *);
44798 struct device_attribute;
44799 extern struct device_attribute *qla2x00_host_attrs[];
44800 struct fc_function_template;
44801-extern struct fc_function_template qla2xxx_transport_functions;
44802-extern struct fc_function_template qla2xxx_transport_vport_functions;
44803+extern fc_function_template_no_const qla2xxx_transport_functions;
44804+extern fc_function_template_no_const qla2xxx_transport_vport_functions;
44805 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
44806 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
44807 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
44808diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
44809index ad72c1d..afc9a98 100644
44810--- a/drivers/scsi/qla2xxx/qla_os.c
44811+++ b/drivers/scsi/qla2xxx/qla_os.c
44812@@ -1571,8 +1571,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
44813 !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
44814 /* Ok, a 64bit DMA mask is applicable. */
44815 ha->flags.enable_64bit_addressing = 1;
44816- ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
44817- ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
44818+ pax_open_kernel();
44819+ *(void **)&ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
44820+ *(void **)&ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
44821+ pax_close_kernel();
44822 return;
44823 }
44824 }
44825diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
44826index ddf16a8..80f4dd0 100644
44827--- a/drivers/scsi/qla4xxx/ql4_def.h
44828+++ b/drivers/scsi/qla4xxx/ql4_def.h
44829@@ -291,7 +291,7 @@ struct ddb_entry {
44830 * (4000 only) */
44831 atomic_t relogin_timer; /* Max Time to wait for
44832 * relogin to complete */
44833- atomic_t relogin_retry_count; /* Num of times relogin has been
44834+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
44835 * retried */
44836 uint32_t default_time2wait; /* Default Min time between
44837 * relogins (+aens) */
44838diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
44839index 4d231c1..2892c37 100644
44840--- a/drivers/scsi/qla4xxx/ql4_os.c
44841+++ b/drivers/scsi/qla4xxx/ql4_os.c
44842@@ -2971,12 +2971,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
44843 */
44844 if (!iscsi_is_session_online(cls_sess)) {
44845 /* Reset retry relogin timer */
44846- atomic_inc(&ddb_entry->relogin_retry_count);
44847+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
44848 DEBUG2(ql4_printk(KERN_INFO, ha,
44849 "%s: index[%d] relogin timed out-retrying"
44850 " relogin (%d), retry (%d)\n", __func__,
44851 ddb_entry->fw_ddb_index,
44852- atomic_read(&ddb_entry->relogin_retry_count),
44853+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
44854 ddb_entry->default_time2wait + 4));
44855 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
44856 atomic_set(&ddb_entry->retry_relogin_timer,
44857@@ -5081,7 +5081,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
44858
44859 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
44860 atomic_set(&ddb_entry->relogin_timer, 0);
44861- atomic_set(&ddb_entry->relogin_retry_count, 0);
44862+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
44863 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
44864 ddb_entry->default_relogin_timeout =
44865 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
44866diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
44867index eaa808e..95f8841 100644
44868--- a/drivers/scsi/scsi.c
44869+++ b/drivers/scsi/scsi.c
44870@@ -661,7 +661,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
44871 unsigned long timeout;
44872 int rtn = 0;
44873
44874- atomic_inc(&cmd->device->iorequest_cnt);
44875+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
44876
44877 /* check if the device is still usable */
44878 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
44879diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
44880index 86d5220..f22c51a 100644
44881--- a/drivers/scsi/scsi_lib.c
44882+++ b/drivers/scsi/scsi_lib.c
44883@@ -1458,7 +1458,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
44884 shost = sdev->host;
44885 scsi_init_cmd_errh(cmd);
44886 cmd->result = DID_NO_CONNECT << 16;
44887- atomic_inc(&cmd->device->iorequest_cnt);
44888+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
44889
44890 /*
44891 * SCSI request completion path will do scsi_device_unbusy(),
44892@@ -1484,9 +1484,9 @@ static void scsi_softirq_done(struct request *rq)
44893
44894 INIT_LIST_HEAD(&cmd->eh_entry);
44895
44896- atomic_inc(&cmd->device->iodone_cnt);
44897+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
44898 if (cmd->result)
44899- atomic_inc(&cmd->device->ioerr_cnt);
44900+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
44901
44902 disposition = scsi_decide_disposition(cmd);
44903 if (disposition != SUCCESS &&
44904diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
44905index 931a7d9..0c2a754 100644
44906--- a/drivers/scsi/scsi_sysfs.c
44907+++ b/drivers/scsi/scsi_sysfs.c
44908@@ -658,7 +658,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
44909 char *buf) \
44910 { \
44911 struct scsi_device *sdev = to_scsi_device(dev); \
44912- unsigned long long count = atomic_read(&sdev->field); \
44913+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
44914 return snprintf(buf, 20, "0x%llx\n", count); \
44915 } \
44916 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
44917diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
44918index 84a1fdf..693b0d6 100644
44919--- a/drivers/scsi/scsi_tgt_lib.c
44920+++ b/drivers/scsi/scsi_tgt_lib.c
44921@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
44922 int err;
44923
44924 dprintk("%lx %u\n", uaddr, len);
44925- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
44926+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
44927 if (err) {
44928 /*
44929 * TODO: need to fixup sg_tablesize, max_segment_size,
44930diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
44931index e106c27..11a380e 100644
44932--- a/drivers/scsi/scsi_transport_fc.c
44933+++ b/drivers/scsi/scsi_transport_fc.c
44934@@ -497,7 +497,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
44935 * Netlink Infrastructure
44936 */
44937
44938-static atomic_t fc_event_seq;
44939+static atomic_unchecked_t fc_event_seq;
44940
44941 /**
44942 * fc_get_event_number - Obtain the next sequential FC event number
44943@@ -510,7 +510,7 @@ static atomic_t fc_event_seq;
44944 u32
44945 fc_get_event_number(void)
44946 {
44947- return atomic_add_return(1, &fc_event_seq);
44948+ return atomic_add_return_unchecked(1, &fc_event_seq);
44949 }
44950 EXPORT_SYMBOL(fc_get_event_number);
44951
44952@@ -654,7 +654,7 @@ static __init int fc_transport_init(void)
44953 {
44954 int error;
44955
44956- atomic_set(&fc_event_seq, 0);
44957+ atomic_set_unchecked(&fc_event_seq, 0);
44958
44959 error = transport_class_register(&fc_host_class);
44960 if (error)
44961@@ -844,7 +844,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
44962 char *cp;
44963
44964 *val = simple_strtoul(buf, &cp, 0);
44965- if ((*cp && (*cp != '\n')) || (*val < 0))
44966+ if (*cp && (*cp != '\n'))
44967 return -EINVAL;
44968 /*
44969 * Check for overflow; dev_loss_tmo is u32
44970diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
44971index 133926b..903000d 100644
44972--- a/drivers/scsi/scsi_transport_iscsi.c
44973+++ b/drivers/scsi/scsi_transport_iscsi.c
44974@@ -80,7 +80,7 @@ struct iscsi_internal {
44975 struct transport_container session_cont;
44976 };
44977
44978-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
44979+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
44980 static struct workqueue_struct *iscsi_eh_timer_workq;
44981
44982 static DEFINE_IDA(iscsi_sess_ida);
44983@@ -1738,7 +1738,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
44984 int err;
44985
44986 ihost = shost->shost_data;
44987- session->sid = atomic_add_return(1, &iscsi_session_nr);
44988+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
44989
44990 if (target_id == ISCSI_MAX_TARGET) {
44991 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
44992@@ -3944,7 +3944,7 @@ static __init int iscsi_transport_init(void)
44993 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
44994 ISCSI_TRANSPORT_VERSION);
44995
44996- atomic_set(&iscsi_session_nr, 0);
44997+ atomic_set_unchecked(&iscsi_session_nr, 0);
44998
44999 err = class_register(&iscsi_transport_class);
45000 if (err)
45001diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
45002index f379c7f..e8fc69c 100644
45003--- a/drivers/scsi/scsi_transport_srp.c
45004+++ b/drivers/scsi/scsi_transport_srp.c
45005@@ -33,7 +33,7 @@
45006 #include "scsi_transport_srp_internal.h"
45007
45008 struct srp_host_attrs {
45009- atomic_t next_port_id;
45010+ atomic_unchecked_t next_port_id;
45011 };
45012 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
45013
45014@@ -61,7 +61,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
45015 struct Scsi_Host *shost = dev_to_shost(dev);
45016 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
45017
45018- atomic_set(&srp_host->next_port_id, 0);
45019+ atomic_set_unchecked(&srp_host->next_port_id, 0);
45020 return 0;
45021 }
45022
45023@@ -210,7 +210,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
45024 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
45025 rport->roles = ids->roles;
45026
45027- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
45028+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
45029 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
45030
45031 transport_setup_device(&rport->dev);
45032diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
45033index 610417e..1544fa9 100644
45034--- a/drivers/scsi/sd.c
45035+++ b/drivers/scsi/sd.c
45036@@ -2928,7 +2928,7 @@ static int sd_probe(struct device *dev)
45037 sdkp->disk = gd;
45038 sdkp->index = index;
45039 atomic_set(&sdkp->openers, 0);
45040- atomic_set(&sdkp->device->ioerr_cnt, 0);
45041+ atomic_set_unchecked(&sdkp->device->ioerr_cnt, 0);
45042
45043 if (!sdp->request_queue->rq_timeout) {
45044 if (sdp->type != TYPE_MOD)
45045diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
45046index df5e961..df6b97f 100644
45047--- a/drivers/scsi/sg.c
45048+++ b/drivers/scsi/sg.c
45049@@ -1102,7 +1102,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
45050 sdp->disk->disk_name,
45051 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
45052 NULL,
45053- (char *)arg);
45054+ (char __user *)arg);
45055 case BLKTRACESTART:
45056 return blk_trace_startstop(sdp->device->request_queue, 1);
45057 case BLKTRACESTOP:
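
The scsi_tgt_lib.c and sg.c hunks are sparse annotation fixes: both call sites receive a user-space address smuggled through an unsigned long and must hand it on as a __user pointer, not a bare kernel pointer, so that sparse's address-space checking keeps flagging any direct dereference. A sketch of what the qualifier does; copy_to_user_like() is an invented stand-in, while the __CHECKER__ definition mirrors the kernel's:

    /* Sketch: the __user address-space qualifier. Under sparse
     * (__CHECKER__) it puts a pointer in address space 1 and forbids
     * dereference; under a normal compiler it vanishes. */
    #include <stdio.h>
    #include <string.h>

    #ifdef __CHECKER__
    # define __user __attribute__((noderef, address_space(1)))
    #else
    # define __user
    #endif

    /* invented stand-in for copy_to_user(): wants a __user destination */
    static long copy_to_user_like(void __user *dst, const void *src, size_t n)
    {
        memcpy((void *)dst, src, n);    /* plain copy in this sketch */
        return 0;
    }

    int main(void)
    {
        char buf[8];
        unsigned long arg = (unsigned long)buf; /* ioctl-style smuggled ptr */

        /* the sg.c fix: cast back with __user, not to a bare (char *) */
        copy_to_user_like((char __user *)arg, "ok", 3);
        printf("%s\n", buf);
        return 0;
    }
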
45058diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
45059index 32b7bb1..2f1c4bd 100644
45060--- a/drivers/spi/spi.c
45061+++ b/drivers/spi/spi.c
45062@@ -1631,7 +1631,7 @@ int spi_bus_unlock(struct spi_master *master)
45063 EXPORT_SYMBOL_GPL(spi_bus_unlock);
45064
45065 /* portable code must never pass more than 32 bytes */
45066-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
45067+#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
45068
45069 static u8 *buf;
45070
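
The SPI_BUFSIZ change makes the 32 literal unsigned long. The kernel's max() compares pointers to its two temporaries precisely so that mixed-type operands are diagnosed, and the macro's result then stays unsigned through the later size_t comparisons and allocation sizes; the assumption here is that SMP_CACHE_BYTES is an unsigned long expression on the affected configurations. A sketch of the type-checked macro (GCC typeof and statement expressions):

    /* Sketch: a kernel-style type-checked max(). The pointer comparison
     * is a compile-time type check: it warns when x and y differ in
     * type, which is what a plain int 32 next to an unsigned long
     * operand trips. */
    #include <stdio.h>

    #define max_checked(x, y) ({                \
            typeof(x) _max1 = (x);              \
            typeof(y) _max2 = (y);              \
            (void)(&_max1 == &_max2);           \
            _max1 > _max2 ? _max1 : _max2; })

    #define SMP_CACHE_BYTES_UL 64UL     /* assumed value for the sketch */

    int main(void)
    {
        /* max_checked(32, SMP_CACHE_BYTES_UL) warns: int vs unsigned long */
        unsigned long bufsiz = max_checked(32UL, SMP_CACHE_BYTES_UL);
        printf("SPI_BUFSIZ = %lu\n", bufsiz);
        return 0;
    }
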
45071diff --git a/drivers/staging/media/solo6x10/solo6x10-core.c b/drivers/staging/media/solo6x10/solo6x10-core.c
45072index 3675020..e80d92c 100644
45073--- a/drivers/staging/media/solo6x10/solo6x10-core.c
45074+++ b/drivers/staging/media/solo6x10/solo6x10-core.c
45075@@ -434,7 +434,7 @@ static void solo_device_release(struct device *dev)
45076
45077 static int solo_sysfs_init(struct solo_dev *solo_dev)
45078 {
45079- struct bin_attribute *sdram_attr = &solo_dev->sdram_attr;
45080+ bin_attribute_no_const *sdram_attr = &solo_dev->sdram_attr;
45081 struct device *dev = &solo_dev->dev;
45082 const char *driver;
45083 int i;
45084diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
45085index 34afc16..ffe44dd 100644
45086--- a/drivers/staging/octeon/ethernet-rx.c
45087+++ b/drivers/staging/octeon/ethernet-rx.c
45088@@ -421,11 +421,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
45089 /* Increment RX stats for virtual ports */
45090 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
45091 #ifdef CONFIG_64BIT
45092- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
45093- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
45094+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
45095+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
45096 #else
45097- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
45098- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
45099+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
45100+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
45101 #endif
45102 }
45103 netif_receive_skb(skb);
45104@@ -437,9 +437,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
45105 dev->name);
45106 */
45107 #ifdef CONFIG_64BIT
45108- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
45109+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
45110 #else
45111- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
45112+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
45113 #endif
45114 dev_kfree_skb_irq(skb);
45115 }
45116diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
45117index c3a90e7..023619a 100644
45118--- a/drivers/staging/octeon/ethernet.c
45119+++ b/drivers/staging/octeon/ethernet.c
45120@@ -252,11 +252,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
45121 * since the RX tasklet also increments it.
45122 */
45123 #ifdef CONFIG_64BIT
45124- atomic64_add(rx_status.dropped_packets,
45125- (atomic64_t *)&priv->stats.rx_dropped);
45126+ atomic64_add_unchecked(rx_status.dropped_packets,
45127+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
45128 #else
45129- atomic_add(rx_status.dropped_packets,
45130- (atomic_t *)&priv->stats.rx_dropped);
45131+ atomic_add_unchecked(rx_status.dropped_packets,
45132+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
45133 #endif
45134 }
45135
45136diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
45137index dc23395..cf7e9b1 100644
45138--- a/drivers/staging/rtl8712/rtl871x_io.h
45139+++ b/drivers/staging/rtl8712/rtl871x_io.h
45140@@ -108,7 +108,7 @@ struct _io_ops {
45141 u8 *pmem);
45142 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
45143 u8 *pmem);
45144-};
45145+} __no_const;
45146
45147 struct io_req {
45148 struct list_head list;
45149diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
45150index 1f5088b..0e59820 100644
45151--- a/drivers/staging/sbe-2t3e3/netdev.c
45152+++ b/drivers/staging/sbe-2t3e3/netdev.c
45153@@ -51,7 +51,7 @@ static int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
45154 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
45155
45156 if (rlen)
45157- if (copy_to_user(data, &resp, rlen))
45158+ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
45159 return -EFAULT;
45160
45161 return 0;
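
The one-line 2t3e3 change is a plain bounds check: rlen comes back from the device layer, and without the clamp an oversized value would let copy_to_user() read past the on-stack resp buffer and leak adjacent kernel stack to user space. A user-space sketch of the guard, with memcpy() in place of copy_to_user() and invented types:

    /* Sketch: clamp a device-reported length before copying out. */
    #include <stdio.h>
    #include <string.h>

    struct resp_like { char payload[16]; };

    static int ioctl_copy_out(char *data, const struct resp_like *resp,
                              size_t rlen)
    {
        if (rlen) {
            if (rlen > sizeof(*resp))   /* the added clamp */
                return -1;              /* -EFAULT in the hunk */
            memcpy(data, resp, rlen);   /* copy_to_user() stand-in */
        }
        return 0;
    }

    int main(void)
    {
        struct resp_like resp = { "pong" };
        char out[16];

        printf("sane length: %d\n", ioctl_copy_out(out, &resp, 5));
        printf("oversized:   %d\n", ioctl_copy_out(out, &resp, 4096));
        return 0;
    }
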
45162diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
45163index a863a98..d272795 100644
45164--- a/drivers/staging/usbip/vhci.h
45165+++ b/drivers/staging/usbip/vhci.h
45166@@ -83,7 +83,7 @@ struct vhci_hcd {
45167 unsigned resuming:1;
45168 unsigned long re_timeout;
45169
45170- atomic_t seqnum;
45171+ atomic_unchecked_t seqnum;
45172
45173 /*
45174 * NOTE:
45175diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
45176index d7974cb..d78076b 100644
45177--- a/drivers/staging/usbip/vhci_hcd.c
45178+++ b/drivers/staging/usbip/vhci_hcd.c
45179@@ -441,7 +441,7 @@ static void vhci_tx_urb(struct urb *urb)
45180
45181 spin_lock(&vdev->priv_lock);
45182
45183- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
45184+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
45185 if (priv->seqnum == 0xffff)
45186 dev_info(&urb->dev->dev, "seqnum max\n");
45187
45188@@ -687,7 +687,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
45189 return -ENOMEM;
45190 }
45191
45192- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
45193+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
45194 if (unlink->seqnum == 0xffff)
45195 pr_info("seqnum max\n");
45196
45197@@ -891,7 +891,7 @@ static int vhci_start(struct usb_hcd *hcd)
45198 vdev->rhport = rhport;
45199 }
45200
45201- atomic_set(&vhci->seqnum, 0);
45202+ atomic_set_unchecked(&vhci->seqnum, 0);
45203 spin_lock_init(&vhci->lock);
45204
45205 hcd->power_budget = 0; /* no limit */
45206diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
45207index d07fcb5..358e1e1 100644
45208--- a/drivers/staging/usbip/vhci_rx.c
45209+++ b/drivers/staging/usbip/vhci_rx.c
45210@@ -80,7 +80,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
45211 if (!urb) {
45212 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
45213 pr_info("max seqnum %d\n",
45214- atomic_read(&the_controller->seqnum));
45215+ atomic_read_unchecked(&the_controller->seqnum));
45216 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
45217 return;
45218 }
45219diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
45220index 8417c2f..ef5ebd6 100644
45221--- a/drivers/staging/vt6655/hostap.c
45222+++ b/drivers/staging/vt6655/hostap.c
45223@@ -69,14 +69,13 @@ static int msglevel = MSG_LEVEL_INFO;
45224 *
45225 */
45226
45227+static net_device_ops_no_const apdev_netdev_ops;
45228+
45229 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
45230 {
45231 PSDevice apdev_priv;
45232 struct net_device *dev = pDevice->dev;
45233 int ret;
45234- const struct net_device_ops apdev_netdev_ops = {
45235- .ndo_start_xmit = pDevice->tx_80211,
45236- };
45237
45238 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
45239
45240@@ -88,6 +87,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
45241 *apdev_priv = *pDevice;
45242 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
45243
45244+ /* only half broken now */
45245+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
45246 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
45247
45248 pDevice->apdev->type = ARPHRD_IEEE80211;
45249diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
45250index c699a30..b90a5fd 100644
45251--- a/drivers/staging/vt6656/hostap.c
45252+++ b/drivers/staging/vt6656/hostap.c
45253@@ -60,14 +60,13 @@ static int msglevel =MSG_LEVEL_INFO;
45254 *
45255 */
45256
45257+static net_device_ops_no_const apdev_netdev_ops;
45258+
45259 static int hostap_enable_hostapd(struct vnt_private *pDevice, int rtnl_locked)
45260 {
45261 struct vnt_private *apdev_priv;
45262 struct net_device *dev = pDevice->dev;
45263 int ret;
45264- const struct net_device_ops apdev_netdev_ops = {
45265- .ndo_start_xmit = pDevice->tx_80211,
45266- };
45267
45268 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
45269
45270@@ -79,6 +78,8 @@ static int hostap_enable_hostapd(struct vnt_private *pDevice, int rtnl_locked)
45271 *apdev_priv = *pDevice;
45272 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
45273
45274+ /* only half broken now */
45275+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
45276 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
45277
45278 pDevice->apdev->type = ARPHRD_IEEE80211;
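
The two hostap hunks trade one bug for a smaller one, as their own "only half broken now" comment admits: the old code stored the address of an ops table that was a local variable, a pointer that dangled as soon as the function returned, while the replacement is a single static (writable, hence net_device_ops_no_const) shared by every device, so concurrent AP devices would overwrite each other's ndo_start_xmit. A sketch of the lifetime issue, names illustrative:

    /* Sketch: the dangling-ops bug versus the shared-static fix. */
    #include <stdio.h>

    struct net_device_ops_like { int (*ndo_start_xmit)(void); };
    struct net_device_like { const struct net_device_ops_like *netdev_ops; };

    static struct net_device_ops_like apdev_netdev_ops; /* the fix: static */

    static int tx_80211(void) { return 42; }

    static void enable_hostapd(struct net_device_like *apdev)
    {
        /* before:
         *   const struct net_device_ops_like ops = { .ndo_start_xmit = ... };
         *   apdev->netdev_ops = &ops;    <- dangles once this returns
         */
        apdev_netdev_ops.ndo_start_xmit = tx_80211;
        apdev->netdev_ops = &apdev_netdev_ops;  /* valid for program lifetime */
    }

    int main(void)
    {
        struct net_device_like apdev;
        enable_hostapd(&apdev);
        printf("xmit -> %d\n", apdev.netdev_ops->ndo_start_xmit());
        return 0;
    }
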
45279diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
45280index d7e51e4..d07eaab 100644
45281--- a/drivers/staging/zcache/tmem.c
45282+++ b/drivers/staging/zcache/tmem.c
45283@@ -51,7 +51,7 @@
45284 * A tmem host implementation must use this function to register callbacks
45285 * for memory allocation.
45286 */
45287-static struct tmem_hostops tmem_hostops;
45288+static tmem_hostops_no_const tmem_hostops;
45289
45290 static void tmem_objnode_tree_init(void);
45291
45292@@ -65,7 +65,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
45293 * A tmem host implementation must use this function to register
45294 * callbacks for a page-accessible memory (PAM) implementation.
45295 */
45296-static struct tmem_pamops tmem_pamops;
45297+static tmem_pamops_no_const tmem_pamops;
45298
45299 void tmem_register_pamops(struct tmem_pamops *m)
45300 {
45301diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
45302index d128ce2..a43980c 100644
45303--- a/drivers/staging/zcache/tmem.h
45304+++ b/drivers/staging/zcache/tmem.h
45305@@ -226,6 +226,7 @@ struct tmem_pamops {
45306 int (*replace_in_obj)(void *, struct tmem_obj *);
45307 #endif
45308 };
45309+typedef struct tmem_pamops __no_const tmem_pamops_no_const;
45310 extern void tmem_register_pamops(struct tmem_pamops *m);
45311
45312 /* memory allocation methods provided by the host implementation */
45313@@ -235,6 +236,7 @@ struct tmem_hostops {
45314 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
45315 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
45316 };
45317+typedef struct tmem_hostops __no_const tmem_hostops_no_const;
45318 extern void tmem_register_hostops(struct tmem_hostops *m);
45319
45320 /* core tmem accessor functions */
45321diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
45322index 4630481..c26782a 100644
45323--- a/drivers/target/target_core_device.c
45324+++ b/drivers/target/target_core_device.c
45325@@ -1400,7 +1400,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
45326 spin_lock_init(&dev->se_port_lock);
45327 spin_lock_init(&dev->se_tmr_lock);
45328 spin_lock_init(&dev->qf_cmd_lock);
45329- atomic_set(&dev->dev_ordered_id, 0);
45330+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
45331 INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
45332 spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
45333 INIT_LIST_HEAD(&dev->t10_pr.registration_list);
45334diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
45335index 21e3158..43c6004 100644
45336--- a/drivers/target/target_core_transport.c
45337+++ b/drivers/target/target_core_transport.c
45338@@ -1080,7 +1080,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
45339 * Used to determine when ORDERED commands should go from
45340 * Dormant to Active status.
45341 */
45342- cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
45343+ cmd->se_ordered_id = atomic_inc_return_unchecked(&dev->dev_ordered_id);
45344 smp_mb__after_atomic_inc();
45345 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
45346 cmd->se_ordered_id, cmd->sam_task_attr,
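
dev_ordered_id is a sequence number, not a reference count, and is expected to wrap. With PaX REFCOUNT enabled, ordinary atomic_t arithmetic saturates and reports on overflow to defeat refcount-overflow exploits, which would turn this legitimate wraparound into a false positive; hence the switch to atomic_unchecked_t and the *_unchecked accessors. A short sketch of the distinction, where next_ordered_id is a hypothetical helper:

static atomic_unchecked_t ordered_id = ATOMIC_INIT(0);

static u32 next_ordered_id(void)
{
	/* may pass INT_MAX and wrap without tripping the REFCOUNT handler */
	return atomic_inc_return_unchecked(&ordered_id);
}
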
45347diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
45348index 33f83fe..d80f8e1 100644
45349--- a/drivers/tty/cyclades.c
45350+++ b/drivers/tty/cyclades.c
45351@@ -1570,10 +1570,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
45352 printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
45353 info->port.count);
45354 #endif
45355- info->port.count++;
45356+ atomic_inc(&info->port.count);
45357 #ifdef CY_DEBUG_COUNT
45358 printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
45359- current->pid, info->port.count);
45360+ current->pid, atomic_read(&info->port.count));
45361 #endif
45362
45363 /*
45364@@ -3972,7 +3972,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
45365 for (j = 0; j < cy_card[i].nports; j++) {
45366 info = &cy_card[i].ports[j];
45367
45368- if (info->port.count) {
45369+ if (atomic_read(&info->port.count)) {
45370 /* XXX is the ldisc num worth this? */
45371 struct tty_struct *tty;
45372 struct tty_ldisc *ld;
45373diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
45374index eb255e8..f637a57 100644
45375--- a/drivers/tty/hvc/hvc_console.c
45376+++ b/drivers/tty/hvc/hvc_console.c
45377@@ -338,7 +338,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
45378
45379 spin_lock_irqsave(&hp->port.lock, flags);
45380 /* Check and then increment for fast path open. */
45381- if (hp->port.count++ > 0) {
45382+ if (atomic_inc_return(&hp->port.count) > 1) {
45383 spin_unlock_irqrestore(&hp->port.lock, flags);
45384 hvc_kick();
45385 return 0;
45386@@ -388,7 +388,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
45387
45388 spin_lock_irqsave(&hp->port.lock, flags);
45389
45390- if (--hp->port.count == 0) {
45391+ if (atomic_dec_return(&hp->port.count) == 0) {
45392 spin_unlock_irqrestore(&hp->port.lock, flags);
45393 /* We are done with the tty pointer now. */
45394 tty_port_tty_set(&hp->port, NULL);
45395@@ -406,9 +406,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
45396 */
45397 tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
45398 } else {
45399- if (hp->port.count < 0)
45400+ if (atomic_read(&hp->port.count) < 0)
45401 printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
45402- hp->vtermno, hp->port.count);
45403+ hp->vtermno, atomic_read(&hp->port.count));
45404 spin_unlock_irqrestore(&hp->port.lock, flags);
45405 }
45406 }
45407@@ -438,12 +438,12 @@ static void hvc_hangup(struct tty_struct *tty)
45408 * open->hangup case this can be called after the final close so prevent
45409 * that from happening for now.
45410 */
45411- if (hp->port.count <= 0) {
45412+ if (atomic_read(&hp->port.count) <= 0) {
45413 spin_unlock_irqrestore(&hp->port.lock, flags);
45414 return;
45415 }
45416
45417- hp->port.count = 0;
45418+ atomic_set(&hp->port.count, 0);
45419 spin_unlock_irqrestore(&hp->port.lock, flags);
45420 tty_port_tty_set(&hp->port, NULL);
45421
45422@@ -491,7 +491,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
45423 return -EPIPE;
45424
45425 /* FIXME what's this (unprotected) check for? */
45426- if (hp->port.count <= 0)
45427+ if (atomic_read(&hp->port.count) <= 0)
45428 return -EIO;
45429
45430 spin_lock_irqsave(&hp->lock, flags);
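
The hvc hunks convert tty_port.count from a plain int to an atomic_t and rewrite each access so the semantics are preserved: a post-increment test of the old value becomes a test of atomic_inc_return()'s new value with the threshold shifted by one. The same two equivalences recur throughout the tty conversions below; a sketch with hypothetical helpers:

static int port_first_open(atomic_t *count)
{
	/* old form: return count++ == 0;  (tests the value before ++) */
	return atomic_inc_return(count) == 1;	/* returns the value after ++ */
}

static int port_last_close(atomic_t *count)
{
	/* old form: return --count == 0;  (pre-decrement, so no shift) */
	return atomic_dec_return(count) == 0;
}
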
45431diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
45432index 81e939e..95ead10 100644
45433--- a/drivers/tty/hvc/hvcs.c
45434+++ b/drivers/tty/hvc/hvcs.c
45435@@ -83,6 +83,7 @@
45436 #include <asm/hvcserver.h>
45437 #include <asm/uaccess.h>
45438 #include <asm/vio.h>
45439+#include <asm/local.h>
45440
45441 /*
45442 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
45443@@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
45444
45445 spin_lock_irqsave(&hvcsd->lock, flags);
45446
45447- if (hvcsd->port.count > 0) {
45448+ if (atomic_read(&hvcsd->port.count) > 0) {
45449 spin_unlock_irqrestore(&hvcsd->lock, flags);
45450 printk(KERN_INFO "HVCS: vterm state unchanged. "
45451 "The hvcs device node is still in use.\n");
45452@@ -1127,7 +1128,7 @@ static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty)
45453 }
45454 }
45455
45456- hvcsd->port.count = 0;
45457+ atomic_set(&hvcsd->port.count, 0);
45458 hvcsd->port.tty = tty;
45459 tty->driver_data = hvcsd;
45460
45461@@ -1180,7 +1181,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
45462 unsigned long flags;
45463
45464 spin_lock_irqsave(&hvcsd->lock, flags);
45465- hvcsd->port.count++;
45466+ atomic_inc(&hvcsd->port.count);
45467 hvcsd->todo_mask |= HVCS_SCHED_READ;
45468 spin_unlock_irqrestore(&hvcsd->lock, flags);
45469
45470@@ -1216,7 +1217,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
45471 hvcsd = tty->driver_data;
45472
45473 spin_lock_irqsave(&hvcsd->lock, flags);
45474- if (--hvcsd->port.count == 0) {
45475+ if (atomic_dec_and_test(&hvcsd->port.count)) {
45476
45477 vio_disable_interrupts(hvcsd->vdev);
45478
45479@@ -1241,10 +1242,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
45480
45481 free_irq(irq, hvcsd);
45482 return;
45483- } else if (hvcsd->port.count < 0) {
45484+ } else if (atomic_read(&hvcsd->port.count) < 0) {
45485 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
45486 " is missmanaged.\n",
45487- hvcsd->vdev->unit_address, hvcsd->port.count);
45488+ hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
45489 }
45490
45491 spin_unlock_irqrestore(&hvcsd->lock, flags);
45492@@ -1266,7 +1267,7 @@ static void hvcs_hangup(struct tty_struct * tty)
45493
45494 spin_lock_irqsave(&hvcsd->lock, flags);
45495 /* Preserve this so that we know how many kref refs to put */
45496- temp_open_count = hvcsd->port.count;
45497+ temp_open_count = atomic_read(&hvcsd->port.count);
45498
45499 /*
45500 * Don't kref put inside the spinlock because the destruction
45501@@ -1281,7 +1282,7 @@ static void hvcs_hangup(struct tty_struct * tty)
45502 tty->driver_data = NULL;
45503 hvcsd->port.tty = NULL;
45504
45505- hvcsd->port.count = 0;
45506+ atomic_set(&hvcsd->port.count, 0);
45507
45508 /* This will drop any buffered data on the floor which is OK in a hangup
45509 * scenario. */
45510@@ -1352,7 +1353,7 @@ static int hvcs_write(struct tty_struct *tty,
45511 * the middle of a write operation? This is a crummy place to do this
45512 * but we want to keep it all in the spinlock.
45513 */
45514- if (hvcsd->port.count <= 0) {
45515+ if (atomic_read(&hvcsd->port.count) <= 0) {
45516 spin_unlock_irqrestore(&hvcsd->lock, flags);
45517 return -ENODEV;
45518 }
45519@@ -1426,7 +1427,7 @@ static int hvcs_write_room(struct tty_struct *tty)
45520 {
45521 struct hvcs_struct *hvcsd = tty->driver_data;
45522
45523- if (!hvcsd || hvcsd->port.count <= 0)
45524+ if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
45525 return 0;
45526
45527 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
45528diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
45529index 8fd72ff..34a0bed 100644
45530--- a/drivers/tty/ipwireless/tty.c
45531+++ b/drivers/tty/ipwireless/tty.c
45532@@ -29,6 +29,7 @@
45533 #include <linux/tty_driver.h>
45534 #include <linux/tty_flip.h>
45535 #include <linux/uaccess.h>
45536+#include <asm/local.h>
45537
45538 #include "tty.h"
45539 #include "network.h"
45540@@ -99,10 +100,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
45541 mutex_unlock(&tty->ipw_tty_mutex);
45542 return -ENODEV;
45543 }
45544- if (tty->port.count == 0)
45545+ if (atomic_read(&tty->port.count) == 0)
45546 tty->tx_bytes_queued = 0;
45547
45548- tty->port.count++;
45549+ atomic_inc(&tty->port.count);
45550
45551 tty->port.tty = linux_tty;
45552 linux_tty->driver_data = tty;
45553@@ -118,9 +119,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
45554
45555 static void do_ipw_close(struct ipw_tty *tty)
45556 {
45557- tty->port.count--;
45558-
45559- if (tty->port.count == 0) {
45560+ if (atomic_dec_return(&tty->port.count) == 0) {
45561 struct tty_struct *linux_tty = tty->port.tty;
45562
45563 if (linux_tty != NULL) {
45564@@ -141,7 +140,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
45565 return;
45566
45567 mutex_lock(&tty->ipw_tty_mutex);
45568- if (tty->port.count == 0) {
45569+ if (atomic_read(&tty->port.count) == 0) {
45570 mutex_unlock(&tty->ipw_tty_mutex);
45571 return;
45572 }
45573@@ -164,7 +163,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
45574
45575 mutex_lock(&tty->ipw_tty_mutex);
45576
45577- if (!tty->port.count) {
45578+ if (!atomic_read(&tty->port.count)) {
45579 mutex_unlock(&tty->ipw_tty_mutex);
45580 return;
45581 }
45582@@ -206,7 +205,7 @@ static int ipw_write(struct tty_struct *linux_tty,
45583 return -ENODEV;
45584
45585 mutex_lock(&tty->ipw_tty_mutex);
45586- if (!tty->port.count) {
45587+ if (!atomic_read(&tty->port.count)) {
45588 mutex_unlock(&tty->ipw_tty_mutex);
45589 return -EINVAL;
45590 }
45591@@ -246,7 +245,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
45592 if (!tty)
45593 return -ENODEV;
45594
45595- if (!tty->port.count)
45596+ if (!atomic_read(&tty->port.count))
45597 return -EINVAL;
45598
45599 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
45600@@ -288,7 +287,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
45601 if (!tty)
45602 return 0;
45603
45604- if (!tty->port.count)
45605+ if (!atomic_read(&tty->port.count))
45606 return 0;
45607
45608 return tty->tx_bytes_queued;
45609@@ -369,7 +368,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
45610 if (!tty)
45611 return -ENODEV;
45612
45613- if (!tty->port.count)
45614+ if (!atomic_read(&tty->port.count))
45615 return -EINVAL;
45616
45617 return get_control_lines(tty);
45618@@ -385,7 +384,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
45619 if (!tty)
45620 return -ENODEV;
45621
45622- if (!tty->port.count)
45623+ if (!atomic_read(&tty->port.count))
45624 return -EINVAL;
45625
45626 return set_control_lines(tty, set, clear);
45627@@ -399,7 +398,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
45628 if (!tty)
45629 return -ENODEV;
45630
45631- if (!tty->port.count)
45632+ if (!atomic_read(&tty->port.count))
45633 return -EINVAL;
45634
45635 /* FIXME: Exactly how is the tty object locked here .. */
45636@@ -555,7 +554,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
45637 * are gone */
45638 mutex_lock(&ttyj->ipw_tty_mutex);
45639 }
45640- while (ttyj->port.count)
45641+ while (atomic_read(&ttyj->port.count))
45642 do_ipw_close(ttyj);
45643 ipwireless_disassociate_network_ttys(network,
45644 ttyj->channel_idx);
45645diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
45646index 1deaca4..c8582d4 100644
45647--- a/drivers/tty/moxa.c
45648+++ b/drivers/tty/moxa.c
45649@@ -1189,7 +1189,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
45650 }
45651
45652 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
45653- ch->port.count++;
45654+ atomic_inc(&ch->port.count);
45655 tty->driver_data = ch;
45656 tty_port_tty_set(&ch->port, tty);
45657 mutex_lock(&ch->port.mutex);
45658diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
45659index 6422390..49003ac8 100644
45660--- a/drivers/tty/n_gsm.c
45661+++ b/drivers/tty/n_gsm.c
45662@@ -1632,7 +1632,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
45663 spin_lock_init(&dlci->lock);
45664 mutex_init(&dlci->mutex);
45665 dlci->fifo = &dlci->_fifo;
45666- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
45667+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
45668 kfree(dlci);
45669 return NULL;
45670 }
45671@@ -2932,7 +2932,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
45672 struct gsm_dlci *dlci = tty->driver_data;
45673 struct tty_port *port = &dlci->port;
45674
45675- port->count++;
45676+ atomic_inc(&port->count);
45677 dlci_get(dlci);
45678 dlci_get(dlci->gsm->dlci[0]);
45679 mux_get(dlci->gsm);
45680diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
45681index 6c7fe90..9241dab 100644
45682--- a/drivers/tty/n_tty.c
45683+++ b/drivers/tty/n_tty.c
45684@@ -2203,6 +2203,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
45685 {
45686 *ops = tty_ldisc_N_TTY;
45687 ops->owner = NULL;
45688- ops->refcount = ops->flags = 0;
45689+ atomic_set(&ops->refcount, 0);
45690+ ops->flags = 0;
45691 }
45692 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
45693diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
45694index abfd990..5ab5da9 100644
45695--- a/drivers/tty/pty.c
45696+++ b/drivers/tty/pty.c
45697@@ -796,8 +796,10 @@ static void __init unix98_pty_init(void)
45698 panic("Couldn't register Unix98 pts driver");
45699
45700 /* Now create the /dev/ptmx special device */
45701+ pax_open_kernel();
45702 tty_default_fops(&ptmx_fops);
45703- ptmx_fops.open = ptmx_open;
45704+ *(void **)&ptmx_fops.open = ptmx_open;
45705+ pax_close_kernel();
45706
45707 cdev_init(&ptmx_cdev, &ptmx_fops);
45708 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
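
With the constify/KERNEXEC plugins, ptmx_fops is effectively read-only, so the one legitimate write at init time is bracketed by pax_open_kernel()/pax_close_kernel() and the member store goes through a void-pointer cast to defeat the const qualifier; the companion tty_io.c hunk further down turns tty_default_fops() into a memcpy for the same reason. A sketch of the idiom (install_open is a hypothetical helper; the open/close pair is arch-specific and, roughly, toggles write protection on kernel pages):

static void install_open(struct file_operations *fops,
			 int (*open)(struct inode *, struct file *))
{
	pax_open_kernel();
	*(void **)&fops->open = open;	/* cast strips the plugin-added const */
	pax_close_kernel();
}
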
45709diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
45710index 354564e..fe50d9a 100644
45711--- a/drivers/tty/rocket.c
45712+++ b/drivers/tty/rocket.c
45713@@ -914,7 +914,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
45714 tty->driver_data = info;
45715 tty_port_tty_set(port, tty);
45716
45717- if (port->count++ == 0) {
45718+ if (atomic_inc_return(&port->count) == 1) {
45719 atomic_inc(&rp_num_ports_open);
45720
45721 #ifdef ROCKET_DEBUG_OPEN
45722@@ -923,7 +923,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
45723 #endif
45724 }
45725 #ifdef ROCKET_DEBUG_OPEN
45726- printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
45727+ printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic-read(&info->port.count));
45728 #endif
45729
45730 /*
45731@@ -1515,7 +1515,7 @@ static void rp_hangup(struct tty_struct *tty)
45732 spin_unlock_irqrestore(&info->port.lock, flags);
45733 return;
45734 }
45735- if (info->port.count)
45736+ if (atomic_read(&info->port.count))
45737 atomic_dec(&rp_num_ports_open);
45738 clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
45739 spin_unlock_irqrestore(&info->port.lock, flags);
45740diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
45741index 1002054..dd644a8 100644
45742--- a/drivers/tty/serial/kgdboc.c
45743+++ b/drivers/tty/serial/kgdboc.c
45744@@ -24,8 +24,9 @@
45745 #define MAX_CONFIG_LEN 40
45746
45747 static struct kgdb_io kgdboc_io_ops;
45748+static struct kgdb_io kgdboc_io_ops_console;
45749
45750-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
45751+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
45752 static int configured = -1;
45753
45754 static char config[MAX_CONFIG_LEN];
45755@@ -151,6 +152,8 @@ static void cleanup_kgdboc(void)
45756 kgdboc_unregister_kbd();
45757 if (configured == 1)
45758 kgdb_unregister_io_module(&kgdboc_io_ops);
45759+ else if (configured == 2)
45760+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
45761 }
45762
45763 static int configure_kgdboc(void)
45764@@ -160,13 +163,13 @@ static int configure_kgdboc(void)
45765 int err;
45766 char *cptr = config;
45767 struct console *cons;
45768+ int is_console = 0;
45769
45770 err = kgdboc_option_setup(config);
45771 if (err || !strlen(config) || isspace(config[0]))
45772 goto noconfig;
45773
45774 err = -ENODEV;
45775- kgdboc_io_ops.is_console = 0;
45776 kgdb_tty_driver = NULL;
45777
45778 kgdboc_use_kms = 0;
45779@@ -187,7 +190,7 @@ static int configure_kgdboc(void)
45780 int idx;
45781 if (cons->device && cons->device(cons, &idx) == p &&
45782 idx == tty_line) {
45783- kgdboc_io_ops.is_console = 1;
45784+ is_console = 1;
45785 break;
45786 }
45787 cons = cons->next;
45788@@ -197,7 +200,13 @@ static int configure_kgdboc(void)
45789 kgdb_tty_line = tty_line;
45790
45791 do_register:
45792- err = kgdb_register_io_module(&kgdboc_io_ops);
45793+ if (is_console) {
45794+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
45795+ configured = 2;
45796+ } else {
45797+ err = kgdb_register_io_module(&kgdboc_io_ops);
45798+ configured = 1;
45799+ }
45800 if (err)
45801 goto noconfig;
45802
45803@@ -205,8 +214,6 @@ do_register:
45804 if (err)
45805 goto nmi_con_failed;
45806
45807- configured = 1;
45808-
45809 return 0;
45810
45811 nmi_con_failed:
45812@@ -223,7 +230,7 @@ noconfig:
45813 static int __init init_kgdboc(void)
45814 {
45815 /* Already configured? */
45816- if (configured == 1)
45817+ if (configured >= 1)
45818 return 0;
45819
45820 return configure_kgdboc();
45821@@ -272,7 +279,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
45822 if (config[len - 1] == '\n')
45823 config[len - 1] = '\0';
45824
45825- if (configured == 1)
45826+ if (configured >= 1)
45827 cleanup_kgdboc();
45828
45829 /* Go and configure with the new params. */
45830@@ -312,6 +319,15 @@ static struct kgdb_io kgdboc_io_ops = {
45831 .post_exception = kgdboc_post_exp_handler,
45832 };
45833
45834+static struct kgdb_io kgdboc_io_ops_console = {
45835+ .name = "kgdboc",
45836+ .read_char = kgdboc_get_char,
45837+ .write_char = kgdboc_put_char,
45838+ .pre_exception = kgdboc_pre_exp_handler,
45839+ .post_exception = kgdboc_post_exp_handler,
45840+ .is_console = 1
45841+};
45842+
45843 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
45844 /* This is only available if kgdboc is a built in for early debugging */
45845 static int __init kgdboc_early_init(char *opt)
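
Because struct kgdb_io is constified, kgdboc can no longer flip kgdboc_io_ops.is_console at run time; the patch instead pre-instantiates a second, console-flavored ops table and lets configured take the value 1 or 2 so cleanup_kgdboc() unregisters whichever table was registered. The ehci-dbgp hunks later in this section apply the same pattern. Its general shape, sketched with placeholder handlers:

static int example_get_char(void);
static void example_put_char(u8 chr);

static struct kgdb_io io_plain = {
	.name		= "example",
	.read_char	= example_get_char,
	.write_char	= example_put_char,
};

static struct kgdb_io io_console = {
	.name		= "example",
	.read_char	= example_get_char,
	.write_char	= example_put_char,
	.is_console	= 1,		/* the only field that differs */
};

static int register_example(int is_console)
{
	return kgdb_register_io_module(is_console ? &io_console : &io_plain);
}
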
45846diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
45847index 0c8a9fa..234a95f 100644
45848--- a/drivers/tty/serial/samsung.c
45849+++ b/drivers/tty/serial/samsung.c
45850@@ -453,11 +453,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
45851 }
45852 }
45853
45854+static int s3c64xx_serial_startup(struct uart_port *port);
45855 static int s3c24xx_serial_startup(struct uart_port *port)
45856 {
45857 struct s3c24xx_uart_port *ourport = to_ourport(port);
45858 int ret;
45859
45860+ /* Startup sequence is different for s3c64xx and higher SoC's */
45861+ if (s3c24xx_serial_has_interrupt_mask(port))
45862+ return s3c64xx_serial_startup(port);
45863+
45864 dbg("s3c24xx_serial_startup: port=%p (%08lx,%p)\n",
45865 port->mapbase, port->membase);
45866
45867@@ -1124,10 +1129,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
45868 /* setup info for port */
45869 port->dev = &platdev->dev;
45870
45871- /* Startup sequence is different for s3c64xx and higher SoC's */
45872- if (s3c24xx_serial_has_interrupt_mask(port))
45873- s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
45874-
45875 port->uartclk = 1;
45876
45877 if (cfg->uart_flags & UPF_CONS_FLOW) {
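
s3c24xx_serial_init_port() used to patch s3c24xx_serial_ops.startup in place when it detected an s3c64xx-class port; with the uart_ops table now read-only, that decision moves into s3c24xx_serial_startup() itself, which tail-calls the s3c64xx variant (forward-declared above). The cost is one predictable branch per startup call instead of a writable function-pointer table. Sketched, with the legacy body elided:

static int s3c24xx_serial_startup(struct uart_port *port)
{
	if (s3c24xx_serial_has_interrupt_mask(port))	/* s3c64xx and newer */
		return s3c64xx_serial_startup(port);
	/* ... legacy s3c24xx start-up path continues here ... */
	return 0;
}
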
45878diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
45879index f87dbfd..42ad4b1 100644
45880--- a/drivers/tty/serial/serial_core.c
45881+++ b/drivers/tty/serial/serial_core.c
45882@@ -1454,7 +1454,7 @@ static void uart_hangup(struct tty_struct *tty)
45883 uart_flush_buffer(tty);
45884 uart_shutdown(tty, state);
45885 spin_lock_irqsave(&port->lock, flags);
45886- port->count = 0;
45887+ atomic_set(&port->count, 0);
45888 clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
45889 spin_unlock_irqrestore(&port->lock, flags);
45890 tty_port_tty_set(port, NULL);
45891@@ -1550,7 +1550,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
45892 goto end;
45893 }
45894
45895- port->count++;
45896+ atomic_inc(&port->count);
45897 if (!state->uart_port || state->uart_port->flags & UPF_DEAD) {
45898 retval = -ENXIO;
45899 goto err_dec_count;
45900@@ -1578,7 +1578,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
45901 /*
45902 * Make sure the device is in D0 state.
45903 */
45904- if (port->count == 1)
45905+ if (atomic_read(&port->count) == 1)
45906 uart_change_pm(state, UART_PM_STATE_ON);
45907
45908 /*
45909@@ -1596,7 +1596,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
45910 end:
45911 return retval;
45912 err_dec_count:
45913- port->count--;
45914+ atomic_dec(&port->count);
45915 mutex_unlock(&port->mutex);
45916 goto end;
45917 }
45918diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
45919index 8eaf1ab..85c030d 100644
45920--- a/drivers/tty/synclink.c
45921+++ b/drivers/tty/synclink.c
45922@@ -3090,7 +3090,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
45923
45924 if (debug_level >= DEBUG_LEVEL_INFO)
45925 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
45926- __FILE__,__LINE__, info->device_name, info->port.count);
45927+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
45928
45929 if (tty_port_close_start(&info->port, tty, filp) == 0)
45930 goto cleanup;
45931@@ -3108,7 +3108,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
45932 cleanup:
45933 if (debug_level >= DEBUG_LEVEL_INFO)
45934 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
45935- tty->driver->name, info->port.count);
45936+ tty->driver->name, atomic_read(&info->port.count));
45937
45938 } /* end of mgsl_close() */
45939
45940@@ -3207,8 +3207,8 @@ static void mgsl_hangup(struct tty_struct *tty)
45941
45942 mgsl_flush_buffer(tty);
45943 shutdown(info);
45944-
45945- info->port.count = 0;
45946+
45947+ atomic_set(&info->port.count, 0);
45948 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
45949 info->port.tty = NULL;
45950
45951@@ -3297,12 +3297,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
45952
45953 if (debug_level >= DEBUG_LEVEL_INFO)
45954 printk("%s(%d):block_til_ready before block on %s count=%d\n",
45955- __FILE__,__LINE__, tty->driver->name, port->count );
45956+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
45957
45958 spin_lock_irqsave(&info->irq_spinlock, flags);
45959 if (!tty_hung_up_p(filp)) {
45960 extra_count = true;
45961- port->count--;
45962+ atomic_dec(&port->count);
45963 }
45964 spin_unlock_irqrestore(&info->irq_spinlock, flags);
45965 port->blocked_open++;
45966@@ -3331,7 +3331,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
45967
45968 if (debug_level >= DEBUG_LEVEL_INFO)
45969 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
45970- __FILE__,__LINE__, tty->driver->name, port->count );
45971+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
45972
45973 tty_unlock(tty);
45974 schedule();
45975@@ -3343,12 +3343,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
45976
45977 /* FIXME: Racy on hangup during close wait */
45978 if (extra_count)
45979- port->count++;
45980+ atomic_inc(&port->count);
45981 port->blocked_open--;
45982
45983 if (debug_level >= DEBUG_LEVEL_INFO)
45984 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
45985- __FILE__,__LINE__, tty->driver->name, port->count );
45986+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
45987
45988 if (!retval)
45989 port->flags |= ASYNC_NORMAL_ACTIVE;
45990@@ -3400,7 +3400,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
45991
45992 if (debug_level >= DEBUG_LEVEL_INFO)
45993 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
45994- __FILE__,__LINE__,tty->driver->name, info->port.count);
45995+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
45996
45997 /* If port is closing, signal caller to try again */
45998 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
45999@@ -3419,10 +3419,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
46000 spin_unlock_irqrestore(&info->netlock, flags);
46001 goto cleanup;
46002 }
46003- info->port.count++;
46004+ atomic_inc(&info->port.count);
46005 spin_unlock_irqrestore(&info->netlock, flags);
46006
46007- if (info->port.count == 1) {
46008+ if (atomic_read(&info->port.count) == 1) {
46009 /* 1st open on this device, init hardware */
46010 retval = startup(info);
46011 if (retval < 0)
46012@@ -3446,8 +3446,8 @@ cleanup:
46013 if (retval) {
46014 if (tty->count == 1)
46015 info->port.tty = NULL; /* tty layer will release tty struct */
46016- if(info->port.count)
46017- info->port.count--;
46018+ if (atomic_read(&info->port.count))
46019+ atomic_dec(&info->port.count);
46020 }
46021
46022 return retval;
46023@@ -7665,7 +7665,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
46024 unsigned short new_crctype;
46025
46026 /* return error if TTY interface open */
46027- if (info->port.count)
46028+ if (atomic_read(&info->port.count))
46029 return -EBUSY;
46030
46031 switch (encoding)
46032@@ -7760,7 +7760,7 @@ static int hdlcdev_open(struct net_device *dev)
46033
46034 /* arbitrate between network and tty opens */
46035 spin_lock_irqsave(&info->netlock, flags);
46036- if (info->port.count != 0 || info->netcount != 0) {
46037+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
46038 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
46039 spin_unlock_irqrestore(&info->netlock, flags);
46040 return -EBUSY;
46041@@ -7846,7 +7846,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
46042 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
46043
46044 /* return error if TTY interface open */
46045- if (info->port.count)
46046+ if (atomic_read(&info->port.count))
46047 return -EBUSY;
46048
46049 if (cmd != SIOCWANDEV)
46050diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
46051index 1abf946..1ee34fc 100644
46052--- a/drivers/tty/synclink_gt.c
46053+++ b/drivers/tty/synclink_gt.c
46054@@ -670,7 +670,7 @@ static int open(struct tty_struct *tty, struct file *filp)
46055 tty->driver_data = info;
46056 info->port.tty = tty;
46057
46058- DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
46059+ DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
46060
46061 /* If port is closing, signal caller to try again */
46062 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
46063@@ -691,10 +691,10 @@ static int open(struct tty_struct *tty, struct file *filp)
46064 mutex_unlock(&info->port.mutex);
46065 goto cleanup;
46066 }
46067- info->port.count++;
46068+ atomic_inc(&info->port.count);
46069 spin_unlock_irqrestore(&info->netlock, flags);
46070
46071- if (info->port.count == 1) {
46072+ if (atomic_read(&info->port.count) == 1) {
46073 /* 1st open on this device, init hardware */
46074 retval = startup(info);
46075 if (retval < 0) {
46076@@ -715,8 +715,8 @@ cleanup:
46077 if (retval) {
46078 if (tty->count == 1)
46079 info->port.tty = NULL; /* tty layer will release tty struct */
46080- if(info->port.count)
46081- info->port.count--;
46082+ if(atomic_read(&info->port.count))
46083+ atomic_dec(&info->port.count);
46084 }
46085
46086 DBGINFO(("%s open rc=%d\n", info->device_name, retval));
46087@@ -729,7 +729,7 @@ static void close(struct tty_struct *tty, struct file *filp)
46088
46089 if (sanity_check(info, tty->name, "close"))
46090 return;
46091- DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
46092+ DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
46093
46094 if (tty_port_close_start(&info->port, tty, filp) == 0)
46095 goto cleanup;
46096@@ -746,7 +746,7 @@ static void close(struct tty_struct *tty, struct file *filp)
46097 tty_port_close_end(&info->port, tty);
46098 info->port.tty = NULL;
46099 cleanup:
46100- DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
46101+ DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
46102 }
46103
46104 static void hangup(struct tty_struct *tty)
46105@@ -764,7 +764,7 @@ static void hangup(struct tty_struct *tty)
46106 shutdown(info);
46107
46108 spin_lock_irqsave(&info->port.lock, flags);
46109- info->port.count = 0;
46110+ atomic_set(&info->port.count, 0);
46111 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
46112 info->port.tty = NULL;
46113 spin_unlock_irqrestore(&info->port.lock, flags);
46114@@ -1449,7 +1449,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
46115 unsigned short new_crctype;
46116
46117 /* return error if TTY interface open */
46118- if (info->port.count)
46119+ if (atomic_read(&info->port.count))
46120 return -EBUSY;
46121
46122 DBGINFO(("%s hdlcdev_attach\n", info->device_name));
46123@@ -1544,7 +1544,7 @@ static int hdlcdev_open(struct net_device *dev)
46124
46125 /* arbitrate between network and tty opens */
46126 spin_lock_irqsave(&info->netlock, flags);
46127- if (info->port.count != 0 || info->netcount != 0) {
46128+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
46129 DBGINFO(("%s hdlc_open busy\n", dev->name));
46130 spin_unlock_irqrestore(&info->netlock, flags);
46131 return -EBUSY;
46132@@ -1629,7 +1629,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
46133 DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
46134
46135 /* return error if TTY interface open */
46136- if (info->port.count)
46137+ if (atomic_read(&info->port.count))
46138 return -EBUSY;
46139
46140 if (cmd != SIOCWANDEV)
46141@@ -2413,7 +2413,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
46142 if (port == NULL)
46143 continue;
46144 spin_lock(&port->lock);
46145- if ((port->port.count || port->netcount) &&
46146+ if ((atomic_read(&port->port.count) || port->netcount) &&
46147 port->pending_bh && !port->bh_running &&
46148 !port->bh_requested) {
46149 DBGISR(("%s bh queued\n", port->device_name));
46150@@ -3302,7 +3302,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
46151 spin_lock_irqsave(&info->lock, flags);
46152 if (!tty_hung_up_p(filp)) {
46153 extra_count = true;
46154- port->count--;
46155+ atomic_dec(&port->count);
46156 }
46157 spin_unlock_irqrestore(&info->lock, flags);
46158 port->blocked_open++;
46159@@ -3339,7 +3339,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
46160 remove_wait_queue(&port->open_wait, &wait);
46161
46162 if (extra_count)
46163- port->count++;
46164+ atomic_inc(&port->count);
46165 port->blocked_open--;
46166
46167 if (!retval)
46168diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
46169index ff17138..e38b41e 100644
46170--- a/drivers/tty/synclinkmp.c
46171+++ b/drivers/tty/synclinkmp.c
46172@@ -750,7 +750,7 @@ static int open(struct tty_struct *tty, struct file *filp)
46173
46174 if (debug_level >= DEBUG_LEVEL_INFO)
46175 printk("%s(%d):%s open(), old ref count = %d\n",
46176- __FILE__,__LINE__,tty->driver->name, info->port.count);
46177+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
46178
46179 /* If port is closing, signal caller to try again */
46180 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
46181@@ -769,10 +769,10 @@ static int open(struct tty_struct *tty, struct file *filp)
46182 spin_unlock_irqrestore(&info->netlock, flags);
46183 goto cleanup;
46184 }
46185- info->port.count++;
46186+ atomic_inc(&info->port.count);
46187 spin_unlock_irqrestore(&info->netlock, flags);
46188
46189- if (info->port.count == 1) {
46190+ if (atomic_read(&info->port.count) == 1) {
46191 /* 1st open on this device, init hardware */
46192 retval = startup(info);
46193 if (retval < 0)
46194@@ -796,8 +796,8 @@ cleanup:
46195 if (retval) {
46196 if (tty->count == 1)
46197 info->port.tty = NULL; /* tty layer will release tty struct */
46198- if(info->port.count)
46199- info->port.count--;
46200+ if(atomic_read(&info->port.count))
46201+ atomic_dec(&info->port.count);
46202 }
46203
46204 return retval;
46205@@ -815,7 +815,7 @@ static void close(struct tty_struct *tty, struct file *filp)
46206
46207 if (debug_level >= DEBUG_LEVEL_INFO)
46208 printk("%s(%d):%s close() entry, count=%d\n",
46209- __FILE__,__LINE__, info->device_name, info->port.count);
46210+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
46211
46212 if (tty_port_close_start(&info->port, tty, filp) == 0)
46213 goto cleanup;
46214@@ -834,7 +834,7 @@ static void close(struct tty_struct *tty, struct file *filp)
46215 cleanup:
46216 if (debug_level >= DEBUG_LEVEL_INFO)
46217 printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
46218- tty->driver->name, info->port.count);
46219+ tty->driver->name, atomic_read(&info->port.count));
46220 }
46221
46222 /* Called by tty_hangup() when a hangup is signaled.
46223@@ -857,7 +857,7 @@ static void hangup(struct tty_struct *tty)
46224 shutdown(info);
46225
46226 spin_lock_irqsave(&info->port.lock, flags);
46227- info->port.count = 0;
46228+ atomic_set(&info->port.count, 0);
46229 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
46230 info->port.tty = NULL;
46231 spin_unlock_irqrestore(&info->port.lock, flags);
46232@@ -1565,7 +1565,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
46233 unsigned short new_crctype;
46234
46235 /* return error if TTY interface open */
46236- if (info->port.count)
46237+ if (atomic_read(&info->port.count))
46238 return -EBUSY;
46239
46240 switch (encoding)
46241@@ -1660,7 +1660,7 @@ static int hdlcdev_open(struct net_device *dev)
46242
46243 /* arbitrate between network and tty opens */
46244 spin_lock_irqsave(&info->netlock, flags);
46245- if (info->port.count != 0 || info->netcount != 0) {
46246+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
46247 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
46248 spin_unlock_irqrestore(&info->netlock, flags);
46249 return -EBUSY;
46250@@ -1746,7 +1746,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
46251 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
46252
46253 /* return error if TTY interface open */
46254- if (info->port.count)
46255+ if (atomic_read(&info->port.count))
46256 return -EBUSY;
46257
46258 if (cmd != SIOCWANDEV)
46259@@ -2620,7 +2620,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
46260 * do not request bottom half processing if the
46261 * device is not open in a normal mode.
46262 */
46263- if ( port && (port->port.count || port->netcount) &&
46264+ if ( port && (atomic_read(&port->port.count) || port->netcount) &&
46265 port->pending_bh && !port->bh_running &&
46266 !port->bh_requested ) {
46267 if ( debug_level >= DEBUG_LEVEL_ISR )
46268@@ -3318,12 +3318,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
46269
46270 if (debug_level >= DEBUG_LEVEL_INFO)
46271 printk("%s(%d):%s block_til_ready() before block, count=%d\n",
46272- __FILE__,__LINE__, tty->driver->name, port->count );
46273+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
46274
46275 spin_lock_irqsave(&info->lock, flags);
46276 if (!tty_hung_up_p(filp)) {
46277 extra_count = true;
46278- port->count--;
46279+ atomic_dec(&port->count);
46280 }
46281 spin_unlock_irqrestore(&info->lock, flags);
46282 port->blocked_open++;
46283@@ -3352,7 +3352,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
46284
46285 if (debug_level >= DEBUG_LEVEL_INFO)
46286 printk("%s(%d):%s block_til_ready() count=%d\n",
46287- __FILE__,__LINE__, tty->driver->name, port->count );
46288+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
46289
46290 tty_unlock(tty);
46291 schedule();
46292@@ -3363,12 +3363,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
46293 remove_wait_queue(&port->open_wait, &wait);
46294
46295 if (extra_count)
46296- port->count++;
46297+ atomic_inc(&port->count);
46298 port->blocked_open--;
46299
46300 if (debug_level >= DEBUG_LEVEL_INFO)
46301 printk("%s(%d):%s block_til_ready() after, count=%d\n",
46302- __FILE__,__LINE__, tty->driver->name, port->count );
46303+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
46304
46305 if (!retval)
46306 port->flags |= ASYNC_NORMAL_ACTIVE;
46307diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
46308index b51c154..17d55d1 100644
46309--- a/drivers/tty/sysrq.c
46310+++ b/drivers/tty/sysrq.c
46311@@ -1022,7 +1022,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
46312 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
46313 size_t count, loff_t *ppos)
46314 {
46315- if (count) {
46316+ if (count && capable(CAP_SYS_ADMIN)) {
46317 char c;
46318
46319 if (get_user(c, buf))
46320diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
46321index 4476682..d77e748 100644
46322--- a/drivers/tty/tty_io.c
46323+++ b/drivers/tty/tty_io.c
46324@@ -3466,7 +3466,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
46325
46326 void tty_default_fops(struct file_operations *fops)
46327 {
46328- *fops = tty_fops;
46329+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
46330 }
46331
46332 /*
46333diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
46334index 1afe192..73d2c20 100644
46335--- a/drivers/tty/tty_ldisc.c
46336+++ b/drivers/tty/tty_ldisc.c
46337@@ -66,7 +66,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
46338 raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
46339 tty_ldiscs[disc] = new_ldisc;
46340 new_ldisc->num = disc;
46341- new_ldisc->refcount = 0;
46342+ atomic_set(&new_ldisc->refcount, 0);
46343 raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
46344
46345 return ret;
46346@@ -94,7 +94,7 @@ int tty_unregister_ldisc(int disc)
46347 return -EINVAL;
46348
46349 raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
46350- if (tty_ldiscs[disc]->refcount)
46351+ if (atomic_read(&tty_ldiscs[disc]->refcount))
46352 ret = -EBUSY;
46353 else
46354 tty_ldiscs[disc] = NULL;
46355@@ -115,7 +115,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
46356 if (ldops) {
46357 ret = ERR_PTR(-EAGAIN);
46358 if (try_module_get(ldops->owner)) {
46359- ldops->refcount++;
46360+ atomic_inc(&ldops->refcount);
46361 ret = ldops;
46362 }
46363 }
46364@@ -128,7 +128,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
46365 unsigned long flags;
46366
46367 raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
46368- ldops->refcount--;
46369+ atomic_dec(&ldops->refcount);
46370 module_put(ldops->owner);
46371 raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
46372 }
46373@@ -196,7 +196,7 @@ static inline void tty_ldisc_put(struct tty_ldisc *ld)
46374 /* unreleased reader reference(s) will cause this WARN */
46375 WARN_ON(!atomic_dec_and_test(&ld->users));
46376
46377- ld->ops->refcount--;
46378+ atomic_dec(&ld->ops->refcount);
46379 module_put(ld->ops->owner);
46380 kfree(ld);
46381 raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
46382diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
46383index f597e88..b7f68ed 100644
46384--- a/drivers/tty/tty_port.c
46385+++ b/drivers/tty/tty_port.c
46386@@ -232,7 +232,7 @@ void tty_port_hangup(struct tty_port *port)
46387 unsigned long flags;
46388
46389 spin_lock_irqsave(&port->lock, flags);
46390- port->count = 0;
46391+ atomic_set(&port->count, 0);
46392 port->flags &= ~ASYNC_NORMAL_ACTIVE;
46393 tty = port->tty;
46394 if (tty)
46395@@ -390,7 +390,7 @@ int tty_port_block_til_ready(struct tty_port *port,
46396 /* The port lock protects the port counts */
46397 spin_lock_irqsave(&port->lock, flags);
46398 if (!tty_hung_up_p(filp))
46399- port->count--;
46400+ atomic_dec(&port->count);
46401 port->blocked_open++;
46402 spin_unlock_irqrestore(&port->lock, flags);
46403
46404@@ -432,7 +432,7 @@ int tty_port_block_til_ready(struct tty_port *port,
46405 we must not mess that up further */
46406 spin_lock_irqsave(&port->lock, flags);
46407 if (!tty_hung_up_p(filp))
46408- port->count++;
46409+ atomic_inc(&port->count);
46410 port->blocked_open--;
46411 if (retval == 0)
46412 port->flags |= ASYNC_NORMAL_ACTIVE;
46413@@ -466,19 +466,19 @@ int tty_port_close_start(struct tty_port *port,
46414 return 0;
46415 }
46416
46417- if (tty->count == 1 && port->count != 1) {
46418+ if (tty->count == 1 && atomic_read(&port->count) != 1) {
46419 printk(KERN_WARNING
46420 "tty_port_close_start: tty->count = 1 port count = %d.\n",
46421- port->count);
46422- port->count = 1;
46423+ atomic_read(&port->count));
46424+ atomic_set(&port->count, 1);
46425 }
46426- if (--port->count < 0) {
46427+ if (atomic_dec_return(&port->count) < 0) {
46428 printk(KERN_WARNING "tty_port_close_start: count = %d\n",
46429- port->count);
46430- port->count = 0;
46431+ atomic_read(&port->count));
46432+ atomic_set(&port->count, 0);
46433 }
46434
46435- if (port->count) {
46436+ if (atomic_read(&port->count)) {
46437 spin_unlock_irqrestore(&port->lock, flags);
46438 if (port->ops->drop)
46439 port->ops->drop(port);
46440@@ -564,7 +564,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
46441 {
46442 spin_lock_irq(&port->lock);
46443 if (!tty_hung_up_p(filp))
46444- ++port->count;
46445+ atomic_inc(&port->count);
46446 spin_unlock_irq(&port->lock);
46447 tty_port_tty_set(port, tty);
46448
46449diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
46450index a9af1b9a..1e08e7f 100644
46451--- a/drivers/tty/vt/keyboard.c
46452+++ b/drivers/tty/vt/keyboard.c
46453@@ -647,6 +647,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
46454 kbd->kbdmode == VC_OFF) &&
46455 value != KVAL(K_SAK))
46456 return; /* SAK is allowed even in raw mode */
46457+
46458+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
46459+ {
46460+ void *func = fn_handler[value];
46461+ if (func == fn_show_state || func == fn_show_ptregs ||
46462+ func == fn_show_mem)
46463+ return;
46464+ }
46465+#endif
46466+
46467 fn_handler[value](vc);
46468 }
46469
46470@@ -1795,9 +1805,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
46471 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
46472 return -EFAULT;
46473
46474- if (!capable(CAP_SYS_TTY_CONFIG))
46475- perm = 0;
46476-
46477 switch (cmd) {
46478 case KDGKBENT:
46479 /* Ensure another thread doesn't free it under us */
46480@@ -1812,6 +1819,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
46481 spin_unlock_irqrestore(&kbd_event_lock, flags);
46482 return put_user(val, &user_kbe->kb_value);
46483 case KDSKBENT:
46484+ if (!capable(CAP_SYS_TTY_CONFIG))
46485+ perm = 0;
46486+
46487 if (!perm)
46488 return -EPERM;
46489 if (!i && v == K_NOSUCHMAP) {
46490@@ -1902,9 +1912,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
46491 int i, j, k;
46492 int ret;
46493
46494- if (!capable(CAP_SYS_TTY_CONFIG))
46495- perm = 0;
46496-
46497 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
46498 if (!kbs) {
46499 ret = -ENOMEM;
46500@@ -1938,6 +1945,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
46501 kfree(kbs);
46502 return ((p && *p) ? -EOVERFLOW : 0);
46503 case KDSKBSENT:
46504+ if (!capable(CAP_SYS_TTY_CONFIG))
46505+ perm = 0;
46506+
46507 if (!perm) {
46508 ret = -EPERM;
46509 goto reterr;
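
Two separate hardenings sit in the keyboard.c hunks: the fn_show_state/fn_show_ptregs/fn_show_mem magic-key handlers are suppressed when the GRKERNSEC /proc restrictions are active, since they would leak the very state /proc hides, and the CAP_SYS_TTY_CONFIG demotion of perm moves from the top of the ioctl handlers into the KDSKBENT/KDSKBSENT write cases, so reading key bindings no longer demands the capability while changing them still does. The resulting write gate, sketched as a hypothetical helper:

static int keymap_write_allowed(int perm)
{
	/* writes need both the caller-supplied perm and the capability;
	 * reads are decided on perm alone */
	if (!capable(CAP_SYS_TTY_CONFIG))
		return 0;
	return perm;
}
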
46510diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
46511index b645c47..a55c182 100644
46512--- a/drivers/uio/uio.c
46513+++ b/drivers/uio/uio.c
46514@@ -25,6 +25,7 @@
46515 #include <linux/kobject.h>
46516 #include <linux/cdev.h>
46517 #include <linux/uio_driver.h>
46518+#include <asm/local.h>
46519
46520 #define UIO_MAX_DEVICES (1U << MINORBITS)
46521
46522@@ -32,10 +33,10 @@ struct uio_device {
46523 struct module *owner;
46524 struct device *dev;
46525 int minor;
46526- atomic_t event;
46527+ atomic_unchecked_t event;
46528 struct fasync_struct *async_queue;
46529 wait_queue_head_t wait;
46530- int vma_count;
46531+ local_t vma_count;
46532 struct uio_info *info;
46533 struct kobject *map_dir;
46534 struct kobject *portio_dir;
46535@@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
46536 struct device_attribute *attr, char *buf)
46537 {
46538 struct uio_device *idev = dev_get_drvdata(dev);
46539- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
46540+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
46541 }
46542
46543 static struct device_attribute uio_class_attributes[] = {
46544@@ -398,7 +399,7 @@ void uio_event_notify(struct uio_info *info)
46545 {
46546 struct uio_device *idev = info->uio_dev;
46547
46548- atomic_inc(&idev->event);
46549+ atomic_inc_unchecked(&idev->event);
46550 wake_up_interruptible(&idev->wait);
46551 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
46552 }
46553@@ -451,7 +452,7 @@ static int uio_open(struct inode *inode, struct file *filep)
46554 }
46555
46556 listener->dev = idev;
46557- listener->event_count = atomic_read(&idev->event);
46558+ listener->event_count = atomic_read_unchecked(&idev->event);
46559 filep->private_data = listener;
46560
46561 if (idev->info->open) {
46562@@ -502,7 +503,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
46563 return -EIO;
46564
46565 poll_wait(filep, &idev->wait, wait);
46566- if (listener->event_count != atomic_read(&idev->event))
46567+ if (listener->event_count != atomic_read_unchecked(&idev->event))
46568 return POLLIN | POLLRDNORM;
46569 return 0;
46570 }
46571@@ -527,7 +528,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
46572 do {
46573 set_current_state(TASK_INTERRUPTIBLE);
46574
46575- event_count = atomic_read(&idev->event);
46576+ event_count = atomic_read_unchecked(&idev->event);
46577 if (event_count != listener->event_count) {
46578 if (copy_to_user(buf, &event_count, count))
46579 retval = -EFAULT;
46580@@ -596,13 +597,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
46581 static void uio_vma_open(struct vm_area_struct *vma)
46582 {
46583 struct uio_device *idev = vma->vm_private_data;
46584- idev->vma_count++;
46585+ local_inc(&idev->vma_count);
46586 }
46587
46588 static void uio_vma_close(struct vm_area_struct *vma)
46589 {
46590 struct uio_device *idev = vma->vm_private_data;
46591- idev->vma_count--;
46592+ local_dec(&idev->vma_count);
46593 }
46594
46595 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
46596@@ -809,7 +810,7 @@ int __uio_register_device(struct module *owner,
46597 idev->owner = owner;
46598 idev->info = info;
46599 init_waitqueue_head(&idev->wait);
46600- atomic_set(&idev->event, 0);
46601+ atomic_set_unchecked(&idev->event, 0);
46602
46603 ret = uio_get_minor(idev);
46604 if (ret)
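
uio picks a different counter flavor for each job: idev->event is a wrapping event sequence number, so it becomes atomic_unchecked_t (exempt from REFCOUNT overflow detection), while vma_count is bookkeeping updated from vm_operations callbacks and becomes a local_t, which gives tear-free updates at lower cost but is not meant for cross-CPU reference counting. A condensed sketch of the two; they are struct fields in the driver but shown standalone here:

#include <asm/local.h>

static atomic_unchecked_t event = ATOMIC_INIT(0);	/* may wrap freely */
static local_t vma_count;				/* bookkeeping only */

static void on_event(void)     { atomic_inc_unchecked(&event); }
static void on_vma_open(void)  { local_inc(&vma_count); }
static void on_vma_close(void) { local_dec(&vma_count); }
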
46605diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
46606index 8a7eb77..c00402f 100644
46607--- a/drivers/usb/atm/cxacru.c
46608+++ b/drivers/usb/atm/cxacru.c
46609@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
46610 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
46611 if (ret < 2)
46612 return -EINVAL;
46613- if (index < 0 || index > 0x7f)
46614+ if (index > 0x7f)
46615 return -EINVAL;
46616 pos += tmp;
46617
46618diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
46619index d3527dd..26effa2 100644
46620--- a/drivers/usb/atm/usbatm.c
46621+++ b/drivers/usb/atm/usbatm.c
46622@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
46623 if (printk_ratelimit())
46624 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
46625 __func__, vpi, vci);
46626- atomic_inc(&vcc->stats->rx_err);
46627+ atomic_inc_unchecked(&vcc->stats->rx_err);
46628 return;
46629 }
46630
46631@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
46632 if (length > ATM_MAX_AAL5_PDU) {
46633 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
46634 __func__, length, vcc);
46635- atomic_inc(&vcc->stats->rx_err);
46636+ atomic_inc_unchecked(&vcc->stats->rx_err);
46637 goto out;
46638 }
46639
46640@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
46641 if (sarb->len < pdu_length) {
46642 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
46643 __func__, pdu_length, sarb->len, vcc);
46644- atomic_inc(&vcc->stats->rx_err);
46645+ atomic_inc_unchecked(&vcc->stats->rx_err);
46646 goto out;
46647 }
46648
46649 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
46650 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
46651 __func__, vcc);
46652- atomic_inc(&vcc->stats->rx_err);
46653+ atomic_inc_unchecked(&vcc->stats->rx_err);
46654 goto out;
46655 }
46656
46657@@ -389,7 +389,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
46658 if (printk_ratelimit())
46659 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
46660 __func__, length);
46661- atomic_inc(&vcc->stats->rx_drop);
46662+ atomic_inc_unchecked(&vcc->stats->rx_drop);
46663 goto out;
46664 }
46665
46666@@ -417,7 +417,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
46667
46668 vcc->push(vcc, skb);
46669
46670- atomic_inc(&vcc->stats->rx);
46671+ atomic_inc_unchecked(&vcc->stats->rx);
46672 out:
46673 skb_trim(sarb, 0);
46674 }
46675@@ -623,7 +623,7 @@ static void usbatm_tx_process(unsigned long data)
46676 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
46677
46678 usbatm_pop(vcc, skb);
46679- atomic_inc(&vcc->stats->tx);
46680+ atomic_inc_unchecked(&vcc->stats->tx);
46681
46682 skb = skb_dequeue(&instance->sndqueue);
46683 }
46684@@ -779,11 +779,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
46685 if (!left--)
46686 return sprintf(page,
46687 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
46688- atomic_read(&atm_dev->stats.aal5.tx),
46689- atomic_read(&atm_dev->stats.aal5.tx_err),
46690- atomic_read(&atm_dev->stats.aal5.rx),
46691- atomic_read(&atm_dev->stats.aal5.rx_err),
46692- atomic_read(&atm_dev->stats.aal5.rx_drop));
46693+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
46694+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
46695+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
46696+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
46697+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
46698
46699 if (!left--) {
46700 if (instance->disconnected)
46701diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
46702index 2a3bbdf..91d72cf 100644
46703--- a/drivers/usb/core/devices.c
46704+++ b/drivers/usb/core/devices.c
46705@@ -126,7 +126,7 @@ static const char format_endpt[] =
46706 * time it gets called.
46707 */
46708 static struct device_connect_event {
46709- atomic_t count;
46710+ atomic_unchecked_t count;
46711 wait_queue_head_t wait;
46712 } device_event = {
46713 .count = ATOMIC_INIT(1),
46714@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
46715
46716 void usbfs_conn_disc_event(void)
46717 {
46718- atomic_add(2, &device_event.count);
46719+ atomic_add_unchecked(2, &device_event.count);
46720 wake_up(&device_event.wait);
46721 }
46722
46723@@ -652,7 +652,7 @@ static unsigned int usb_device_poll(struct file *file,
46724
46725 poll_wait(file, &device_event.wait, wait);
46726
46727- event_count = atomic_read(&device_event.count);
46728+ event_count = atomic_read_unchecked(&device_event.count);
46729 if (file->f_version != event_count) {
46730 file->f_version = event_count;
46731 return POLLIN | POLLRDNORM;
46732diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
46733index d53547d..6a22d02 100644
46734--- a/drivers/usb/core/hcd.c
46735+++ b/drivers/usb/core/hcd.c
46736@@ -1526,7 +1526,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
46737 */
46738 usb_get_urb(urb);
46739 atomic_inc(&urb->use_count);
46740- atomic_inc(&urb->dev->urbnum);
46741+ atomic_inc_unchecked(&urb->dev->urbnum);
46742 usbmon_urb_submit(&hcd->self, urb);
46743
46744 /* NOTE requirements on root-hub callers (usbfs and the hub
46745@@ -1553,7 +1553,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
46746 urb->hcpriv = NULL;
46747 INIT_LIST_HEAD(&urb->urb_list);
46748 atomic_dec(&urb->use_count);
46749- atomic_dec(&urb->dev->urbnum);
46750+ atomic_dec_unchecked(&urb->dev->urbnum);
46751 if (atomic_read(&urb->reject))
46752 wake_up(&usb_kill_urb_queue);
46753 usb_put_urb(urb);
46754diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
46755index 444d30e..f15c850 100644
46756--- a/drivers/usb/core/message.c
46757+++ b/drivers/usb/core/message.c
46758@@ -129,7 +129,7 @@ static int usb_internal_control_msg(struct usb_device *usb_dev,
46759 * method can wait for it to complete. Since you don't have a handle on the
46760 * URB used, you can't cancel the request.
46761 */
46762-int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
46763+int __intentional_overflow(-1) usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
46764 __u8 requesttype, __u16 value, __u16 index, void *data,
46765 __u16 size, int timeout)
46766 {
46767diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
46768index aa38db4..0a08682 100644
46769--- a/drivers/usb/core/sysfs.c
46770+++ b/drivers/usb/core/sysfs.c
46771@@ -239,7 +239,7 @@ show_urbnum(struct device *dev, struct device_attribute *attr, char *buf)
46772 struct usb_device *udev;
46773
46774 udev = to_usb_device(dev);
46775- return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
46776+ return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum));
46777 }
46778 static DEVICE_ATTR(urbnum, S_IRUGO, show_urbnum, NULL);
46779
46780diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
46781index b10da72..43aa0b2 100644
46782--- a/drivers/usb/core/usb.c
46783+++ b/drivers/usb/core/usb.c
46784@@ -389,7 +389,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
46785 set_dev_node(&dev->dev, dev_to_node(bus->controller));
46786 dev->state = USB_STATE_ATTACHED;
46787 dev->lpm_disable_count = 1;
46788- atomic_set(&dev->urbnum, 0);
46789+ atomic_set_unchecked(&dev->urbnum, 0);
46790
46791 INIT_LIST_HEAD(&dev->ep0.urb_list);
46792 dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
46793diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
46794index 5e29dde..eca992f 100644
46795--- a/drivers/usb/early/ehci-dbgp.c
46796+++ b/drivers/usb/early/ehci-dbgp.c
46797@@ -98,7 +98,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
46798
46799 #ifdef CONFIG_KGDB
46800 static struct kgdb_io kgdbdbgp_io_ops;
46801-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
46802+static struct kgdb_io kgdbdbgp_io_ops_console;
46803+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
46804 #else
46805 #define dbgp_kgdb_mode (0)
46806 #endif
46807@@ -1047,6 +1048,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
46808 .write_char = kgdbdbgp_write_char,
46809 };
46810
46811+static struct kgdb_io kgdbdbgp_io_ops_console = {
46812+ .name = "kgdbdbgp",
46813+ .read_char = kgdbdbgp_read_char,
46814+ .write_char = kgdbdbgp_write_char,
46815+ .is_console = 1
46816+};
46817+
46818 static int kgdbdbgp_wait_time;
46819
46820 static int __init kgdbdbgp_parse_config(char *str)
46821@@ -1062,8 +1070,10 @@ static int __init kgdbdbgp_parse_config(char *str)
46822 ptr++;
46823 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
46824 }
46825- kgdb_register_io_module(&kgdbdbgp_io_ops);
46826- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
46827+ if (early_dbgp_console.index != -1)
46828+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
46829+ else
46830+ kgdb_register_io_module(&kgdbdbgp_io_ops);
46831
46832 return 0;
46833 }
46834diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
46835index b369292..9f3ba40 100644
46836--- a/drivers/usb/gadget/u_serial.c
46837+++ b/drivers/usb/gadget/u_serial.c
46838@@ -733,9 +733,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
46839 spin_lock_irq(&port->port_lock);
46840
46841 /* already open? Great. */
46842- if (port->port.count) {
46843+ if (atomic_read(&port->port.count)) {
46844 status = 0;
46845- port->port.count++;
46846+ atomic_inc(&port->port.count);
46847
46848 /* currently opening/closing? wait ... */
46849 } else if (port->openclose) {
46850@@ -794,7 +794,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
46851 tty->driver_data = port;
46852 port->port.tty = tty;
46853
46854- port->port.count = 1;
46855+ atomic_set(&port->port.count, 1);
46856 port->openclose = false;
46857
46858 /* if connected, start the I/O stream */
46859@@ -836,11 +836,11 @@ static void gs_close(struct tty_struct *tty, struct file *file)
46860
46861 spin_lock_irq(&port->port_lock);
46862
46863- if (port->port.count != 1) {
46864- if (port->port.count == 0)
46865+ if (atomic_read(&port->port.count) != 1) {
46866+ if (atomic_read(&port->port.count) == 0)
46867 WARN_ON(1);
46868 else
46869- --port->port.count;
46870+ atomic_dec(&port->port.count);
46871 goto exit;
46872 }
46873
46874@@ -850,7 +850,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
46875 * and sleep if necessary
46876 */
46877 port->openclose = true;
46878- port->port.count = 0;
46879+ atomic_set(&port->port.count, 0);
46880
46881 gser = port->port_usb;
46882 if (gser && gser->disconnect)
46883@@ -1066,7 +1066,7 @@ static int gs_closed(struct gs_port *port)
46884 int cond;
46885
46886 spin_lock_irq(&port->port_lock);
46887- cond = (port->port.count == 0) && !port->openclose;
46888+ cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
46889 spin_unlock_irq(&port->port_lock);
46890 return cond;
46891 }
46892@@ -1209,7 +1209,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
46893 /* if it's already open, start I/O ... and notify the serial
46894 * protocol about open/close status (connect/disconnect).
46895 */
46896- if (port->port.count) {
46897+ if (atomic_read(&port->port.count)) {
46898 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
46899 gs_start_io(port);
46900 if (gser->connect)
46901@@ -1256,7 +1256,7 @@ void gserial_disconnect(struct gserial *gser)
46902
46903 port->port_usb = NULL;
46904 gser->ioport = NULL;
46905- if (port->port.count > 0 || port->openclose) {
46906+ if (atomic_read(&port->port.count) > 0 || port->openclose) {
46907 wake_up_interruptible(&port->drain_wait);
46908 if (port->port.tty)
46909 tty_hangup(port->port.tty);
46910@@ -1272,7 +1272,7 @@ void gserial_disconnect(struct gserial *gser)
46911
46912 /* finally, free any unused/unusable I/O buffers */
46913 spin_lock_irqsave(&port->port_lock, flags);
46914- if (port->port.count == 0 && !port->openclose)
46915+ if (atomic_read(&port->port.count) == 0 && !port->openclose)
46916 gs_buf_free(&port->port_write_buf);
46917 gs_free_requests(gser->out, &port->read_pool, NULL);
46918 gs_free_requests(gser->out, &port->read_queue, NULL);
46919diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
46920index 5f3bcd3..bfca43f 100644
46921--- a/drivers/usb/serial/console.c
46922+++ b/drivers/usb/serial/console.c
46923@@ -124,7 +124,7 @@ static int usb_console_setup(struct console *co, char *options)
46924
46925 info->port = port;
46926
46927- ++port->port.count;
46928+ atomic_inc(&port->port.count);
46929 if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
46930 if (serial->type->set_termios) {
46931 /*
46932@@ -174,7 +174,7 @@ static int usb_console_setup(struct console *co, char *options)
46933 }
46934 /* Now that any required fake tty operations are completed restore
46935 * the tty port count */
46936- --port->port.count;
46937+ atomic_dec(&port->port.count);
46938 /* The console is special in terms of closing the device so
46939 * indicate this port is now acting as a system console. */
46940 port->port.console = 1;
46941@@ -187,7 +187,7 @@ static int usb_console_setup(struct console *co, char *options)
46942 free_tty:
46943 kfree(tty);
46944 reset_open_count:
46945- port->port.count = 0;
46946+ atomic_set(&port->port.count, 0);
46947 usb_autopm_put_interface(serial->interface);
46948 error_get_interface:
46949 usb_serial_put(serial);
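The gs_open()/gs_close() and usb_console_setup() hunks above all follow from a single change made elsewhere in this patch: struct tty_port's count field becomes an atomic_t, so every reader and writer has to go through atomic_read()/atomic_inc()/atomic_set(). A toy model of the resulting open-count protocol, using C11 atomics in place of the kernel helpers:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int open_count = 0;

static void port_open(void)
{
	if (atomic_load(&open_count)) {		/* already open? just bump */
		atomic_fetch_add(&open_count, 1);
		return;
	}
	/* first opener does the heavy setup, then publishes count = 1 */
	atomic_store(&open_count, 1);
}

static void port_close(void)
{
	if (atomic_load(&open_count) != 1) {
		atomic_fetch_sub(&open_count, 1);	/* not the last closer */
		return;
	}
	atomic_store(&open_count, 0);	/* last closer tears down */
}

int main(void)
{
	port_open();
	port_open();
	port_close();
	printf("count=%d\n", atomic_load(&open_count));	/* prints 1 */
	port_close();
	return 0;
}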
46950diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
46951index 75f70f0..d467e1a 100644
46952--- a/drivers/usb/storage/usb.h
46953+++ b/drivers/usb/storage/usb.h
46954@@ -63,7 +63,7 @@ struct us_unusual_dev {
46955 __u8 useProtocol;
46956 __u8 useTransport;
46957 int (*initFunction)(struct us_data *);
46958-};
46959+} __do_const;
46960
46961
46962 /* Dynamic bitflag definitions (us->dflags): used in set_bit() etc. */
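__do_const is a grsecurity annotation (enforced by the constify gcc plugin) that moves instances of the marked structure into read-only memory; us_unusual_dev is a worthwhile target because it carries a function pointer (initFunction) that an attacker could otherwise overwrite. A rough userspace equivalent just uses const to push the table into .rodata:

#include <stdio.h>

struct us_unusual_dev_model {
	const char *vendor;
	int (*init)(void);	/* function pointer: hijack target if writable */
};

static int my_init(void) { return 0; }

/* const lands the table in .rodata, the moral equivalent of __do_const */
static const struct us_unusual_dev_model table[] = {
	{ "ExampleVendor", my_init },
};

int main(void)
{
	printf("%s -> %d\n", table[0].vendor, table[0].init());
	return 0;
}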
46963diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
46964index d6bea3e..60b250e 100644
46965--- a/drivers/usb/wusbcore/wa-hc.h
46966+++ b/drivers/usb/wusbcore/wa-hc.h
46967@@ -192,7 +192,7 @@ struct wahc {
46968 struct list_head xfer_delayed_list;
46969 spinlock_t xfer_list_lock;
46970 struct work_struct xfer_work;
46971- atomic_t xfer_id_count;
46972+ atomic_unchecked_t xfer_id_count;
46973 };
46974
46975
46976@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
46977 INIT_LIST_HEAD(&wa->xfer_delayed_list);
46978 spin_lock_init(&wa->xfer_list_lock);
46979 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
46980- atomic_set(&wa->xfer_id_count, 1);
46981+ atomic_set_unchecked(&wa->xfer_id_count, 1);
46982 }
46983
46984 /**
46985diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
46986index 6ef94bc..1b41265 100644
46987--- a/drivers/usb/wusbcore/wa-xfer.c
46988+++ b/drivers/usb/wusbcore/wa-xfer.c
46989@@ -296,7 +296,7 @@ out:
46990 */
46991 static void wa_xfer_id_init(struct wa_xfer *xfer)
46992 {
46993- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
46994+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
46995 }
46996
46997 /*
46998diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
46999index 5174eba..86e764a 100644
47000--- a/drivers/vhost/vringh.c
47001+++ b/drivers/vhost/vringh.c
47002@@ -800,7 +800,7 @@ static inline int getu16_kern(u16 *val, const u16 *p)
47003
47004 static inline int putu16_kern(u16 *p, u16 val)
47005 {
47006- ACCESS_ONCE(*p) = val;
47007+ ACCESS_ONCE_RW(*p) = val;
47008 return 0;
47009 }
47010
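The vringh hunk reflects grsecurity's split of ACCESS_ONCE into a read-only default and an explicit _RW variant for writes: the const in the default macro turns any accidental write into a compile error. A sketch modeled on those definitions (GCC __typeof__):

#include <stdio.h>

#define ACCESS_ONCE(x)    (*(volatile const __typeof__(x) *)&(x))
#define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

int main(void)
{
	unsigned short v = 1;

	unsigned short r = ACCESS_ONCE(v);	/* reads are fine */
	/* ACCESS_ONCE(v) = 2;    rejected: assignment to const lvalue */
	ACCESS_ONCE_RW(v) = 2;			/* writes must be explicit */

	printf("%u %u\n", r, v);
	return 0;
}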
47011diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
47012index 8c55011..eed4ae1a 100644
47013--- a/drivers/video/aty/aty128fb.c
47014+++ b/drivers/video/aty/aty128fb.c
47015@@ -149,7 +149,7 @@ enum {
47016 };
47017
47018 /* Must match above enum */
47019-static char * const r128_family[] = {
47020+static const char * const r128_family[] = {
47021 "AGP",
47022 "PCI",
47023 "PRO AGP",
47024diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
47025index 4f27fdc..d3537e6 100644
47026--- a/drivers/video/aty/atyfb_base.c
47027+++ b/drivers/video/aty/atyfb_base.c
47028@@ -1325,10 +1325,14 @@ static int atyfb_set_par(struct fb_info *info)
47029 par->accel_flags = var->accel_flags; /* hack */
47030
47031 if (var->accel_flags) {
47032- info->fbops->fb_sync = atyfb_sync;
47033+ pax_open_kernel();
47034+ *(void **)&info->fbops->fb_sync = atyfb_sync;
47035+ pax_close_kernel();
47036 info->flags &= ~FBINFO_HWACCEL_DISABLED;
47037 } else {
47038- info->fbops->fb_sync = NULL;
47039+ pax_open_kernel();
47040+ *(void **)&info->fbops->fb_sync = NULL;
47041+ pax_close_kernel();
47042 info->flags |= FBINFO_HWACCEL_DISABLED;
47043 }
47044
47045diff --git a/drivers/video/aty/mach64_cursor.c b/drivers/video/aty/mach64_cursor.c
47046index 95ec042..e6affdd 100644
47047--- a/drivers/video/aty/mach64_cursor.c
47048+++ b/drivers/video/aty/mach64_cursor.c
47049@@ -7,6 +7,7 @@
47050 #include <linux/string.h>
47051
47052 #include <asm/io.h>
47053+#include <asm/pgtable.h>
47054
47055 #ifdef __sparc__
47056 #include <asm/fbio.h>
47057@@ -208,7 +209,9 @@ int aty_init_cursor(struct fb_info *info)
47058 info->sprite.buf_align = 16; /* and 64 lines tall. */
47059 info->sprite.flags = FB_PIXMAP_IO;
47060
47061- info->fbops->fb_cursor = atyfb_cursor;
47062+ pax_open_kernel();
47063+ *(void **)&info->fbops->fb_cursor = atyfb_cursor;
47064+ pax_close_kernel();
47065
47066 return 0;
47067 }
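fb_ops structures are constified by this patch, so direct stores such as info->fbops->fb_sync = atyfb_sync in the two hunks above would fault; pax_open_kernel()/pax_close_kernel() briefly lift kernel write protection (toggling CR0.WP on x86) and the *(void **)& cast launders the const qualifier. A userspace model of the same open-write-close dance, using mprotect() in place of the CR0 toggle:

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

struct fb_ops_model { int (*fb_sync)(void); };

static int my_sync(void) { return 42; }

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	struct fb_ops_model *ops = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
					MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (ops == MAP_FAILED)
		return 1;

	mprotect(ops, pagesz, PROT_READ);		/* ops table now "const" */

	mprotect(ops, pagesz, PROT_READ | PROT_WRITE);	/* pax_open_kernel()   */
	ops->fb_sync = my_sync;				/* the guarded write   */
	mprotect(ops, pagesz, PROT_READ);		/* pax_close_kernel()  */

	printf("fb_sync() = %d\n", ops->fb_sync());
	munmap(ops, pagesz);
	return 0;
}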
47068diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
47069index c74e7aa..e3c2790 100644
47070--- a/drivers/video/backlight/backlight.c
47071+++ b/drivers/video/backlight/backlight.c
47072@@ -304,7 +304,7 @@ struct backlight_device *backlight_device_register(const char *name,
47073 new_bd->dev.class = backlight_class;
47074 new_bd->dev.parent = parent;
47075 new_bd->dev.release = bl_device_release;
47076- dev_set_name(&new_bd->dev, name);
47077+ dev_set_name(&new_bd->dev, "%s", name);
47078 dev_set_drvdata(&new_bd->dev, devdata);
47079
47080 /* Set default properties */
47081diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
47082index bca6ccc..252107e 100644
47083--- a/drivers/video/backlight/kb3886_bl.c
47084+++ b/drivers/video/backlight/kb3886_bl.c
47085@@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo;
47086 static unsigned long kb3886bl_flags;
47087 #define KB3886BL_SUSPENDED 0x01
47088
47089-static struct dmi_system_id __initdata kb3886bl_device_table[] = {
47090+static const struct dmi_system_id __initconst kb3886bl_device_table[] = {
47091 {
47092 .ident = "Sahara Touch-iT",
47093 .matches = {
47094diff --git a/drivers/video/backlight/lcd.c b/drivers/video/backlight/lcd.c
47095index 34fb6bd..3649fd9 100644
47096--- a/drivers/video/backlight/lcd.c
47097+++ b/drivers/video/backlight/lcd.c
47098@@ -219,7 +219,7 @@ struct lcd_device *lcd_device_register(const char *name, struct device *parent,
47099 new_ld->dev.class = lcd_class;
47100 new_ld->dev.parent = parent;
47101 new_ld->dev.release = lcd_device_release;
47102- dev_set_name(&new_ld->dev, name);
47103+ dev_set_name(&new_ld->dev, "%s", name);
47104 dev_set_drvdata(&new_ld->dev, devdata);
47105
47106 rc = device_register(&new_ld->dev);
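Both backlight_device_register() and lcd_device_register() above passed a caller-supplied name straight through as a printf-style format string; a name containing % conversions would make the varargs machinery read garbage off the stack. Routing it through a constant "%s" closes the hole. A small demonstration (the bad call is deliberate undefined behavior, and gcc -Wformat-security flags it):

#include <stdarg.h>
#include <stdio.h>

/* stand-in for dev_set_name(): a thin printf-style wrapper */
static void set_name(const char *fmt, ...)
{
	char buf[64];
	va_list ap;

	va_start(ap, fmt);
	vsnprintf(buf, sizeof(buf), fmt, ap);
	va_end(ap);
	printf("name = %s\n", buf);
}

int main(void)
{
	const char *name = "backlight%x%x";	/* imagine attacker influence */

	set_name(name);		/* BAD: %x reads stray vararg slots (UB) */
	set_name("%s", name);	/* GOOD: constant format, name is pure data */
	return 0;
}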
47107diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
47108index 900aa4e..6d49418 100644
47109--- a/drivers/video/fb_defio.c
47110+++ b/drivers/video/fb_defio.c
47111@@ -206,7 +206,9 @@ void fb_deferred_io_init(struct fb_info *info)
47112
47113 BUG_ON(!fbdefio);
47114 mutex_init(&fbdefio->lock);
47115- info->fbops->fb_mmap = fb_deferred_io_mmap;
47116+ pax_open_kernel();
47117+ *(void **)&info->fbops->fb_mmap = fb_deferred_io_mmap;
47118+ pax_close_kernel();
47119 INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
47120 INIT_LIST_HEAD(&fbdefio->pagelist);
47121 if (fbdefio->delay == 0) /* set a default of 1 s */
47122@@ -237,7 +239,7 @@ void fb_deferred_io_cleanup(struct fb_info *info)
47123 page->mapping = NULL;
47124 }
47125
47126- info->fbops->fb_mmap = NULL;
47127+ *(void **)&info->fbops->fb_mmap = NULL;
47128 mutex_destroy(&fbdefio->lock);
47129 }
47130 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
47131diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
47132index 5c3960d..15cf8fc 100644
47133--- a/drivers/video/fbcmap.c
47134+++ b/drivers/video/fbcmap.c
47135@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
47136 rc = -ENODEV;
47137 goto out;
47138 }
47139- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
47140- !info->fbops->fb_setcmap)) {
47141+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
47142 rc = -EINVAL;
47143 goto out1;
47144 }
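In fb_set_user_cmap(), cmap->start is unsigned, so the deleted cmap->start < 0 test could never be true; the patch drops the dead comparison rather than leave a check that suggests a guarantee it cannot provide. Compilers make the same point:

#include <stdio.h>

int main(void)
{
	unsigned int start = 4294967295u;	/* what a "negative" value becomes */

	if (start < 0)		/* -Wextra: comparison is always false */
		puts("unreachable");
	else
		printf("start = %u, so the old test could never fire\n", start);
	return 0;
}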
47145diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
47146index 098bfc6..796841d 100644
47147--- a/drivers/video/fbmem.c
47148+++ b/drivers/video/fbmem.c
47149@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
47150 image->dx += image->width + 8;
47151 }
47152 } else if (rotate == FB_ROTATE_UD) {
47153- for (x = 0; x < num && image->dx >= 0; x++) {
47154+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
47155 info->fbops->fb_imageblit(info, image);
47156 image->dx -= image->width + 8;
47157 }
47158@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
47159 image->dy += image->height + 8;
47160 }
47161 } else if (rotate == FB_ROTATE_CCW) {
47162- for (x = 0; x < num && image->dy >= 0; x++) {
47163+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
47164 info->fbops->fb_imageblit(info, image);
47165 image->dy -= image->height + 8;
47166 }
47167@@ -1166,7 +1166,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
47168 return -EFAULT;
47169 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
47170 return -EINVAL;
47171- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
47172+ if (con2fb.framebuffer >= FB_MAX)
47173 return -EINVAL;
47174 if (!registered_fb[con2fb.framebuffer])
47175 request_module("fb%d", con2fb.framebuffer);
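image->dx and image->dy are __u32, so once the subtraction in fb_do_show_logo() wraps past zero the old dx >= 0 condition stayed true forever; casting to __s32 makes the wrapped value read as negative and ends the loop (the con2fb.framebuffer < 0 deletion in the same file removes the mirror-image always-false test). A compact model of the wrap check, leaning on the same two's-complement assumption the kernel makes:

#include <stdio.h>

int main(void)
{
	unsigned int dx = 20;	/* models the __u32 image->dx field */
	int steps = 0;

	/* mirrors: for (x = 0; x < num && (__s32)image->dx >= 0; x++) */
	while ((int)dx >= 0 && steps < 10) {
		dx -= 12;	/* second pass wraps below zero */
		steps++;
	}
	/* without the cast, dx >= 0 would hold forever and only the
	 * steps guard would stop this loop */
	printf("stopped after %d steps, dx = %u (0x%x)\n", steps, dx, dx);
	return 0;
}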
47176diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
47177index 7672d2e..b56437f 100644
47178--- a/drivers/video/i810/i810_accel.c
47179+++ b/drivers/video/i810/i810_accel.c
47180@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
47181 }
47182 }
47183 printk("ringbuffer lockup!!!\n");
47184+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
47185 i810_report_error(mmio);
47186 par->dev_flags |= LOCKUP;
47187 info->pixmap.scan_align = 1;
47188diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
47189index 3c14e43..eafa544 100644
47190--- a/drivers/video/logo/logo_linux_clut224.ppm
47191+++ b/drivers/video/logo/logo_linux_clut224.ppm
47192@@ -1,1604 +1,1123 @@
47193 P3
47194-# Standard 224-color Linux logo
47195 80 80
47196 255
[~1,250 lines of raw PPM pixel rows elided: this hunk strips the RGB triplets of the stock 224-color 80x80 Linux boot logo (1,604 data lines replaced by 1,123, per the hunk header) in favor of grsecurity's substitute image; the scrape cuts off partway through the removed rows]
48444- 2 2 6 2 2 6 2 2 6 2 2 6
48445- 78 78 78 250 250 250 253 253 253 253 253 253
48446-253 253 253 253 253 253 253 253 253 253 253 253
48447-253 253 253 253 253 253 253 253 253 253 253 253
48448-253 253 253 253 253 253 253 253 253 253 253 253
48449-250 250 250 214 214 214 198 198 198 190 150 46
48450-219 162 10 236 178 12 234 174 13 224 166 10
48451-216 158 10 213 154 11 213 154 11 216 158 10
48452-226 170 11 239 182 13 246 190 14 246 190 14
48453-246 190 14 246 190 14 242 186 14 206 162 42
48454-101 101 101 58 58 58 30 30 30 14 14 14
48455- 6 6 6 0 0 0 0 0 0 0 0 0
48456- 0 0 0 0 0 0 0 0 0 0 0 0
48457- 0 0 0 0 0 0 0 0 0 0 0 0
48458- 0 0 0 0 0 0 0 0 0 10 10 10
48459- 30 30 30 74 74 74 174 135 50 216 158 10
48460-236 178 12 246 190 14 246 190 14 246 190 14
48461-246 190 14 246 190 14 246 190 14 246 190 14
48462-246 190 14 246 190 14 246 190 14 246 190 14
48463-246 190 14 246 190 14 241 196 14 226 184 13
48464- 61 42 6 2 2 6 2 2 6 2 2 6
48465- 22 22 22 238 238 238 253 253 253 253 253 253
48466-253 253 253 253 253 253 253 253 253 253 253 253
48467-253 253 253 253 253 253 253 253 253 253 253 253
48468-253 253 253 253 253 253 253 253 253 253 253 253
48469-253 253 253 226 226 226 187 187 187 180 133 36
48470-216 158 10 236 178 12 239 182 13 236 178 12
48471-230 174 11 226 170 11 226 170 11 230 174 11
48472-236 178 12 242 186 14 246 190 14 246 190 14
48473-246 190 14 246 190 14 246 186 14 239 182 13
48474-206 162 42 106 106 106 66 66 66 34 34 34
48475- 14 14 14 6 6 6 0 0 0 0 0 0
48476- 0 0 0 0 0 0 0 0 0 0 0 0
48477- 0 0 0 0 0 0 0 0 0 0 0 0
48478- 0 0 0 0 0 0 0 0 0 6 6 6
48479- 26 26 26 70 70 70 163 133 67 213 154 11
48480-236 178 12 246 190 14 246 190 14 246 190 14
48481-246 190 14 246 190 14 246 190 14 246 190 14
48482-246 190 14 246 190 14 246 190 14 246 190 14
48483-246 190 14 246 190 14 246 190 14 241 196 14
48484-190 146 13 18 14 6 2 2 6 2 2 6
48485- 46 46 46 246 246 246 253 253 253 253 253 253
48486-253 253 253 253 253 253 253 253 253 253 253 253
48487-253 253 253 253 253 253 253 253 253 253 253 253
48488-253 253 253 253 253 253 253 253 253 253 253 253
48489-253 253 253 221 221 221 86 86 86 156 107 11
48490-216 158 10 236 178 12 242 186 14 246 186 14
48491-242 186 14 239 182 13 239 182 13 242 186 14
48492-242 186 14 246 186 14 246 190 14 246 190 14
48493-246 190 14 246 190 14 246 190 14 246 190 14
48494-242 186 14 225 175 15 142 122 72 66 66 66
48495- 30 30 30 10 10 10 0 0 0 0 0 0
48496- 0 0 0 0 0 0 0 0 0 0 0 0
48497- 0 0 0 0 0 0 0 0 0 0 0 0
48498- 0 0 0 0 0 0 0 0 0 6 6 6
48499- 26 26 26 70 70 70 163 133 67 210 150 10
48500-236 178 12 246 190 14 246 190 14 246 190 14
48501-246 190 14 246 190 14 246 190 14 246 190 14
48502-246 190 14 246 190 14 246 190 14 246 190 14
48503-246 190 14 246 190 14 246 190 14 246 190 14
48504-232 195 16 121 92 8 34 34 34 106 106 106
48505-221 221 221 253 253 253 253 253 253 253 253 253
48506-253 253 253 253 253 253 253 253 253 253 253 253
48507-253 253 253 253 253 253 253 253 253 253 253 253
48508-253 253 253 253 253 253 253 253 253 253 253 253
48509-242 242 242 82 82 82 18 14 6 163 110 8
48510-216 158 10 236 178 12 242 186 14 246 190 14
48511-246 190 14 246 190 14 246 190 14 246 190 14
48512-246 190 14 246 190 14 246 190 14 246 190 14
48513-246 190 14 246 190 14 246 190 14 246 190 14
48514-246 190 14 246 190 14 242 186 14 163 133 67
48515- 46 46 46 18 18 18 6 6 6 0 0 0
48516- 0 0 0 0 0 0 0 0 0 0 0 0
48517- 0 0 0 0 0 0 0 0 0 0 0 0
48518- 0 0 0 0 0 0 0 0 0 10 10 10
48519- 30 30 30 78 78 78 163 133 67 210 150 10
48520-236 178 12 246 186 14 246 190 14 246 190 14
48521-246 190 14 246 190 14 246 190 14 246 190 14
48522-246 190 14 246 190 14 246 190 14 246 190 14
48523-246 190 14 246 190 14 246 190 14 246 190 14
48524-241 196 14 215 174 15 190 178 144 253 253 253
48525-253 253 253 253 253 253 253 253 253 253 253 253
48526-253 253 253 253 253 253 253 253 253 253 253 253
48527-253 253 253 253 253 253 253 253 253 253 253 253
48528-253 253 253 253 253 253 253 253 253 218 218 218
48529- 58 58 58 2 2 6 22 18 6 167 114 7
48530-216 158 10 236 178 12 246 186 14 246 190 14
48531-246 190 14 246 190 14 246 190 14 246 190 14
48532-246 190 14 246 190 14 246 190 14 246 190 14
48533-246 190 14 246 190 14 246 190 14 246 190 14
48534-246 190 14 246 186 14 242 186 14 190 150 46
48535- 54 54 54 22 22 22 6 6 6 0 0 0
48536- 0 0 0 0 0 0 0 0 0 0 0 0
48537- 0 0 0 0 0 0 0 0 0 0 0 0
48538- 0 0 0 0 0 0 0 0 0 14 14 14
48539- 38 38 38 86 86 86 180 133 36 213 154 11
48540-236 178 12 246 186 14 246 190 14 246 190 14
48541-246 190 14 246 190 14 246 190 14 246 190 14
48542-246 190 14 246 190 14 246 190 14 246 190 14
48543-246 190 14 246 190 14 246 190 14 246 190 14
48544-246 190 14 232 195 16 190 146 13 214 214 214
48545-253 253 253 253 253 253 253 253 253 253 253 253
48546-253 253 253 253 253 253 253 253 253 253 253 253
48547-253 253 253 253 253 253 253 253 253 253 253 253
48548-253 253 253 250 250 250 170 170 170 26 26 26
48549- 2 2 6 2 2 6 37 26 9 163 110 8
48550-219 162 10 239 182 13 246 186 14 246 190 14
48551-246 190 14 246 190 14 246 190 14 246 190 14
48552-246 190 14 246 190 14 246 190 14 246 190 14
48553-246 190 14 246 190 14 246 190 14 246 190 14
48554-246 186 14 236 178 12 224 166 10 142 122 72
48555- 46 46 46 18 18 18 6 6 6 0 0 0
48556- 0 0 0 0 0 0 0 0 0 0 0 0
48557- 0 0 0 0 0 0 0 0 0 0 0 0
48558- 0 0 0 0 0 0 6 6 6 18 18 18
48559- 50 50 50 109 106 95 192 133 9 224 166 10
48560-242 186 14 246 190 14 246 190 14 246 190 14
48561-246 190 14 246 190 14 246 190 14 246 190 14
48562-246 190 14 246 190 14 246 190 14 246 190 14
48563-246 190 14 246 190 14 246 190 14 246 190 14
48564-242 186 14 226 184 13 210 162 10 142 110 46
48565-226 226 226 253 253 253 253 253 253 253 253 253
48566-253 253 253 253 253 253 253 253 253 253 253 253
48567-253 253 253 253 253 253 253 253 253 253 253 253
48568-198 198 198 66 66 66 2 2 6 2 2 6
48569- 2 2 6 2 2 6 50 34 6 156 107 11
48570-219 162 10 239 182 13 246 186 14 246 190 14
48571-246 190 14 246 190 14 246 190 14 246 190 14
48572-246 190 14 246 190 14 246 190 14 246 190 14
48573-246 190 14 246 190 14 246 190 14 242 186 14
48574-234 174 13 213 154 11 154 122 46 66 66 66
48575- 30 30 30 10 10 10 0 0 0 0 0 0
48576- 0 0 0 0 0 0 0 0 0 0 0 0
48577- 0 0 0 0 0 0 0 0 0 0 0 0
48578- 0 0 0 0 0 0 6 6 6 22 22 22
48579- 58 58 58 154 121 60 206 145 10 234 174 13
48580-242 186 14 246 186 14 246 190 14 246 190 14
48581-246 190 14 246 190 14 246 190 14 246 190 14
48582-246 190 14 246 190 14 246 190 14 246 190 14
48583-246 190 14 246 190 14 246 190 14 246 190 14
48584-246 186 14 236 178 12 210 162 10 163 110 8
48585- 61 42 6 138 138 138 218 218 218 250 250 250
48586-253 253 253 253 253 253 253 253 253 250 250 250
48587-242 242 242 210 210 210 144 144 144 66 66 66
48588- 6 6 6 2 2 6 2 2 6 2 2 6
48589- 2 2 6 2 2 6 61 42 6 163 110 8
48590-216 158 10 236 178 12 246 190 14 246 190 14
48591-246 190 14 246 190 14 246 190 14 246 190 14
48592-246 190 14 246 190 14 246 190 14 246 190 14
48593-246 190 14 239 182 13 230 174 11 216 158 10
48594-190 142 34 124 112 88 70 70 70 38 38 38
48595- 18 18 18 6 6 6 0 0 0 0 0 0
48596- 0 0 0 0 0 0 0 0 0 0 0 0
48597- 0 0 0 0 0 0 0 0 0 0 0 0
48598- 0 0 0 0 0 0 6 6 6 22 22 22
48599- 62 62 62 168 124 44 206 145 10 224 166 10
48600-236 178 12 239 182 13 242 186 14 242 186 14
48601-246 186 14 246 190 14 246 190 14 246 190 14
48602-246 190 14 246 190 14 246 190 14 246 190 14
48603-246 190 14 246 190 14 246 190 14 246 190 14
48604-246 190 14 236 178 12 216 158 10 175 118 6
48605- 80 54 7 2 2 6 6 6 6 30 30 30
48606- 54 54 54 62 62 62 50 50 50 38 38 38
48607- 14 14 14 2 2 6 2 2 6 2 2 6
48608- 2 2 6 2 2 6 2 2 6 2 2 6
48609- 2 2 6 6 6 6 80 54 7 167 114 7
48610-213 154 11 236 178 12 246 190 14 246 190 14
48611-246 190 14 246 190 14 246 190 14 246 190 14
48612-246 190 14 242 186 14 239 182 13 239 182 13
48613-230 174 11 210 150 10 174 135 50 124 112 88
48614- 82 82 82 54 54 54 34 34 34 18 18 18
48615- 6 6 6 0 0 0 0 0 0 0 0 0
48616- 0 0 0 0 0 0 0 0 0 0 0 0
48617- 0 0 0 0 0 0 0 0 0 0 0 0
48618- 0 0 0 0 0 0 6 6 6 18 18 18
48619- 50 50 50 158 118 36 192 133 9 200 144 11
48620-216 158 10 219 162 10 224 166 10 226 170 11
48621-230 174 11 236 178 12 239 182 13 239 182 13
48622-242 186 14 246 186 14 246 190 14 246 190 14
48623-246 190 14 246 190 14 246 190 14 246 190 14
48624-246 186 14 230 174 11 210 150 10 163 110 8
48625-104 69 6 10 10 10 2 2 6 2 2 6
48626- 2 2 6 2 2 6 2 2 6 2 2 6
48627- 2 2 6 2 2 6 2 2 6 2 2 6
48628- 2 2 6 2 2 6 2 2 6 2 2 6
48629- 2 2 6 6 6 6 91 60 6 167 114 7
48630-206 145 10 230 174 11 242 186 14 246 190 14
48631-246 190 14 246 190 14 246 186 14 242 186 14
48632-239 182 13 230 174 11 224 166 10 213 154 11
48633-180 133 36 124 112 88 86 86 86 58 58 58
48634- 38 38 38 22 22 22 10 10 10 6 6 6
48635- 0 0 0 0 0 0 0 0 0 0 0 0
48636- 0 0 0 0 0 0 0 0 0 0 0 0
48637- 0 0 0 0 0 0 0 0 0 0 0 0
48638- 0 0 0 0 0 0 0 0 0 14 14 14
48639- 34 34 34 70 70 70 138 110 50 158 118 36
48640-167 114 7 180 123 7 192 133 9 197 138 11
48641-200 144 11 206 145 10 213 154 11 219 162 10
48642-224 166 10 230 174 11 239 182 13 242 186 14
48643-246 186 14 246 186 14 246 186 14 246 186 14
48644-239 182 13 216 158 10 185 133 11 152 99 6
48645-104 69 6 18 14 6 2 2 6 2 2 6
48646- 2 2 6 2 2 6 2 2 6 2 2 6
48647- 2 2 6 2 2 6 2 2 6 2 2 6
48648- 2 2 6 2 2 6 2 2 6 2 2 6
48649- 2 2 6 6 6 6 80 54 7 152 99 6
48650-192 133 9 219 162 10 236 178 12 239 182 13
48651-246 186 14 242 186 14 239 182 13 236 178 12
48652-224 166 10 206 145 10 192 133 9 154 121 60
48653- 94 94 94 62 62 62 42 42 42 22 22 22
48654- 14 14 14 6 6 6 0 0 0 0 0 0
48655- 0 0 0 0 0 0 0 0 0 0 0 0
48656- 0 0 0 0 0 0 0 0 0 0 0 0
48657- 0 0 0 0 0 0 0 0 0 0 0 0
48658- 0 0 0 0 0 0 0 0 0 6 6 6
48659- 18 18 18 34 34 34 58 58 58 78 78 78
48660-101 98 89 124 112 88 142 110 46 156 107 11
48661-163 110 8 167 114 7 175 118 6 180 123 7
48662-185 133 11 197 138 11 210 150 10 219 162 10
48663-226 170 11 236 178 12 236 178 12 234 174 13
48664-219 162 10 197 138 11 163 110 8 130 83 6
48665- 91 60 6 10 10 10 2 2 6 2 2 6
48666- 18 18 18 38 38 38 38 38 38 38 38 38
48667- 38 38 38 38 38 38 38 38 38 38 38 38
48668- 38 38 38 38 38 38 26 26 26 2 2 6
48669- 2 2 6 6 6 6 70 47 6 137 92 6
48670-175 118 6 200 144 11 219 162 10 230 174 11
48671-234 174 13 230 174 11 219 162 10 210 150 10
48672-192 133 9 163 110 8 124 112 88 82 82 82
48673- 50 50 50 30 30 30 14 14 14 6 6 6
48674- 0 0 0 0 0 0 0 0 0 0 0 0
48675- 0 0 0 0 0 0 0 0 0 0 0 0
48676- 0 0 0 0 0 0 0 0 0 0 0 0
48677- 0 0 0 0 0 0 0 0 0 0 0 0
48678- 0 0 0 0 0 0 0 0 0 0 0 0
48679- 6 6 6 14 14 14 22 22 22 34 34 34
48680- 42 42 42 58 58 58 74 74 74 86 86 86
48681-101 98 89 122 102 70 130 98 46 121 87 25
48682-137 92 6 152 99 6 163 110 8 180 123 7
48683-185 133 11 197 138 11 206 145 10 200 144 11
48684-180 123 7 156 107 11 130 83 6 104 69 6
48685- 50 34 6 54 54 54 110 110 110 101 98 89
48686- 86 86 86 82 82 82 78 78 78 78 78 78
48687- 78 78 78 78 78 78 78 78 78 78 78 78
48688- 78 78 78 82 82 82 86 86 86 94 94 94
48689-106 106 106 101 101 101 86 66 34 124 80 6
48690-156 107 11 180 123 7 192 133 9 200 144 11
48691-206 145 10 200 144 11 192 133 9 175 118 6
48692-139 102 15 109 106 95 70 70 70 42 42 42
48693- 22 22 22 10 10 10 0 0 0 0 0 0
48694- 0 0 0 0 0 0 0 0 0 0 0 0
48695- 0 0 0 0 0 0 0 0 0 0 0 0
48696- 0 0 0 0 0 0 0 0 0 0 0 0
48697- 0 0 0 0 0 0 0 0 0 0 0 0
48698- 0 0 0 0 0 0 0 0 0 0 0 0
48699- 0 0 0 0 0 0 6 6 6 10 10 10
48700- 14 14 14 22 22 22 30 30 30 38 38 38
48701- 50 50 50 62 62 62 74 74 74 90 90 90
48702-101 98 89 112 100 78 121 87 25 124 80 6
48703-137 92 6 152 99 6 152 99 6 152 99 6
48704-138 86 6 124 80 6 98 70 6 86 66 30
48705-101 98 89 82 82 82 58 58 58 46 46 46
48706- 38 38 38 34 34 34 34 34 34 34 34 34
48707- 34 34 34 34 34 34 34 34 34 34 34 34
48708- 34 34 34 34 34 34 38 38 38 42 42 42
48709- 54 54 54 82 82 82 94 86 76 91 60 6
48710-134 86 6 156 107 11 167 114 7 175 118 6
48711-175 118 6 167 114 7 152 99 6 121 87 25
48712-101 98 89 62 62 62 34 34 34 18 18 18
48713- 6 6 6 0 0 0 0 0 0 0 0 0
48714- 0 0 0 0 0 0 0 0 0 0 0 0
48715- 0 0 0 0 0 0 0 0 0 0 0 0
48716- 0 0 0 0 0 0 0 0 0 0 0 0
48717- 0 0 0 0 0 0 0 0 0 0 0 0
48718- 0 0 0 0 0 0 0 0 0 0 0 0
48719- 0 0 0 0 0 0 0 0 0 0 0 0
48720- 0 0 0 6 6 6 6 6 6 10 10 10
48721- 18 18 18 22 22 22 30 30 30 42 42 42
48722- 50 50 50 66 66 66 86 86 86 101 98 89
48723-106 86 58 98 70 6 104 69 6 104 69 6
48724-104 69 6 91 60 6 82 62 34 90 90 90
48725- 62 62 62 38 38 38 22 22 22 14 14 14
48726- 10 10 10 10 10 10 10 10 10 10 10 10
48727- 10 10 10 10 10 10 6 6 6 10 10 10
48728- 10 10 10 10 10 10 10 10 10 14 14 14
48729- 22 22 22 42 42 42 70 70 70 89 81 66
48730- 80 54 7 104 69 6 124 80 6 137 92 6
48731-134 86 6 116 81 8 100 82 52 86 86 86
48732- 58 58 58 30 30 30 14 14 14 6 6 6
48733- 0 0 0 0 0 0 0 0 0 0 0 0
48734- 0 0 0 0 0 0 0 0 0 0 0 0
48735- 0 0 0 0 0 0 0 0 0 0 0 0
48736- 0 0 0 0 0 0 0 0 0 0 0 0
48737- 0 0 0 0 0 0 0 0 0 0 0 0
48738- 0 0 0 0 0 0 0 0 0 0 0 0
48739- 0 0 0 0 0 0 0 0 0 0 0 0
48740- 0 0 0 0 0 0 0 0 0 0 0 0
48741- 0 0 0 6 6 6 10 10 10 14 14 14
48742- 18 18 18 26 26 26 38 38 38 54 54 54
48743- 70 70 70 86 86 86 94 86 76 89 81 66
48744- 89 81 66 86 86 86 74 74 74 50 50 50
48745- 30 30 30 14 14 14 6 6 6 0 0 0
48746- 0 0 0 0 0 0 0 0 0 0 0 0
48747- 0 0 0 0 0 0 0 0 0 0 0 0
48748- 0 0 0 0 0 0 0 0 0 0 0 0
48749- 6 6 6 18 18 18 34 34 34 58 58 58
48750- 82 82 82 89 81 66 89 81 66 89 81 66
48751- 94 86 66 94 86 76 74 74 74 50 50 50
48752- 26 26 26 14 14 14 6 6 6 0 0 0
48753- 0 0 0 0 0 0 0 0 0 0 0 0
48754- 0 0 0 0 0 0 0 0 0 0 0 0
48755- 0 0 0 0 0 0 0 0 0 0 0 0
48756- 0 0 0 0 0 0 0 0 0 0 0 0
48757- 0 0 0 0 0 0 0 0 0 0 0 0
48758- 0 0 0 0 0 0 0 0 0 0 0 0
48759- 0 0 0 0 0 0 0 0 0 0 0 0
48760- 0 0 0 0 0 0 0 0 0 0 0 0
48761- 0 0 0 0 0 0 0 0 0 0 0 0
48762- 6 6 6 6 6 6 14 14 14 18 18 18
48763- 30 30 30 38 38 38 46 46 46 54 54 54
48764- 50 50 50 42 42 42 30 30 30 18 18 18
48765- 10 10 10 0 0 0 0 0 0 0 0 0
48766- 0 0 0 0 0 0 0 0 0 0 0 0
48767- 0 0 0 0 0 0 0 0 0 0 0 0
48768- 0 0 0 0 0 0 0 0 0 0 0 0
48769- 0 0 0 6 6 6 14 14 14 26 26 26
48770- 38 38 38 50 50 50 58 58 58 58 58 58
48771- 54 54 54 42 42 42 30 30 30 18 18 18
48772- 10 10 10 0 0 0 0 0 0 0 0 0
48773- 0 0 0 0 0 0 0 0 0 0 0 0
48774- 0 0 0 0 0 0 0 0 0 0 0 0
48775- 0 0 0 0 0 0 0 0 0 0 0 0
48776- 0 0 0 0 0 0 0 0 0 0 0 0
48777- 0 0 0 0 0 0 0 0 0 0 0 0
48778- 0 0 0 0 0 0 0 0 0 0 0 0
48779- 0 0 0 0 0 0 0 0 0 0 0 0
48780- 0 0 0 0 0 0 0 0 0 0 0 0
48781- 0 0 0 0 0 0 0 0 0 0 0 0
48782- 0 0 0 0 0 0 0 0 0 6 6 6
48783- 6 6 6 10 10 10 14 14 14 18 18 18
48784- 18 18 18 14 14 14 10 10 10 6 6 6
48785- 0 0 0 0 0 0 0 0 0 0 0 0
48786- 0 0 0 0 0 0 0 0 0 0 0 0
48787- 0 0 0 0 0 0 0 0 0 0 0 0
48788- 0 0 0 0 0 0 0 0 0 0 0 0
48789- 0 0 0 0 0 0 0 0 0 6 6 6
48790- 14 14 14 18 18 18 22 22 22 22 22 22
48791- 18 18 18 14 14 14 10 10 10 6 6 6
48792- 0 0 0 0 0 0 0 0 0 0 0 0
48793- 0 0 0 0 0 0 0 0 0 0 0 0
48794- 0 0 0 0 0 0 0 0 0 0 0 0
48795- 0 0 0 0 0 0 0 0 0 0 0 0
48796- 0 0 0 0 0 0 0 0 0 0 0 0
48797+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48798+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48799+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48800+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48801+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48802+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48803+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48804+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48805+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48806+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48807+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48808+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48809+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48810+4 4 4 4 4 4
48811+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48812+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48813+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48814+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48815+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48816+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48817+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48818+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48819+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48820+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48821+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48822+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48823+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48824+4 4 4 4 4 4
48825+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48826+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48827+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48828+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48829+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48830+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48831+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48832+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48833+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48834+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48835+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48836+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48837+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48838+4 4 4 4 4 4
48839+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48840+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48841+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48842+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48843+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48844+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48845+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48846+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48847+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48848+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48849+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48850+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48851+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48852+4 4 4 4 4 4
48853+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48854+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48855+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48856+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48857+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48858+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48859+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48860+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48861+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48862+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48863+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48864+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48865+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48866+4 4 4 4 4 4
48867+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48868+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48869+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48870+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48871+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48872+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48873+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48874+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48875+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48876+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48877+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48878+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48879+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48880+4 4 4 4 4 4
48881+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48882+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48883+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48884+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48885+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
48886+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
48887+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48888+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48889+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48890+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
48891+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
48892+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
48893+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48894+4 4 4 4 4 4
48895+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48896+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48897+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48898+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48899+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
48900+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
48901+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48902+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48903+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48904+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
48905+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
48906+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
48907+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48908+4 4 4 4 4 4
48909+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48910+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48911+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48912+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48913+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
48914+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
48915+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
48916+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48917+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48918+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
48919+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
48920+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
48921+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
48922+4 4 4 4 4 4
48923+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48924+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48925+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48926+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
48927+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
48928+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
48929+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
48930+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48931+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
48932+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
48933+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
48934+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
48935+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
48936+4 4 4 4 4 4
48937+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48938+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48939+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48940+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
48941+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
48942+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
48943+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
48944+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
48945+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
48946+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
48947+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
48948+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
48949+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
48950+4 4 4 4 4 4
48951+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48952+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48953+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
48954+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
48955+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
48956+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
48957+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
48958+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
48959+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
48960+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
48961+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
48962+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
48963+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
48964+4 4 4 4 4 4
48965+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48966+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48967+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
48968+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
48969+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
48970+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
48971+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
48972+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
48973+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
48974+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
48975+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
48976+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
48977+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
48978+4 4 4 4 4 4
48979+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48980+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48981+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
48982+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
48983+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
48984+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
48985+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
48986+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
48987+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
48988+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
48989+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
48990+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
48991+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
48992+4 4 4 4 4 4
48993+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48994+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
48995+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
48996+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
48997+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
48998+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
48999+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
49000+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
49001+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
49002+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
49003+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
49004+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
49005+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
49006+4 4 4 4 4 4
49007+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49008+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49009+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
49010+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
49011+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
49012+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
49013+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
49014+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
49015+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
49016+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
49017+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
49018+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
49019+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
49020+4 4 4 4 4 4
49021+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49022+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
49023+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
49024+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
49025+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
49026+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
49027+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
49028+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
49029+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
49030+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
49031+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
49032+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
49033+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
49034+4 4 4 4 4 4
49035+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49036+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
49037+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
49038+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
49039+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
49040+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
49041+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
49042+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
49043+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
49044+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
49045+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
49046+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
49047+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
49048+0 0 0 4 4 4
49049+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
49050+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
49051+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
49052+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
49053+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
49054+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
49055+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
49056+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
49057+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
49058+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
49059+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
49060+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
49061+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
49062+2 0 0 0 0 0
49063+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
49064+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
49065+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
49066+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
49067+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
49068+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
49069+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
49070+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
49071+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
49072+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
49073+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
49074+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
49075+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
49076+37 38 37 0 0 0
49077+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
49078+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
49079+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
49080+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
49081+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
49082+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
49083+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
49084+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
49085+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
49086+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
49087+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
49088+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
49089+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
49090+85 115 134 4 0 0
49091+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
49092+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
49093+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
49094+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
49095+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
49096+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
49097+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
49098+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
49099+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
49100+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
49101+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
49102+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
49103+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
49104+60 73 81 4 0 0
49105+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
49106+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
49107+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
49108+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
49109+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
49110+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
49111+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
49112+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
49113+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
49114+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
49115+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
49116+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
49117+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
49118+16 19 21 4 0 0
49119+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
49120+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
49121+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
49122+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
49123+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
49124+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
49125+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
49126+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
49127+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
49128+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
49129+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
49130+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
49131+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
49132+4 0 0 4 3 3
49133+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
49134+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
49135+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
49136+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
49137+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
49138+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
49139+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
49140+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
49141+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
49142+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
49143+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
49144+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
49145+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
49146+3 2 2 4 4 4
49147+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
49148+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
49149+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
49150+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
49151+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
49152+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
49153+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
49154+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
49155+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
49156+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
49157+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
49158+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
49159+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
49160+4 4 4 4 4 4
49161+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
49162+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
49163+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
49164+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
49165+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
49166+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
49167+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
49168+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
49169+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
49170+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
49171+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
49172+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
49173+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
49174+4 4 4 4 4 4
49175+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
49176+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
49177+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
49178+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
49179+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
49180+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
49181+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
49182+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
49183+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
49184+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
49185+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
49186+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
49187+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
49188+5 5 5 5 5 5
49189+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
49190+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
49191+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
49192+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
49193+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
49194+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
49195+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
49196+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
49197+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
49198+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
49199+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
49200+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
49201+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
49202+5 5 5 4 4 4
49203+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
49204+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
49205+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
49206+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
49207+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
49208+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
49209+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
49210+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
49211+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
49212+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
49213+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
49214+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
49215+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49216+4 4 4 4 4 4
49217+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
49218+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
49219+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
49220+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
49221+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
49222+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
49223+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
49224+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
49225+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
49226+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
49227+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
49228+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
49229+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49230+4 4 4 4 4 4
49231+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
49232+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
49233+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
49234+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
49235+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
49236+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
49237+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
49238+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
49239+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
49240+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
49241+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
49242+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49243+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49244+4 4 4 4 4 4
49245+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
49246+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
49247+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
49248+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
49249+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
49250+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
49251+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
49252+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
49253+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
49254+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
49255+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
49256+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49257+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49258+4 4 4 4 4 4
49259+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
49260+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
49261+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
49262+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
49263+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
49264+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
49265+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
49266+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
49267+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
49268+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
49269+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
49270+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49271+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49272+4 4 4 4 4 4
49273+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
49274+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
49275+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
49276+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
49277+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
49278+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
49279+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
49280+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
49281+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
49282+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
49283+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
49284+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49285+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49286+4 4 4 4 4 4
49287+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
49288+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
49289+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
49290+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
49291+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
49292+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
49293+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
49294+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
49295+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
49296+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
49297+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
49298+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49299+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49300+4 4 4 4 4 4
49301+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
49302+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
49303+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
49304+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
49305+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
49306+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
49307+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
49308+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
49309+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
49310+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
49311+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
49312+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49313+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49314+4 4 4 4 4 4
49315+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
49316+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
49317+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
49318+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
49319+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
49320+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
49321+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
49322+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
49323+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
49324+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
49325+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
49326+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49327+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49328+4 4 4 4 4 4
49329+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
49330+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
49331+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
49332+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
49333+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
49334+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
49335+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
49336+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
49337+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
49338+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
49339+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
49340+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49341+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49342+4 4 4 4 4 4
49343+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
49344+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
49345+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
49346+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
49347+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
49348+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
49349+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
49350+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
49351+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
49352+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
49353+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
49354+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49355+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49356+4 4 4 4 4 4
49357+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
49358+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
49359+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
49360+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
49361+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
49362+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
49363+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
49364+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
49365+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
49366+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
49367+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
49368+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49369+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49370+4 4 4 4 4 4
49371+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
49372+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
49373+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
49374+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
49375+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
49376+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
49377+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
49378+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
49379+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
49380+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
49381+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
49382+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49383+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49384+4 4 4 4 4 4
49385+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
49386+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
49387+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
49388+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
49389+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
49390+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
49391+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
49392+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
49393+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
49394+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
49395+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
49396+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49397+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49398+4 4 4 4 4 4
49399+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
49400+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
49401+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
49402+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
49403+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
49404+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
49405+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
49406+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
49407+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
49408+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
49409+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
49410+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49411+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49412+4 4 4 4 4 4
49413+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
49414+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
49415+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
49416+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
49417+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
49418+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
49419+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
49420+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
49421+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
49422+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
49423+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
49424+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49425+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49426+4 4 4 4 4 4
49427+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
49428+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
49429+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
49430+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
49431+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
49432+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
49433+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
49434+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
49435+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
49436+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
49437+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
49438+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49439+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
49440+4 4 4 4 4 4
49441+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
49442+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
49443+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
49444+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
49445+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
49446+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
49447+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
49448+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
49449+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
[ Patch lines 49450+ through 49916+: roughly 470 further added lines of ASCII RGB pixel triplets (values 0 to 255, three per sample), the remainder of the embedded image data, apparently a replacement logo bitmap, that this hunk inserts. The omitted span contains only raw pixel payload, no code. ]
49917diff --git a/drivers/video/mb862xx/mb862xxfb_accel.c b/drivers/video/mb862xx/mb862xxfb_accel.c
49918index fe92eed..106e085 100644
49919--- a/drivers/video/mb862xx/mb862xxfb_accel.c
49920+++ b/drivers/video/mb862xx/mb862xxfb_accel.c
49921@@ -312,14 +312,18 @@ void mb862xxfb_init_accel(struct fb_info *info, int xres)
49922 struct mb862xxfb_par *par = info->par;
49923
49924 if (info->var.bits_per_pixel == 32) {
49925- info->fbops->fb_fillrect = cfb_fillrect;
49926- info->fbops->fb_copyarea = cfb_copyarea;
49927- info->fbops->fb_imageblit = cfb_imageblit;
49928+ pax_open_kernel();
49929+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
49930+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
49931+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
49932+ pax_close_kernel();
49933 } else {
49934 outreg(disp, GC_L0EM, 3);
49935- info->fbops->fb_fillrect = mb86290fb_fillrect;
49936- info->fbops->fb_copyarea = mb86290fb_copyarea;
49937- info->fbops->fb_imageblit = mb86290fb_imageblit;
49938+ pax_open_kernel();
49939+ *(void **)&info->fbops->fb_fillrect = mb86290fb_fillrect;
49940+ *(void **)&info->fbops->fb_copyarea = mb86290fb_copyarea;
49941+ *(void **)&info->fbops->fb_imageblit = mb86290fb_imageblit;
49942+ pax_close_kernel();
49943 }
49944 outreg(draw, GDC_REG_DRAW_BASE, 0);
49945 outreg(draw, GDC_REG_MODE_MISC, 0x8000);
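
This hunk shows the pattern that recurs throughout the framebuffer changes below (nvidia, s1d13xxx, smscufx, udlfb, uvesafb, vesafb): under PaX, ops tables such as struct fb_ops are constified and live on read-only pages, so the driver can no longer assign its accelerated callbacks directly. The patch brackets each store with pax_open_kernel()/pax_close_kernel() and writes through *(void **)& to strip the const qualifier. A minimal userspace sketch of the idea, with mprotect() standing in for the pax_open_kernel()/pax_close_kernel() pair and struct ops, sw_draw, hw_draw as illustrative names rather than kernel symbols:

    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    struct ops { void (*draw)(void); };

    static void sw_draw(void) { puts("software drawing path"); }
    static void hw_draw(void) { puts("accelerated drawing path"); }

    int main(void)
    {
        long page = sysconf(_SC_PAGESIZE);

        /* Give the ops table its own page so it can be sealed read-only. */
        struct ops *ops = mmap(NULL, page, PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (ops == MAP_FAILED)
            return 1;
        ops->draw = sw_draw;
        mprotect(ops, page, PROT_READ);               /* table sealed        */

        mprotect(ops, page, PROT_READ | PROT_WRITE);  /* ~pax_open_kernel()  */
        ops->draw = hw_draw;                          /* the patched store   */
        mprotect(ops, page, PROT_READ);               /* ~pax_close_kernel() */

        ops->draw();                                  /* accelerated path    */
        return 0;
    }

In the kernel hunk the member is additionally const-qualified at the C level, which is why the stores must go through the *(void **)& cast as well as the open/close window.
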
49946diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
49947index ff22871..b129bed 100644
49948--- a/drivers/video/nvidia/nvidia.c
49949+++ b/drivers/video/nvidia/nvidia.c
49950@@ -669,19 +669,23 @@ static int nvidiafb_set_par(struct fb_info *info)
49951 info->fix.line_length = (info->var.xres_virtual *
49952 info->var.bits_per_pixel) >> 3;
49953 if (info->var.accel_flags) {
49954- info->fbops->fb_imageblit = nvidiafb_imageblit;
49955- info->fbops->fb_fillrect = nvidiafb_fillrect;
49956- info->fbops->fb_copyarea = nvidiafb_copyarea;
49957- info->fbops->fb_sync = nvidiafb_sync;
49958+ pax_open_kernel();
49959+ *(void **)&info->fbops->fb_imageblit = nvidiafb_imageblit;
49960+ *(void **)&info->fbops->fb_fillrect = nvidiafb_fillrect;
49961+ *(void **)&info->fbops->fb_copyarea = nvidiafb_copyarea;
49962+ *(void **)&info->fbops->fb_sync = nvidiafb_sync;
49963+ pax_close_kernel();
49964 info->pixmap.scan_align = 4;
49965 info->flags &= ~FBINFO_HWACCEL_DISABLED;
49966 info->flags |= FBINFO_READS_FAST;
49967 NVResetGraphics(info);
49968 } else {
49969- info->fbops->fb_imageblit = cfb_imageblit;
49970- info->fbops->fb_fillrect = cfb_fillrect;
49971- info->fbops->fb_copyarea = cfb_copyarea;
49972- info->fbops->fb_sync = NULL;
49973+ pax_open_kernel();
49974+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
49975+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
49976+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
49977+ *(void **)&info->fbops->fb_sync = NULL;
49978+ pax_close_kernel();
49979 info->pixmap.scan_align = 1;
49980 info->flags |= FBINFO_HWACCEL_DISABLED;
49981 info->flags &= ~FBINFO_READS_FAST;
49982@@ -1173,8 +1177,11 @@ static int nvidia_set_fbinfo(struct fb_info *info)
49983 info->pixmap.size = 8 * 1024;
49984 info->pixmap.flags = FB_PIXMAP_SYSTEM;
49985
49986- if (!hwcur)
49987- info->fbops->fb_cursor = NULL;
49988+ if (!hwcur) {
49989+ pax_open_kernel();
49990+ *(void **)&info->fbops->fb_cursor = NULL;
49991+ pax_close_kernel();
49992+ }
49993
49994 info->var.accel_flags = (!noaccel);
49995
49996diff --git a/drivers/video/output.c b/drivers/video/output.c
49997index 0d6f2cd..6285b97 100644
49998--- a/drivers/video/output.c
49999+++ b/drivers/video/output.c
50000@@ -97,7 +97,7 @@ struct output_device *video_output_register(const char *name,
50001 new_dev->props = op;
50002 new_dev->dev.class = &video_output_class;
50003 new_dev->dev.parent = dev;
50004- dev_set_name(&new_dev->dev, name);
50005+ dev_set_name(&new_dev->dev, "%s", name);
50006 dev_set_drvdata(&new_dev->dev, devdata);
50007 ret_code = device_register(&new_dev->dev);
50008 if (ret_code) {
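
The one-line output.c change is a classic format-string fix: dev_set_name() takes a printf-style format, so passing the caller-supplied name directly would let any '%' specifiers in it be interpreted against garbage argument slots. A userspace sketch with printf() standing in for dev_set_name(); the name string is illustrative:

    #include <stdio.h>

    int main(void)
    {
        /* Imagine this string arrived from a driver or from user input. */
        const char *name = "lcd-%x-%n";

        /* printf(name) would interpret %x/%n against whatever happens to
         * be in the variadic slots; %n even writes through a pointer.
         * Routing the string through "%s" prints it literally instead.  */
        printf("%s\n", name);
        return 0;
    }
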
50009diff --git a/drivers/video/s1d13xxxfb.c b/drivers/video/s1d13xxxfb.c
50010index 05c2dc3..ea1f391 100644
50011--- a/drivers/video/s1d13xxxfb.c
50012+++ b/drivers/video/s1d13xxxfb.c
50013@@ -881,8 +881,10 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
50014
50015 switch(prod_id) {
50016 case S1D13506_PROD_ID: /* activate acceleration */
50017- s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
50018- s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
50019+ pax_open_kernel();
50020+ *(void **)&s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
50021+ *(void **)&s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
50022+ pax_close_kernel();
50023 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
50024 FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
50025 break;
50026diff --git a/drivers/video/smscufx.c b/drivers/video/smscufx.c
50027index b2b33fc..f9f4658 100644
50028--- a/drivers/video/smscufx.c
50029+++ b/drivers/video/smscufx.c
50030@@ -1175,7 +1175,9 @@ static int ufx_ops_release(struct fb_info *info, int user)
50031 fb_deferred_io_cleanup(info);
50032 kfree(info->fbdefio);
50033 info->fbdefio = NULL;
50034- info->fbops->fb_mmap = ufx_ops_mmap;
50035+ pax_open_kernel();
50036+ *(void **)&info->fbops->fb_mmap = ufx_ops_mmap;
50037+ pax_close_kernel();
50038 }
50039
50040 pr_debug("released /dev/fb%d user=%d count=%d",
50041diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
50042index ec03e72..f578436 100644
50043--- a/drivers/video/udlfb.c
50044+++ b/drivers/video/udlfb.c
50045@@ -623,11 +623,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
50046 dlfb_urb_completion(urb);
50047
50048 error:
50049- atomic_add(bytes_sent, &dev->bytes_sent);
50050- atomic_add(bytes_identical, &dev->bytes_identical);
50051- atomic_add(width*height*2, &dev->bytes_rendered);
50052+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
50053+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
50054+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
50055 end_cycles = get_cycles();
50056- atomic_add(((unsigned int) ((end_cycles - start_cycles)
50057+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
50058 >> 10)), /* Kcycles */
50059 &dev->cpu_kcycles_used);
50060
50061@@ -748,11 +748,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
50062 dlfb_urb_completion(urb);
50063
50064 error:
50065- atomic_add(bytes_sent, &dev->bytes_sent);
50066- atomic_add(bytes_identical, &dev->bytes_identical);
50067- atomic_add(bytes_rendered, &dev->bytes_rendered);
50068+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
50069+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
50070+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
50071 end_cycles = get_cycles();
50072- atomic_add(((unsigned int) ((end_cycles - start_cycles)
50073+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
50074 >> 10)), /* Kcycles */
50075 &dev->cpu_kcycles_used);
50076 }
50077@@ -993,7 +993,9 @@ static int dlfb_ops_release(struct fb_info *info, int user)
50078 fb_deferred_io_cleanup(info);
50079 kfree(info->fbdefio);
50080 info->fbdefio = NULL;
50081- info->fbops->fb_mmap = dlfb_ops_mmap;
50082+ pax_open_kernel();
50083+ *(void **)&info->fbops->fb_mmap = dlfb_ops_mmap;
50084+ pax_close_kernel();
50085 }
50086
50087 pr_warn("released /dev/fb%d user=%d count=%d\n",
50088@@ -1376,7 +1378,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
50089 struct fb_info *fb_info = dev_get_drvdata(fbdev);
50090 struct dlfb_data *dev = fb_info->par;
50091 return snprintf(buf, PAGE_SIZE, "%u\n",
50092- atomic_read(&dev->bytes_rendered));
50093+ atomic_read_unchecked(&dev->bytes_rendered));
50094 }
50095
50096 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
50097@@ -1384,7 +1386,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
50098 struct fb_info *fb_info = dev_get_drvdata(fbdev);
50099 struct dlfb_data *dev = fb_info->par;
50100 return snprintf(buf, PAGE_SIZE, "%u\n",
50101- atomic_read(&dev->bytes_identical));
50102+ atomic_read_unchecked(&dev->bytes_identical));
50103 }
50104
50105 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
50106@@ -1392,7 +1394,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
50107 struct fb_info *fb_info = dev_get_drvdata(fbdev);
50108 struct dlfb_data *dev = fb_info->par;
50109 return snprintf(buf, PAGE_SIZE, "%u\n",
50110- atomic_read(&dev->bytes_sent));
50111+ atomic_read_unchecked(&dev->bytes_sent));
50112 }
50113
50114 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
50115@@ -1400,7 +1402,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
50116 struct fb_info *fb_info = dev_get_drvdata(fbdev);
50117 struct dlfb_data *dev = fb_info->par;
50118 return snprintf(buf, PAGE_SIZE, "%u\n",
50119- atomic_read(&dev->cpu_kcycles_used));
50120+ atomic_read_unchecked(&dev->cpu_kcycles_used));
50121 }
50122
50123 static ssize_t edid_show(
50124@@ -1460,10 +1462,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
50125 struct fb_info *fb_info = dev_get_drvdata(fbdev);
50126 struct dlfb_data *dev = fb_info->par;
50127
50128- atomic_set(&dev->bytes_rendered, 0);
50129- atomic_set(&dev->bytes_identical, 0);
50130- atomic_set(&dev->bytes_sent, 0);
50131- atomic_set(&dev->cpu_kcycles_used, 0);
50132+ atomic_set_unchecked(&dev->bytes_rendered, 0);
50133+ atomic_set_unchecked(&dev->bytes_identical, 0);
50134+ atomic_set_unchecked(&dev->bytes_sent, 0);
50135+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
50136
50137 return count;
50138 }
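
The udlfb counters are switched to the atomic_*_unchecked variants because, under PaX's refcount-overflow hardening, plain atomic_t arithmetic is instrumented to trap on signed overflow (a wrap usually means a reference-count bug). Pure statistics such as bytes_rendered may wrap legitimately, so they opt out. A sketch of the distinction using C11 atomics; checked_add() is an illustrative stand-in for the instrumented kernel primitive, not a real API:

    #include <limits.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    static atomic_int  refcount       = INT_MAX;   /* one add from wrapping */
    static atomic_uint bytes_rendered = UINT_MAX;  /* stats, free to wrap   */

    static void checked_add(atomic_int *v, int n)
    {
        /* A race-free version would use a compare-and-swap loop; the
         * pre-check is enough to show the idea. */
        int old = atomic_load(v);
        if (n > 0 && old > INT_MAX - n) {
            fprintf(stderr, "refcount overflow detected, aborting\n");
            abort();
        }
        atomic_fetch_add(v, n);
    }

    int main(void)
    {
        atomic_fetch_add(&bytes_rendered, 1);      /* wraps to 0: harmless */
        printf("bytes_rendered=%u\n", atomic_load(&bytes_rendered));
        checked_add(&refcount, 1);                 /* trips the check      */
        return 0;
    }
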
50139diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
50140index e328a61..1b08ecb 100644
50141--- a/drivers/video/uvesafb.c
50142+++ b/drivers/video/uvesafb.c
50143@@ -19,6 +19,7 @@
50144 #include <linux/io.h>
50145 #include <linux/mutex.h>
50146 #include <linux/slab.h>
50147+#include <linux/moduleloader.h>
50148 #include <video/edid.h>
50149 #include <video/uvesafb.h>
50150 #ifdef CONFIG_X86
50151@@ -569,10 +570,32 @@ static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
50152 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
50153 par->pmi_setpal = par->ypan = 0;
50154 } else {
50155+
50156+#ifdef CONFIG_PAX_KERNEXEC
50157+#ifdef CONFIG_MODULES
50158+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
50159+#endif
50160+ if (!par->pmi_code) {
50161+ par->pmi_setpal = par->ypan = 0;
50162+ return 0;
50163+ }
50164+#endif
50165+
50166 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
50167 + task->t.regs.edi);
50168+
50169+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
50170+ pax_open_kernel();
50171+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
50172+ pax_close_kernel();
50173+
50174+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
50175+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
50176+#else
50177 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
50178 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
50179+#endif
50180+
50181 printk(KERN_INFO "uvesafb: protected mode interface info at "
50182 "%04x:%04x\n",
50183 (u16)task->t.regs.es, (u16)task->t.regs.edi);
50184@@ -817,13 +840,14 @@ static int uvesafb_vbe_init(struct fb_info *info)
50185 par->ypan = ypan;
50186
50187 if (par->pmi_setpal || par->ypan) {
50188+#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
50189 if (__supported_pte_mask & _PAGE_NX) {
50190 par->pmi_setpal = par->ypan = 0;
50191 printk(KERN_WARNING "uvesafb: NX protection is actively."
50192 "We have better not to use the PMI.\n");
50193- } else {
50194+ } else
50195+#endif
50196 uvesafb_vbe_getpmi(task, par);
50197- }
50198 }
50199 #else
50200 /* The protected mode interface is not available on non-x86. */
50201@@ -1457,8 +1481,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
50202 info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0;
50203
50204 /* Disable blanking if the user requested so. */
50205- if (!blank)
50206- info->fbops->fb_blank = NULL;
50207+ if (!blank) {
50208+ pax_open_kernel();
50209+ *(void **)&info->fbops->fb_blank = NULL;
50210+ pax_close_kernel();
50211+ }
50212
50213 /*
50214 * Find out how much IO memory is required for the mode with
50215@@ -1534,8 +1561,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
50216 info->flags = FBINFO_FLAG_DEFAULT |
50217 (par->ypan ? FBINFO_HWACCEL_YPAN : 0);
50218
50219- if (!par->ypan)
50220- info->fbops->fb_pan_display = NULL;
50221+ if (!par->ypan) {
50222+ pax_open_kernel();
50223+ *(void **)&info->fbops->fb_pan_display = NULL;
50224+ pax_close_kernel();
50225+ }
50226 }
50227
50228 static void uvesafb_init_mtrr(struct fb_info *info)
50229@@ -1836,6 +1866,11 @@ out:
50230 if (par->vbe_modes)
50231 kfree(par->vbe_modes);
50232
50233+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
50234+ if (par->pmi_code)
50235+ module_free_exec(NULL, par->pmi_code);
50236+#endif
50237+
50238 framebuffer_release(info);
50239 return err;
50240 }
50241@@ -1862,6 +1897,12 @@ static int uvesafb_remove(struct platform_device *dev)
50242 kfree(par->vbe_state_orig);
50243 if (par->vbe_state_saved)
50244 kfree(par->vbe_state_saved);
50245+
50246+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
50247+ if (par->pmi_code)
50248+ module_free_exec(NULL, par->pmi_code);
50249+#endif
50250+
50251 }
50252
50253 framebuffer_release(info);
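
The uvesafb changes deal with KERNEXEC's W^X policy: the BIOS protected-mode interface (PMI) code sits in ordinary data memory reached via phys_to_virt(), which is no longer executable, so the patch allocates executable module space with module_alloc_exec(), copies the PMI code there inside an open/close window, and translates the entry points with ktva_ktla(). The vesafb hunk below applies the same treatment. A userspace analogue of the stage-then-execute flow; x86-64 only, and the 6-byte stub ("mov eax, 42; ret") is purely illustrative:

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        static const unsigned char stub[] = {
            0xb8, 0x2a, 0x00, 0x00, 0x00,   /* mov eax, 42 */
            0xc3                            /* ret         */
        };
        long page = sysconf(_SC_PAGESIZE);

        unsigned char *buf = mmap(NULL, page, PROT_READ | PROT_WRITE,
                                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (buf == MAP_FAILED)
            return 1;

        memcpy(buf, stub, sizeof(stub));            /* stage while writable */
        mprotect(buf, page, PROT_READ | PROT_EXEC); /* flip to executable;
                                                       no mapping is ever
                                                       writable+executable */

        int (*fn)(void) = (int (*)(void))buf;
        printf("stub returned %d\n", fn());         /* prints 42 */
        return 0;
    }

Note that on a PaX MPROTECT-enabled system this very mprotect() would be refused for an unprivileged process, which is exactly the policy the kernel-side code has to work within.
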
50254diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
50255index 501b340..d80aa17 100644
50256--- a/drivers/video/vesafb.c
50257+++ b/drivers/video/vesafb.c
50258@@ -9,6 +9,7 @@
50259 */
50260
50261 #include <linux/module.h>
50262+#include <linux/moduleloader.h>
50263 #include <linux/kernel.h>
50264 #include <linux/errno.h>
50265 #include <linux/string.h>
50266@@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
50267 static int vram_total __initdata; /* Set total amount of memory */
50268 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
50269 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
50270-static void (*pmi_start)(void) __read_mostly;
50271-static void (*pmi_pal) (void) __read_mostly;
50272+static void (*pmi_start)(void) __read_only;
50273+static void (*pmi_pal) (void) __read_only;
50274 static int depth __read_mostly;
50275 static int vga_compat __read_mostly;
50276 /* --------------------------------------------------------------------- */
50277@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
50278 unsigned int size_vmode;
50279 unsigned int size_remap;
50280 unsigned int size_total;
50281+ void *pmi_code = NULL;
50282
50283 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
50284 return -ENODEV;
50285@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
50286 size_remap = size_total;
50287 vesafb_fix.smem_len = size_remap;
50288
50289-#ifndef __i386__
50290- screen_info.vesapm_seg = 0;
50291-#endif
50292-
50293 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
50294 printk(KERN_WARNING
50295 "vesafb: cannot reserve video memory at 0x%lx\n",
50296@@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
50297 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
50298 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
50299
50300+#ifdef __i386__
50301+
50302+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
50303+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
50304+ if (!pmi_code)
50305+#elif !defined(CONFIG_PAX_KERNEXEC)
50306+ if (0)
50307+#endif
50308+
50309+#endif
50310+ screen_info.vesapm_seg = 0;
50311+
50312 if (screen_info.vesapm_seg) {
50313- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
50314- screen_info.vesapm_seg,screen_info.vesapm_off);
50315+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
50316+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
50317 }
50318
50319 if (screen_info.vesapm_seg < 0xc000)
50320@@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
50321
50322 if (ypan || pmi_setpal) {
50323 unsigned short *pmi_base;
50324+
50325 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
50326- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
50327- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
50328+
50329+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
50330+ pax_open_kernel();
50331+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
50332+#else
50333+ pmi_code = pmi_base;
50334+#endif
50335+
50336+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
50337+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
50338+
50339+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
50340+ pmi_start = ktva_ktla(pmi_start);
50341+ pmi_pal = ktva_ktla(pmi_pal);
50342+ pax_close_kernel();
50343+#endif
50344+
50345 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
50346 if (pmi_base[3]) {
50347 printk(KERN_INFO "vesafb: pmi: ports = ");
50348@@ -472,8 +498,11 @@ static int __init vesafb_probe(struct platform_device *dev)
50349 info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
50350 (ypan ? FBINFO_HWACCEL_YPAN : 0);
50351
50352- if (!ypan)
50353- info->fbops->fb_pan_display = NULL;
50354+ if (!ypan) {
50355+ pax_open_kernel();
50356+ *(void **)&info->fbops->fb_pan_display = NULL;
50357+ pax_close_kernel();
50358+ }
50359
50360 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
50361 err = -ENOMEM;
50362@@ -488,6 +517,11 @@ static int __init vesafb_probe(struct platform_device *dev)
50363 info->node, info->fix.id);
50364 return 0;
50365 err:
50366+
50367+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
50368+ module_free_exec(NULL, pmi_code);
50369+#endif
50370+
50371 if (info->screen_base)
50372 iounmap(info->screen_base);
50373 framebuffer_release(info);
50374diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
50375index 88714ae..16c2e11 100644
50376--- a/drivers/video/via/via_clock.h
50377+++ b/drivers/video/via/via_clock.h
50378@@ -56,7 +56,7 @@ struct via_clock {
50379
50380 void (*set_engine_pll_state)(u8 state);
50381 void (*set_engine_pll)(struct via_pll_config config);
50382-};
50383+} __no_const;
50384
50385
50386 static inline u32 get_pll_internal_frequency(u32 ref_freq,
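
The __no_const tag on struct via_clock is the escape hatch for the constify plugin: structs made entirely of function pointers are force-moved into read-only memory unless annotated, and via_clock is filled in at probe time per chip, so it must stay writable. A sketch assuming the plugin's attribute spelling; outside a plugin build the macro is stubbed out, so the file compiles anywhere:

    #ifdef CONSTIFY_PLUGIN
    #define __no_const __attribute__((no_const))
    #else
    #define __no_const
    #endif

    struct clock_ops {
        void (*set_pll_state)(unsigned char state);
    } __no_const;              /* filled in at probe time, stays writable */

    static void dummy_set_state(unsigned char state) { (void)state; }

    int main(void)
    {
        struct clock_ops ops;  /* would be constified without the tag */
        ops.set_pll_state = dummy_set_state;
        ops.set_pll_state(1);
        return 0;
    }
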
50387diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
50388index fef20db..d28b1ab 100644
50389--- a/drivers/xen/xenfs/xenstored.c
50390+++ b/drivers/xen/xenfs/xenstored.c
50391@@ -24,7 +24,12 @@ static int xsd_release(struct inode *inode, struct file *file)
50392 static int xsd_kva_open(struct inode *inode, struct file *file)
50393 {
50394 file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
50395+#ifdef CONFIG_GRKERNSEC_HIDESYM
50396+ NULL);
50397+#else
50398 xen_store_interface);
50399+#endif
50400+
50401 if (!file->private_data)
50402 return -ENOMEM;
50403 return 0;
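
The xenstored change is a kernel-pointer infoleak fix: xsd_kva_open() formats the address of xen_store_interface with "%p" into a file readable from userspace, and GRKERNSEC_HIDESYM's whole purpose is to keep kernel addresses out of userland, so the patch substitutes NULL at compile time. A sketch of the idea; HIDE_PTR is an illustrative macro, the patch itself just swaps the argument under #ifdef:

    #include <stdio.h>

    #ifdef HIDESYM
    #define HIDE_PTR(p) ((void *)0)
    #else
    #define HIDE_PTR(p) (p)
    #endif

    int main(void)
    {
        int token;
        /* With -DHIDESYM the printed value becomes a null pointer
         * instead of a real stack address. */
        printf("interface at %p\n", HIDE_PTR((void *)&token));
        return 0;
    }
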
50404diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
50405index 055562c..fdfb10d 100644
50406--- a/fs/9p/vfs_addr.c
50407+++ b/fs/9p/vfs_addr.c
50408@@ -186,7 +186,7 @@ static int v9fs_vfs_writepage_locked(struct page *page)
50409
50410 retval = v9fs_file_write_internal(inode,
50411 v9inode->writeback_fid,
50412- (__force const char __user *)buffer,
50413+ (const char __force_user *)buffer,
50414 len, &offset, 0);
50415 if (retval > 0)
50416 retval = 0;
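
The 9p change tightens sparse address-space annotations: a kernel buffer is deliberately handed to an API that expects a __user pointer, and grsecurity spells that as a single __force_user cast (by my reading, shorthand for __force __user). A self-contained sketch that compiles with or without sparse; write_internal() and the buffer are illustrative:

    #ifdef __CHECKER__
    #define __user  __attribute__((noderef, address_space(1)))
    #define __force __attribute__((force))
    #else
    #define __user
    #define __force
    #endif
    #define __force_user __force __user

    static long write_internal(const char __user *buf, unsigned long len)
    {
        (void)buf;              /* a real implementation would copy from buf */
        return (long)len;
    }

    int main(void)
    {
        char kbuf[8] = "data";
        /* A kernel buffer handed to a user-pointer API on purpose: without
         * __force, sparse would flag the address-space change as a bug. */
        return (int)write_internal((const char __force_user *)kbuf, 4) != 4;
    }
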
50417diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
50418index d86edc8..40ff2fb 100644
50419--- a/fs/9p/vfs_inode.c
50420+++ b/fs/9p/vfs_inode.c
50421@@ -1314,7 +1314,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
50422 void
50423 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
50424 {
50425- char *s = nd_get_link(nd);
50426+ const char *s = nd_get_link(nd);
50427
50428 p9_debug(P9_DEBUG_VFS, " %s %s\n",
50429 dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
50430diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
50431index 370b24c..ff0be7b 100644
50432--- a/fs/Kconfig.binfmt
50433+++ b/fs/Kconfig.binfmt
50434@@ -103,7 +103,7 @@ config HAVE_AOUT
50435
50436 config BINFMT_AOUT
50437 tristate "Kernel support for a.out and ECOFF binaries"
50438- depends on HAVE_AOUT
50439+ depends on HAVE_AOUT && BROKEN
50440 ---help---
50441 A.out (Assembler.OUTput) is a set of formats for libraries and
50442 executables used in the earliest versions of UNIX. Linux used
50443diff --git a/fs/aio.c b/fs/aio.c
50444index 2bbcacf..8614116 100644
50445--- a/fs/aio.c
50446+++ b/fs/aio.c
50447@@ -160,7 +160,7 @@ static int aio_setup_ring(struct kioctx *ctx)
50448 size += sizeof(struct io_event) * nr_events;
50449 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
50450
50451- if (nr_pages < 0)
50452+ if (nr_pages <= 0)
50453 return -EINVAL;
50454
50455 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
50456@@ -950,6 +950,7 @@ static ssize_t aio_rw_vect_retry(struct kiocb *iocb, int rw, aio_rw_op *rw_op)
50457 static ssize_t aio_setup_vectored_rw(int rw, struct kiocb *kiocb, bool compat)
50458 {
50459 ssize_t ret;
50460+ struct iovec iovstack;
50461
50462 kiocb->ki_nr_segs = kiocb->ki_nbytes;
50463
50464@@ -957,17 +958,22 @@ static ssize_t aio_setup_vectored_rw(int rw, struct kiocb *kiocb, bool compat)
50465 if (compat)
50466 ret = compat_rw_copy_check_uvector(rw,
50467 (struct compat_iovec __user *)kiocb->ki_buf,
50468- kiocb->ki_nr_segs, 1, &kiocb->ki_inline_vec,
50469+ kiocb->ki_nr_segs, 1, &iovstack,
50470 &kiocb->ki_iovec);
50471 else
50472 #endif
50473 ret = rw_copy_check_uvector(rw,
50474 (struct iovec __user *)kiocb->ki_buf,
50475- kiocb->ki_nr_segs, 1, &kiocb->ki_inline_vec,
50476+ kiocb->ki_nr_segs, 1, &iovstack,
50477 &kiocb->ki_iovec);
50478 if (ret < 0)
50479 return ret;
50480
50481+ if (kiocb->ki_iovec == &iovstack) {
50482+ kiocb->ki_inline_vec = iovstack;
50483+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
50484+ }
50485+
50486 /* ki_nbytes now reflect bytes instead of segs */
50487 kiocb->ki_nbytes = ret;
50488 return 0;
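
Two things happen in the aio hunk: the page-count test becomes "nr_pages <= 0" so a size computation that wrapped (yielding a page count of zero) is rejected along with negatives, and the vectored-r/w setup routes the fast-path iovec through a stack temporary before installing it in the kiocb. A minimal model of the first change, with illustrative numbers standing in for a wrapped size:

    #include <stdio.h>

    #define PAGE_SIZE  4096UL
    #define PAGE_SHIFT 12

    int main(void)
    {
        /* Pretend the size accumulation wrapped around earlier. */
        unsigned long size = (unsigned long)-1 - 100;
        long nr_pages = (long)((size + PAGE_SIZE - 1) >> PAGE_SHIFT);

        if (nr_pages <= 0)      /* the patched test also rejects 0 */
            puts("rejected: page count degenerated to 0");
        else
            printf("nr_pages=%ld\n", nr_pages);
        return 0;
    }
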
50489diff --git a/fs/attr.c b/fs/attr.c
50490index 1449adb..a2038c2 100644
50491--- a/fs/attr.c
50492+++ b/fs/attr.c
50493@@ -102,6 +102,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
50494 unsigned long limit;
50495
50496 limit = rlimit(RLIMIT_FSIZE);
50497+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
50498 if (limit != RLIM_INFINITY && offset > limit)
50499 goto out_sig;
50500 if (offset > inode->i_sb->s_maxbytes)
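
gr_learn_resource() is grsecurity's learning hook: as I understand it, it records the resource amounts processes actually request so that a least-privilege policy can later be generated from the observed behaviour. The sketch below is entirely illustrative high-water-mark bookkeeping, not the real hook, which feeds grsecurity's RBAC learning log:

    #include <stdio.h>

    enum { RES_FSIZE, RES_CORE, RES_MAX };
    static unsigned long learned_max[RES_MAX];

    static void learn_resource(int res, unsigned long wanted)
    {
        if (wanted > learned_max[res])
            learned_max[res] = wanted;
    }

    int main(void)
    {
        learn_resource(RES_FSIZE, 1UL << 20);
        learn_resource(RES_FSIZE, 4096);
        printf("observed FSIZE high-water mark: %lu\n",
               learned_max[RES_FSIZE]);
        return 0;
    }
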
50501diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
50502index 3db70da..7aeec5b 100644
50503--- a/fs/autofs4/waitq.c
50504+++ b/fs/autofs4/waitq.c
50505@@ -59,7 +59,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
50506 {
50507 unsigned long sigpipe, flags;
50508 mm_segment_t fs;
50509- const char *data = (const char *)addr;
50510+ const char __user *data = (const char __force_user *)addr;
50511 ssize_t wr = 0;
50512
50513 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
50514@@ -346,6 +346,10 @@ static int validate_request(struct autofs_wait_queue **wait,
50515 return 1;
50516 }
50517
50518+#ifdef CONFIG_GRKERNSEC_HIDESYM
50519+static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
50520+#endif
50521+
50522 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
50523 enum autofs_notify notify)
50524 {
50525@@ -379,7 +383,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
50526
50527 /* If this is a direct mount request create a dummy name */
50528 if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
50529+#ifdef CONFIG_GRKERNSEC_HIDESYM
50530+ /* this name does get written to userland via autofs4_write() */
50531+ qstr.len = sprintf(name, "%08x", atomic_inc_return_unchecked(&autofs_dummy_name_id));
50532+#else
50533 qstr.len = sprintf(name, "%p", dentry);
50534+#endif
50535 else {
50536 qstr.len = autofs4_getpath(sbi, dentry, &name);
50537 if (!qstr.len) {
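
The autofs4 change removes another "%p" leak: the dummy mount name was derived from the dentry's kernel address, which autofs4_write() then ships to userspace. Under HIDESYM the patch derives it from a monotonically increasing counter instead, which is just as unique but carries no address information. A userspace sketch of the replacement scheme:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_uint name_id;

    static void make_dummy_name(char *buf, size_t len)
    {
        /* fetch_add + 1 mimics atomic_inc_return_unchecked() */
        snprintf(buf, len, "%08x", atomic_fetch_add(&name_id, 1) + 1);
    }

    int main(void)
    {
        char name[16];
        make_dummy_name(name, sizeof(name));
        printf("dummy name: %s\n", name);   /* e.g. "00000001" */
        return 0;
    }
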
50538diff --git a/fs/befs/endian.h b/fs/befs/endian.h
50539index 2722387..c8dd2a7 100644
50540--- a/fs/befs/endian.h
50541+++ b/fs/befs/endian.h
50542@@ -11,7 +11,7 @@
50543
50544 #include <asm/byteorder.h>
50545
50546-static inline u64
50547+static inline u64 __intentional_overflow(-1)
50548 fs64_to_cpu(const struct super_block *sb, fs64 n)
50549 {
50550 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
50551@@ -29,7 +29,7 @@ cpu_to_fs64(const struct super_block *sb, u64 n)
50552 return (__force fs64)cpu_to_be64(n);
50553 }
50554
50555-static inline u32
50556+static inline u32 __intentional_overflow(-1)
50557 fs32_to_cpu(const struct super_block *sb, fs32 n)
50558 {
50559 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
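
The __intentional_overflow(-1) marker on the befs endianness helpers is, assuming the size_overflow GCC plugin, a whitelist annotation: it exempts functions whose arithmetic is allowed to wrap from the plugin's overflow instrumentation. A sketch in which the macro is stubbed out when the plugin is absent, so it compiles anywhere; the helper name is illustrative:

    #include <stdio.h>

    #ifdef SIZE_OVERFLOW_PLUGIN
    #define __intentional_overflow(...) \
            __attribute__((intentional_overflow(__VA_ARGS__)))
    #else
    #define __intentional_overflow(...)
    #endif

    static inline unsigned long long __intentional_overflow(-1)
    fs64_to_cpu_like(unsigned long long n)
    {
        return __builtin_bswap64(n);   /* byte swap; wrapping is expected */
    }

    int main(void)
    {
        printf("%llx\n", fs64_to_cpu_like(0x0123456789abcdefULL));
        return 0;
    }
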
50560diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
50561index f95dddc..b1e2c1c 100644
50562--- a/fs/befs/linuxvfs.c
50563+++ b/fs/befs/linuxvfs.c
50564@@ -510,7 +510,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
50565 {
50566 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
50567 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
50568- char *link = nd_get_link(nd);
50569+ const char *link = nd_get_link(nd);
50570 if (!IS_ERR(link))
50571 kfree(link);
50572 }
50573diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
50574index bce8769..7fc7544 100644
50575--- a/fs/binfmt_aout.c
50576+++ b/fs/binfmt_aout.c
50577@@ -16,6 +16,7 @@
50578 #include <linux/string.h>
50579 #include <linux/fs.h>
50580 #include <linux/file.h>
50581+#include <linux/security.h>
50582 #include <linux/stat.h>
50583 #include <linux/fcntl.h>
50584 #include <linux/ptrace.h>
50585@@ -59,6 +60,8 @@ static int aout_core_dump(struct coredump_params *cprm)
50586 #endif
50587 # define START_STACK(u) ((void __user *)u.start_stack)
50588
50589+ memset(&dump, 0, sizeof(dump));
50590+
50591 fs = get_fs();
50592 set_fs(KERNEL_DS);
50593 has_dumped = 1;
50594@@ -69,10 +72,12 @@ static int aout_core_dump(struct coredump_params *cprm)
50595
50596 /* If the size of the dump file exceeds the rlimit, then see what would happen
50597 if we wrote the stack, but not the data area. */
50598+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
50599 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
50600 dump.u_dsize = 0;
50601
50602 /* Make sure we have enough room to write the stack and data areas. */
50603+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
50604 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
50605 dump.u_ssize = 0;
50606
50607@@ -233,6 +238,8 @@ static int load_aout_binary(struct linux_binprm * bprm)
50608 rlim = rlimit(RLIMIT_DATA);
50609 if (rlim >= RLIM_INFINITY)
50610 rlim = ~0;
50611+
50612+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
50613 if (ex.a_data + ex.a_bss > rlim)
50614 return -ENOMEM;
50615
50616@@ -267,6 +274,27 @@ static int load_aout_binary(struct linux_binprm * bprm)
50617
50618 install_exec_creds(bprm);
50619
50620+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
50621+ current->mm->pax_flags = 0UL;
50622+#endif
50623+
50624+#ifdef CONFIG_PAX_PAGEEXEC
50625+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
50626+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
50627+
50628+#ifdef CONFIG_PAX_EMUTRAMP
50629+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
50630+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
50631+#endif
50632+
50633+#ifdef CONFIG_PAX_MPROTECT
50634+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
50635+ current->mm->pax_flags |= MF_PAX_MPROTECT;
50636+#endif
50637+
50638+ }
50639+#endif
50640+
50641 if (N_MAGIC(ex) == OMAGIC) {
50642 unsigned long text_addr, map_size;
50643 loff_t pos;
50644@@ -324,7 +352,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
50645 }
50646
50647 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
50648- PROT_READ | PROT_WRITE | PROT_EXEC,
50649+ PROT_READ | PROT_WRITE,
50650 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
50651 fd_offset + ex.a_text);
50652 if (error != N_DATADDR(ex)) {
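
Besides zeroing the core-dump header (so uninitialized kernel stack cannot leak into core files) and dropping PROT_EXEC from the data-segment mapping, the a.out loader gains per-binary PaX flag parsing: header bits let a binary opt out of individual hardenings, with everything defaulting on. A standalone sketch mirroring that logic; the flag names follow the patch but the bit values and example header word are illustrative:

    #include <stdio.h>

    #define F_PAX_PAGEEXEC  (1U << 0)  /* set: binary opts out of PAGEEXEC */
    #define F_PAX_EMUTRAMP  (1U << 1)  /* set: binary wants trampoline emu */
    #define F_PAX_MPROTECT  (1U << 2)  /* set: binary opts out of MPROTECT */

    #define MF_PAX_PAGEEXEC (1UL << 0)
    #define MF_PAX_EMUTRAMP (1UL << 1)
    #define MF_PAX_MPROTECT (1UL << 2)

    static unsigned long parse_aout_pax_flags(unsigned int n_flags)
    {
        unsigned long pax_flags = 0;

        if (!(n_flags & F_PAX_PAGEEXEC)) {
            pax_flags |= MF_PAX_PAGEEXEC;

            if (n_flags & F_PAX_EMUTRAMP)
                pax_flags |= MF_PAX_EMUTRAMP;

            if (!(n_flags & F_PAX_MPROTECT))
                pax_flags |= MF_PAX_MPROTECT;
        }
        return pax_flags;
    }

    int main(void)
    {
        /* A header with no opt-out bits gets the full default set. */
        printf("default pax_flags: %#lx\n", parse_aout_pax_flags(0));
        return 0;
    }
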
50653diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
50654index f8a0b0e..6f036ed 100644
50655--- a/fs/binfmt_elf.c
50656+++ b/fs/binfmt_elf.c
50657@@ -34,6 +34,7 @@
50658 #include <linux/utsname.h>
50659 #include <linux/coredump.h>
50660 #include <linux/sched.h>
50661+#include <linux/xattr.h>
50662 #include <asm/uaccess.h>
50663 #include <asm/param.h>
50664 #include <asm/page.h>
50665@@ -60,6 +61,14 @@ static int elf_core_dump(struct coredump_params *cprm);
50666 #define elf_core_dump NULL
50667 #endif
50668
50669+#ifdef CONFIG_PAX_MPROTECT
50670+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
50671+#endif
50672+
50673+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
50674+static void elf_handle_mmap(struct file *file);
50675+#endif
50676+
50677 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
50678 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
50679 #else
50680@@ -79,6 +88,15 @@ static struct linux_binfmt elf_format = {
50681 .load_binary = load_elf_binary,
50682 .load_shlib = load_elf_library,
50683 .core_dump = elf_core_dump,
50684+
50685+#ifdef CONFIG_PAX_MPROTECT
50686+ .handle_mprotect= elf_handle_mprotect,
50687+#endif
50688+
50689+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
50690+ .handle_mmap = elf_handle_mmap,
50691+#endif
50692+
50693 .min_coredump = ELF_EXEC_PAGESIZE,
50694 };
50695
50696@@ -86,6 +104,8 @@ static struct linux_binfmt elf_format = {
50697
50698 static int set_brk(unsigned long start, unsigned long end)
50699 {
50700+ unsigned long e = end;
50701+
50702 start = ELF_PAGEALIGN(start);
50703 end = ELF_PAGEALIGN(end);
50704 if (end > start) {
50705@@ -94,7 +114,7 @@ static int set_brk(unsigned long start, unsigned long end)
50706 if (BAD_ADDR(addr))
50707 return addr;
50708 }
50709- current->mm->start_brk = current->mm->brk = end;
50710+ current->mm->start_brk = current->mm->brk = e;
50711 return 0;
50712 }
50713
50714@@ -155,12 +175,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
50715 elf_addr_t __user *u_rand_bytes;
50716 const char *k_platform = ELF_PLATFORM;
50717 const char *k_base_platform = ELF_BASE_PLATFORM;
50718- unsigned char k_rand_bytes[16];
50719+ u32 k_rand_bytes[4];
50720 int items;
50721 elf_addr_t *elf_info;
50722 int ei_index = 0;
50723 const struct cred *cred = current_cred();
50724 struct vm_area_struct *vma;
50725+ unsigned long saved_auxv[AT_VECTOR_SIZE];
50726
50727 /*
50728 * In some cases (e.g. Hyper-Threading), we want to avoid L1
50729@@ -202,8 +223,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
50730 * Generate 16 random bytes for userspace PRNG seeding.
50731 */
50732 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
50733- u_rand_bytes = (elf_addr_t __user *)
50734- STACK_ALLOC(p, sizeof(k_rand_bytes));
50735+ prandom_seed(k_rand_bytes[0] ^ prandom_u32());
50736+ prandom_seed(k_rand_bytes[1] ^ prandom_u32());
50737+ prandom_seed(k_rand_bytes[2] ^ prandom_u32());
50738+ prandom_seed(k_rand_bytes[3] ^ prandom_u32());
50739+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
50740+ u_rand_bytes = (elf_addr_t __user *) p;
50741 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
50742 return -EFAULT;
50743
50744@@ -318,9 +343,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
50745 return -EFAULT;
50746 current->mm->env_end = p;
50747
50748+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
50749+
50750 /* Put the elf_info on the stack in the right place. */
50751 sp = (elf_addr_t __user *)envp + 1;
50752- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
50753+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
50754 return -EFAULT;
50755 return 0;
50756 }
50757@@ -388,15 +415,14 @@ static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
50758 an ELF header */
50759
50760 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
50761- struct file *interpreter, unsigned long *interp_map_addr,
50762- unsigned long no_base)
50763+ struct file *interpreter, unsigned long no_base)
50764 {
50765 struct elf_phdr *elf_phdata;
50766 struct elf_phdr *eppnt;
50767- unsigned long load_addr = 0;
50768+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
50769 int load_addr_set = 0;
50770 unsigned long last_bss = 0, elf_bss = 0;
50771- unsigned long error = ~0UL;
50772+ unsigned long error = -EINVAL;
50773 unsigned long total_size;
50774 int retval, i, size;
50775
50776@@ -442,6 +468,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
50777 goto out_close;
50778 }
50779
50780+#ifdef CONFIG_PAX_SEGMEXEC
50781+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
50782+ pax_task_size = SEGMEXEC_TASK_SIZE;
50783+#endif
50784+
50785 eppnt = elf_phdata;
50786 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
50787 if (eppnt->p_type == PT_LOAD) {
50788@@ -465,8 +496,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
50789 map_addr = elf_map(interpreter, load_addr + vaddr,
50790 eppnt, elf_prot, elf_type, total_size);
50791 total_size = 0;
50792- if (!*interp_map_addr)
50793- *interp_map_addr = map_addr;
50794 error = map_addr;
50795 if (BAD_ADDR(map_addr))
50796 goto out_close;
50797@@ -485,8 +514,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
50798 k = load_addr + eppnt->p_vaddr;
50799 if (BAD_ADDR(k) ||
50800 eppnt->p_filesz > eppnt->p_memsz ||
50801- eppnt->p_memsz > TASK_SIZE ||
50802- TASK_SIZE - eppnt->p_memsz < k) {
50803+ eppnt->p_memsz > pax_task_size ||
50804+ pax_task_size - eppnt->p_memsz < k) {
50805 error = -ENOMEM;
50806 goto out_close;
50807 }
50808@@ -538,6 +567,315 @@ out:
50809 return error;
50810 }
50811
50812+#ifdef CONFIG_PAX_PT_PAX_FLAGS
50813+#ifdef CONFIG_PAX_SOFTMODE
50814+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
50815+{
50816+ unsigned long pax_flags = 0UL;
50817+
50818+#ifdef CONFIG_PAX_PAGEEXEC
50819+ if (elf_phdata->p_flags & PF_PAGEEXEC)
50820+ pax_flags |= MF_PAX_PAGEEXEC;
50821+#endif
50822+
50823+#ifdef CONFIG_PAX_SEGMEXEC
50824+ if (elf_phdata->p_flags & PF_SEGMEXEC)
50825+ pax_flags |= MF_PAX_SEGMEXEC;
50826+#endif
50827+
50828+#ifdef CONFIG_PAX_EMUTRAMP
50829+ if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
50830+ pax_flags |= MF_PAX_EMUTRAMP;
50831+#endif
50832+
50833+#ifdef CONFIG_PAX_MPROTECT
50834+ if (elf_phdata->p_flags & PF_MPROTECT)
50835+ pax_flags |= MF_PAX_MPROTECT;
50836+#endif
50837+
50838+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
50839+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
50840+ pax_flags |= MF_PAX_RANDMMAP;
50841+#endif
50842+
50843+ return pax_flags;
50844+}
50845+#endif
50846+
50847+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
50848+{
50849+ unsigned long pax_flags = 0UL;
50850+
50851+#ifdef CONFIG_PAX_PAGEEXEC
50852+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
50853+ pax_flags |= MF_PAX_PAGEEXEC;
50854+#endif
50855+
50856+#ifdef CONFIG_PAX_SEGMEXEC
50857+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
50858+ pax_flags |= MF_PAX_SEGMEXEC;
50859+#endif
50860+
50861+#ifdef CONFIG_PAX_EMUTRAMP
50862+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
50863+ pax_flags |= MF_PAX_EMUTRAMP;
50864+#endif
50865+
50866+#ifdef CONFIG_PAX_MPROTECT
50867+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
50868+ pax_flags |= MF_PAX_MPROTECT;
50869+#endif
50870+
50871+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
50872+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
50873+ pax_flags |= MF_PAX_RANDMMAP;
50874+#endif
50875+
50876+ return pax_flags;
50877+}
50878+#endif
50879+
50880+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
50881+#ifdef CONFIG_PAX_SOFTMODE
50882+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
50883+{
50884+ unsigned long pax_flags = 0UL;
50885+
50886+#ifdef CONFIG_PAX_PAGEEXEC
50887+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
50888+ pax_flags |= MF_PAX_PAGEEXEC;
50889+#endif
50890+
50891+#ifdef CONFIG_PAX_SEGMEXEC
50892+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
50893+ pax_flags |= MF_PAX_SEGMEXEC;
50894+#endif
50895+
50896+#ifdef CONFIG_PAX_EMUTRAMP
50897+ if ((pax_flags_softmode & MF_PAX_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
50898+ pax_flags |= MF_PAX_EMUTRAMP;
50899+#endif
50900+
50901+#ifdef CONFIG_PAX_MPROTECT
50902+ if (pax_flags_softmode & MF_PAX_MPROTECT)
50903+ pax_flags |= MF_PAX_MPROTECT;
50904+#endif
50905+
50906+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
50907+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
50908+ pax_flags |= MF_PAX_RANDMMAP;
50909+#endif
50910+
50911+ return pax_flags;
50912+}
50913+#endif
50914+
50915+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
50916+{
50917+ unsigned long pax_flags = 0UL;
50918+
50919+#ifdef CONFIG_PAX_PAGEEXEC
50920+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
50921+ pax_flags |= MF_PAX_PAGEEXEC;
50922+#endif
50923+
50924+#ifdef CONFIG_PAX_SEGMEXEC
50925+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
50926+ pax_flags |= MF_PAX_SEGMEXEC;
50927+#endif
50928+
50929+#ifdef CONFIG_PAX_EMUTRAMP
50930+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
50931+ pax_flags |= MF_PAX_EMUTRAMP;
50932+#endif
50933+
50934+#ifdef CONFIG_PAX_MPROTECT
50935+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
50936+ pax_flags |= MF_PAX_MPROTECT;
50937+#endif
50938+
50939+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
50940+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
50941+ pax_flags |= MF_PAX_RANDMMAP;
50942+#endif
50943+
50944+ return pax_flags;
50945+}
50946+#endif
50947+
50948+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
50949+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
50950+{
50951+ unsigned long pax_flags = 0UL;
50952+
50953+#ifdef CONFIG_PAX_EI_PAX
50954+
50955+#ifdef CONFIG_PAX_PAGEEXEC
50956+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
50957+ pax_flags |= MF_PAX_PAGEEXEC;
50958+#endif
50959+
50960+#ifdef CONFIG_PAX_SEGMEXEC
50961+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
50962+ pax_flags |= MF_PAX_SEGMEXEC;
50963+#endif
50964+
50965+#ifdef CONFIG_PAX_EMUTRAMP
50966+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
50967+ pax_flags |= MF_PAX_EMUTRAMP;
50968+#endif
50969+
50970+#ifdef CONFIG_PAX_MPROTECT
50971+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
50972+ pax_flags |= MF_PAX_MPROTECT;
50973+#endif
50974+
50975+#ifdef CONFIG_PAX_ASLR
50976+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
50977+ pax_flags |= MF_PAX_RANDMMAP;
50978+#endif
50979+
50980+#else
50981+
50982+#ifdef CONFIG_PAX_PAGEEXEC
50983+ pax_flags |= MF_PAX_PAGEEXEC;
50984+#endif
50985+
50986+#ifdef CONFIG_PAX_SEGMEXEC
50987+ pax_flags |= MF_PAX_SEGMEXEC;
50988+#endif
50989+
50990+#ifdef CONFIG_PAX_MPROTECT
50991+ pax_flags |= MF_PAX_MPROTECT;
50992+#endif
50993+
50994+#ifdef CONFIG_PAX_RANDMMAP
50995+ if (randomize_va_space)
50996+ pax_flags |= MF_PAX_RANDMMAP;
50997+#endif
50998+
50999+#endif
51000+
51001+ return pax_flags;
51002+}
51003+
51004+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
51005+{
51006+
51007+#ifdef CONFIG_PAX_PT_PAX_FLAGS
51008+ unsigned long i;
51009+
51010+ for (i = 0UL; i < elf_ex->e_phnum; i++)
51011+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
51012+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
51013+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
51014+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
51015+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
51016+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
51017+ return ~0UL;
51018+
51019+#ifdef CONFIG_PAX_SOFTMODE
51020+ if (pax_softmode)
51021+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
51022+ else
51023+#endif
51024+
51025+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
51026+ break;
51027+ }
51028+#endif
51029+
51030+ return ~0UL;
51031+}
51032+
51033+static unsigned long pax_parse_xattr_pax(struct file * const file)
51034+{
51035+
51036+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
51037+ ssize_t xattr_size, i;
51038+ unsigned char xattr_value[sizeof("pemrs") - 1];
51039+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
51040+
51041+ xattr_size = pax_getxattr(file->f_path.dentry, xattr_value, sizeof xattr_value);
51042+ if (xattr_size <= 0 || xattr_size > sizeof xattr_value)
51043+ return ~0UL;
51044+
51045+ for (i = 0; i < xattr_size; i++)
51046+ switch (xattr_value[i]) {
51047+ default:
51048+ return ~0UL;
51049+
51050+#define parse_flag(option1, option2, flag) \
51051+ case option1: \
51052+ if (pax_flags_hardmode & MF_PAX_##flag) \
51053+ return ~0UL; \
51054+ pax_flags_hardmode |= MF_PAX_##flag; \
51055+ break; \
51056+ case option2: \
51057+ if (pax_flags_softmode & MF_PAX_##flag) \
51058+ return ~0UL; \
51059+ pax_flags_softmode |= MF_PAX_##flag; \
51060+ break;
51061+
51062+ parse_flag('p', 'P', PAGEEXEC);
51063+ parse_flag('e', 'E', EMUTRAMP);
51064+ parse_flag('m', 'M', MPROTECT);
51065+ parse_flag('r', 'R', RANDMMAP);
51066+ parse_flag('s', 'S', SEGMEXEC);
51067+
51068+#undef parse_flag
51069+ }
51070+
51071+ if (pax_flags_hardmode & pax_flags_softmode)
51072+ return ~0UL;
51073+
51074+#ifdef CONFIG_PAX_SOFTMODE
51075+ if (pax_softmode)
51076+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
51077+ else
51078+#endif
51079+
51080+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
51081+#else
51082+ return ~0UL;
51083+#endif
51084+
51085+}
51086+
51087+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
51088+{
51089+ unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
51090+
51091+ pax_flags = pax_parse_ei_pax(elf_ex);
51092+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
51093+ xattr_pax_flags = pax_parse_xattr_pax(file);
51094+
51095+ if (pt_pax_flags == ~0UL)
51096+ pt_pax_flags = xattr_pax_flags;
51097+ else if (xattr_pax_flags == ~0UL)
51098+ xattr_pax_flags = pt_pax_flags;
51099+ if (pt_pax_flags != xattr_pax_flags)
51100+ return -EINVAL;
51101+ if (pt_pax_flags != ~0UL)
51102+ pax_flags = pt_pax_flags;
51103+
51104+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
51105+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
51106+ if ((__supported_pte_mask & _PAGE_NX))
51107+ pax_flags &= ~MF_PAX_SEGMEXEC;
51108+ else
51109+ pax_flags &= ~MF_PAX_PAGEEXEC;
51110+ }
51111+#endif
51112+
51113+ if (0 > pax_check_flags(&pax_flags))
51114+ return -EINVAL;
51115+
51116+ current->mm->pax_flags = pax_flags;
51117+ return 0;
51118+}
51119+#endif
51120+
51121 /*
51122 * These are the functions used to load ELF style executables and shared
51123 * libraries. There is no binary dependent code anywhere else.
51124@@ -554,6 +892,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
51125 {
51126 unsigned int random_variable = 0;
51127
51128+#ifdef CONFIG_PAX_RANDUSTACK
51129+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
51130+ return stack_top - current->mm->delta_stack;
51131+#endif
51132+
51133 if ((current->flags & PF_RANDOMIZE) &&
51134 !(current->personality & ADDR_NO_RANDOMIZE)) {
51135 random_variable = get_random_int() & STACK_RND_MASK;
51136@@ -572,7 +915,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
51137 unsigned long load_addr = 0, load_bias = 0;
51138 int load_addr_set = 0;
51139 char * elf_interpreter = NULL;
51140- unsigned long error;
51141+ unsigned long error = 0;
51142 struct elf_phdr *elf_ppnt, *elf_phdata;
51143 unsigned long elf_bss, elf_brk;
51144 int retval, i;
51145@@ -582,12 +925,12 @@ static int load_elf_binary(struct linux_binprm *bprm)
51146 unsigned long start_code, end_code, start_data, end_data;
51147 unsigned long reloc_func_desc __maybe_unused = 0;
51148 int executable_stack = EXSTACK_DEFAULT;
51149- unsigned long def_flags = 0;
51150 struct pt_regs *regs = current_pt_regs();
51151 struct {
51152 struct elfhdr elf_ex;
51153 struct elfhdr interp_elf_ex;
51154 } *loc;
51155+ unsigned long pax_task_size = TASK_SIZE;
51156
51157 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
51158 if (!loc) {
51159@@ -723,11 +1066,81 @@ static int load_elf_binary(struct linux_binprm *bprm)
51160 goto out_free_dentry;
51161
51162 /* OK, This is the point of no return */
51163- current->mm->def_flags = def_flags;
51164+
51165+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
51166+ current->mm->pax_flags = 0UL;
51167+#endif
51168+
51169+#ifdef CONFIG_PAX_DLRESOLVE
51170+ current->mm->call_dl_resolve = 0UL;
51171+#endif
51172+
51173+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
51174+ current->mm->call_syscall = 0UL;
51175+#endif
51176+
51177+#ifdef CONFIG_PAX_ASLR
51178+ current->mm->delta_mmap = 0UL;
51179+ current->mm->delta_stack = 0UL;
51180+#endif
51181+
51182+ current->mm->def_flags = 0;
51183+
51184+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
51185+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
51186+ send_sig(SIGKILL, current, 0);
51187+ goto out_free_dentry;
51188+ }
51189+#endif
51190+
51191+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
51192+ pax_set_initial_flags(bprm);
51193+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
51194+ if (pax_set_initial_flags_func)
51195+ (pax_set_initial_flags_func)(bprm);
51196+#endif
51197+
51198+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
51199+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
51200+ current->mm->context.user_cs_limit = PAGE_SIZE;
51201+ current->mm->def_flags |= VM_PAGEEXEC | VM_NOHUGEPAGE;
51202+ }
51203+#endif
51204+
51205+#ifdef CONFIG_PAX_SEGMEXEC
51206+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
51207+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
51208+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
51209+ pax_task_size = SEGMEXEC_TASK_SIZE;
51210+ current->mm->def_flags |= VM_NOHUGEPAGE;
51211+ }
51212+#endif
51213+
51214+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
51215+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
51216+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
51217+ put_cpu();
51218+ }
51219+#endif
51220
51221 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
51222 may depend on the personality. */
51223 SET_PERSONALITY(loc->elf_ex);
51224+
51225+#ifdef CONFIG_PAX_ASLR
51226+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
51227+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
51228+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
51229+ }
51230+#endif
51231+
51232+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
51233+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
51234+ executable_stack = EXSTACK_DISABLE_X;
51235+ current->personality &= ~READ_IMPLIES_EXEC;
51236+ } else
51237+#endif
51238+
51239 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
51240 current->personality |= READ_IMPLIES_EXEC;
51241
51242@@ -819,6 +1232,20 @@ static int load_elf_binary(struct linux_binprm *bprm)
51243 #else
51244 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
51245 #endif
51246+
51247+#ifdef CONFIG_PAX_RANDMMAP
51248+ /* PaX: randomize base address at the default exe base if requested */
51249+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
51250+#ifdef CONFIG_SPARC64
51251+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
51252+#else
51253+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
51254+#endif
51255+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
51256+ elf_flags |= MAP_FIXED;
51257+ }
51258+#endif
51259+
51260 }
51261
51262 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
51263@@ -851,9 +1278,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
51264 * allowed task size. Note that p_filesz must always be
51265 * <= p_memsz so it is only necessary to check p_memsz.
51266 */
51267- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
51268- elf_ppnt->p_memsz > TASK_SIZE ||
51269- TASK_SIZE - elf_ppnt->p_memsz < k) {
51270+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
51271+ elf_ppnt->p_memsz > pax_task_size ||
51272+ pax_task_size - elf_ppnt->p_memsz < k) {
51273 /* set_brk can never work. Avoid overflows. */
51274 send_sig(SIGKILL, current, 0);
51275 retval = -EINVAL;
51276@@ -892,17 +1319,45 @@ static int load_elf_binary(struct linux_binprm *bprm)
51277 goto out_free_dentry;
51278 }
51279 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
51280- send_sig(SIGSEGV, current, 0);
51281- retval = -EFAULT; /* Nobody gets to see this, but.. */
51282- goto out_free_dentry;
51283+ /*
51284+ * This bss-zeroing can fail if the ELF
51285+ * file specifies odd protections. So
51286+ * we don't check the return value
51287+ */
51288 }
51289
51290+#ifdef CONFIG_PAX_RANDMMAP
51291+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
51292+ unsigned long start, size, flags;
51293+ vm_flags_t vm_flags;
51294+
51295+ start = ELF_PAGEALIGN(elf_brk);
51296+ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
51297+ flags = MAP_FIXED | MAP_PRIVATE;
51298+ vm_flags = VM_DONTEXPAND | VM_DONTDUMP;
51299+
51300+ down_write(&current->mm->mmap_sem);
51301+ start = get_unmapped_area(NULL, start, PAGE_ALIGN(size), 0, flags);
51302+ retval = -ENOMEM;
51303+ if (!IS_ERR_VALUE(start) && !find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
51304+// if (current->personality & ADDR_NO_RANDOMIZE)
51305+// vm_flags |= VM_READ | VM_MAYREAD;
51306+ start = mmap_region(NULL, start, PAGE_ALIGN(size), vm_flags, 0);
51307+ retval = IS_ERR_VALUE(start) ? start : 0;
51308+ }
51309+ up_write(&current->mm->mmap_sem);
51310+ if (retval == 0)
51311+ retval = set_brk(start + size, start + size + PAGE_SIZE);
51312+ if (retval < 0) {
51313+ send_sig(SIGKILL, current, 0);
51314+ goto out_free_dentry;
51315+ }
51316+ }
51317+#endif
51318+
51319 if (elf_interpreter) {
51320- unsigned long interp_map_addr = 0;
51321-
51322 elf_entry = load_elf_interp(&loc->interp_elf_ex,
51323 interpreter,
51324- &interp_map_addr,
51325 load_bias);
51326 if (!IS_ERR((void *)elf_entry)) {
51327 /*
51328@@ -1124,7 +1579,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
51329 * Decide what to dump of a segment, part, all or none.
51330 */
51331 static unsigned long vma_dump_size(struct vm_area_struct *vma,
51332- unsigned long mm_flags)
51333+ unsigned long mm_flags, long signr)
51334 {
51335 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
51336
51337@@ -1162,7 +1617,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
51338 if (vma->vm_file == NULL)
51339 return 0;
51340
51341- if (FILTER(MAPPED_PRIVATE))
51342+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
51343 goto whole;
51344
51345 /*
51346@@ -1387,9 +1842,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
51347 {
51348 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
51349 int i = 0;
51350- do
51351+ do {
51352 i += 2;
51353- while (auxv[i - 2] != AT_NULL);
51354+ } while (auxv[i - 2] != AT_NULL);
51355 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
51356 }
51357
51358@@ -1398,7 +1853,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
51359 {
51360 mm_segment_t old_fs = get_fs();
51361 set_fs(KERNEL_DS);
51362- copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo);
51363+ copy_siginfo_to_user((user_siginfo_t __force_user *) csigdata, siginfo);
51364 set_fs(old_fs);
51365 fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
51366 }
51367@@ -2019,14 +2474,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
51368 }
51369
51370 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
51371- unsigned long mm_flags)
51372+ struct coredump_params *cprm)
51373 {
51374 struct vm_area_struct *vma;
51375 size_t size = 0;
51376
51377 for (vma = first_vma(current, gate_vma); vma != NULL;
51378 vma = next_vma(vma, gate_vma))
51379- size += vma_dump_size(vma, mm_flags);
51380+ size += vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
51381 return size;
51382 }
51383
51384@@ -2119,7 +2574,7 @@ static int elf_core_dump(struct coredump_params *cprm)
51385
51386 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
51387
51388- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
51389+ offset += elf_core_vma_data_size(gate_vma, cprm);
51390 offset += elf_core_extra_data_size();
51391 e_shoff = offset;
51392
51393@@ -2133,10 +2588,12 @@ static int elf_core_dump(struct coredump_params *cprm)
51394 offset = dataoff;
51395
51396 size += sizeof(*elf);
51397+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
51398 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
51399 goto end_coredump;
51400
51401 size += sizeof(*phdr4note);
51402+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
51403 if (size > cprm->limit
51404 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
51405 goto end_coredump;
51406@@ -2150,7 +2607,7 @@ static int elf_core_dump(struct coredump_params *cprm)
51407 phdr.p_offset = offset;
51408 phdr.p_vaddr = vma->vm_start;
51409 phdr.p_paddr = 0;
51410- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
51411+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
51412 phdr.p_memsz = vma->vm_end - vma->vm_start;
51413 offset += phdr.p_filesz;
51414 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
51415@@ -2161,6 +2618,7 @@ static int elf_core_dump(struct coredump_params *cprm)
51416 phdr.p_align = ELF_EXEC_PAGESIZE;
51417
51418 size += sizeof(phdr);
51419+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
51420 if (size > cprm->limit
51421 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
51422 goto end_coredump;
51423@@ -2185,7 +2643,7 @@ static int elf_core_dump(struct coredump_params *cprm)
51424 unsigned long addr;
51425 unsigned long end;
51426
51427- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
51428+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
51429
51430 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
51431 struct page *page;
51432@@ -2194,6 +2652,7 @@ static int elf_core_dump(struct coredump_params *cprm)
51433 page = get_dump_page(addr);
51434 if (page) {
51435 void *kaddr = kmap(page);
51436+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
51437 stop = ((size += PAGE_SIZE) > cprm->limit) ||
51438 !dump_write(cprm->file, kaddr,
51439 PAGE_SIZE);
51440@@ -2211,6 +2670,7 @@ static int elf_core_dump(struct coredump_params *cprm)
51441
51442 if (e_phnum == PN_XNUM) {
51443 size += sizeof(*shdr4extnum);
51444+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
51445 if (size > cprm->limit
51446 || !dump_write(cprm->file, shdr4extnum,
51447 sizeof(*shdr4extnum)))
51448@@ -2231,6 +2691,167 @@ out:
51449
51450 #endif /* CONFIG_ELF_CORE */
51451
51452+#ifdef CONFIG_PAX_MPROTECT
51453+/* PaX: non-PIC ELF libraries need relocations on their executable segments,
51454+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
51455+ * we'll remove VM_MAYWRITE for good on RELRO segments.
51456+ *
51457+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
51458+ * basis because we want to allow the common case and not the special ones.
51459+ */
51460+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
51461+{
51462+ struct elfhdr elf_h;
51463+ struct elf_phdr elf_p;
51464+ unsigned long i;
51465+ unsigned long oldflags;
51466+ bool is_textrel_rw, is_textrel_rx, is_relro;
51467+
51468+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT) || !vma->vm_file)
51469+ return;
51470+
51471+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
51472+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
51473+
51474+#ifdef CONFIG_PAX_ELFRELOCS
51475+ /* possible TEXTREL */
51476+ is_textrel_rw = !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
51477+ is_textrel_rx = vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
51478+#else
51479+ is_textrel_rw = false;
51480+ is_textrel_rx = false;
51481+#endif
51482+
51483+ /* possible RELRO */
51484+ is_relro = vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
51485+
51486+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
51487+ return;
51488+
51489+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
51490+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
51491+
51492+#ifdef CONFIG_PAX_ETEXECRELOCS
51493+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
51494+#else
51495+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
51496+#endif
51497+
51498+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
51499+ !elf_check_arch(&elf_h) ||
51500+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
51501+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
51502+ return;
51503+
51504+ for (i = 0UL; i < elf_h.e_phnum; i++) {
51505+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
51506+ return;
51507+ switch (elf_p.p_type) {
51508+ case PT_DYNAMIC:
51509+ if (!is_textrel_rw && !is_textrel_rx)
51510+ continue;
51511+ i = 0UL;
51512+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
51513+ elf_dyn dyn;
51514+
51515+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
51516+ break;
51517+ if (dyn.d_tag == DT_NULL)
51518+ break;
51519+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
51520+ gr_log_textrel(vma);
51521+ if (is_textrel_rw)
51522+ vma->vm_flags |= VM_MAYWRITE;
51523+ else
51524+				/* PaX: disallow write access after relocs are done, hopefully no one else needs it... */
51525+ vma->vm_flags &= ~VM_MAYWRITE;
51526+ break;
51527+ }
51528+ i++;
51529+ }
51530+ is_textrel_rw = false;
51531+ is_textrel_rx = false;
51532+ continue;
51533+
51534+ case PT_GNU_RELRO:
51535+ if (!is_relro)
51536+ continue;
51537+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
51538+ vma->vm_flags &= ~VM_MAYWRITE;
51539+ is_relro = false;
51540+ continue;
51541+
51542+#ifdef CONFIG_PAX_PT_PAX_FLAGS
51543+ case PT_PAX_FLAGS: {
51544+ const char *msg_mprotect = "", *msg_emutramp = "";
51545+ char *buffer_lib, *buffer_exe;
51546+
51547+ if (elf_p.p_flags & PF_NOMPROTECT)
51548+ msg_mprotect = "MPROTECT disabled";
51549+
51550+#ifdef CONFIG_PAX_EMUTRAMP
51551+ if (!(vma->vm_mm->pax_flags & MF_PAX_EMUTRAMP) && !(elf_p.p_flags & PF_NOEMUTRAMP))
51552+ msg_emutramp = "EMUTRAMP enabled";
51553+#endif
51554+
51555+ if (!msg_mprotect[0] && !msg_emutramp[0])
51556+ continue;
51557+
51558+ if (!printk_ratelimit())
51559+ continue;
51560+
51561+ buffer_lib = (char *)__get_free_page(GFP_KERNEL);
51562+ buffer_exe = (char *)__get_free_page(GFP_KERNEL);
51563+ if (buffer_lib && buffer_exe) {
51564+ char *path_lib, *path_exe;
51565+
51566+ path_lib = pax_get_path(&vma->vm_file->f_path, buffer_lib, PAGE_SIZE);
51567+ path_exe = pax_get_path(&vma->vm_mm->exe_file->f_path, buffer_exe, PAGE_SIZE);
51568+
51569+ pr_info("PAX: %s wants %s%s%s on %s\n", path_lib, msg_mprotect,
51570+ (msg_mprotect[0] && msg_emutramp[0] ? " and " : ""), msg_emutramp, path_exe);
51571+
51572+ }
51573+ free_page((unsigned long)buffer_exe);
51574+ free_page((unsigned long)buffer_lib);
51575+ continue;
51576+ }
51577+#endif
51578+
51579+ }
51580+ }
51581+}
51582+#endif
51583+
51584+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
51585+
51586+extern int grsec_enable_log_rwxmaps;
51587+
51588+static void elf_handle_mmap(struct file *file)
51589+{
51590+ struct elfhdr elf_h;
51591+ struct elf_phdr elf_p;
51592+ unsigned long i;
51593+
51594+ if (!grsec_enable_log_rwxmaps)
51595+ return;
51596+
51597+ if (sizeof(elf_h) != kernel_read(file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
51598+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
51599+ (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC) || !elf_check_arch(&elf_h) ||
51600+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
51601+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
51602+ return;
51603+
51604+ for (i = 0UL; i < elf_h.e_phnum; i++) {
51605+ if (sizeof(elf_p) != kernel_read(file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
51606+ return;
51607+ if (elf_p.p_type == PT_GNU_STACK && (elf_p.p_flags & PF_X))
51608+ gr_log_ptgnustack(file);
51609+ }
51610+}
51611+#endif
51612+
51613 static int __init init_elf_binfmt(void)
51614 {
51615 register_binfmt(&elf_format);
51616diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
51617index d50bbe5..af3b649 100644
51618--- a/fs/binfmt_flat.c
51619+++ b/fs/binfmt_flat.c
51620@@ -566,7 +566,9 @@ static int load_flat_file(struct linux_binprm * bprm,
51621 realdatastart = (unsigned long) -ENOMEM;
51622 printk("Unable to allocate RAM for process data, errno %d\n",
51623 (int)-realdatastart);
51624+ down_write(&current->mm->mmap_sem);
51625 vm_munmap(textpos, text_len);
51626+ up_write(&current->mm->mmap_sem);
51627 ret = realdatastart;
51628 goto err;
51629 }
51630@@ -590,8 +592,10 @@ static int load_flat_file(struct linux_binprm * bprm,
51631 }
51632 if (IS_ERR_VALUE(result)) {
51633 printk("Unable to read data+bss, errno %d\n", (int)-result);
51634+ down_write(&current->mm->mmap_sem);
51635 vm_munmap(textpos, text_len);
51636 vm_munmap(realdatastart, len);
51637+ up_write(&current->mm->mmap_sem);
51638 ret = result;
51639 goto err;
51640 }
51641@@ -653,8 +657,10 @@ static int load_flat_file(struct linux_binprm * bprm,
51642 }
51643 if (IS_ERR_VALUE(result)) {
51644 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
51645+ down_write(&current->mm->mmap_sem);
51646 vm_munmap(textpos, text_len + data_len + extra +
51647 MAX_SHARED_LIBS * sizeof(unsigned long));
51648+ up_write(&current->mm->mmap_sem);
51649 ret = result;
51650 goto err;
51651 }
51652diff --git a/fs/bio.c b/fs/bio.c
51653index 94bbc04..6fe78a4 100644
51654--- a/fs/bio.c
51655+++ b/fs/bio.c
51656@@ -1096,7 +1096,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
51657 /*
51658 * Overflow, abort
51659 */
51660- if (end < start)
51661+ if (end < start || end - start > INT_MAX - nr_pages)
51662 return ERR_PTR(-EINVAL);
51663
51664 nr_pages += end - start;
51665@@ -1230,7 +1230,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
51666 /*
51667 * Overflow, abort
51668 */
51669- if (end < start)
51670+ if (end < start || end - start > INT_MAX - nr_pages)
51671 return ERR_PTR(-EINVAL);
51672
51673 nr_pages += end - start;
51674@@ -1492,7 +1492,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
51675 const int read = bio_data_dir(bio) == READ;
51676 struct bio_map_data *bmd = bio->bi_private;
51677 int i;
51678- char *p = bmd->sgvecs[0].iov_base;
51679+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
51680
51681 bio_for_each_segment_all(bvec, bio, i) {
51682 char *addr = page_address(bvec->bv_page);
51683diff --git a/fs/block_dev.c b/fs/block_dev.c
51684index 85f5c85..d6f0b1a 100644
51685--- a/fs/block_dev.c
51686+++ b/fs/block_dev.c
51687@@ -658,7 +658,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
51688 else if (bdev->bd_contains == bdev)
51689 return true; /* is a whole device which isn't held */
51690
51691- else if (whole->bd_holder == bd_may_claim)
51692+ else if (whole->bd_holder == (void *)bd_may_claim)
51693 return true; /* is a partition of a device that is being partitioned */
51694 else if (whole->bd_holder != NULL)
51695 return false; /* is a partition of a held device */
51696diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
51697index 7fb054b..ad36c67 100644
51698--- a/fs/btrfs/ctree.c
51699+++ b/fs/btrfs/ctree.c
51700@@ -1076,9 +1076,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
51701 free_extent_buffer(buf);
51702 add_root_to_dirty_list(root);
51703 } else {
51704- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
51705- parent_start = parent->start;
51706- else
51707+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
51708+ if (parent)
51709+ parent_start = parent->start;
51710+ else
51711+ parent_start = 0;
51712+ } else
51713 parent_start = 0;
51714
51715 WARN_ON(trans->transid != btrfs_header_generation(parent));
51716diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
51717index 0f81d67..0ad55fe 100644
51718--- a/fs/btrfs/ioctl.c
51719+++ b/fs/btrfs/ioctl.c
51720@@ -3084,9 +3084,12 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
51721 for (i = 0; i < num_types; i++) {
51722 struct btrfs_space_info *tmp;
51723
51724+ /* Don't copy in more than we allocated */
51725 if (!slot_count)
51726 break;
51727
51728+ slot_count--;
51729+
51730 info = NULL;
51731 rcu_read_lock();
51732 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
51733@@ -3108,10 +3111,7 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
51734 memcpy(dest, &space, sizeof(space));
51735 dest++;
51736 space_args.total_spaces++;
51737- slot_count--;
51738 }
51739- if (!slot_count)
51740- break;
51741 }
51742 up_read(&info->groups_sem);
51743 }
51744diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
51745index f0857e0..e7023c5 100644
51746--- a/fs/btrfs/super.c
51747+++ b/fs/btrfs/super.c
51748@@ -265,7 +265,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
51749 function, line, errstr);
51750 return;
51751 }
51752- ACCESS_ONCE(trans->transaction->aborted) = errno;
51753+ ACCESS_ONCE_RW(trans->transaction->aborted) = errno;
51754 __btrfs_std_error(root->fs_info, function, line, errno, NULL);
51755 }
51756 /*
51757diff --git a/fs/buffer.c b/fs/buffer.c
51758index d2a4d1b..df798ca 100644
51759--- a/fs/buffer.c
51760+++ b/fs/buffer.c
51761@@ -3367,7 +3367,7 @@ void __init buffer_init(void)
51762 bh_cachep = kmem_cache_create("buffer_head",
51763 sizeof(struct buffer_head), 0,
51764 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
51765- SLAB_MEM_SPREAD),
51766+ SLAB_MEM_SPREAD|SLAB_NO_SANITIZE),
51767 NULL);
51768
51769 /*
51770diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
51771index 622f469..e8d2d55 100644
51772--- a/fs/cachefiles/bind.c
51773+++ b/fs/cachefiles/bind.c
51774@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
51775 args);
51776
51777 /* start by checking things over */
51778- ASSERT(cache->fstop_percent >= 0 &&
51779- cache->fstop_percent < cache->fcull_percent &&
51780+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
51781 cache->fcull_percent < cache->frun_percent &&
51782 cache->frun_percent < 100);
51783
51784- ASSERT(cache->bstop_percent >= 0 &&
51785- cache->bstop_percent < cache->bcull_percent &&
51786+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
51787 cache->bcull_percent < cache->brun_percent &&
51788 cache->brun_percent < 100);
51789
51790diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
51791index 0a1467b..6a53245 100644
51792--- a/fs/cachefiles/daemon.c
51793+++ b/fs/cachefiles/daemon.c
51794@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
51795 if (n > buflen)
51796 return -EMSGSIZE;
51797
51798- if (copy_to_user(_buffer, buffer, n) != 0)
51799+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
51800 return -EFAULT;
51801
51802 return n;
51803@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
51804 if (test_bit(CACHEFILES_DEAD, &cache->flags))
51805 return -EIO;
51806
51807- if (datalen < 0 || datalen > PAGE_SIZE - 1)
51808+ if (datalen > PAGE_SIZE - 1)
51809 return -EOPNOTSUPP;
51810
51811 /* drag the command string into the kernel so we can parse it */
51812@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
51813 if (args[0] != '%' || args[1] != '\0')
51814 return -EINVAL;
51815
51816- if (fstop < 0 || fstop >= cache->fcull_percent)
51817+ if (fstop >= cache->fcull_percent)
51818 return cachefiles_daemon_range_error(cache, args);
51819
51820 cache->fstop_percent = fstop;
51821@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
51822 if (args[0] != '%' || args[1] != '\0')
51823 return -EINVAL;
51824
51825- if (bstop < 0 || bstop >= cache->bcull_percent)
51826+ if (bstop >= cache->bcull_percent)
51827 return cachefiles_daemon_range_error(cache, args);
51828
51829 cache->bstop_percent = bstop;
51830diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
51831index 4938251..7e01445 100644
51832--- a/fs/cachefiles/internal.h
51833+++ b/fs/cachefiles/internal.h
51834@@ -59,7 +59,7 @@ struct cachefiles_cache {
51835 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
51836 struct rb_root active_nodes; /* active nodes (can't be culled) */
51837 rwlock_t active_lock; /* lock for active_nodes */
51838- atomic_t gravecounter; /* graveyard uniquifier */
51839+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
51840 unsigned frun_percent; /* when to stop culling (% files) */
51841 unsigned fcull_percent; /* when to start culling (% files) */
51842 unsigned fstop_percent; /* when to stop allocating (% files) */
51843@@ -171,19 +171,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
51844 * proc.c
51845 */
51846 #ifdef CONFIG_CACHEFILES_HISTOGRAM
51847-extern atomic_t cachefiles_lookup_histogram[HZ];
51848-extern atomic_t cachefiles_mkdir_histogram[HZ];
51849-extern atomic_t cachefiles_create_histogram[HZ];
51850+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
51851+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
51852+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
51853
51854 extern int __init cachefiles_proc_init(void);
51855 extern void cachefiles_proc_cleanup(void);
51856 static inline
51857-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
51858+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
51859 {
51860 unsigned long jif = jiffies - start_jif;
51861 if (jif >= HZ)
51862 jif = HZ - 1;
51863- atomic_inc(&histogram[jif]);
51864+ atomic_inc_unchecked(&histogram[jif]);
51865 }
51866
51867 #else
51868diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
51869index 8c01c5fc..15f982e 100644
51870--- a/fs/cachefiles/namei.c
51871+++ b/fs/cachefiles/namei.c
51872@@ -317,7 +317,7 @@ try_again:
51873 /* first step is to make up a grave dentry in the graveyard */
51874 sprintf(nbuffer, "%08x%08x",
51875 (uint32_t) get_seconds(),
51876- (uint32_t) atomic_inc_return(&cache->gravecounter));
51877+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
51878
51879 /* do the multiway lock magic */
51880 trap = lock_rename(cache->graveyard, dir);
51881diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
51882index eccd339..4c1d995 100644
51883--- a/fs/cachefiles/proc.c
51884+++ b/fs/cachefiles/proc.c
51885@@ -14,9 +14,9 @@
51886 #include <linux/seq_file.h>
51887 #include "internal.h"
51888
51889-atomic_t cachefiles_lookup_histogram[HZ];
51890-atomic_t cachefiles_mkdir_histogram[HZ];
51891-atomic_t cachefiles_create_histogram[HZ];
51892+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
51893+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
51894+atomic_unchecked_t cachefiles_create_histogram[HZ];
51895
51896 /*
51897 * display the latency histogram
51898@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
51899 return 0;
51900 default:
51901 index = (unsigned long) v - 3;
51902- x = atomic_read(&cachefiles_lookup_histogram[index]);
51903- y = atomic_read(&cachefiles_mkdir_histogram[index]);
51904- z = atomic_read(&cachefiles_create_histogram[index]);
51905+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
51906+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
51907+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
51908 if (x == 0 && y == 0 && z == 0)
51909 return 0;
51910
51911diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
51912index 317f9ee..3d24511 100644
51913--- a/fs/cachefiles/rdwr.c
51914+++ b/fs/cachefiles/rdwr.c
51915@@ -966,7 +966,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
51916 old_fs = get_fs();
51917 set_fs(KERNEL_DS);
51918 ret = file->f_op->write(
51919- file, (const void __user *) data, len, &pos);
51920+ file, (const void __force_user *) data, len, &pos);
51921 set_fs(old_fs);
51922 kunmap(page);
51923 file_end_write(file);
51924diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
51925index f02d82b..2632cf86 100644
51926--- a/fs/ceph/dir.c
51927+++ b/fs/ceph/dir.c
51928@@ -243,7 +243,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
51929 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
51930 struct ceph_mds_client *mdsc = fsc->mdsc;
51931 unsigned frag = fpos_frag(filp->f_pos);
51932- int off = fpos_off(filp->f_pos);
51933+ unsigned int off = fpos_off(filp->f_pos);
51934 int err;
51935 u32 ftype;
51936 struct ceph_mds_reply_info_parsed *rinfo;
51937diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
51938index d597483..747901b 100644
51939--- a/fs/cifs/cifs_debug.c
51940+++ b/fs/cifs/cifs_debug.c
51941@@ -284,8 +284,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
51942
51943 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
51944 #ifdef CONFIG_CIFS_STATS2
51945- atomic_set(&totBufAllocCount, 0);
51946- atomic_set(&totSmBufAllocCount, 0);
51947+ atomic_set_unchecked(&totBufAllocCount, 0);
51948+ atomic_set_unchecked(&totSmBufAllocCount, 0);
51949 #endif /* CONFIG_CIFS_STATS2 */
51950 spin_lock(&cifs_tcp_ses_lock);
51951 list_for_each(tmp1, &cifs_tcp_ses_list) {
51952@@ -298,7 +298,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
51953 tcon = list_entry(tmp3,
51954 struct cifs_tcon,
51955 tcon_list);
51956- atomic_set(&tcon->num_smbs_sent, 0);
51957+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
51958 if (server->ops->clear_stats)
51959 server->ops->clear_stats(tcon);
51960 }
51961@@ -330,8 +330,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
51962 smBufAllocCount.counter, cifs_min_small);
51963 #ifdef CONFIG_CIFS_STATS2
51964 seq_printf(m, "Total Large %d Small %d Allocations\n",
51965- atomic_read(&totBufAllocCount),
51966- atomic_read(&totSmBufAllocCount));
51967+ atomic_read_unchecked(&totBufAllocCount),
51968+ atomic_read_unchecked(&totSmBufAllocCount));
51969 #endif /* CONFIG_CIFS_STATS2 */
51970
51971 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
51972@@ -360,7 +360,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
51973 if (tcon->need_reconnect)
51974 seq_puts(m, "\tDISCONNECTED ");
51975 seq_printf(m, "\nSMBs: %d",
51976- atomic_read(&tcon->num_smbs_sent));
51977+ atomic_read_unchecked(&tcon->num_smbs_sent));
51978 if (server->ops->print_stats)
51979 server->ops->print_stats(m, tcon);
51980 }
51981diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
51982index 3752b9f..8db5569 100644
51983--- a/fs/cifs/cifsfs.c
51984+++ b/fs/cifs/cifsfs.c
51985@@ -1035,7 +1035,7 @@ cifs_init_request_bufs(void)
51986 */
51987 cifs_req_cachep = kmem_cache_create("cifs_request",
51988 CIFSMaxBufSize + max_hdr_size, 0,
51989- SLAB_HWCACHE_ALIGN, NULL);
51990+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
51991 if (cifs_req_cachep == NULL)
51992 return -ENOMEM;
51993
51994@@ -1062,7 +1062,7 @@ cifs_init_request_bufs(void)
51995 efficient to alloc 1 per page off the slab compared to 17K (5page)
51996 alloc of large cifs buffers even when page debugging is on */
51997 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
51998- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
51999+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
52000 NULL);
52001 if (cifs_sm_req_cachep == NULL) {
52002 mempool_destroy(cifs_req_poolp);
52003@@ -1147,8 +1147,8 @@ init_cifs(void)
52004 atomic_set(&bufAllocCount, 0);
52005 atomic_set(&smBufAllocCount, 0);
52006 #ifdef CONFIG_CIFS_STATS2
52007- atomic_set(&totBufAllocCount, 0);
52008- atomic_set(&totSmBufAllocCount, 0);
52009+ atomic_set_unchecked(&totBufAllocCount, 0);
52010+ atomic_set_unchecked(&totSmBufAllocCount, 0);
52011 #endif /* CONFIG_CIFS_STATS2 */
52012
52013 atomic_set(&midCount, 0);
52014diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
52015index ea3a0b3..0194e39 100644
52016--- a/fs/cifs/cifsglob.h
52017+++ b/fs/cifs/cifsglob.h
52018@@ -752,35 +752,35 @@ struct cifs_tcon {
52019 __u16 Flags; /* optional support bits */
52020 enum statusEnum tidStatus;
52021 #ifdef CONFIG_CIFS_STATS
52022- atomic_t num_smbs_sent;
52023+ atomic_unchecked_t num_smbs_sent;
52024 union {
52025 struct {
52026- atomic_t num_writes;
52027- atomic_t num_reads;
52028- atomic_t num_flushes;
52029- atomic_t num_oplock_brks;
52030- atomic_t num_opens;
52031- atomic_t num_closes;
52032- atomic_t num_deletes;
52033- atomic_t num_mkdirs;
52034- atomic_t num_posixopens;
52035- atomic_t num_posixmkdirs;
52036- atomic_t num_rmdirs;
52037- atomic_t num_renames;
52038- atomic_t num_t2renames;
52039- atomic_t num_ffirst;
52040- atomic_t num_fnext;
52041- atomic_t num_fclose;
52042- atomic_t num_hardlinks;
52043- atomic_t num_symlinks;
52044- atomic_t num_locks;
52045- atomic_t num_acl_get;
52046- atomic_t num_acl_set;
52047+ atomic_unchecked_t num_writes;
52048+ atomic_unchecked_t num_reads;
52049+ atomic_unchecked_t num_flushes;
52050+ atomic_unchecked_t num_oplock_brks;
52051+ atomic_unchecked_t num_opens;
52052+ atomic_unchecked_t num_closes;
52053+ atomic_unchecked_t num_deletes;
52054+ atomic_unchecked_t num_mkdirs;
52055+ atomic_unchecked_t num_posixopens;
52056+ atomic_unchecked_t num_posixmkdirs;
52057+ atomic_unchecked_t num_rmdirs;
52058+ atomic_unchecked_t num_renames;
52059+ atomic_unchecked_t num_t2renames;
52060+ atomic_unchecked_t num_ffirst;
52061+ atomic_unchecked_t num_fnext;
52062+ atomic_unchecked_t num_fclose;
52063+ atomic_unchecked_t num_hardlinks;
52064+ atomic_unchecked_t num_symlinks;
52065+ atomic_unchecked_t num_locks;
52066+ atomic_unchecked_t num_acl_get;
52067+ atomic_unchecked_t num_acl_set;
52068 } cifs_stats;
52069 #ifdef CONFIG_CIFS_SMB2
52070 struct {
52071- atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
52072- atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
52073+ atomic_unchecked_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
52074+ atomic_unchecked_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
52075 } smb2_stats;
52076 #endif /* CONFIG_CIFS_SMB2 */
52077 } stats;
52078@@ -1081,7 +1081,7 @@ convert_delimiter(char *path, char delim)
52079 }
52080
52081 #ifdef CONFIG_CIFS_STATS
52082-#define cifs_stats_inc atomic_inc
52083+#define cifs_stats_inc atomic_inc_unchecked
52084
52085 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
52086 unsigned int bytes)
52087@@ -1446,8 +1446,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
52088 /* Various Debug counters */
52089 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
52090 #ifdef CONFIG_CIFS_STATS2
52091-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
52092-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
52093+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
52094+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
52095 #endif
52096 GLOBAL_EXTERN atomic_t smBufAllocCount;
52097 GLOBAL_EXTERN atomic_t midCount;
52098diff --git a/fs/cifs/link.c b/fs/cifs/link.c
52099index b83c3f5..6437caa 100644
52100--- a/fs/cifs/link.c
52101+++ b/fs/cifs/link.c
52102@@ -616,7 +616,7 @@ symlink_exit:
52103
52104 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
52105 {
52106- char *p = nd_get_link(nd);
52107+ const char *p = nd_get_link(nd);
52108 if (!IS_ERR(p))
52109 kfree(p);
52110 }
52111diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
52112index 1bec014..f329411 100644
52113--- a/fs/cifs/misc.c
52114+++ b/fs/cifs/misc.c
52115@@ -169,7 +169,7 @@ cifs_buf_get(void)
52116 memset(ret_buf, 0, buf_size + 3);
52117 atomic_inc(&bufAllocCount);
52118 #ifdef CONFIG_CIFS_STATS2
52119- atomic_inc(&totBufAllocCount);
52120+ atomic_inc_unchecked(&totBufAllocCount);
52121 #endif /* CONFIG_CIFS_STATS2 */
52122 }
52123
52124@@ -204,7 +204,7 @@ cifs_small_buf_get(void)
52125 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
52126 atomic_inc(&smBufAllocCount);
52127 #ifdef CONFIG_CIFS_STATS2
52128- atomic_inc(&totSmBufAllocCount);
52129+ atomic_inc_unchecked(&totSmBufAllocCount);
52130 #endif /* CONFIG_CIFS_STATS2 */
52131
52132 }
52133diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
52134index 3efdb9d..e845a5e 100644
52135--- a/fs/cifs/smb1ops.c
52136+++ b/fs/cifs/smb1ops.c
52137@@ -591,27 +591,27 @@ static void
52138 cifs_clear_stats(struct cifs_tcon *tcon)
52139 {
52140 #ifdef CONFIG_CIFS_STATS
52141- atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
52142- atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
52143- atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
52144- atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
52145- atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
52146- atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
52147- atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
52148- atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
52149- atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
52150- atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
52151- atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
52152- atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
52153- atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
52154- atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
52155- atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
52156- atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
52157- atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
52158- atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
52159- atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
52160- atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
52161- atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
52162+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_writes, 0);
52163+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_reads, 0);
52164+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_flushes, 0);
52165+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_oplock_brks, 0);
52166+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_opens, 0);
52167+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixopens, 0);
52168+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
52169+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_closes, 0);
52170+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_deletes, 0);
52171+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_mkdirs, 0);
52172+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_rmdirs, 0);
52173+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_renames, 0);
52174+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_t2renames, 0);
52175+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_ffirst, 0);
52176+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fnext, 0);
52177+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fclose, 0);
52178+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_hardlinks, 0);
52179+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_symlinks, 0);
52180+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_locks, 0);
52181+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_get, 0);
52182+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_set, 0);
52183 #endif
52184 }
52185
52186@@ -620,36 +620,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
52187 {
52188 #ifdef CONFIG_CIFS_STATS
52189 seq_printf(m, " Oplocks breaks: %d",
52190- atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
52191+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_oplock_brks));
52192 seq_printf(m, "\nReads: %d Bytes: %llu",
52193- atomic_read(&tcon->stats.cifs_stats.num_reads),
52194+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_reads),
52195 (long long)(tcon->bytes_read));
52196 seq_printf(m, "\nWrites: %d Bytes: %llu",
52197- atomic_read(&tcon->stats.cifs_stats.num_writes),
52198+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_writes),
52199 (long long)(tcon->bytes_written));
52200 seq_printf(m, "\nFlushes: %d",
52201- atomic_read(&tcon->stats.cifs_stats.num_flushes));
52202+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_flushes));
52203 seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
52204- atomic_read(&tcon->stats.cifs_stats.num_locks),
52205- atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
52206- atomic_read(&tcon->stats.cifs_stats.num_symlinks));
52207+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_locks),
52208+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_hardlinks),
52209+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_symlinks));
52210 seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
52211- atomic_read(&tcon->stats.cifs_stats.num_opens),
52212- atomic_read(&tcon->stats.cifs_stats.num_closes),
52213- atomic_read(&tcon->stats.cifs_stats.num_deletes));
52214+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_opens),
52215+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_closes),
52216+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_deletes));
52217 seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
52218- atomic_read(&tcon->stats.cifs_stats.num_posixopens),
52219- atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
52220+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixopens),
52221+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs));
52222 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
52223- atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
52224- atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
52225+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_mkdirs),
52226+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_rmdirs));
52227 seq_printf(m, "\nRenames: %d T2 Renames %d",
52228- atomic_read(&tcon->stats.cifs_stats.num_renames),
52229- atomic_read(&tcon->stats.cifs_stats.num_t2renames));
52230+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_renames),
52231+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_t2renames));
52232 seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
52233- atomic_read(&tcon->stats.cifs_stats.num_ffirst),
52234- atomic_read(&tcon->stats.cifs_stats.num_fnext),
52235- atomic_read(&tcon->stats.cifs_stats.num_fclose));
52236+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_ffirst),
52237+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fnext),
52238+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fclose));
52239 #endif
52240 }
52241
52242diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
52243index f2e76f3..c44fac7 100644
52244--- a/fs/cifs/smb2ops.c
52245+++ b/fs/cifs/smb2ops.c
52246@@ -274,8 +274,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
52247 #ifdef CONFIG_CIFS_STATS
52248 int i;
52249 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
52250- atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
52251- atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
52252+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
52253+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
52254 }
52255 #endif
52256 }
52257@@ -284,66 +284,66 @@ static void
52258 smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
52259 {
52260 #ifdef CONFIG_CIFS_STATS
52261- atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
52262- atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
52263+ atomic_unchecked_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
52264+ atomic_unchecked_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
52265 seq_printf(m, "\nNegotiates: %d sent %d failed",
52266- atomic_read(&sent[SMB2_NEGOTIATE_HE]),
52267- atomic_read(&failed[SMB2_NEGOTIATE_HE]));
52268+ atomic_read_unchecked(&sent[SMB2_NEGOTIATE_HE]),
52269+ atomic_read_unchecked(&failed[SMB2_NEGOTIATE_HE]));
52270 seq_printf(m, "\nSessionSetups: %d sent %d failed",
52271- atomic_read(&sent[SMB2_SESSION_SETUP_HE]),
52272- atomic_read(&failed[SMB2_SESSION_SETUP_HE]));
52273+ atomic_read_unchecked(&sent[SMB2_SESSION_SETUP_HE]),
52274+ atomic_read_unchecked(&failed[SMB2_SESSION_SETUP_HE]));
52275 #define SMB2LOGOFF 0x0002 /* trivial request/resp */
52276 seq_printf(m, "\nLogoffs: %d sent %d failed",
52277- atomic_read(&sent[SMB2_LOGOFF_HE]),
52278- atomic_read(&failed[SMB2_LOGOFF_HE]));
52279+ atomic_read_unchecked(&sent[SMB2_LOGOFF_HE]),
52280+ atomic_read_unchecked(&failed[SMB2_LOGOFF_HE]));
52281 seq_printf(m, "\nTreeConnects: %d sent %d failed",
52282- atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
52283- atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
52284+ atomic_read_unchecked(&sent[SMB2_TREE_CONNECT_HE]),
52285+ atomic_read_unchecked(&failed[SMB2_TREE_CONNECT_HE]));
52286 seq_printf(m, "\nTreeDisconnects: %d sent %d failed",
52287- atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
52288- atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
52289+ atomic_read_unchecked(&sent[SMB2_TREE_DISCONNECT_HE]),
52290+ atomic_read_unchecked(&failed[SMB2_TREE_DISCONNECT_HE]));
52291 seq_printf(m, "\nCreates: %d sent %d failed",
52292- atomic_read(&sent[SMB2_CREATE_HE]),
52293- atomic_read(&failed[SMB2_CREATE_HE]));
52294+ atomic_read_unchecked(&sent[SMB2_CREATE_HE]),
52295+ atomic_read_unchecked(&failed[SMB2_CREATE_HE]));
52296 seq_printf(m, "\nCloses: %d sent %d failed",
52297- atomic_read(&sent[SMB2_CLOSE_HE]),
52298- atomic_read(&failed[SMB2_CLOSE_HE]));
52299+ atomic_read_unchecked(&sent[SMB2_CLOSE_HE]),
52300+ atomic_read_unchecked(&failed[SMB2_CLOSE_HE]));
52301 seq_printf(m, "\nFlushes: %d sent %d failed",
52302- atomic_read(&sent[SMB2_FLUSH_HE]),
52303- atomic_read(&failed[SMB2_FLUSH_HE]));
52304+ atomic_read_unchecked(&sent[SMB2_FLUSH_HE]),
52305+ atomic_read_unchecked(&failed[SMB2_FLUSH_HE]));
52306 seq_printf(m, "\nReads: %d sent %d failed",
52307- atomic_read(&sent[SMB2_READ_HE]),
52308- atomic_read(&failed[SMB2_READ_HE]));
52309+ atomic_read_unchecked(&sent[SMB2_READ_HE]),
52310+ atomic_read_unchecked(&failed[SMB2_READ_HE]));
52311 seq_printf(m, "\nWrites: %d sent %d failed",
52312- atomic_read(&sent[SMB2_WRITE_HE]),
52313- atomic_read(&failed[SMB2_WRITE_HE]));
52314+ atomic_read_unchecked(&sent[SMB2_WRITE_HE]),
52315+ atomic_read_unchecked(&failed[SMB2_WRITE_HE]));
52316 seq_printf(m, "\nLocks: %d sent %d failed",
52317- atomic_read(&sent[SMB2_LOCK_HE]),
52318- atomic_read(&failed[SMB2_LOCK_HE]));
52319+ atomic_read_unchecked(&sent[SMB2_LOCK_HE]),
52320+ atomic_read_unchecked(&failed[SMB2_LOCK_HE]));
52321 seq_printf(m, "\nIOCTLs: %d sent %d failed",
52322- atomic_read(&sent[SMB2_IOCTL_HE]),
52323- atomic_read(&failed[SMB2_IOCTL_HE]));
52324+ atomic_read_unchecked(&sent[SMB2_IOCTL_HE]),
52325+ atomic_read_unchecked(&failed[SMB2_IOCTL_HE]));
52326 seq_printf(m, "\nCancels: %d sent %d failed",
52327- atomic_read(&sent[SMB2_CANCEL_HE]),
52328- atomic_read(&failed[SMB2_CANCEL_HE]));
52329+ atomic_read_unchecked(&sent[SMB2_CANCEL_HE]),
52330+ atomic_read_unchecked(&failed[SMB2_CANCEL_HE]));
52331 seq_printf(m, "\nEchos: %d sent %d failed",
52332- atomic_read(&sent[SMB2_ECHO_HE]),
52333- atomic_read(&failed[SMB2_ECHO_HE]));
52334+ atomic_read_unchecked(&sent[SMB2_ECHO_HE]),
52335+ atomic_read_unchecked(&failed[SMB2_ECHO_HE]));
52336 seq_printf(m, "\nQueryDirectories: %d sent %d failed",
52337- atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
52338- atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
52339+ atomic_read_unchecked(&sent[SMB2_QUERY_DIRECTORY_HE]),
52340+ atomic_read_unchecked(&failed[SMB2_QUERY_DIRECTORY_HE]));
52341 seq_printf(m, "\nChangeNotifies: %d sent %d failed",
52342- atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
52343- atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
52344+ atomic_read_unchecked(&sent[SMB2_CHANGE_NOTIFY_HE]),
52345+ atomic_read_unchecked(&failed[SMB2_CHANGE_NOTIFY_HE]));
52346 seq_printf(m, "\nQueryInfos: %d sent %d failed",
52347- atomic_read(&sent[SMB2_QUERY_INFO_HE]),
52348- atomic_read(&failed[SMB2_QUERY_INFO_HE]));
52349+ atomic_read_unchecked(&sent[SMB2_QUERY_INFO_HE]),
52350+ atomic_read_unchecked(&failed[SMB2_QUERY_INFO_HE]));
52351 seq_printf(m, "\nSetInfos: %d sent %d failed",
52352- atomic_read(&sent[SMB2_SET_INFO_HE]),
52353- atomic_read(&failed[SMB2_SET_INFO_HE]));
52354+ atomic_read_unchecked(&sent[SMB2_SET_INFO_HE]),
52355+ atomic_read_unchecked(&failed[SMB2_SET_INFO_HE]));
52356 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
52357- atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
52358- atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
52359+ atomic_read_unchecked(&sent[SMB2_OPLOCK_BREAK_HE]),
52360+ atomic_read_unchecked(&failed[SMB2_OPLOCK_BREAK_HE]));
52361 #endif
52362 }
52363
52364diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
52365index 2b95ce2..d079d75 100644
52366--- a/fs/cifs/smb2pdu.c
52367+++ b/fs/cifs/smb2pdu.c
52368@@ -1760,8 +1760,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
52369 default:
52370 cifs_dbg(VFS, "info level %u isn't supported\n",
52371 srch_inf->info_level);
52372- rc = -EINVAL;
52373- goto qdir_exit;
52374+ return -EINVAL;
52375 }
52376
52377 req->FileIndex = cpu_to_le32(index);
52378diff --git a/fs/coda/cache.c b/fs/coda/cache.c
52379index 1da168c..8bc7ff6 100644
52380--- a/fs/coda/cache.c
52381+++ b/fs/coda/cache.c
52382@@ -24,7 +24,7 @@
52383 #include "coda_linux.h"
52384 #include "coda_cache.h"
52385
52386-static atomic_t permission_epoch = ATOMIC_INIT(0);
52387+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
52388
52389 /* replace or extend an acl cache hit */
52390 void coda_cache_enter(struct inode *inode, int mask)
52391@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
52392 struct coda_inode_info *cii = ITOC(inode);
52393
52394 spin_lock(&cii->c_lock);
52395- cii->c_cached_epoch = atomic_read(&permission_epoch);
52396+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
52397 if (!uid_eq(cii->c_uid, current_fsuid())) {
52398 cii->c_uid = current_fsuid();
52399 cii->c_cached_perm = mask;
52400@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
52401 {
52402 struct coda_inode_info *cii = ITOC(inode);
52403 spin_lock(&cii->c_lock);
52404- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
52405+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
52406 spin_unlock(&cii->c_lock);
52407 }
52408
52409 /* remove all acl caches */
52410 void coda_cache_clear_all(struct super_block *sb)
52411 {
52412- atomic_inc(&permission_epoch);
52413+ atomic_inc_unchecked(&permission_epoch);
52414 }
52415
52416
52417@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
52418 spin_lock(&cii->c_lock);
52419 hit = (mask & cii->c_cached_perm) == mask &&
52420 uid_eq(cii->c_uid, current_fsuid()) &&
52421- cii->c_cached_epoch == atomic_read(&permission_epoch);
52422+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
52423 spin_unlock(&cii->c_lock);
52424
52425 return hit;
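
The coda hunk keeps the epoch trick intact while switching the counter type: each cache entry records the global permission_epoch at fill time, and coda_cache_clear_all invalidates every entry at once by bumping the epoch so no stored value matches anymore. A standalone sketch of that invalidation pattern (names are illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_uint permission_epoch;

struct cache_entry {
	unsigned epoch;		/* epoch observed when the entry was filled */
	int cached_perm;
};

static void cache_fill(struct cache_entry *e, int perm)
{
	e->epoch = atomic_load(&permission_epoch);
	e->cached_perm = perm;
}

static bool cache_hit(const struct cache_entry *e, int mask)
{
	return (mask & e->cached_perm) == mask &&
	       e->epoch == atomic_load(&permission_epoch);
}

/* O(1) global invalidation: every stored epoch is now stale. */
static void cache_clear_all(void)
{
	atomic_fetch_add(&permission_epoch, 1);
}

int main(void)
{
	struct cache_entry e;

	cache_fill(&e, 0755);
	printf("hit before clear: %d\n", cache_hit(&e, 0700));
	cache_clear_all();
	printf("hit after clear:  %d\n", cache_hit(&e, 0700));
	return 0;
}
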
52426diff --git a/fs/compat.c b/fs/compat.c
52427index fc3b55d..7b568ae 100644
52428--- a/fs/compat.c
52429+++ b/fs/compat.c
52430@@ -54,7 +54,7 @@
52431 #include <asm/ioctls.h>
52432 #include "internal.h"
52433
52434-int compat_log = 1;
52435+int compat_log = 0;
52436
52437 int compat_printk(const char *fmt, ...)
52438 {
52439@@ -488,7 +488,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
52440
52441 set_fs(KERNEL_DS);
52442 /* The __user pointer cast is valid because of the set_fs() */
52443- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
52444+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
52445 set_fs(oldfs);
52446 /* truncating is ok because it's a user address */
52447 if (!ret)
52448@@ -546,7 +546,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
52449 goto out;
52450
52451 ret = -EINVAL;
52452- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
52453+ if (nr_segs > UIO_MAXIOV)
52454 goto out;
52455 if (nr_segs > fast_segs) {
52456 ret = -ENOMEM;
52457@@ -833,6 +833,7 @@ struct compat_old_linux_dirent {
52458
52459 struct compat_readdir_callback {
52460 struct compat_old_linux_dirent __user *dirent;
52461+ struct file * file;
52462 int result;
52463 };
52464
52465@@ -850,6 +851,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
52466 buf->result = -EOVERFLOW;
52467 return -EOVERFLOW;
52468 }
52469+
52470+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
52471+ return 0;
52472+
52473 buf->result++;
52474 dirent = buf->dirent;
52475 if (!access_ok(VERIFY_WRITE, dirent,
52476@@ -880,6 +885,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
52477
52478 buf.result = 0;
52479 buf.dirent = dirent;
52480+ buf.file = f.file;
52481
52482 error = vfs_readdir(f.file, compat_fillonedir, &buf);
52483 if (buf.result)
52484@@ -899,6 +905,7 @@ struct compat_linux_dirent {
52485 struct compat_getdents_callback {
52486 struct compat_linux_dirent __user *current_dir;
52487 struct compat_linux_dirent __user *previous;
52488+ struct file * file;
52489 int count;
52490 int error;
52491 };
52492@@ -920,6 +927,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
52493 buf->error = -EOVERFLOW;
52494 return -EOVERFLOW;
52495 }
52496+
52497+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
52498+ return 0;
52499+
52500 dirent = buf->previous;
52501 if (dirent) {
52502 if (__put_user(offset, &dirent->d_off))
52503@@ -965,6 +976,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
52504 buf.previous = NULL;
52505 buf.count = count;
52506 buf.error = 0;
52507+ buf.file = f.file;
52508
52509 error = vfs_readdir(f.file, compat_filldir, &buf);
52510 if (error >= 0)
52511@@ -985,6 +997,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
52512 struct compat_getdents_callback64 {
52513 struct linux_dirent64 __user *current_dir;
52514 struct linux_dirent64 __user *previous;
52515+ struct file * file;
52516 int count;
52517 int error;
52518 };
52519@@ -1001,6 +1014,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
52520 buf->error = -EINVAL; /* only used if we fail.. */
52521 if (reclen > buf->count)
52522 return -EINVAL;
52523+
52524+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
52525+ return 0;
52526+
52527 dirent = buf->previous;
52528
52529 if (dirent) {
52530@@ -1050,13 +1067,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
52531 buf.previous = NULL;
52532 buf.count = count;
52533 buf.error = 0;
52534+ buf.file = f.file;
52535
52536 error = vfs_readdir(f.file, compat_filldir64, &buf);
52537 if (error >= 0)
52538 error = buf.error;
52539 lastdirent = buf.previous;
52540 if (lastdirent) {
52541- typeof(lastdirent->d_off) d_off = f.file->f_pos;
52542+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = f.file->f_pos;
52543 if (__put_user_unaligned(d_off, &lastdirent->d_off))
52544 error = -EFAULT;
52545 else
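
The readdir-related fs/compat.c changes all share one shape: a struct file * is threaded through each callback buffer so the gr_acl_handle_filldir hook can veto individual entries, and a vetoed entry is skipped by returning 0 as if it did not exist. A self-contained model of that filtering-callback shape (the policy hook here is invented for illustration):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct readdir_ctx {
	const char *viewer;	/* stands in for the struct file * context */
	int emitted;
};

/* Invented stand-in policy: hide dot-files from everyone but root. */
static bool entry_visible(const struct readdir_ctx *ctx, const char *name)
{
	return name[0] != '.' || strcmp(ctx->viewer, "root") == 0;
}

static int filldir(struct readdir_ctx *ctx, const char *name)
{
	if (!entry_visible(ctx, name))
		return 0;	/* filtered: behave as if the entry is absent */
	printf("%s\n", name);
	ctx->emitted++;
	return 0;
}

int main(void)
{
	const char *entries[] = { ".ssh", "notes.txt", ".profile", "src" };
	struct readdir_ctx ctx = { .viewer = "alice" };
	unsigned i;

	for (i = 0; i < sizeof(entries) / sizeof(entries[0]); i++)
		filldir(&ctx, entries[i]);
	printf("%d entries shown\n", ctx.emitted);
	return 0;
}
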
52546diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
52547index a81147e..20bf2b5 100644
52548--- a/fs/compat_binfmt_elf.c
52549+++ b/fs/compat_binfmt_elf.c
52550@@ -30,11 +30,13 @@
52551 #undef elf_phdr
52552 #undef elf_shdr
52553 #undef elf_note
52554+#undef elf_dyn
52555 #undef elf_addr_t
52556 #define elfhdr elf32_hdr
52557 #define elf_phdr elf32_phdr
52558 #define elf_shdr elf32_shdr
52559 #define elf_note elf32_note
52560+#define elf_dyn Elf32_Dyn
52561 #define elf_addr_t Elf32_Addr
52562
52563 /*
52564diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
52565index 996cdc5..15e2f33 100644
52566--- a/fs/compat_ioctl.c
52567+++ b/fs/compat_ioctl.c
52568@@ -622,7 +622,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
52569 return -EFAULT;
52570 if (__get_user(udata, &ss32->iomem_base))
52571 return -EFAULT;
52572- ss.iomem_base = compat_ptr(udata);
52573+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
52574 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
52575 __get_user(ss.port_high, &ss32->port_high))
52576 return -EFAULT;
52577@@ -703,8 +703,8 @@ static int do_i2c_rdwr_ioctl(unsigned int fd, unsigned int cmd,
52578 for (i = 0; i < nmsgs; i++) {
52579 if (copy_in_user(&tmsgs[i].addr, &umsgs[i].addr, 3*sizeof(u16)))
52580 return -EFAULT;
52581- if (get_user(datap, &umsgs[i].buf) ||
52582- put_user(compat_ptr(datap), &tmsgs[i].buf))
52583+ if (get_user(datap, (u8 __user * __user *)&umsgs[i].buf) ||
52584+ put_user(compat_ptr(datap), (u8 __user * __user *)&tmsgs[i].buf))
52585 return -EFAULT;
52586 }
52587 return sys_ioctl(fd, cmd, (unsigned long)tdata);
52588@@ -797,7 +797,7 @@ static int compat_ioctl_preallocate(struct file *file,
52589 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
52590 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
52591 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
52592- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
52593+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
52594 return -EFAULT;
52595
52596 return ioctl_preallocate(file, p);
52597@@ -1619,8 +1619,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
52598 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
52599 {
52600 unsigned int a, b;
52601- a = *(unsigned int *)p;
52602- b = *(unsigned int *)q;
52603+ a = *(const unsigned int *)p;
52604+ b = *(const unsigned int *)q;
52605 if (a > b)
52606 return 1;
52607 if (a < b)
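
The comparator tweak just above is plain const-correctness: sort comparators receive const void * and should read through const-qualified pointers rather than casting the qualifier away. The same idiom in a standalone qsort example:

#include <stdio.h>
#include <stdlib.h>

static int cmp_uint(const void *p, const void *q)
{
	unsigned a = *(const unsigned *)p;
	unsigned b = *(const unsigned *)q;

	return (a > b) - (a < b);	/* avoids overflow of a - b */
}

int main(void)
{
	unsigned v[] = { 42, 7, 19 };

	qsort(v, 3, sizeof(v[0]), cmp_uint);
	printf("%u %u %u\n", v[0], v[1], v[2]);
	return 0;
}
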
52608diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
52609index 7aabc6a..34c1197 100644
52610--- a/fs/configfs/dir.c
52611+++ b/fs/configfs/dir.c
52612@@ -1565,7 +1565,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
52613 }
52614 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
52615 struct configfs_dirent *next;
52616- const char * name;
52617+ const unsigned char * name;
52618+ char d_name[sizeof(next->s_dentry->d_iname)];
52619 int len;
52620 struct inode *inode = NULL;
52621
52622@@ -1575,7 +1576,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
52623 continue;
52624
52625 name = configfs_get_name(next);
52626- len = strlen(name);
52627+ if (next->s_dentry && name == next->s_dentry->d_iname) {
52628+ len = next->s_dentry->d_name.len;
52629+ memcpy(d_name, name, len);
52630+ name = d_name;
52631+ } else
52632+ len = strlen(name);
52633
52634 /*
52635 * We'll have a dentry and an inode for
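
The configfs hunk snapshots a short dentry name into a local buffer using the recorded d_name.len instead of strlen() on d_iname, because a concurrent rename can rewrite that inline buffer while the reader walks it: when a string can mutate underneath you, copy it out by a saved length rather than scanning for a terminator. A userspace model of the idea (types and sizes are illustrative):

#include <stdio.h>
#include <string.h>

#define NAME_INLINE_LEN 32

struct entry {
	char inline_name[NAME_INLINE_LEN];	/* may change concurrently */
	unsigned name_len;			/* recorded at rename time */
};

static void snapshot_name(const struct entry *e, char *out, size_t outsz)
{
	unsigned len = e->name_len;

	if (len >= outsz)
		len = outsz - 1;
	memcpy(out, e->inline_name, len);	/* bounded copy, no strlen race */
	out[len] = '\0';
}

int main(void)
{
	struct entry e = { .inline_name = "widget", .name_len = 6 };
	char snap[NAME_INLINE_LEN];

	snapshot_name(&e, snap, sizeof(snap));
	printf("%s\n", snap);
	return 0;
}
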
52636diff --git a/fs/coredump.c b/fs/coredump.c
52637index dafafba..10b3b27 100644
52638--- a/fs/coredump.c
52639+++ b/fs/coredump.c
52640@@ -52,7 +52,7 @@ struct core_name {
52641 char *corename;
52642 int used, size;
52643 };
52644-static atomic_t call_count = ATOMIC_INIT(1);
52645+static atomic_unchecked_t call_count = ATOMIC_INIT(1);
52646
52647 /* The maximal length of core_pattern is also specified in sysctl.c */
52648
52649@@ -60,7 +60,7 @@ static int expand_corename(struct core_name *cn)
52650 {
52651 char *old_corename = cn->corename;
52652
52653- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
52654+ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
52655 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
52656
52657 if (!cn->corename) {
52658@@ -157,7 +157,7 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm)
52659 int pid_in_pattern = 0;
52660 int err = 0;
52661
52662- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
52663+ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
52664 cn->corename = kmalloc(cn->size, GFP_KERNEL);
52665 cn->used = 0;
52666
52667@@ -435,8 +435,8 @@ static void wait_for_dump_helpers(struct file *file)
52668 struct pipe_inode_info *pipe = file->private_data;
52669
52670 pipe_lock(pipe);
52671- pipe->readers++;
52672- pipe->writers--;
52673+ atomic_inc(&pipe->readers);
52674+ atomic_dec(&pipe->writers);
52675 wake_up_interruptible_sync(&pipe->wait);
52676 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
52677 pipe_unlock(pipe);
52678@@ -445,11 +445,11 @@ static void wait_for_dump_helpers(struct file *file)
52679 * We actually want wait_event_freezable() but then we need
52680 * to clear TIF_SIGPENDING and improve dump_interrupted().
52681 */
52682- wait_event_interruptible(pipe->wait, pipe->readers == 1);
52683+ wait_event_interruptible(pipe->wait, atomic_read(&pipe->readers) == 1);
52684
52685 pipe_lock(pipe);
52686- pipe->readers--;
52687- pipe->writers++;
52688+ atomic_dec(&pipe->readers);
52689+ atomic_inc(&pipe->writers);
52690 pipe_unlock(pipe);
52691 }
52692
52693@@ -496,7 +496,8 @@ void do_coredump(siginfo_t *siginfo)
52694 struct files_struct *displaced;
52695 bool need_nonrelative = false;
52696 bool core_dumped = false;
52697- static atomic_t core_dump_count = ATOMIC_INIT(0);
52698+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
52699+ long signr = siginfo->si_signo;
52700 struct coredump_params cprm = {
52701 .siginfo = siginfo,
52702 .regs = signal_pt_regs(),
52703@@ -509,7 +510,10 @@ void do_coredump(siginfo_t *siginfo)
52704 .mm_flags = mm->flags,
52705 };
52706
52707- audit_core_dumps(siginfo->si_signo);
52708+ audit_core_dumps(signr);
52709+
52710+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
52711+ gr_handle_brute_attach(cprm.mm_flags);
52712
52713 binfmt = mm->binfmt;
52714 if (!binfmt || !binfmt->core_dump)
52715@@ -533,7 +537,7 @@ void do_coredump(siginfo_t *siginfo)
52716 need_nonrelative = true;
52717 }
52718
52719- retval = coredump_wait(siginfo->si_signo, &core_state);
52720+ retval = coredump_wait(signr, &core_state);
52721 if (retval < 0)
52722 goto fail_creds;
52723
52724@@ -576,7 +580,7 @@ void do_coredump(siginfo_t *siginfo)
52725 }
52726 cprm.limit = RLIM_INFINITY;
52727
52728- dump_count = atomic_inc_return(&core_dump_count);
52729+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
52730 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
52731 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
52732 task_tgid_vnr(current), current->comm);
52733@@ -608,6 +612,8 @@ void do_coredump(siginfo_t *siginfo)
52734 } else {
52735 struct inode *inode;
52736
52737+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
52738+
52739 if (cprm.limit < binfmt->min_coredump)
52740 goto fail_unlock;
52741
52742@@ -666,7 +672,7 @@ close_fail:
52743 filp_close(cprm.file, NULL);
52744 fail_dropcount:
52745 if (ispipe)
52746- atomic_dec(&core_dump_count);
52747+ atomic_dec_unchecked(&core_dump_count);
52748 fail_unlock:
52749 kfree(cn.corename);
52750 fail_corename:
52751@@ -687,7 +693,7 @@ int dump_write(struct file *file, const void *addr, int nr)
52752 {
52753 return !dump_interrupted() &&
52754 access_ok(VERIFY_READ, addr, nr) &&
52755- file->f_op->write(file, addr, nr, &file->f_pos) == nr;
52756+ file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
52757 }
52758 EXPORT_SYMBOL(dump_write);
52759
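
Besides the type conversions, the coredump hunk keeps the pipe-dump throttle shape: atomically take a slot, compare the new count against core_pipe_limit, and give the slot back on the failure path so a rejected dump does not consume the budget forever. A standalone sketch of that take/compare/release pattern (the limit and names are invented):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_uint core_dump_count;
static const unsigned core_pipe_limit = 4;

static bool begin_pipe_dump(void)
{
	unsigned count = atomic_fetch_add(&core_dump_count, 1) + 1;

	if (count > core_pipe_limit) {
		atomic_fetch_sub(&core_dump_count, 1);	/* give the slot back */
		return false;
	}
	return true;
}

static void end_pipe_dump(void)
{
	atomic_fetch_sub(&core_dump_count, 1);
}

int main(void)
{
	int i;

	for (i = 0; i < 6; i++)
		printf("dump %d: %s\n", i,
		       begin_pipe_dump() ? "ok" : "throttled");
	/* a real caller pairs each successful begin with end_pipe_dump() */
	return 0;
}
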
52760diff --git a/fs/dcache.c b/fs/dcache.c
52761index f09b908..04b9690 100644
52762--- a/fs/dcache.c
52763+++ b/fs/dcache.c
52764@@ -3086,7 +3086,8 @@ void __init vfs_caches_init(unsigned long mempages)
52765 mempages -= reserve;
52766
52767 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
52768- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
52769+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY|
52770+ SLAB_NO_SANITIZE, NULL);
52771
52772 dcache_init();
52773 inode_init();
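
SLAB_USERCOPY here is a whitelist flag: under the PAX_USERCOPY checks, copies between userspace and slab memory are only permitted for objects from caches explicitly marked usercopy-safe, and names_cache (path strings) legitimately crosses that boundary. SLAB_NO_SANITIZE additionally exempts the cache from free-time scrubbing for performance. A toy model of the whitelist decision (not the real API):

#include <stdbool.h>
#include <stdio.h>

struct cache {
	const char *name;
	bool usercopy_ok;	/* SLAB_USERCOPY-style flag */
};

static bool check_object(const struct cache *c, unsigned len)
{
	if (!c->usercopy_ok) {
		fprintf(stderr, "refused %u-byte usercopy from %s\n",
			len, c->name);
		return false;
	}
	return true;
}

int main(void)
{
	struct cache names = { "names_cache", true };
	struct cache cred = { "cred_jar", false };

	check_object(&names, 64);	/* allowed */
	check_object(&cred, 64);	/* refused and reported */
	return 0;
}
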
52774diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
52775index c7c83ff..bda9461 100644
52776--- a/fs/debugfs/inode.c
52777+++ b/fs/debugfs/inode.c
52778@@ -415,7 +415,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
52779 */
52780 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
52781 {
52782+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
52783+ return __create_file(name, S_IFDIR | S_IRWXU,
52784+#else
52785 return __create_file(name, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
52786+#endif
52787 parent, NULL, NULL);
52788 }
52789 EXPORT_SYMBOL_GPL(debugfs_create_dir);
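
Under GRKERNSEC_SYSFS_RESTRICT, new debugfs directories drop from mode 0755 to 0700: S_IFDIR and the owner bits stay, group/other read and search go away. The same arithmetic with POSIX mode macros (the kernel source spells the group/other bits S_IRUGO | S_IXUGO):

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	/* default: owner rwx plus group/other read+search */
	mode_t open_mode = S_IFDIR | S_IRWXU | S_IRGRP | S_IXGRP |
			   S_IROTH | S_IXOTH;
	/* restricted: owner-only, what the #ifdef branch selects */
	mode_t restricted = S_IFDIR | S_IRWXU;

	printf("open: %04o  restricted: %04o\n",
	       (unsigned)(open_mode & 07777), (unsigned)(restricted & 07777));
	return 0;
}
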
52790diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
52791index 5eab400..810a3f5 100644
52792--- a/fs/ecryptfs/inode.c
52793+++ b/fs/ecryptfs/inode.c
52794@@ -674,7 +674,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
52795 old_fs = get_fs();
52796 set_fs(get_ds());
52797 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
52798- (char __user *)lower_buf,
52799+ (char __force_user *)lower_buf,
52800 PATH_MAX);
52801 set_fs(old_fs);
52802 if (rc < 0)
52803@@ -706,7 +706,7 @@ out:
52804 static void
52805 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
52806 {
52807- char *buf = nd_get_link(nd);
52808+ const char *buf = nd_get_link(nd);
52809 if (!IS_ERR(buf)) {
52810 /* Free the char* */
52811 kfree(buf);
52812diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
52813index e4141f2..d8263e8 100644
52814--- a/fs/ecryptfs/miscdev.c
52815+++ b/fs/ecryptfs/miscdev.c
52816@@ -304,7 +304,7 @@ check_list:
52817 goto out_unlock_msg_ctx;
52818 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
52819 if (msg_ctx->msg) {
52820- if (copy_to_user(&buf[i], packet_length, packet_length_size))
52821+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
52822 goto out_unlock_msg_ctx;
52823 i += packet_length_size;
52824 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
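
The miscdev change is a classic over-read guard: packet_length is a small fixed buffer and packet_length_size is computed at runtime, so the size is checked against sizeof(packet_length) before copy_to_user can read past the end of the array and leak adjacent stack bytes. The shape of the fix, in a standalone sketch with made-up sizes:

#include <stdio.h>
#include <string.h>

#define PKT_LEN_MAX 8

static int copy_header(char *dst, size_t dstsz,
		       const char packet_length[PKT_LEN_MAX],
		       size_t packet_length_size)
{
	/* reject before reading: the length must fit both buffers */
	if (packet_length_size > PKT_LEN_MAX || packet_length_size > dstsz)
		return -1;
	memcpy(dst, packet_length, packet_length_size);
	return 0;
}

int main(void)
{
	char src[PKT_LEN_MAX] = "\x01\x02\x03";
	char dst[16];

	printf("ok:  %d\n", copy_header(dst, sizeof(dst), src, 3));
	printf("bad: %d\n", copy_header(dst, sizeof(dst), src, 64));
	return 0;
}
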
52825diff --git a/fs/exec.c b/fs/exec.c
52826index ffd7a81..3c84660 100644
52827--- a/fs/exec.c
52828+++ b/fs/exec.c
52829@@ -55,8 +55,20 @@
52830 #include <linux/pipe_fs_i.h>
52831 #include <linux/oom.h>
52832 #include <linux/compat.h>
52833+#include <linux/random.h>
52834+#include <linux/seq_file.h>
52835+#include <linux/coredump.h>
52836+#include <linux/mman.h>
52837+
52838+#ifdef CONFIG_PAX_REFCOUNT
52839+#include <linux/kallsyms.h>
52840+#include <linux/kdebug.h>
52841+#endif
52842+
52843+#include <trace/events/fs.h>
52844
52845 #include <asm/uaccess.h>
52846+#include <asm/sections.h>
52847 #include <asm/mmu_context.h>
52848 #include <asm/tlb.h>
52849
52850@@ -66,17 +78,32 @@
52851
52852 #include <trace/events/sched.h>
52853
52854+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
52855+void __weak pax_set_initial_flags(struct linux_binprm *bprm)
52856+{
52857+ pr_warn_once("PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
52858+}
52859+#endif
52860+
52861+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
52862+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
52863+EXPORT_SYMBOL(pax_set_initial_flags_func);
52864+#endif
52865+
52866 int suid_dumpable = 0;
52867
52868 static LIST_HEAD(formats);
52869 static DEFINE_RWLOCK(binfmt_lock);
52870
52871+extern int gr_process_kernel_exec_ban(void);
52872+extern int gr_process_suid_exec_ban(const struct linux_binprm *bprm);
52873+
52874 void __register_binfmt(struct linux_binfmt * fmt, int insert)
52875 {
52876 BUG_ON(!fmt);
52877 write_lock(&binfmt_lock);
52878- insert ? list_add(&fmt->lh, &formats) :
52879- list_add_tail(&fmt->lh, &formats);
52880+ insert ? pax_list_add((struct list_head *)&fmt->lh, &formats) :
52881+ pax_list_add_tail((struct list_head *)&fmt->lh, &formats);
52882 write_unlock(&binfmt_lock);
52883 }
52884
52885@@ -85,7 +112,7 @@ EXPORT_SYMBOL(__register_binfmt);
52886 void unregister_binfmt(struct linux_binfmt * fmt)
52887 {
52888 write_lock(&binfmt_lock);
52889- list_del(&fmt->lh);
52890+ pax_list_del((struct list_head *)&fmt->lh);
52891 write_unlock(&binfmt_lock);
52892 }
52893
52894@@ -180,18 +207,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
52895 int write)
52896 {
52897 struct page *page;
52898- int ret;
52899
52900-#ifdef CONFIG_STACK_GROWSUP
52901- if (write) {
52902- ret = expand_downwards(bprm->vma, pos);
52903- if (ret < 0)
52904- return NULL;
52905- }
52906-#endif
52907- ret = get_user_pages(current, bprm->mm, pos,
52908- 1, write, 1, &page, NULL);
52909- if (ret <= 0)
52910+ if (0 > expand_downwards(bprm->vma, pos))
52911+ return NULL;
52912+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
52913 return NULL;
52914
52915 if (write) {
52916@@ -207,6 +226,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
52917 if (size <= ARG_MAX)
52918 return page;
52919
52920+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52921+ // only allow 512KB for argv+env on suid/sgid binaries
52922+ // to prevent easy ASLR exhaustion
52923+ if (((!uid_eq(bprm->cred->euid, current_euid())) ||
52924+ (!gid_eq(bprm->cred->egid, current_egid()))) &&
52925+ (size > (512 * 1024))) {
52926+ put_page(page);
52927+ return NULL;
52928+ }
52929+#endif
52930+
52931 /*
52932 * Limit to 1/4-th the stack size for the argv+env strings.
52933 * This ensures that:
52934@@ -266,6 +296,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
52935 vma->vm_end = STACK_TOP_MAX;
52936 vma->vm_start = vma->vm_end - PAGE_SIZE;
52937 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
52938+
52939+#ifdef CONFIG_PAX_SEGMEXEC
52940+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
52941+#endif
52942+
52943 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
52944 INIT_LIST_HEAD(&vma->anon_vma_chain);
52945
52946@@ -276,6 +311,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
52947 mm->stack_vm = mm->total_vm = 1;
52948 up_write(&mm->mmap_sem);
52949 bprm->p = vma->vm_end - sizeof(void *);
52950+
52951+#ifdef CONFIG_PAX_RANDUSTACK
52952+ if (randomize_va_space)
52953+ bprm->p ^= prandom_u32() & ~PAGE_MASK;
52954+#endif
52955+
52956 return 0;
52957 err:
52958 up_write(&mm->mmap_sem);
52959@@ -396,7 +437,7 @@ struct user_arg_ptr {
52960 } ptr;
52961 };
52962
52963-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
52964+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
52965 {
52966 const char __user *native;
52967
52968@@ -405,14 +446,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
52969 compat_uptr_t compat;
52970
52971 if (get_user(compat, argv.ptr.compat + nr))
52972- return ERR_PTR(-EFAULT);
52973+ return (const char __force_user *)ERR_PTR(-EFAULT);
52974
52975 return compat_ptr(compat);
52976 }
52977 #endif
52978
52979 if (get_user(native, argv.ptr.native + nr))
52980- return ERR_PTR(-EFAULT);
52981+ return (const char __force_user *)ERR_PTR(-EFAULT);
52982
52983 return native;
52984 }
52985@@ -431,7 +472,7 @@ static int count(struct user_arg_ptr argv, int max)
52986 if (!p)
52987 break;
52988
52989- if (IS_ERR(p))
52990+ if (IS_ERR((const char __force_kernel *)p))
52991 return -EFAULT;
52992
52993 if (i >= max)
52994@@ -466,7 +507,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
52995
52996 ret = -EFAULT;
52997 str = get_user_arg_ptr(argv, argc);
52998- if (IS_ERR(str))
52999+ if (IS_ERR((const char __force_kernel *)str))
53000 goto out;
53001
53002 len = strnlen_user(str, MAX_ARG_STRLEN);
53003@@ -548,7 +589,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
53004 int r;
53005 mm_segment_t oldfs = get_fs();
53006 struct user_arg_ptr argv = {
53007- .ptr.native = (const char __user *const __user *)__argv,
53008+ .ptr.native = (const char __force_user * const __force_user *)__argv,
53009 };
53010
53011 set_fs(KERNEL_DS);
53012@@ -583,7 +624,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
53013 unsigned long new_end = old_end - shift;
53014 struct mmu_gather tlb;
53015
53016- BUG_ON(new_start > new_end);
53017+ if (new_start >= new_end || new_start < mmap_min_addr)
53018+ return -ENOMEM;
53019
53020 /*
53021 * ensure there are no vmas between where we want to go
53022@@ -592,6 +634,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
53023 if (vma != find_vma(mm, new_start))
53024 return -EFAULT;
53025
53026+#ifdef CONFIG_PAX_SEGMEXEC
53027+ BUG_ON(pax_find_mirror_vma(vma));
53028+#endif
53029+
53030 /*
53031 * cover the whole range: [new_start, old_end)
53032 */
53033@@ -607,7 +653,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
53034 return -ENOMEM;
53035
53036 lru_add_drain();
53037- tlb_gather_mmu(&tlb, mm, 0);
53038+ tlb_gather_mmu(&tlb, mm, old_start, old_end);
53039 if (new_end > old_start) {
53040 /*
53041 * when the old and new regions overlap clear from new_end.
53042@@ -624,7 +670,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
53043 free_pgd_range(&tlb, old_start, old_end, new_end,
53044 vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
53045 }
53046- tlb_finish_mmu(&tlb, new_end, old_end);
53047+ tlb_finish_mmu(&tlb, old_start, old_end);
53048
53049 /*
53050 * Shrink the vma to just the new range. Always succeeds.
53051@@ -672,10 +718,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
53052 stack_top = arch_align_stack(stack_top);
53053 stack_top = PAGE_ALIGN(stack_top);
53054
53055- if (unlikely(stack_top < mmap_min_addr) ||
53056- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
53057- return -ENOMEM;
53058-
53059 stack_shift = vma->vm_end - stack_top;
53060
53061 bprm->p -= stack_shift;
53062@@ -687,8 +729,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
53063 bprm->exec -= stack_shift;
53064
53065 down_write(&mm->mmap_sem);
53066+
53067+ /* Move stack pages down in memory. */
53068+ if (stack_shift) {
53069+ ret = shift_arg_pages(vma, stack_shift);
53070+ if (ret)
53071+ goto out_unlock;
53072+ }
53073+
53074 vm_flags = VM_STACK_FLAGS;
53075
53076+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
53077+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
53078+ vm_flags &= ~VM_EXEC;
53079+
53080+#ifdef CONFIG_PAX_MPROTECT
53081+ if (mm->pax_flags & MF_PAX_MPROTECT)
53082+ vm_flags &= ~VM_MAYEXEC;
53083+#endif
53084+
53085+ }
53086+#endif
53087+
53088 /*
53089 * Adjust stack execute permissions; explicitly enable for
53090 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
53091@@ -707,13 +769,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
53092 goto out_unlock;
53093 BUG_ON(prev != vma);
53094
53095- /* Move stack pages down in memory. */
53096- if (stack_shift) {
53097- ret = shift_arg_pages(vma, stack_shift);
53098- if (ret)
53099- goto out_unlock;
53100- }
53101-
53102 /* mprotect_fixup is overkill to remove the temporary stack flags */
53103 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
53104
53105@@ -737,6 +792,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
53106 #endif
53107 current->mm->start_stack = bprm->p;
53108 ret = expand_stack(vma, stack_base);
53109+
53110+#if !defined(CONFIG_STACK_GROWSUP) && defined(CONFIG_PAX_RANDMMAP)
53111+ if (!ret && (mm->pax_flags & MF_PAX_RANDMMAP) && STACK_TOP <= 0xFFFFFFFFU && STACK_TOP > vma->vm_end) {
53112+ unsigned long size;
53113+ vm_flags_t vm_flags;
53114+
53115+ size = STACK_TOP - vma->vm_end;
53116+ vm_flags = VM_NONE | VM_DONTEXPAND | VM_DONTDUMP;
53117+
53118+ ret = vma->vm_end != mmap_region(NULL, vma->vm_end, size, vm_flags, 0);
53119+
53120+#ifdef CONFIG_X86
53121+ if (!ret) {
53122+ size = PAGE_SIZE + mmap_min_addr + ((mm->delta_mmap ^ mm->delta_stack) & (0xFFUL << PAGE_SHIFT));
53123+ ret = 0 != mmap_region(NULL, 0, PAGE_ALIGN(size), vm_flags, 0);
53124+ }
53125+#endif
53126+
53127+ }
53128+#endif
53129+
53130 if (ret)
53131 ret = -EFAULT;
53132
53133@@ -772,6 +848,8 @@ struct file *open_exec(const char *name)
53134
53135 fsnotify_open(file);
53136
53137+ trace_open_exec(name);
53138+
53139 err = deny_write_access(file);
53140 if (err)
53141 goto exit;
53142@@ -795,7 +873,7 @@ int kernel_read(struct file *file, loff_t offset,
53143 old_fs = get_fs();
53144 set_fs(get_ds());
53145 /* The cast to a user pointer is valid due to the set_fs() */
53146- result = vfs_read(file, (void __user *)addr, count, &pos);
53147+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
53148 set_fs(old_fs);
53149 return result;
53150 }
53151@@ -1251,7 +1329,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
53152 }
53153 rcu_read_unlock();
53154
53155- if (p->fs->users > n_fs) {
53156+ if (atomic_read(&p->fs->users) > n_fs) {
53157 bprm->unsafe |= LSM_UNSAFE_SHARE;
53158 } else {
53159 res = -EAGAIN;
53160@@ -1451,6 +1529,31 @@ int search_binary_handler(struct linux_binprm *bprm)
53161
53162 EXPORT_SYMBOL(search_binary_handler);
53163
53164+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53165+static DEFINE_PER_CPU(u64, exec_counter);
53166+static int __init init_exec_counters(void)
53167+{
53168+ unsigned int cpu;
53169+
53170+ for_each_possible_cpu(cpu) {
53171+ per_cpu(exec_counter, cpu) = (u64)cpu;
53172+ }
53173+
53174+ return 0;
53175+}
53176+early_initcall(init_exec_counters);
53177+static inline void increment_exec_counter(void)
53178+{
53179+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
53180+ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
53181+}
53182+#else
53183+static inline void increment_exec_counter(void) {}
53184+#endif
53185+
53186+extern void gr_handle_exec_args(struct linux_binprm *bprm,
53187+ struct user_arg_ptr argv);
53188+
53189 /*
53190 * sys_execve() executes a new program.
53191 */
53192@@ -1458,6 +1561,11 @@ static int do_execve_common(const char *filename,
53193 struct user_arg_ptr argv,
53194 struct user_arg_ptr envp)
53195 {
53196+#ifdef CONFIG_GRKERNSEC
53197+ struct file *old_exec_file;
53198+ struct acl_subject_label *old_acl;
53199+ struct rlimit old_rlim[RLIM_NLIMITS];
53200+#endif
53201 struct linux_binprm *bprm;
53202 struct file *file;
53203 struct files_struct *displaced;
53204@@ -1465,6 +1573,8 @@ static int do_execve_common(const char *filename,
53205 int retval;
53206 const struct cred *cred = current_cred();
53207
53208+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&cred->user->processes), 1);
53209+
53210 /*
53211 * We move the actual failure in case of RLIMIT_NPROC excess from
53212 * set*uid() to execve() because too many poorly written programs
53213@@ -1505,12 +1615,22 @@ static int do_execve_common(const char *filename,
53214 if (IS_ERR(file))
53215 goto out_unmark;
53216
53217+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
53218+ retval = -EPERM;
53219+ goto out_file;
53220+ }
53221+
53222 sched_exec();
53223
53224 bprm->file = file;
53225 bprm->filename = filename;
53226 bprm->interp = filename;
53227
53228+ if (!gr_acl_handle_execve(file->f_path.dentry, file->f_path.mnt)) {
53229+ retval = -EACCES;
53230+ goto out_file;
53231+ }
53232+
53233 retval = bprm_mm_init(bprm);
53234 if (retval)
53235 goto out_file;
53236@@ -1527,24 +1647,70 @@ static int do_execve_common(const char *filename,
53237 if (retval < 0)
53238 goto out;
53239
53240+#ifdef CONFIG_GRKERNSEC
53241+ old_acl = current->acl;
53242+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
53243+ old_exec_file = current->exec_file;
53244+ get_file(file);
53245+ current->exec_file = file;
53246+#endif
53247+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53248+ /* limit suid stack to 8MB
53249+ * we saved the old limits above and will restore them if this exec fails
53250+ */
53251+ if (((!uid_eq(bprm->cred->euid, current_euid())) || (!gid_eq(bprm->cred->egid, current_egid()))) &&
53252+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
53253+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
53254+#endif
53255+
53256+ if (gr_process_kernel_exec_ban() || gr_process_suid_exec_ban(bprm)) {
53257+ retval = -EPERM;
53258+ goto out_fail;
53259+ }
53260+
53261+ if (!gr_tpe_allow(file)) {
53262+ retval = -EACCES;
53263+ goto out_fail;
53264+ }
53265+
53266+ if (gr_check_crash_exec(file)) {
53267+ retval = -EACCES;
53268+ goto out_fail;
53269+ }
53270+
53271+ retval = gr_set_proc_label(file->f_path.dentry, file->f_path.mnt,
53272+ bprm->unsafe);
53273+ if (retval < 0)
53274+ goto out_fail;
53275+
53276 retval = copy_strings_kernel(1, &bprm->filename, bprm);
53277 if (retval < 0)
53278- goto out;
53279+ goto out_fail;
53280
53281 bprm->exec = bprm->p;
53282 retval = copy_strings(bprm->envc, envp, bprm);
53283 if (retval < 0)
53284- goto out;
53285+ goto out_fail;
53286
53287 retval = copy_strings(bprm->argc, argv, bprm);
53288 if (retval < 0)
53289- goto out;
53290+ goto out_fail;
53291+
53292+ gr_log_chroot_exec(file->f_path.dentry, file->f_path.mnt);
53293+
53294+ gr_handle_exec_args(bprm, argv);
53295
53296 retval = search_binary_handler(bprm);
53297 if (retval < 0)
53298- goto out;
53299+ goto out_fail;
53300+#ifdef CONFIG_GRKERNSEC
53301+ if (old_exec_file)
53302+ fput(old_exec_file);
53303+#endif
53304
53305 /* execve succeeded */
53306+
53307+ increment_exec_counter();
53308 current->fs->in_exec = 0;
53309 current->in_execve = 0;
53310 acct_update_integrals(current);
53311@@ -1553,6 +1719,14 @@ static int do_execve_common(const char *filename,
53312 put_files_struct(displaced);
53313 return retval;
53314
53315+out_fail:
53316+#ifdef CONFIG_GRKERNSEC
53317+ current->acl = old_acl;
53318+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
53319+ fput(current->exec_file);
53320+ current->exec_file = old_exec_file;
53321+#endif
53322+
53323 out:
53324 if (bprm->mm) {
53325 acct_arg_size(bprm, 0);
53326@@ -1701,3 +1875,287 @@ asmlinkage long compat_sys_execve(const char __user * filename,
53327 return error;
53328 }
53329 #endif
53330+
53331+int pax_check_flags(unsigned long *flags)
53332+{
53333+ int retval = 0;
53334+
53335+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
53336+ if (*flags & MF_PAX_SEGMEXEC)
53337+ {
53338+ *flags &= ~MF_PAX_SEGMEXEC;
53339+ retval = -EINVAL;
53340+ }
53341+#endif
53342+
53343+ if ((*flags & MF_PAX_PAGEEXEC)
53344+
53345+#ifdef CONFIG_PAX_PAGEEXEC
53346+ && (*flags & MF_PAX_SEGMEXEC)
53347+#endif
53348+
53349+ )
53350+ {
53351+ *flags &= ~MF_PAX_PAGEEXEC;
53352+ retval = -EINVAL;
53353+ }
53354+
53355+ if ((*flags & MF_PAX_MPROTECT)
53356+
53357+#ifdef CONFIG_PAX_MPROTECT
53358+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
53359+#endif
53360+
53361+ )
53362+ {
53363+ *flags &= ~MF_PAX_MPROTECT;
53364+ retval = -EINVAL;
53365+ }
53366+
53367+ if ((*flags & MF_PAX_EMUTRAMP)
53368+
53369+#ifdef CONFIG_PAX_EMUTRAMP
53370+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
53371+#endif
53372+
53373+ )
53374+ {
53375+ *flags &= ~MF_PAX_EMUTRAMP;
53376+ retval = -EINVAL;
53377+ }
53378+
53379+ return retval;
53380+}
53381+
53382+EXPORT_SYMBOL(pax_check_flags);
53383+
53384+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
53385+char *pax_get_path(const struct path *path, char *buf, int buflen)
53386+{
53387+ char *pathname = d_path(path, buf, buflen);
53388+
53389+ if (IS_ERR(pathname))
53390+ goto toolong;
53391+
53392+ pathname = mangle_path(buf, pathname, "\t\n\\");
53393+ if (!pathname)
53394+ goto toolong;
53395+
53396+ *pathname = 0;
53397+ return buf;
53398+
53399+toolong:
53400+ return "<path too long>";
53401+}
53402+EXPORT_SYMBOL(pax_get_path);
53403+
53404+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
53405+{
53406+ struct task_struct *tsk = current;
53407+ struct mm_struct *mm = current->mm;
53408+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
53409+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
53410+ char *path_exec = NULL;
53411+ char *path_fault = NULL;
53412+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
53413+ siginfo_t info = { };
53414+
53415+ if (buffer_exec && buffer_fault) {
53416+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
53417+
53418+ down_read(&mm->mmap_sem);
53419+ vma = mm->mmap;
53420+ while (vma && (!vma_exec || !vma_fault)) {
53421+ if (vma->vm_file && mm->exe_file == vma->vm_file && (vma->vm_flags & VM_EXEC))
53422+ vma_exec = vma;
53423+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
53424+ vma_fault = vma;
53425+ vma = vma->vm_next;
53426+ }
53427+ if (vma_exec)
53428+ path_exec = pax_get_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
53429+ if (vma_fault) {
53430+ start = vma_fault->vm_start;
53431+ end = vma_fault->vm_end;
53432+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
53433+ if (vma_fault->vm_file)
53434+ path_fault = pax_get_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
53435+ else if ((unsigned long)pc >= mm->start_brk && (unsigned long)pc < mm->brk)
53436+ path_fault = "<heap>";
53437+ else if (vma_fault->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
53438+ path_fault = "<stack>";
53439+ else
53440+ path_fault = "<anonymous mapping>";
53441+ }
53442+ up_read(&mm->mmap_sem);
53443+ }
53444+ if (tsk->signal->curr_ip)
53445+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
53446+ else
53447+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
53448+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
53449+ from_kuid_munged(&init_user_ns, task_uid(tsk)), from_kuid_munged(&init_user_ns, task_euid(tsk)), pc, sp);
53450+ free_page((unsigned long)buffer_exec);
53451+ free_page((unsigned long)buffer_fault);
53452+ pax_report_insns(regs, pc, sp);
53453+ info.si_signo = SIGKILL;
53454+ info.si_errno = 0;
53455+ info.si_code = SI_KERNEL;
53456+ info.si_pid = 0;
53457+ info.si_uid = 0;
53458+ do_coredump(&info);
53459+}
53460+#endif
53461+
53462+#ifdef CONFIG_PAX_REFCOUNT
53463+void pax_report_refcount_overflow(struct pt_regs *regs)
53464+{
53465+ if (current->signal->curr_ip)
53466+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
53467+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
53468+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
53469+ else
53470+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", current->comm, task_pid_nr(current),
53471+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
53472+	print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
53473+ preempt_disable();
53474+ show_regs(regs);
53475+ preempt_enable();
53476+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
53477+}
53478+#endif
53479+
53480+#ifdef CONFIG_PAX_USERCOPY
53481+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
53482+static noinline int check_stack_object(const void *obj, unsigned long len)
53483+{
53484+ const void * const stack = task_stack_page(current);
53485+ const void * const stackend = stack + THREAD_SIZE;
53486+
53487+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
53488+ const void *frame = NULL;
53489+ const void *oldframe;
53490+#endif
53491+
53492+ if (obj + len < obj)
53493+ return -1;
53494+
53495+ if (obj + len <= stack || stackend <= obj)
53496+ return 0;
53497+
53498+ if (obj < stack || stackend < obj + len)
53499+ return -1;
53500+
53501+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
53502+ oldframe = __builtin_frame_address(1);
53503+ if (oldframe)
53504+ frame = __builtin_frame_address(2);
53505+ /*
53506+ low ----------------------------------------------> high
53507+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
53508+ ^----------------^
53509+ allow copies only within here
53510+ */
53511+ while (stack <= frame && frame < stackend) {
53512+ /* if obj + len extends past the last frame, this
53513+ check won't pass and the next frame will be 0,
53514+ causing us to bail out and correctly report
53515+ the copy as invalid
53516+ */
53517+ if (obj + len <= frame)
53518+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
53519+ oldframe = frame;
53520+ frame = *(const void * const *)frame;
53521+ }
53522+ return -1;
53523+#else
53524+ return 1;
53525+#endif
53526+}
53527+
53528+static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to_user, const char *type)
53529+{
53530+ if (current->signal->curr_ip)
53531+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
53532+ &current->signal->curr_ip, to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
53533+ else
53534+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
53535+ to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
53536+ dump_stack();
53537+ gr_handle_kernel_exploit();
53538+ do_group_exit(SIGKILL);
53539+}
53540+#endif
53541+
53542+#ifdef CONFIG_PAX_USERCOPY
53543+static inline bool check_kernel_text_object(unsigned long low, unsigned long high)
53544+{
53545+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
53546+ unsigned long textlow = ktla_ktva((unsigned long)_stext);
53547+#ifdef CONFIG_MODULES
53548+ unsigned long texthigh = (unsigned long)MODULES_EXEC_VADDR;
53549+#else
53550+ unsigned long texthigh = ktla_ktva((unsigned long)_etext);
53551+#endif
53552+
53553+#else
53554+ unsigned long textlow = (unsigned long)_stext;
53555+ unsigned long texthigh = (unsigned long)_etext;
53556+#endif
53557+
53558+ if (high <= textlow || low > texthigh)
53559+ return false;
53560+ else
53561+ return true;
53562+}
53563+#endif
53564+
53565+void __check_object_size(const void *ptr, unsigned long n, bool to_user)
53566+{
53567+
53568+#ifdef CONFIG_PAX_USERCOPY
53569+ const char *type;
53570+
53571+ if (!n)
53572+ return;
53573+
53574+ type = check_heap_object(ptr, n);
53575+ if (!type) {
53576+ int ret = check_stack_object(ptr, n);
53577+ if (ret == 1 || ret == 2)
53578+ return;
53579+ if (ret == 0) {
53580+ if (check_kernel_text_object((unsigned long)ptr, (unsigned long)ptr + n))
53581+ type = "<kernel text>";
53582+ else
53583+ return;
53584+ } else
53585+ type = "<process stack>";
53586+ }
53587+
53588+ pax_report_usercopy(ptr, n, to_user, type);
53589+#endif
53590+
53591+}
53592+EXPORT_SYMBOL(__check_object_size);
53593+
53594+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
53595+void pax_track_stack(void)
53596+{
53597+ unsigned long sp = (unsigned long)&sp;
53598+ if (sp < current_thread_info()->lowest_stack &&
53599+ sp > (unsigned long)task_stack_page(current))
53600+ current_thread_info()->lowest_stack = sp;
53601+}
53602+EXPORT_SYMBOL(pax_track_stack);
53603+#endif
53604+
53605+#ifdef CONFIG_PAX_SIZE_OVERFLOW
53606+void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
53607+{
53608+ printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s", func, file, line, ssa_name);
53609+ dump_stack();
53610+ do_group_exit(SIGKILL);
53611+}
53612+EXPORT_SYMBOL(report_size_overflow);
53613+#endif
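
The largest addition above is the PAX_USERCOPY machinery, and its core is check_stack_object's classification: a candidate copy range is either entirely off the current stack (0, go check the heap and kernel text instead), fully inside it (1, or 2 when provably within the caller's frame), or straddling a boundary (-1, always an error). A standalone model of the range logic, using plain addresses to keep the arithmetic well defined; the frame-walking refinement behind CONFIG_FRAME_POINTER is omitted:

#include <stdio.h>

#define THREAD_SIZE 16384UL

/* 0: not on this stack, 1: fully inside, -1: straddles a boundary */
static int classify(unsigned long stack, unsigned long obj, unsigned long len)
{
	unsigned long stackend = stack + THREAD_SIZE;

	if (obj + len < obj)				/* length wraps: bogus */
		return -1;
	if (obj + len <= stack || stackend <= obj)	/* entirely elsewhere */
		return 0;
	if (obj < stack || stackend < obj + len)	/* partial overlap */
		return -1;
	return 1;
}

int main(void)
{
	unsigned long stack = 0x10000;

	printf("%d\n", classify(stack, stack + 100, 64));	 /* 1 */
	printf("%d\n", classify(stack, stack - 32, 64));	 /* -1 */
	printf("%d\n", classify(stack, stack + THREAD_SIZE, 8)); /* 0 */
	return 0;
}

Only the straddling case is unconditionally hostile: a copy that begins inside the stack and ends outside it (or vice versa) cannot be a legitimate object, which is why __check_object_size reports it regardless of what the heap checker says.
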
53614diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
53615index 9f9992b..8b59411 100644
53616--- a/fs/ext2/balloc.c
53617+++ b/fs/ext2/balloc.c
53618@@ -1184,10 +1184,10 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
53619
53620 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
53621 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
53622- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
53623+ if (free_blocks < root_blocks + 1 &&
53624 !uid_eq(sbi->s_resuid, current_fsuid()) &&
53625 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
53626- !in_group_p (sbi->s_resgid))) {
53627+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
53628 return 0;
53629 }
53630 return 1;
53631diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
53632index 22548f5..41521d8 100644
53633--- a/fs/ext3/balloc.c
53634+++ b/fs/ext3/balloc.c
53635@@ -1438,10 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
53636
53637 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
53638 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
53639- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
53640+ if (free_blocks < root_blocks + 1 &&
53641 !use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
53642 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
53643- !in_group_p (sbi->s_resgid))) {
53644+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
53645 return 0;
53646 }
53647 return 1;
53648diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
53649index 3742e4c..69a797f 100644
53650--- a/fs/ext4/balloc.c
53651+++ b/fs/ext4/balloc.c
53652@@ -528,8 +528,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
53653 /* Hm, nope. Are (enough) root reserved clusters available? */
53654 if (uid_eq(sbi->s_resuid, current_fsuid()) ||
53655 (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
53656- capable(CAP_SYS_RESOURCE) ||
53657- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
53658+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
53659+ capable_nolog(CAP_SYS_RESOURCE)) {
53660
53661 if (free_clusters >= (nclusters + dirty_clusters +
53662 resv_clusters))
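
The ext2/ext3/ext4 balloc changes are the same two-part tweak applied three times: the capability test moves to the end of the condition so the cheap owner/group identity checks short-circuit first, and it becomes capable_nolog() so that merely asking does not emit a denied-capability audit record on every ordinary allocation near the reserve. A small model of the reordering half (capable_nolog itself is a grsecurity primitive, stubbed here):

#include <stdbool.h>
#include <stdio.h>

static int audit_records;

/* stub: a logged capability check, charged one audit record per call */
static bool capable_logged(void)
{
	audit_records++;
	return false;
}

static bool may_use_reserve(bool is_resuid, bool in_resgid)
{
	/* cheap identity tests first; the noisy check only as a last resort */
	return is_resuid || in_resgid || capable_logged();
}

int main(void)
{
	may_use_reserve(true, false);	/* decided without touching capable */
	may_use_reserve(false, false);	/* falls through to capable once */
	printf("audit records: %d\n", audit_records);
	return 0;
}
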
53663diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
53664index 5aae3d1..b5da7f8 100644
53665--- a/fs/ext4/ext4.h
53666+++ b/fs/ext4/ext4.h
53667@@ -1252,19 +1252,19 @@ struct ext4_sb_info {
53668 unsigned long s_mb_last_start;
53669
53670 /* stats for buddy allocator */
53671- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
53672- atomic_t s_bal_success; /* we found long enough chunks */
53673- atomic_t s_bal_allocated; /* in blocks */
53674- atomic_t s_bal_ex_scanned; /* total extents scanned */
53675- atomic_t s_bal_goals; /* goal hits */
53676- atomic_t s_bal_breaks; /* too long searches */
53677- atomic_t s_bal_2orders; /* 2^order hits */
53678+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
53679+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
53680+ atomic_unchecked_t s_bal_allocated; /* in blocks */
53681+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
53682+ atomic_unchecked_t s_bal_goals; /* goal hits */
53683+ atomic_unchecked_t s_bal_breaks; /* too long searches */
53684+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
53685 spinlock_t s_bal_lock;
53686 unsigned long s_mb_buddies_generated;
53687 unsigned long long s_mb_generation_time;
53688- atomic_t s_mb_lost_chunks;
53689- atomic_t s_mb_preallocated;
53690- atomic_t s_mb_discarded;
53691+ atomic_unchecked_t s_mb_lost_chunks;
53692+ atomic_unchecked_t s_mb_preallocated;
53693+ atomic_unchecked_t s_mb_discarded;
53694 atomic_t s_lock_busy;
53695
53696 /* locality groups */
53697diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
53698index 59c6750..a549154 100644
53699--- a/fs/ext4/mballoc.c
53700+++ b/fs/ext4/mballoc.c
53701@@ -1865,7 +1865,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
53702 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
53703
53704 if (EXT4_SB(sb)->s_mb_stats)
53705- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
53706+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
53707
53708 break;
53709 }
53710@@ -2170,7 +2170,7 @@ repeat:
53711 ac->ac_status = AC_STATUS_CONTINUE;
53712 ac->ac_flags |= EXT4_MB_HINT_FIRST;
53713 cr = 3;
53714- atomic_inc(&sbi->s_mb_lost_chunks);
53715+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
53716 goto repeat;
53717 }
53718 }
53719@@ -2678,25 +2678,25 @@ int ext4_mb_release(struct super_block *sb)
53720 if (sbi->s_mb_stats) {
53721 ext4_msg(sb, KERN_INFO,
53722 "mballoc: %u blocks %u reqs (%u success)",
53723- atomic_read(&sbi->s_bal_allocated),
53724- atomic_read(&sbi->s_bal_reqs),
53725- atomic_read(&sbi->s_bal_success));
53726+ atomic_read_unchecked(&sbi->s_bal_allocated),
53727+ atomic_read_unchecked(&sbi->s_bal_reqs),
53728+ atomic_read_unchecked(&sbi->s_bal_success));
53729 ext4_msg(sb, KERN_INFO,
53730 "mballoc: %u extents scanned, %u goal hits, "
53731 "%u 2^N hits, %u breaks, %u lost",
53732- atomic_read(&sbi->s_bal_ex_scanned),
53733- atomic_read(&sbi->s_bal_goals),
53734- atomic_read(&sbi->s_bal_2orders),
53735- atomic_read(&sbi->s_bal_breaks),
53736- atomic_read(&sbi->s_mb_lost_chunks));
53737+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
53738+ atomic_read_unchecked(&sbi->s_bal_goals),
53739+ atomic_read_unchecked(&sbi->s_bal_2orders),
53740+ atomic_read_unchecked(&sbi->s_bal_breaks),
53741+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
53742 ext4_msg(sb, KERN_INFO,
53743 "mballoc: %lu generated and it took %Lu",
53744 sbi->s_mb_buddies_generated,
53745 sbi->s_mb_generation_time);
53746 ext4_msg(sb, KERN_INFO,
53747 "mballoc: %u preallocated, %u discarded",
53748- atomic_read(&sbi->s_mb_preallocated),
53749- atomic_read(&sbi->s_mb_discarded));
53750+ atomic_read_unchecked(&sbi->s_mb_preallocated),
53751+ atomic_read_unchecked(&sbi->s_mb_discarded));
53752 }
53753
53754 free_percpu(sbi->s_locality_groups);
53755@@ -3150,16 +3150,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
53756 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
53757
53758 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
53759- atomic_inc(&sbi->s_bal_reqs);
53760- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
53761+ atomic_inc_unchecked(&sbi->s_bal_reqs);
53762+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
53763 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
53764- atomic_inc(&sbi->s_bal_success);
53765- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
53766+ atomic_inc_unchecked(&sbi->s_bal_success);
53767+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
53768 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
53769 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
53770- atomic_inc(&sbi->s_bal_goals);
53771+ atomic_inc_unchecked(&sbi->s_bal_goals);
53772 if (ac->ac_found > sbi->s_mb_max_to_scan)
53773- atomic_inc(&sbi->s_bal_breaks);
53774+ atomic_inc_unchecked(&sbi->s_bal_breaks);
53775 }
53776
53777 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
53778@@ -3559,7 +3559,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
53779 trace_ext4_mb_new_inode_pa(ac, pa);
53780
53781 ext4_mb_use_inode_pa(ac, pa);
53782- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
53783+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
53784
53785 ei = EXT4_I(ac->ac_inode);
53786 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
53787@@ -3619,7 +3619,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
53788 trace_ext4_mb_new_group_pa(ac, pa);
53789
53790 ext4_mb_use_group_pa(ac, pa);
53791- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
53792+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
53793
53794 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
53795 lg = ac->ac_lg;
53796@@ -3708,7 +3708,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
53797 * from the bitmap and continue.
53798 */
53799 }
53800- atomic_add(free, &sbi->s_mb_discarded);
53801+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
53802
53803 return err;
53804 }
53805@@ -3726,7 +3726,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
53806 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
53807 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
53808 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
53809- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
53810+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
53811 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
53812
53813 return 0;
53814diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
53815index 214461e..3614c89 100644
53816--- a/fs/ext4/mmp.c
53817+++ b/fs/ext4/mmp.c
53818@@ -113,7 +113,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
53819 void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
53820 const char *function, unsigned int line, const char *msg)
53821 {
53822- __ext4_warning(sb, function, line, msg);
53823+ __ext4_warning(sb, function, line, "%s", msg);
53824 __ext4_warning(sb, function, line,
53825 "MMP failure info: last update time: %llu, last update "
53826 "node: %s, last update device: %s\n",
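
The mmp.c one-liner is the canonical format-string fix: msg arrives from the caller, and passing it where a format is expected lets any stray '%' in it be parsed as a conversion that reads garbage varargs. Routing it through a constant "%s" makes it inert data. The same fix in miniature:

#include <stdio.h>

static void warn(const char *msg)
{
	/* constant format, message passed as data: the hunk's fix */
	printf("%s\n", msg);
	/* printf(msg) here would instead interpret any '%' in msg as a
	 * conversion and read nonexistent arguments off the stack */
}

int main(void)
{
	warn("disk 100%s full");	/* hostile '%' travels harmlessly */
	return 0;
}
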
53827diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
53828index 49d3c01..9579efd 100644
53829--- a/fs/ext4/resize.c
53830+++ b/fs/ext4/resize.c
53831@@ -79,12 +79,20 @@ static int verify_group_input(struct super_block *sb,
53832 ext4_fsblk_t end = start + input->blocks_count;
53833 ext4_group_t group = input->group;
53834 ext4_fsblk_t itend = input->inode_table + sbi->s_itb_per_group;
53835- unsigned overhead = ext4_group_overhead_blocks(sb, group);
53836- ext4_fsblk_t metaend = start + overhead;
53837+ unsigned overhead;
53838+ ext4_fsblk_t metaend;
53839 struct buffer_head *bh = NULL;
53840 ext4_grpblk_t free_blocks_count, offset;
53841 int err = -EINVAL;
53842
53843+ if (group != sbi->s_groups_count) {
53844+ ext4_warning(sb, "Cannot add at group %u (only %u groups)",
53845+ input->group, sbi->s_groups_count);
53846+ return -EINVAL;
53847+ }
53848+
53849+ overhead = ext4_group_overhead_blocks(sb, group);
53850+ metaend = start + overhead;
53851 input->free_blocks_count = free_blocks_count =
53852 input->blocks_count - 2 - overhead - sbi->s_itb_per_group;
53853
53854@@ -96,10 +104,7 @@ static int verify_group_input(struct super_block *sb,
53855 free_blocks_count, input->reserved_blocks);
53856
53857 ext4_get_group_no_and_offset(sb, start, NULL, &offset);
53858- if (group != sbi->s_groups_count)
53859- ext4_warning(sb, "Cannot add at group %u (only %u groups)",
53860- input->group, sbi->s_groups_count);
53861- else if (offset != 0)
53862+ if (offset != 0)
53863 ext4_warning(sb, "Last group not full");
53864 else if (input->reserved_blocks > input->blocks_count / 5)
53865 ext4_warning(sb, "Reserved blocks too high (%u)",
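
The resize.c reshuffle is validate-before-use: the old code computed ext4_group_overhead_blocks(sb, group) first and only warned about a bad group number afterwards, so the helper could run with an out-of-range group. The patch hoists the check (new groups may only be appended, hence group != s_groups_count is an error) above any computation that consumes the value. The shape, reduced to a range check over a hypothetical table:

#include <stdio.h>

#define NGROUPS 8u

static const unsigned overhead_table[NGROUPS] = { 2, 2, 3, 2, 2, 3, 2, 2 };

static int verify_group_input(unsigned group, unsigned *overhead)
{
	if (group >= NGROUPS) {
		fprintf(stderr, "cannot add at group %u (only %u groups)\n",
			group, NGROUPS);
		return -1;	/* reject before indexing anything */
	}
	*overhead = overhead_table[group];
	return 0;
}

int main(void)
{
	unsigned oh;

	printf("group 3: %d\n", verify_group_input(3, &oh));
	printf("group 9: %d\n", verify_group_input(9, &oh));
	return 0;
}
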
53866diff --git a/fs/ext4/super.c b/fs/ext4/super.c
53867index 3f7c39e..227f24f 100644
53868--- a/fs/ext4/super.c
53869+++ b/fs/ext4/super.c
53870@@ -1236,7 +1236,7 @@ static ext4_fsblk_t get_sb_block(void **data)
53871 }
53872
53873 #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
53874-static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
53875+static const char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
53876 "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
53877
53878 #ifdef CONFIG_QUOTA
53879@@ -2372,7 +2372,7 @@ struct ext4_attr {
53880 ssize_t (*store)(struct ext4_attr *, struct ext4_sb_info *,
53881 const char *, size_t);
53882 int offset;
53883-};
53884+} __do_const;
53885
53886 static int parse_strtoull(const char *buf,
53887 unsigned long long max, unsigned long long *value)
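
__do_const in the super.c hunk is a marker consumed by grsecurity's constify gcc plugin: structures that are effectively ops tables (data plus function pointers, written once) get moved to read-only memory so an attacker cannot redirect the pointers at runtime. Plain C can approximate the effect by declaring instances const. A sketch, with invented names:

#include <stdio.h>

struct attr_ops {
	const char *name;
	long (*show)(void);
};

static long show_stub(void)
{
	return 42;
}

/* const instance lands in .rodata; .show cannot be rewritten in place */
static const struct attr_ops demo_attr = { "demo", show_stub };

int main(void)
{
	printf("%s = %ld\n", demo_attr.name, demo_attr.show());
	return 0;
}
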
53888diff --git a/fs/fcntl.c b/fs/fcntl.c
53889index 6599222..e7bf0de 100644
53890--- a/fs/fcntl.c
53891+++ b/fs/fcntl.c
53892@@ -107,6 +107,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
53893 if (err)
53894 return err;
53895
53896+ if (gr_handle_chroot_fowner(pid, type))
53897+ return -ENOENT;
53898+ if (gr_check_protected_task_fowner(pid, type))
53899+ return -EACCES;
53900+
53901 f_modown(filp, pid, type, force);
53902 return 0;
53903 }
53904diff --git a/fs/fhandle.c b/fs/fhandle.c
53905index 999ff5c..41f4109 100644
53906--- a/fs/fhandle.c
53907+++ b/fs/fhandle.c
53908@@ -67,8 +67,7 @@ static long do_sys_name_to_handle(struct path *path,
53909 } else
53910 retval = 0;
53911 /* copy the mount id */
53912- if (copy_to_user(mnt_id, &real_mount(path->mnt)->mnt_id,
53913- sizeof(*mnt_id)) ||
53914+ if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) ||
53915 copy_to_user(ufh, handle,
53916 sizeof(struct file_handle) + handle_bytes))
53917 retval = -EFAULT;
53918diff --git a/fs/file.c b/fs/file.c
53919index 4a78f98..9447397 100644
53920--- a/fs/file.c
53921+++ b/fs/file.c
53922@@ -16,6 +16,7 @@
53923 #include <linux/slab.h>
53924 #include <linux/vmalloc.h>
53925 #include <linux/file.h>
53926+#include <linux/security.h>
53927 #include <linux/fdtable.h>
53928 #include <linux/bitops.h>
53929 #include <linux/interrupt.h>
53930@@ -828,6 +829,7 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
53931 if (!file)
53932 return __close_fd(files, fd);
53933
53934+ gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
53935 if (fd >= rlimit(RLIMIT_NOFILE))
53936 return -EBADF;
53937
53938@@ -854,6 +856,7 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
53939 if (unlikely(oldfd == newfd))
53940 return -EINVAL;
53941
53942+ gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
53943 if (newfd >= rlimit(RLIMIT_NOFILE))
53944 return -EBADF;
53945
53946@@ -909,6 +912,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
53947 int f_dupfd(unsigned int from, struct file *file, unsigned flags)
53948 {
53949 int err;
53950+ gr_learn_resource(current, RLIMIT_NOFILE, from, 0);
53951 if (from >= rlimit(RLIMIT_NOFILE))
53952 return -EINVAL;
53953 err = alloc_fd(from, flags);
53954diff --git a/fs/filesystems.c b/fs/filesystems.c
53955index 92567d9..fcd8cbf 100644
53956--- a/fs/filesystems.c
53957+++ b/fs/filesystems.c
53958@@ -273,7 +273,11 @@ struct file_system_type *get_fs_type(const char *name)
53959 int len = dot ? dot - name : strlen(name);
53960
53961 fs = __get_fs_type(name, len);
53962+#ifdef CONFIG_GRKERNSEC_MODHARDEN
53963+ if (!fs && (___request_module(true, "grsec_modharden_fs", "fs-%.*s", len, name) == 0))
53964+#else
53965 if (!fs && (request_module("fs-%.*s", len, name) == 0))
53966+#endif
53967 fs = __get_fs_type(name, len);
53968
53969 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
53970diff --git a/fs/fs_struct.c b/fs/fs_struct.c
53971index d8ac61d..79a36f0 100644
53972--- a/fs/fs_struct.c
53973+++ b/fs/fs_struct.c
53974@@ -4,6 +4,7 @@
53975 #include <linux/path.h>
53976 #include <linux/slab.h>
53977 #include <linux/fs_struct.h>
53978+#include <linux/grsecurity.h>
53979 #include "internal.h"
53980
53981 /*
53982@@ -19,6 +20,7 @@ void set_fs_root(struct fs_struct *fs, const struct path *path)
53983 write_seqcount_begin(&fs->seq);
53984 old_root = fs->root;
53985 fs->root = *path;
53986+ gr_set_chroot_entries(current, path);
53987 write_seqcount_end(&fs->seq);
53988 spin_unlock(&fs->lock);
53989 if (old_root.dentry)
53990@@ -67,6 +69,10 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
53991 int hits = 0;
53992 spin_lock(&fs->lock);
53993 write_seqcount_begin(&fs->seq);
53994+ /* this root replacement is only done by pivot_root,
53995+ leave grsec's chroot tagging alone for this task
53996+ so that a pivoted root isn't treated as a chroot
53997+ */
53998 hits += replace_path(&fs->root, old_root, new_root);
53999 hits += replace_path(&fs->pwd, old_root, new_root);
54000 write_seqcount_end(&fs->seq);
54001@@ -99,7 +105,8 @@ void exit_fs(struct task_struct *tsk)
54002 task_lock(tsk);
54003 spin_lock(&fs->lock);
54004 tsk->fs = NULL;
54005- kill = !--fs->users;
54006+ gr_clear_chroot_entries(tsk);
54007+ kill = !atomic_dec_return(&fs->users);
54008 spin_unlock(&fs->lock);
54009 task_unlock(tsk);
54010 if (kill)
54011@@ -112,7 +119,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
54012 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
54013 /* We don't need to lock fs - think why ;-) */
54014 if (fs) {
54015- fs->users = 1;
54016+ atomic_set(&fs->users, 1);
54017 fs->in_exec = 0;
54018 spin_lock_init(&fs->lock);
54019 seqcount_init(&fs->seq);
54020@@ -121,6 +128,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
54021 spin_lock(&old->lock);
54022 fs->root = old->root;
54023 path_get(&fs->root);
54024+ /* instead of calling gr_set_chroot_entries here,
54025+ we call it from every caller of this function
54026+ */
54027 fs->pwd = old->pwd;
54028 path_get(&fs->pwd);
54029 spin_unlock(&old->lock);
54030@@ -139,8 +149,9 @@ int unshare_fs_struct(void)
54031
54032 task_lock(current);
54033 spin_lock(&fs->lock);
54034- kill = !--fs->users;
54035+ kill = !atomic_dec_return(&fs->users);
54036 current->fs = new_fs;
54037+ gr_set_chroot_entries(current, &new_fs->root);
54038 spin_unlock(&fs->lock);
54039 task_unlock(current);
54040
54041@@ -153,13 +164,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
54042
54043 int current_umask(void)
54044 {
54045- return current->fs->umask;
54046+ return current->fs->umask | gr_acl_umask();
54047 }
54048 EXPORT_SYMBOL(current_umask);
54049
54050 /* to be mentioned only in INIT_TASK */
54051 struct fs_struct init_fs = {
54052- .users = 1,
54053+ .users = ATOMIC_INIT(1),
54054 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
54055 .seq = SEQCNT_ZERO,
54056 .umask = 0022,
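
fs_struct.users is converted from a plain int guarded by fs->lock to an atomic_t, so the drop-to-zero test in exit_fs() and unshare_fs_struct() no longer depends on the spinlock alone while the grsec chroot bookkeeping runs alongside it. The refcount shape in a userspace C11 analogue:

#include <stdatomic.h>
#include <stdio.h>

struct fs_like { atomic_int users; };

static void put_fs(struct fs_like *fs)
{
        /* atomic_dec_return() analogue: fetch_sub returns the old
         * value, so "== 1" means we just dropped the last reference */
        if (atomic_fetch_sub(&fs->users, 1) == 1)
                puts("last user gone: free the struct");
}

int main(void)
{
        struct fs_like fs = { .users = 2 };
        put_fs(&fs);    /* one user remains */
        put_fs(&fs);    /* prints the free message */
        return 0;
}
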
54057diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
54058index e2cba1f..17a25bb 100644
54059--- a/fs/fscache/cookie.c
54060+++ b/fs/fscache/cookie.c
54061@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
54062 parent ? (char *) parent->def->name : "<no-parent>",
54063 def->name, netfs_data);
54064
54065- fscache_stat(&fscache_n_acquires);
54066+ fscache_stat_unchecked(&fscache_n_acquires);
54067
54068 /* if there's no parent cookie, then we don't create one here either */
54069 if (!parent) {
54070- fscache_stat(&fscache_n_acquires_null);
54071+ fscache_stat_unchecked(&fscache_n_acquires_null);
54072 _leave(" [no parent]");
54073 return NULL;
54074 }
54075@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
54076 /* allocate and initialise a cookie */
54077 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
54078 if (!cookie) {
54079- fscache_stat(&fscache_n_acquires_oom);
54080+ fscache_stat_unchecked(&fscache_n_acquires_oom);
54081 _leave(" [ENOMEM]");
54082 return NULL;
54083 }
54084@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
54085
54086 switch (cookie->def->type) {
54087 case FSCACHE_COOKIE_TYPE_INDEX:
54088- fscache_stat(&fscache_n_cookie_index);
54089+ fscache_stat_unchecked(&fscache_n_cookie_index);
54090 break;
54091 case FSCACHE_COOKIE_TYPE_DATAFILE:
54092- fscache_stat(&fscache_n_cookie_data);
54093+ fscache_stat_unchecked(&fscache_n_cookie_data);
54094 break;
54095 default:
54096- fscache_stat(&fscache_n_cookie_special);
54097+ fscache_stat_unchecked(&fscache_n_cookie_special);
54098 break;
54099 }
54100
54101@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
54102 if (fscache_acquire_non_index_cookie(cookie) < 0) {
54103 atomic_dec(&parent->n_children);
54104 __fscache_cookie_put(cookie);
54105- fscache_stat(&fscache_n_acquires_nobufs);
54106+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
54107 _leave(" = NULL");
54108 return NULL;
54109 }
54110 }
54111
54112- fscache_stat(&fscache_n_acquires_ok);
54113+ fscache_stat_unchecked(&fscache_n_acquires_ok);
54114 _leave(" = %p", cookie);
54115 return cookie;
54116 }
54117@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
54118 cache = fscache_select_cache_for_object(cookie->parent);
54119 if (!cache) {
54120 up_read(&fscache_addremove_sem);
54121- fscache_stat(&fscache_n_acquires_no_cache);
54122+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
54123 _leave(" = -ENOMEDIUM [no cache]");
54124 return -ENOMEDIUM;
54125 }
54126@@ -255,12 +255,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
54127 object = cache->ops->alloc_object(cache, cookie);
54128 fscache_stat_d(&fscache_n_cop_alloc_object);
54129 if (IS_ERR(object)) {
54130- fscache_stat(&fscache_n_object_no_alloc);
54131+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
54132 ret = PTR_ERR(object);
54133 goto error;
54134 }
54135
54136- fscache_stat(&fscache_n_object_alloc);
54137+ fscache_stat_unchecked(&fscache_n_object_alloc);
54138
54139 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
54140
54141@@ -376,7 +376,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie)
54142
54143 _enter("{%s}", cookie->def->name);
54144
54145- fscache_stat(&fscache_n_invalidates);
54146+ fscache_stat_unchecked(&fscache_n_invalidates);
54147
54148 /* Only permit invalidation of data files. Invalidating an index will
54149 * require the caller to release all its attachments to the tree rooted
54150@@ -434,10 +434,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
54151 {
54152 struct fscache_object *object;
54153
54154- fscache_stat(&fscache_n_updates);
54155+ fscache_stat_unchecked(&fscache_n_updates);
54156
54157 if (!cookie) {
54158- fscache_stat(&fscache_n_updates_null);
54159+ fscache_stat_unchecked(&fscache_n_updates_null);
54160 _leave(" [no cookie]");
54161 return;
54162 }
54163@@ -471,12 +471,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
54164 struct fscache_object *object;
54165 unsigned long event;
54166
54167- fscache_stat(&fscache_n_relinquishes);
54168+ fscache_stat_unchecked(&fscache_n_relinquishes);
54169 if (retire)
54170- fscache_stat(&fscache_n_relinquishes_retire);
54171+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
54172
54173 if (!cookie) {
54174- fscache_stat(&fscache_n_relinquishes_null);
54175+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
54176 _leave(" [no cookie]");
54177 return;
54178 }
54179@@ -492,7 +492,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
54180
54181 /* wait for the cookie to finish being instantiated (or to fail) */
54182 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
54183- fscache_stat(&fscache_n_relinquishes_waitcrt);
54184+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
54185 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
54186 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
54187 }
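
atomic_unchecked_t, used in this and every following fscache hunk, is a PaX type: with PAX_REFCOUNT enabled, ordinary atomic_inc() traps on signed overflow so a reference count can never wrap and free an object early, while counters that are pure statistics, where wrapping is harmless, are switched to the _unchecked variants to opt out of the trap. A userspace sketch of the two behaviours (an analogue, not the kernel implementation):

#include <limits.h>
#include <stdio.h>

/* "checked" increment: refuse to wrap, like the PAX_REFCOUNT trap */
static int checked_inc(int *v)
{
        if (*v == INT_MAX)
                return -1;      /* would overflow: reject */
        (*v)++;
        return 0;
}

/* "unchecked" increment: statistics may wrap, which is well defined
 * for unsigned arithmetic and harmless for a counter */
static void unchecked_inc(unsigned *v) { (*v)++; }

int main(void)
{
        int refs = INT_MAX;
        unsigned stats = UINT_MAX;

        if (checked_inc(&refs) < 0)
                puts("refcount overflow blocked");
        unchecked_inc(&stats);
        printf("stats wrapped to %u\n", stats);  /* 0 */
        return 0;
}
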
54188diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
54189index ee38fef..0a326d4 100644
54190--- a/fs/fscache/internal.h
54191+++ b/fs/fscache/internal.h
54192@@ -148,101 +148,101 @@ extern void fscache_proc_cleanup(void);
54193 * stats.c
54194 */
54195 #ifdef CONFIG_FSCACHE_STATS
54196-extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
54197-extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
54198+extern atomic_unchecked_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
54199+extern atomic_unchecked_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
54200
54201-extern atomic_t fscache_n_op_pend;
54202-extern atomic_t fscache_n_op_run;
54203-extern atomic_t fscache_n_op_enqueue;
54204-extern atomic_t fscache_n_op_deferred_release;
54205-extern atomic_t fscache_n_op_release;
54206-extern atomic_t fscache_n_op_gc;
54207-extern atomic_t fscache_n_op_cancelled;
54208-extern atomic_t fscache_n_op_rejected;
54209+extern atomic_unchecked_t fscache_n_op_pend;
54210+extern atomic_unchecked_t fscache_n_op_run;
54211+extern atomic_unchecked_t fscache_n_op_enqueue;
54212+extern atomic_unchecked_t fscache_n_op_deferred_release;
54213+extern atomic_unchecked_t fscache_n_op_release;
54214+extern atomic_unchecked_t fscache_n_op_gc;
54215+extern atomic_unchecked_t fscache_n_op_cancelled;
54216+extern atomic_unchecked_t fscache_n_op_rejected;
54217
54218-extern atomic_t fscache_n_attr_changed;
54219-extern atomic_t fscache_n_attr_changed_ok;
54220-extern atomic_t fscache_n_attr_changed_nobufs;
54221-extern atomic_t fscache_n_attr_changed_nomem;
54222-extern atomic_t fscache_n_attr_changed_calls;
54223+extern atomic_unchecked_t fscache_n_attr_changed;
54224+extern atomic_unchecked_t fscache_n_attr_changed_ok;
54225+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
54226+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
54227+extern atomic_unchecked_t fscache_n_attr_changed_calls;
54228
54229-extern atomic_t fscache_n_allocs;
54230-extern atomic_t fscache_n_allocs_ok;
54231-extern atomic_t fscache_n_allocs_wait;
54232-extern atomic_t fscache_n_allocs_nobufs;
54233-extern atomic_t fscache_n_allocs_intr;
54234-extern atomic_t fscache_n_allocs_object_dead;
54235-extern atomic_t fscache_n_alloc_ops;
54236-extern atomic_t fscache_n_alloc_op_waits;
54237+extern atomic_unchecked_t fscache_n_allocs;
54238+extern atomic_unchecked_t fscache_n_allocs_ok;
54239+extern atomic_unchecked_t fscache_n_allocs_wait;
54240+extern atomic_unchecked_t fscache_n_allocs_nobufs;
54241+extern atomic_unchecked_t fscache_n_allocs_intr;
54242+extern atomic_unchecked_t fscache_n_allocs_object_dead;
54243+extern atomic_unchecked_t fscache_n_alloc_ops;
54244+extern atomic_unchecked_t fscache_n_alloc_op_waits;
54245
54246-extern atomic_t fscache_n_retrievals;
54247-extern atomic_t fscache_n_retrievals_ok;
54248-extern atomic_t fscache_n_retrievals_wait;
54249-extern atomic_t fscache_n_retrievals_nodata;
54250-extern atomic_t fscache_n_retrievals_nobufs;
54251-extern atomic_t fscache_n_retrievals_intr;
54252-extern atomic_t fscache_n_retrievals_nomem;
54253-extern atomic_t fscache_n_retrievals_object_dead;
54254-extern atomic_t fscache_n_retrieval_ops;
54255-extern atomic_t fscache_n_retrieval_op_waits;
54256+extern atomic_unchecked_t fscache_n_retrievals;
54257+extern atomic_unchecked_t fscache_n_retrievals_ok;
54258+extern atomic_unchecked_t fscache_n_retrievals_wait;
54259+extern atomic_unchecked_t fscache_n_retrievals_nodata;
54260+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
54261+extern atomic_unchecked_t fscache_n_retrievals_intr;
54262+extern atomic_unchecked_t fscache_n_retrievals_nomem;
54263+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
54264+extern atomic_unchecked_t fscache_n_retrieval_ops;
54265+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
54266
54267-extern atomic_t fscache_n_stores;
54268-extern atomic_t fscache_n_stores_ok;
54269-extern atomic_t fscache_n_stores_again;
54270-extern atomic_t fscache_n_stores_nobufs;
54271-extern atomic_t fscache_n_stores_oom;
54272-extern atomic_t fscache_n_store_ops;
54273-extern atomic_t fscache_n_store_calls;
54274-extern atomic_t fscache_n_store_pages;
54275-extern atomic_t fscache_n_store_radix_deletes;
54276-extern atomic_t fscache_n_store_pages_over_limit;
54277+extern atomic_unchecked_t fscache_n_stores;
54278+extern atomic_unchecked_t fscache_n_stores_ok;
54279+extern atomic_unchecked_t fscache_n_stores_again;
54280+extern atomic_unchecked_t fscache_n_stores_nobufs;
54281+extern atomic_unchecked_t fscache_n_stores_oom;
54282+extern atomic_unchecked_t fscache_n_store_ops;
54283+extern atomic_unchecked_t fscache_n_store_calls;
54284+extern atomic_unchecked_t fscache_n_store_pages;
54285+extern atomic_unchecked_t fscache_n_store_radix_deletes;
54286+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
54287
54288-extern atomic_t fscache_n_store_vmscan_not_storing;
54289-extern atomic_t fscache_n_store_vmscan_gone;
54290-extern atomic_t fscache_n_store_vmscan_busy;
54291-extern atomic_t fscache_n_store_vmscan_cancelled;
54292-extern atomic_t fscache_n_store_vmscan_wait;
54293+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
54294+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
54295+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
54296+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
54297+extern atomic_unchecked_t fscache_n_store_vmscan_wait;
54298
54299-extern atomic_t fscache_n_marks;
54300-extern atomic_t fscache_n_uncaches;
54301+extern atomic_unchecked_t fscache_n_marks;
54302+extern atomic_unchecked_t fscache_n_uncaches;
54303
54304-extern atomic_t fscache_n_acquires;
54305-extern atomic_t fscache_n_acquires_null;
54306-extern atomic_t fscache_n_acquires_no_cache;
54307-extern atomic_t fscache_n_acquires_ok;
54308-extern atomic_t fscache_n_acquires_nobufs;
54309-extern atomic_t fscache_n_acquires_oom;
54310+extern atomic_unchecked_t fscache_n_acquires;
54311+extern atomic_unchecked_t fscache_n_acquires_null;
54312+extern atomic_unchecked_t fscache_n_acquires_no_cache;
54313+extern atomic_unchecked_t fscache_n_acquires_ok;
54314+extern atomic_unchecked_t fscache_n_acquires_nobufs;
54315+extern atomic_unchecked_t fscache_n_acquires_oom;
54316
54317-extern atomic_t fscache_n_invalidates;
54318-extern atomic_t fscache_n_invalidates_run;
54319+extern atomic_unchecked_t fscache_n_invalidates;
54320+extern atomic_unchecked_t fscache_n_invalidates_run;
54321
54322-extern atomic_t fscache_n_updates;
54323-extern atomic_t fscache_n_updates_null;
54324-extern atomic_t fscache_n_updates_run;
54325+extern atomic_unchecked_t fscache_n_updates;
54326+extern atomic_unchecked_t fscache_n_updates_null;
54327+extern atomic_unchecked_t fscache_n_updates_run;
54328
54329-extern atomic_t fscache_n_relinquishes;
54330-extern atomic_t fscache_n_relinquishes_null;
54331-extern atomic_t fscache_n_relinquishes_waitcrt;
54332-extern atomic_t fscache_n_relinquishes_retire;
54333+extern atomic_unchecked_t fscache_n_relinquishes;
54334+extern atomic_unchecked_t fscache_n_relinquishes_null;
54335+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
54336+extern atomic_unchecked_t fscache_n_relinquishes_retire;
54337
54338-extern atomic_t fscache_n_cookie_index;
54339-extern atomic_t fscache_n_cookie_data;
54340-extern atomic_t fscache_n_cookie_special;
54341+extern atomic_unchecked_t fscache_n_cookie_index;
54342+extern atomic_unchecked_t fscache_n_cookie_data;
54343+extern atomic_unchecked_t fscache_n_cookie_special;
54344
54345-extern atomic_t fscache_n_object_alloc;
54346-extern atomic_t fscache_n_object_no_alloc;
54347-extern atomic_t fscache_n_object_lookups;
54348-extern atomic_t fscache_n_object_lookups_negative;
54349-extern atomic_t fscache_n_object_lookups_positive;
54350-extern atomic_t fscache_n_object_lookups_timed_out;
54351-extern atomic_t fscache_n_object_created;
54352-extern atomic_t fscache_n_object_avail;
54353-extern atomic_t fscache_n_object_dead;
54354+extern atomic_unchecked_t fscache_n_object_alloc;
54355+extern atomic_unchecked_t fscache_n_object_no_alloc;
54356+extern atomic_unchecked_t fscache_n_object_lookups;
54357+extern atomic_unchecked_t fscache_n_object_lookups_negative;
54358+extern atomic_unchecked_t fscache_n_object_lookups_positive;
54359+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
54360+extern atomic_unchecked_t fscache_n_object_created;
54361+extern atomic_unchecked_t fscache_n_object_avail;
54362+extern atomic_unchecked_t fscache_n_object_dead;
54363
54364-extern atomic_t fscache_n_checkaux_none;
54365-extern atomic_t fscache_n_checkaux_okay;
54366-extern atomic_t fscache_n_checkaux_update;
54367-extern atomic_t fscache_n_checkaux_obsolete;
54368+extern atomic_unchecked_t fscache_n_checkaux_none;
54369+extern atomic_unchecked_t fscache_n_checkaux_okay;
54370+extern atomic_unchecked_t fscache_n_checkaux_update;
54371+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
54372
54373 extern atomic_t fscache_n_cop_alloc_object;
54374 extern atomic_t fscache_n_cop_lookup_object;
54375@@ -267,6 +267,11 @@ static inline void fscache_stat(atomic_t *stat)
54376 atomic_inc(stat);
54377 }
54378
54379+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
54380+{
54381+ atomic_inc_unchecked(stat);
54382+}
54383+
54384 static inline void fscache_stat_d(atomic_t *stat)
54385 {
54386 atomic_dec(stat);
54387@@ -279,6 +284,7 @@ extern const struct file_operations fscache_stats_fops;
54388
54389 #define __fscache_stat(stat) (NULL)
54390 #define fscache_stat(stat) do {} while (0)
54391+#define fscache_stat_unchecked(stat) do {} while (0)
54392 #define fscache_stat_d(stat) do {} while (0)
54393 #endif
54394
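
Note the CONFIG_FSCACHE_STATS=n fallback above: fscache_stat_unchecked() gets the same do {} while (0) stub as its siblings, so every call site compiles away yet still parses as a single statement. The idiom in isolation:

#include <stdio.h>

#define STATS_ENABLED 0

#if STATS_ENABLED
#define count_stat(s) ((*(s))++)
#else
#define count_stat(s) do {} while (0)   /* call vanishes, still one statement */
#endif

int main(void)
{
        unsigned long n = 0;

        if (n == 0)
                count_stat(&n);         /* safe in an unbraced if either way */
        printf("n=%lu\n", n);
        return 0;
}
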
54395diff --git a/fs/fscache/object.c b/fs/fscache/object.c
54396index 50d41c1..10ee117 100644
54397--- a/fs/fscache/object.c
54398+++ b/fs/fscache/object.c
54399@@ -143,7 +143,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
54400 /* Invalidate an object on disk */
54401 case FSCACHE_OBJECT_INVALIDATING:
54402 clear_bit(FSCACHE_OBJECT_EV_INVALIDATE, &object->events);
54403- fscache_stat(&fscache_n_invalidates_run);
54404+ fscache_stat_unchecked(&fscache_n_invalidates_run);
54405 fscache_stat(&fscache_n_cop_invalidate_object);
54406 fscache_invalidate_object(object);
54407 fscache_stat_d(&fscache_n_cop_invalidate_object);
54408@@ -153,7 +153,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
54409 /* update the object metadata on disk */
54410 case FSCACHE_OBJECT_UPDATING:
54411 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
54412- fscache_stat(&fscache_n_updates_run);
54413+ fscache_stat_unchecked(&fscache_n_updates_run);
54414 fscache_stat(&fscache_n_cop_update_object);
54415 object->cache->ops->update_object(object);
54416 fscache_stat_d(&fscache_n_cop_update_object);
54417@@ -242,7 +242,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
54418 spin_lock(&object->lock);
54419 object->state = FSCACHE_OBJECT_DEAD;
54420 spin_unlock(&object->lock);
54421- fscache_stat(&fscache_n_object_dead);
54422+ fscache_stat_unchecked(&fscache_n_object_dead);
54423 goto terminal_transit;
54424
54425 /* handle the parent cache of this object being withdrawn from
54426@@ -257,7 +257,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
54427 spin_lock(&object->lock);
54428 object->state = FSCACHE_OBJECT_DEAD;
54429 spin_unlock(&object->lock);
54430- fscache_stat(&fscache_n_object_dead);
54431+ fscache_stat_unchecked(&fscache_n_object_dead);
54432 goto terminal_transit;
54433
54434 /* complain about the object being woken up once it is
54435@@ -495,7 +495,7 @@ static void fscache_lookup_object(struct fscache_object *object)
54436 parent->cookie->def->name, cookie->def->name,
54437 object->cache->tag->name);
54438
54439- fscache_stat(&fscache_n_object_lookups);
54440+ fscache_stat_unchecked(&fscache_n_object_lookups);
54441 fscache_stat(&fscache_n_cop_lookup_object);
54442 ret = object->cache->ops->lookup_object(object);
54443 fscache_stat_d(&fscache_n_cop_lookup_object);
54444@@ -506,7 +506,7 @@ static void fscache_lookup_object(struct fscache_object *object)
54445 if (ret == -ETIMEDOUT) {
54446 /* probably stuck behind another object, so move this one to
54447 * the back of the queue */
54448- fscache_stat(&fscache_n_object_lookups_timed_out);
54449+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
54450 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
54451 }
54452
54453@@ -529,7 +529,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
54454
54455 spin_lock(&object->lock);
54456 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
54457- fscache_stat(&fscache_n_object_lookups_negative);
54458+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
54459
54460 /* transit here to allow write requests to begin stacking up
54461 * and read requests to begin returning ENODATA */
54462@@ -575,7 +575,7 @@ void fscache_obtained_object(struct fscache_object *object)
54463 * result, in which case there may be data available */
54464 spin_lock(&object->lock);
54465 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
54466- fscache_stat(&fscache_n_object_lookups_positive);
54467+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
54468
54469 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
54470
54471@@ -589,7 +589,7 @@ void fscache_obtained_object(struct fscache_object *object)
54472 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
54473 } else {
54474 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
54475- fscache_stat(&fscache_n_object_created);
54476+ fscache_stat_unchecked(&fscache_n_object_created);
54477
54478 object->state = FSCACHE_OBJECT_AVAILABLE;
54479 spin_unlock(&object->lock);
54480@@ -634,7 +634,7 @@ static void fscache_object_available(struct fscache_object *object)
54481 fscache_enqueue_dependents(object);
54482
54483 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
54484- fscache_stat(&fscache_n_object_avail);
54485+ fscache_stat_unchecked(&fscache_n_object_avail);
54486
54487 _leave("");
54488 }
54489@@ -894,7 +894,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
54490 enum fscache_checkaux result;
54491
54492 if (!object->cookie->def->check_aux) {
54493- fscache_stat(&fscache_n_checkaux_none);
54494+ fscache_stat_unchecked(&fscache_n_checkaux_none);
54495 return FSCACHE_CHECKAUX_OKAY;
54496 }
54497
54498@@ -903,17 +903,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
54499 switch (result) {
54500 /* entry okay as is */
54501 case FSCACHE_CHECKAUX_OKAY:
54502- fscache_stat(&fscache_n_checkaux_okay);
54503+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
54504 break;
54505
54506 /* entry requires update */
54507 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
54508- fscache_stat(&fscache_n_checkaux_update);
54509+ fscache_stat_unchecked(&fscache_n_checkaux_update);
54510 break;
54511
54512 /* entry requires deletion */
54513 case FSCACHE_CHECKAUX_OBSOLETE:
54514- fscache_stat(&fscache_n_checkaux_obsolete);
54515+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
54516 break;
54517
54518 default:
54519diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
54520index 762a9ec..2023284 100644
54521--- a/fs/fscache/operation.c
54522+++ b/fs/fscache/operation.c
54523@@ -17,7 +17,7 @@
54524 #include <linux/slab.h>
54525 #include "internal.h"
54526
54527-atomic_t fscache_op_debug_id;
54528+atomic_unchecked_t fscache_op_debug_id;
54529 EXPORT_SYMBOL(fscache_op_debug_id);
54530
54531 /**
54532@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
54533 ASSERTCMP(atomic_read(&op->usage), >, 0);
54534 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
54535
54536- fscache_stat(&fscache_n_op_enqueue);
54537+ fscache_stat_unchecked(&fscache_n_op_enqueue);
54538 switch (op->flags & FSCACHE_OP_TYPE) {
54539 case FSCACHE_OP_ASYNC:
54540 _debug("queue async");
54541@@ -73,7 +73,7 @@ static void fscache_run_op(struct fscache_object *object,
54542 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
54543 if (op->processor)
54544 fscache_enqueue_operation(op);
54545- fscache_stat(&fscache_n_op_run);
54546+ fscache_stat_unchecked(&fscache_n_op_run);
54547 }
54548
54549 /*
54550@@ -105,11 +105,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
54551 if (object->n_in_progress > 0) {
54552 atomic_inc(&op->usage);
54553 list_add_tail(&op->pend_link, &object->pending_ops);
54554- fscache_stat(&fscache_n_op_pend);
54555+ fscache_stat_unchecked(&fscache_n_op_pend);
54556 } else if (!list_empty(&object->pending_ops)) {
54557 atomic_inc(&op->usage);
54558 list_add_tail(&op->pend_link, &object->pending_ops);
54559- fscache_stat(&fscache_n_op_pend);
54560+ fscache_stat_unchecked(&fscache_n_op_pend);
54561 fscache_start_operations(object);
54562 } else {
54563 ASSERTCMP(object->n_in_progress, ==, 0);
54564@@ -125,7 +125,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
54565 object->n_exclusive++; /* reads and writes must wait */
54566 atomic_inc(&op->usage);
54567 list_add_tail(&op->pend_link, &object->pending_ops);
54568- fscache_stat(&fscache_n_op_pend);
54569+ fscache_stat_unchecked(&fscache_n_op_pend);
54570 ret = 0;
54571 } else {
54572 /* If we're in any other state, there must have been an I/O
54573@@ -215,11 +215,11 @@ int fscache_submit_op(struct fscache_object *object,
54574 if (object->n_exclusive > 0) {
54575 atomic_inc(&op->usage);
54576 list_add_tail(&op->pend_link, &object->pending_ops);
54577- fscache_stat(&fscache_n_op_pend);
54578+ fscache_stat_unchecked(&fscache_n_op_pend);
54579 } else if (!list_empty(&object->pending_ops)) {
54580 atomic_inc(&op->usage);
54581 list_add_tail(&op->pend_link, &object->pending_ops);
54582- fscache_stat(&fscache_n_op_pend);
54583+ fscache_stat_unchecked(&fscache_n_op_pend);
54584 fscache_start_operations(object);
54585 } else {
54586 ASSERTCMP(object->n_exclusive, ==, 0);
54587@@ -231,12 +231,12 @@ int fscache_submit_op(struct fscache_object *object,
54588 object->n_ops++;
54589 atomic_inc(&op->usage);
54590 list_add_tail(&op->pend_link, &object->pending_ops);
54591- fscache_stat(&fscache_n_op_pend);
54592+ fscache_stat_unchecked(&fscache_n_op_pend);
54593 ret = 0;
54594 } else if (object->state == FSCACHE_OBJECT_DYING ||
54595 object->state == FSCACHE_OBJECT_LC_DYING ||
54596 object->state == FSCACHE_OBJECT_WITHDRAWING) {
54597- fscache_stat(&fscache_n_op_rejected);
54598+ fscache_stat_unchecked(&fscache_n_op_rejected);
54599 op->state = FSCACHE_OP_ST_CANCELLED;
54600 ret = -ENOBUFS;
54601 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
54602@@ -315,7 +315,7 @@ int fscache_cancel_op(struct fscache_operation *op,
54603 ret = -EBUSY;
54604 if (op->state == FSCACHE_OP_ST_PENDING) {
54605 ASSERT(!list_empty(&op->pend_link));
54606- fscache_stat(&fscache_n_op_cancelled);
54607+ fscache_stat_unchecked(&fscache_n_op_cancelled);
54608 list_del_init(&op->pend_link);
54609 if (do_cancel)
54610 do_cancel(op);
54611@@ -347,7 +347,7 @@ void fscache_cancel_all_ops(struct fscache_object *object)
54612 while (!list_empty(&object->pending_ops)) {
54613 op = list_entry(object->pending_ops.next,
54614 struct fscache_operation, pend_link);
54615- fscache_stat(&fscache_n_op_cancelled);
54616+ fscache_stat_unchecked(&fscache_n_op_cancelled);
54617 list_del_init(&op->pend_link);
54618
54619 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
54620@@ -419,7 +419,7 @@ void fscache_put_operation(struct fscache_operation *op)
54621 op->state, ==, FSCACHE_OP_ST_CANCELLED);
54622 op->state = FSCACHE_OP_ST_DEAD;
54623
54624- fscache_stat(&fscache_n_op_release);
54625+ fscache_stat_unchecked(&fscache_n_op_release);
54626
54627 if (op->release) {
54628 op->release(op);
54629@@ -442,7 +442,7 @@ void fscache_put_operation(struct fscache_operation *op)
54630 * lock, and defer it otherwise */
54631 if (!spin_trylock(&object->lock)) {
54632 _debug("defer put");
54633- fscache_stat(&fscache_n_op_deferred_release);
54634+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
54635
54636 cache = object->cache;
54637 spin_lock(&cache->op_gc_list_lock);
54638@@ -495,7 +495,7 @@ void fscache_operation_gc(struct work_struct *work)
54639
54640 _debug("GC DEFERRED REL OBJ%x OP%x",
54641 object->debug_id, op->debug_id);
54642- fscache_stat(&fscache_n_op_gc);
54643+ fscache_stat_unchecked(&fscache_n_op_gc);
54644
54645 ASSERTCMP(atomic_read(&op->usage), ==, 0);
54646 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);
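
fscache_op_debug_id is an ID dispenser rather than a reference count: atomic_inc_return() hands each operation the next value, and wraparound merely recycles identifiers, which is precisely the case the _unchecked conversion targets. A C11 sketch of the same allocator:

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint op_debug_id;

static unsigned next_debug_id(void)
{
        /* atomic_inc_return() analogue: old value + 1 */
        return atomic_fetch_add(&op_debug_id, 1) + 1;
}

int main(void)
{
        unsigned a = next_debug_id(), b = next_debug_id();
        printf("OP%x OP%x\n", a, b);    /* OP1 OP2 */
        return 0;
}
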
54647diff --git a/fs/fscache/page.c b/fs/fscache/page.c
54648index ff000e5..c44ec6d 100644
54649--- a/fs/fscache/page.c
54650+++ b/fs/fscache/page.c
54651@@ -61,7 +61,7 @@ try_again:
54652 val = radix_tree_lookup(&cookie->stores, page->index);
54653 if (!val) {
54654 rcu_read_unlock();
54655- fscache_stat(&fscache_n_store_vmscan_not_storing);
54656+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
54657 __fscache_uncache_page(cookie, page);
54658 return true;
54659 }
54660@@ -91,11 +91,11 @@ try_again:
54661 spin_unlock(&cookie->stores_lock);
54662
54663 if (xpage) {
54664- fscache_stat(&fscache_n_store_vmscan_cancelled);
54665- fscache_stat(&fscache_n_store_radix_deletes);
54666+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
54667+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
54668 ASSERTCMP(xpage, ==, page);
54669 } else {
54670- fscache_stat(&fscache_n_store_vmscan_gone);
54671+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
54672 }
54673
54674 wake_up_bit(&cookie->flags, 0);
54675@@ -110,11 +110,11 @@ page_busy:
54676 * sleeping on memory allocation, so we may need to impose a timeout
54677 * too. */
54678 if (!(gfp & __GFP_WAIT)) {
54679- fscache_stat(&fscache_n_store_vmscan_busy);
54680+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
54681 return false;
54682 }
54683
54684- fscache_stat(&fscache_n_store_vmscan_wait);
54685+ fscache_stat_unchecked(&fscache_n_store_vmscan_wait);
54686 __fscache_wait_on_page_write(cookie, page);
54687 gfp &= ~__GFP_WAIT;
54688 goto try_again;
54689@@ -140,7 +140,7 @@ static void fscache_end_page_write(struct fscache_object *object,
54690 FSCACHE_COOKIE_STORING_TAG);
54691 if (!radix_tree_tag_get(&cookie->stores, page->index,
54692 FSCACHE_COOKIE_PENDING_TAG)) {
54693- fscache_stat(&fscache_n_store_radix_deletes);
54694+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
54695 xpage = radix_tree_delete(&cookie->stores, page->index);
54696 }
54697 spin_unlock(&cookie->stores_lock);
54698@@ -161,7 +161,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
54699
54700 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
54701
54702- fscache_stat(&fscache_n_attr_changed_calls);
54703+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
54704
54705 if (fscache_object_is_active(object)) {
54706 fscache_stat(&fscache_n_cop_attr_changed);
54707@@ -187,11 +187,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
54708
54709 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
54710
54711- fscache_stat(&fscache_n_attr_changed);
54712+ fscache_stat_unchecked(&fscache_n_attr_changed);
54713
54714 op = kzalloc(sizeof(*op), GFP_KERNEL);
54715 if (!op) {
54716- fscache_stat(&fscache_n_attr_changed_nomem);
54717+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
54718 _leave(" = -ENOMEM");
54719 return -ENOMEM;
54720 }
54721@@ -209,7 +209,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
54722 if (fscache_submit_exclusive_op(object, op) < 0)
54723 goto nobufs;
54724 spin_unlock(&cookie->lock);
54725- fscache_stat(&fscache_n_attr_changed_ok);
54726+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
54727 fscache_put_operation(op);
54728 _leave(" = 0");
54729 return 0;
54730@@ -217,7 +217,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
54731 nobufs:
54732 spin_unlock(&cookie->lock);
54733 kfree(op);
54734- fscache_stat(&fscache_n_attr_changed_nobufs);
54735+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
54736 _leave(" = %d", -ENOBUFS);
54737 return -ENOBUFS;
54738 }
54739@@ -255,7 +255,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
54740 /* allocate a retrieval operation and attempt to submit it */
54741 op = kzalloc(sizeof(*op), GFP_NOIO);
54742 if (!op) {
54743- fscache_stat(&fscache_n_retrievals_nomem);
54744+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
54745 return NULL;
54746 }
54747
54748@@ -283,13 +283,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
54749 return 0;
54750 }
54751
54752- fscache_stat(&fscache_n_retrievals_wait);
54753+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
54754
54755 jif = jiffies;
54756 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
54757 fscache_wait_bit_interruptible,
54758 TASK_INTERRUPTIBLE) != 0) {
54759- fscache_stat(&fscache_n_retrievals_intr);
54760+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
54761 _leave(" = -ERESTARTSYS");
54762 return -ERESTARTSYS;
54763 }
54764@@ -318,8 +318,8 @@ static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
54765 */
54766 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
54767 struct fscache_retrieval *op,
54768- atomic_t *stat_op_waits,
54769- atomic_t *stat_object_dead)
54770+ atomic_unchecked_t *stat_op_waits,
54771+ atomic_unchecked_t *stat_object_dead)
54772 {
54773 int ret;
54774
54775@@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
54776 goto check_if_dead;
54777
54778 _debug(">>> WT");
54779- fscache_stat(stat_op_waits);
54780+ fscache_stat_unchecked(stat_op_waits);
54781 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
54782 fscache_wait_bit_interruptible,
54783 TASK_INTERRUPTIBLE) != 0) {
54784@@ -344,14 +344,14 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
54785
54786 check_if_dead:
54787 if (op->op.state == FSCACHE_OP_ST_CANCELLED) {
54788- fscache_stat(stat_object_dead);
54789+ fscache_stat_unchecked(stat_object_dead);
54790 _leave(" = -ENOBUFS [cancelled]");
54791 return -ENOBUFS;
54792 }
54793 if (unlikely(fscache_object_is_dead(object))) {
54794 pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->op.state);
54795 fscache_cancel_op(&op->op, fscache_do_cancel_retrieval);
54796- fscache_stat(stat_object_dead);
54797+ fscache_stat_unchecked(stat_object_dead);
54798 return -ENOBUFS;
54799 }
54800 return 0;
54801@@ -378,7 +378,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
54802
54803 _enter("%p,%p,,,", cookie, page);
54804
54805- fscache_stat(&fscache_n_retrievals);
54806+ fscache_stat_unchecked(&fscache_n_retrievals);
54807
54808 if (hlist_empty(&cookie->backing_objects))
54809 goto nobufs;
54810@@ -417,7 +417,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
54811 goto nobufs_unlock_dec;
54812 spin_unlock(&cookie->lock);
54813
54814- fscache_stat(&fscache_n_retrieval_ops);
54815+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
54816
54817 /* pin the netfs read context in case we need to do the actual netfs
54818 * read because we've encountered a cache read failure */
54819@@ -447,15 +447,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
54820
54821 error:
54822 if (ret == -ENOMEM)
54823- fscache_stat(&fscache_n_retrievals_nomem);
54824+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
54825 else if (ret == -ERESTARTSYS)
54826- fscache_stat(&fscache_n_retrievals_intr);
54827+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
54828 else if (ret == -ENODATA)
54829- fscache_stat(&fscache_n_retrievals_nodata);
54830+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
54831 else if (ret < 0)
54832- fscache_stat(&fscache_n_retrievals_nobufs);
54833+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
54834 else
54835- fscache_stat(&fscache_n_retrievals_ok);
54836+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
54837
54838 fscache_put_retrieval(op);
54839 _leave(" = %d", ret);
54840@@ -467,7 +467,7 @@ nobufs_unlock:
54841 spin_unlock(&cookie->lock);
54842 kfree(op);
54843 nobufs:
54844- fscache_stat(&fscache_n_retrievals_nobufs);
54845+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
54846 _leave(" = -ENOBUFS");
54847 return -ENOBUFS;
54848 }
54849@@ -505,7 +505,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
54850
54851 _enter("%p,,%d,,,", cookie, *nr_pages);
54852
54853- fscache_stat(&fscache_n_retrievals);
54854+ fscache_stat_unchecked(&fscache_n_retrievals);
54855
54856 if (hlist_empty(&cookie->backing_objects))
54857 goto nobufs;
54858@@ -541,7 +541,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
54859 goto nobufs_unlock_dec;
54860 spin_unlock(&cookie->lock);
54861
54862- fscache_stat(&fscache_n_retrieval_ops);
54863+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
54864
54865 /* pin the netfs read context in case we need to do the actual netfs
54866 * read because we've encountered a cache read failure */
54867@@ -571,15 +571,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
54868
54869 error:
54870 if (ret == -ENOMEM)
54871- fscache_stat(&fscache_n_retrievals_nomem);
54872+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
54873 else if (ret == -ERESTARTSYS)
54874- fscache_stat(&fscache_n_retrievals_intr);
54875+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
54876 else if (ret == -ENODATA)
54877- fscache_stat(&fscache_n_retrievals_nodata);
54878+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
54879 else if (ret < 0)
54880- fscache_stat(&fscache_n_retrievals_nobufs);
54881+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
54882 else
54883- fscache_stat(&fscache_n_retrievals_ok);
54884+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
54885
54886 fscache_put_retrieval(op);
54887 _leave(" = %d", ret);
54888@@ -591,7 +591,7 @@ nobufs_unlock:
54889 spin_unlock(&cookie->lock);
54890 kfree(op);
54891 nobufs:
54892- fscache_stat(&fscache_n_retrievals_nobufs);
54893+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
54894 _leave(" = -ENOBUFS");
54895 return -ENOBUFS;
54896 }
54897@@ -615,7 +615,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
54898
54899 _enter("%p,%p,,,", cookie, page);
54900
54901- fscache_stat(&fscache_n_allocs);
54902+ fscache_stat_unchecked(&fscache_n_allocs);
54903
54904 if (hlist_empty(&cookie->backing_objects))
54905 goto nobufs;
54906@@ -647,7 +647,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
54907 goto nobufs_unlock;
54908 spin_unlock(&cookie->lock);
54909
54910- fscache_stat(&fscache_n_alloc_ops);
54911+ fscache_stat_unchecked(&fscache_n_alloc_ops);
54912
54913 ret = fscache_wait_for_retrieval_activation(
54914 object, op,
54915@@ -663,11 +663,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
54916
54917 error:
54918 if (ret == -ERESTARTSYS)
54919- fscache_stat(&fscache_n_allocs_intr);
54920+ fscache_stat_unchecked(&fscache_n_allocs_intr);
54921 else if (ret < 0)
54922- fscache_stat(&fscache_n_allocs_nobufs);
54923+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
54924 else
54925- fscache_stat(&fscache_n_allocs_ok);
54926+ fscache_stat_unchecked(&fscache_n_allocs_ok);
54927
54928 fscache_put_retrieval(op);
54929 _leave(" = %d", ret);
54930@@ -677,7 +677,7 @@ nobufs_unlock:
54931 spin_unlock(&cookie->lock);
54932 kfree(op);
54933 nobufs:
54934- fscache_stat(&fscache_n_allocs_nobufs);
54935+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
54936 _leave(" = -ENOBUFS");
54937 return -ENOBUFS;
54938 }
54939@@ -736,7 +736,7 @@ static void fscache_write_op(struct fscache_operation *_op)
54940
54941 spin_lock(&cookie->stores_lock);
54942
54943- fscache_stat(&fscache_n_store_calls);
54944+ fscache_stat_unchecked(&fscache_n_store_calls);
54945
54946 /* find a page to store */
54947 page = NULL;
54948@@ -747,7 +747,7 @@ static void fscache_write_op(struct fscache_operation *_op)
54949 page = results[0];
54950 _debug("gang %d [%lx]", n, page->index);
54951 if (page->index > op->store_limit) {
54952- fscache_stat(&fscache_n_store_pages_over_limit);
54953+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
54954 goto superseded;
54955 }
54956
54957@@ -759,7 +759,7 @@ static void fscache_write_op(struct fscache_operation *_op)
54958 spin_unlock(&cookie->stores_lock);
54959 spin_unlock(&object->lock);
54960
54961- fscache_stat(&fscache_n_store_pages);
54962+ fscache_stat_unchecked(&fscache_n_store_pages);
54963 fscache_stat(&fscache_n_cop_write_page);
54964 ret = object->cache->ops->write_page(op, page);
54965 fscache_stat_d(&fscache_n_cop_write_page);
54966@@ -860,7 +860,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
54967 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
54968 ASSERT(PageFsCache(page));
54969
54970- fscache_stat(&fscache_n_stores);
54971+ fscache_stat_unchecked(&fscache_n_stores);
54972
54973 if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
54974 _leave(" = -ENOBUFS [invalidating]");
54975@@ -916,7 +916,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
54976 spin_unlock(&cookie->stores_lock);
54977 spin_unlock(&object->lock);
54978
54979- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
54980+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
54981 op->store_limit = object->store_limit;
54982
54983 if (fscache_submit_op(object, &op->op) < 0)
54984@@ -924,8 +924,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
54985
54986 spin_unlock(&cookie->lock);
54987 radix_tree_preload_end();
54988- fscache_stat(&fscache_n_store_ops);
54989- fscache_stat(&fscache_n_stores_ok);
54990+ fscache_stat_unchecked(&fscache_n_store_ops);
54991+ fscache_stat_unchecked(&fscache_n_stores_ok);
54992
54993 /* the work queue now carries its own ref on the object */
54994 fscache_put_operation(&op->op);
54995@@ -933,14 +933,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
54996 return 0;
54997
54998 already_queued:
54999- fscache_stat(&fscache_n_stores_again);
55000+ fscache_stat_unchecked(&fscache_n_stores_again);
55001 already_pending:
55002 spin_unlock(&cookie->stores_lock);
55003 spin_unlock(&object->lock);
55004 spin_unlock(&cookie->lock);
55005 radix_tree_preload_end();
55006 kfree(op);
55007- fscache_stat(&fscache_n_stores_ok);
55008+ fscache_stat_unchecked(&fscache_n_stores_ok);
55009 _leave(" = 0");
55010 return 0;
55011
55012@@ -959,14 +959,14 @@ nobufs:
55013 spin_unlock(&cookie->lock);
55014 radix_tree_preload_end();
55015 kfree(op);
55016- fscache_stat(&fscache_n_stores_nobufs);
55017+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
55018 _leave(" = -ENOBUFS");
55019 return -ENOBUFS;
55020
55021 nomem_free:
55022 kfree(op);
55023 nomem:
55024- fscache_stat(&fscache_n_stores_oom);
55025+ fscache_stat_unchecked(&fscache_n_stores_oom);
55026 _leave(" = -ENOMEM");
55027 return -ENOMEM;
55028 }
55029@@ -984,7 +984,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
55030 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
55031 ASSERTCMP(page, !=, NULL);
55032
55033- fscache_stat(&fscache_n_uncaches);
55034+ fscache_stat_unchecked(&fscache_n_uncaches);
55035
55036 /* cache withdrawal may beat us to it */
55037 if (!PageFsCache(page))
55038@@ -1035,7 +1035,7 @@ void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
55039 struct fscache_cookie *cookie = op->op.object->cookie;
55040
55041 #ifdef CONFIG_FSCACHE_STATS
55042- atomic_inc(&fscache_n_marks);
55043+ atomic_inc_unchecked(&fscache_n_marks);
55044 #endif
55045
55046 _debug("- mark %p{%lx}", page, page->index);
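
One shape worth noting in the page.c hunks: the vmscan release path refuses immediately for callers that cannot sleep (no __GFP_WAIT), otherwise waits for the pending write once, strips the flag, and retries. The control flow in a userspace sketch (CAN_WAIT is a stand-in for __GFP_WAIT):

#include <stdbool.h>
#include <stdio.h>

#define CAN_WAIT 0x1    /* stand-in for __GFP_WAIT */

static bool try_release(unsigned gfp, int *write_pending)
{
try_again:
        if (!*write_pending)
                return true;    /* nothing in flight: release the page */
        if (!(gfp & CAN_WAIT))
                return false;   /* caller may not sleep: refuse */
        *write_pending = 0;     /* stand-in for waiting on the write */
        gfp &= ~CAN_WAIT;       /* wait at most once, as the kernel does */
        goto try_again;
}

int main(void)
{
        int pending = 1;
        printf("%d\n", (int)try_release(CAN_WAIT, &pending));  /* 1 */
        return 0;
}
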
55047diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
55048index 40d13c7..ddf52b9 100644
55049--- a/fs/fscache/stats.c
55050+++ b/fs/fscache/stats.c
55051@@ -18,99 +18,99 @@
55052 /*
55053 * operation counters
55054 */
55055-atomic_t fscache_n_op_pend;
55056-atomic_t fscache_n_op_run;
55057-atomic_t fscache_n_op_enqueue;
55058-atomic_t fscache_n_op_requeue;
55059-atomic_t fscache_n_op_deferred_release;
55060-atomic_t fscache_n_op_release;
55061-atomic_t fscache_n_op_gc;
55062-atomic_t fscache_n_op_cancelled;
55063-atomic_t fscache_n_op_rejected;
55064+atomic_unchecked_t fscache_n_op_pend;
55065+atomic_unchecked_t fscache_n_op_run;
55066+atomic_unchecked_t fscache_n_op_enqueue;
55067+atomic_unchecked_t fscache_n_op_requeue;
55068+atomic_unchecked_t fscache_n_op_deferred_release;
55069+atomic_unchecked_t fscache_n_op_release;
55070+atomic_unchecked_t fscache_n_op_gc;
55071+atomic_unchecked_t fscache_n_op_cancelled;
55072+atomic_unchecked_t fscache_n_op_rejected;
55073
55074-atomic_t fscache_n_attr_changed;
55075-atomic_t fscache_n_attr_changed_ok;
55076-atomic_t fscache_n_attr_changed_nobufs;
55077-atomic_t fscache_n_attr_changed_nomem;
55078-atomic_t fscache_n_attr_changed_calls;
55079+atomic_unchecked_t fscache_n_attr_changed;
55080+atomic_unchecked_t fscache_n_attr_changed_ok;
55081+atomic_unchecked_t fscache_n_attr_changed_nobufs;
55082+atomic_unchecked_t fscache_n_attr_changed_nomem;
55083+atomic_unchecked_t fscache_n_attr_changed_calls;
55084
55085-atomic_t fscache_n_allocs;
55086-atomic_t fscache_n_allocs_ok;
55087-atomic_t fscache_n_allocs_wait;
55088-atomic_t fscache_n_allocs_nobufs;
55089-atomic_t fscache_n_allocs_intr;
55090-atomic_t fscache_n_allocs_object_dead;
55091-atomic_t fscache_n_alloc_ops;
55092-atomic_t fscache_n_alloc_op_waits;
55093+atomic_unchecked_t fscache_n_allocs;
55094+atomic_unchecked_t fscache_n_allocs_ok;
55095+atomic_unchecked_t fscache_n_allocs_wait;
55096+atomic_unchecked_t fscache_n_allocs_nobufs;
55097+atomic_unchecked_t fscache_n_allocs_intr;
55098+atomic_unchecked_t fscache_n_allocs_object_dead;
55099+atomic_unchecked_t fscache_n_alloc_ops;
55100+atomic_unchecked_t fscache_n_alloc_op_waits;
55101
55102-atomic_t fscache_n_retrievals;
55103-atomic_t fscache_n_retrievals_ok;
55104-atomic_t fscache_n_retrievals_wait;
55105-atomic_t fscache_n_retrievals_nodata;
55106-atomic_t fscache_n_retrievals_nobufs;
55107-atomic_t fscache_n_retrievals_intr;
55108-atomic_t fscache_n_retrievals_nomem;
55109-atomic_t fscache_n_retrievals_object_dead;
55110-atomic_t fscache_n_retrieval_ops;
55111-atomic_t fscache_n_retrieval_op_waits;
55112+atomic_unchecked_t fscache_n_retrievals;
55113+atomic_unchecked_t fscache_n_retrievals_ok;
55114+atomic_unchecked_t fscache_n_retrievals_wait;
55115+atomic_unchecked_t fscache_n_retrievals_nodata;
55116+atomic_unchecked_t fscache_n_retrievals_nobufs;
55117+atomic_unchecked_t fscache_n_retrievals_intr;
55118+atomic_unchecked_t fscache_n_retrievals_nomem;
55119+atomic_unchecked_t fscache_n_retrievals_object_dead;
55120+atomic_unchecked_t fscache_n_retrieval_ops;
55121+atomic_unchecked_t fscache_n_retrieval_op_waits;
55122
55123-atomic_t fscache_n_stores;
55124-atomic_t fscache_n_stores_ok;
55125-atomic_t fscache_n_stores_again;
55126-atomic_t fscache_n_stores_nobufs;
55127-atomic_t fscache_n_stores_oom;
55128-atomic_t fscache_n_store_ops;
55129-atomic_t fscache_n_store_calls;
55130-atomic_t fscache_n_store_pages;
55131-atomic_t fscache_n_store_radix_deletes;
55132-atomic_t fscache_n_store_pages_over_limit;
55133+atomic_unchecked_t fscache_n_stores;
55134+atomic_unchecked_t fscache_n_stores_ok;
55135+atomic_unchecked_t fscache_n_stores_again;
55136+atomic_unchecked_t fscache_n_stores_nobufs;
55137+atomic_unchecked_t fscache_n_stores_oom;
55138+atomic_unchecked_t fscache_n_store_ops;
55139+atomic_unchecked_t fscache_n_store_calls;
55140+atomic_unchecked_t fscache_n_store_pages;
55141+atomic_unchecked_t fscache_n_store_radix_deletes;
55142+atomic_unchecked_t fscache_n_store_pages_over_limit;
55143
55144-atomic_t fscache_n_store_vmscan_not_storing;
55145-atomic_t fscache_n_store_vmscan_gone;
55146-atomic_t fscache_n_store_vmscan_busy;
55147-atomic_t fscache_n_store_vmscan_cancelled;
55148-atomic_t fscache_n_store_vmscan_wait;
55149+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
55150+atomic_unchecked_t fscache_n_store_vmscan_gone;
55151+atomic_unchecked_t fscache_n_store_vmscan_busy;
55152+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
55153+atomic_unchecked_t fscache_n_store_vmscan_wait;
55154
55155-atomic_t fscache_n_marks;
55156-atomic_t fscache_n_uncaches;
55157+atomic_unchecked_t fscache_n_marks;
55158+atomic_unchecked_t fscache_n_uncaches;
55159
55160-atomic_t fscache_n_acquires;
55161-atomic_t fscache_n_acquires_null;
55162-atomic_t fscache_n_acquires_no_cache;
55163-atomic_t fscache_n_acquires_ok;
55164-atomic_t fscache_n_acquires_nobufs;
55165-atomic_t fscache_n_acquires_oom;
55166+atomic_unchecked_t fscache_n_acquires;
55167+atomic_unchecked_t fscache_n_acquires_null;
55168+atomic_unchecked_t fscache_n_acquires_no_cache;
55169+atomic_unchecked_t fscache_n_acquires_ok;
55170+atomic_unchecked_t fscache_n_acquires_nobufs;
55171+atomic_unchecked_t fscache_n_acquires_oom;
55172
55173-atomic_t fscache_n_invalidates;
55174-atomic_t fscache_n_invalidates_run;
55175+atomic_unchecked_t fscache_n_invalidates;
55176+atomic_unchecked_t fscache_n_invalidates_run;
55177
55178-atomic_t fscache_n_updates;
55179-atomic_t fscache_n_updates_null;
55180-atomic_t fscache_n_updates_run;
55181+atomic_unchecked_t fscache_n_updates;
55182+atomic_unchecked_t fscache_n_updates_null;
55183+atomic_unchecked_t fscache_n_updates_run;
55184
55185-atomic_t fscache_n_relinquishes;
55186-atomic_t fscache_n_relinquishes_null;
55187-atomic_t fscache_n_relinquishes_waitcrt;
55188-atomic_t fscache_n_relinquishes_retire;
55189+atomic_unchecked_t fscache_n_relinquishes;
55190+atomic_unchecked_t fscache_n_relinquishes_null;
55191+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
55192+atomic_unchecked_t fscache_n_relinquishes_retire;
55193
55194-atomic_t fscache_n_cookie_index;
55195-atomic_t fscache_n_cookie_data;
55196-atomic_t fscache_n_cookie_special;
55197+atomic_unchecked_t fscache_n_cookie_index;
55198+atomic_unchecked_t fscache_n_cookie_data;
55199+atomic_unchecked_t fscache_n_cookie_special;
55200
55201-atomic_t fscache_n_object_alloc;
55202-atomic_t fscache_n_object_no_alloc;
55203-atomic_t fscache_n_object_lookups;
55204-atomic_t fscache_n_object_lookups_negative;
55205-atomic_t fscache_n_object_lookups_positive;
55206-atomic_t fscache_n_object_lookups_timed_out;
55207-atomic_t fscache_n_object_created;
55208-atomic_t fscache_n_object_avail;
55209-atomic_t fscache_n_object_dead;
55210+atomic_unchecked_t fscache_n_object_alloc;
55211+atomic_unchecked_t fscache_n_object_no_alloc;
55212+atomic_unchecked_t fscache_n_object_lookups;
55213+atomic_unchecked_t fscache_n_object_lookups_negative;
55214+atomic_unchecked_t fscache_n_object_lookups_positive;
55215+atomic_unchecked_t fscache_n_object_lookups_timed_out;
55216+atomic_unchecked_t fscache_n_object_created;
55217+atomic_unchecked_t fscache_n_object_avail;
55218+atomic_unchecked_t fscache_n_object_dead;
55219
55220-atomic_t fscache_n_checkaux_none;
55221-atomic_t fscache_n_checkaux_okay;
55222-atomic_t fscache_n_checkaux_update;
55223-atomic_t fscache_n_checkaux_obsolete;
55224+atomic_unchecked_t fscache_n_checkaux_none;
55225+atomic_unchecked_t fscache_n_checkaux_okay;
55226+atomic_unchecked_t fscache_n_checkaux_update;
55227+atomic_unchecked_t fscache_n_checkaux_obsolete;
55228
55229 atomic_t fscache_n_cop_alloc_object;
55230 atomic_t fscache_n_cop_lookup_object;
55231@@ -138,118 +138,118 @@ static int fscache_stats_show(struct seq_file *m, void *v)
55232 seq_puts(m, "FS-Cache statistics\n");
55233
55234 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
55235- atomic_read(&fscache_n_cookie_index),
55236- atomic_read(&fscache_n_cookie_data),
55237- atomic_read(&fscache_n_cookie_special));
55238+ atomic_read_unchecked(&fscache_n_cookie_index),
55239+ atomic_read_unchecked(&fscache_n_cookie_data),
55240+ atomic_read_unchecked(&fscache_n_cookie_special));
55241
55242 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
55243- atomic_read(&fscache_n_object_alloc),
55244- atomic_read(&fscache_n_object_no_alloc),
55245- atomic_read(&fscache_n_object_avail),
55246- atomic_read(&fscache_n_object_dead));
55247+ atomic_read_unchecked(&fscache_n_object_alloc),
55248+ atomic_read_unchecked(&fscache_n_object_no_alloc),
55249+ atomic_read_unchecked(&fscache_n_object_avail),
55250+ atomic_read_unchecked(&fscache_n_object_dead));
55251 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
55252- atomic_read(&fscache_n_checkaux_none),
55253- atomic_read(&fscache_n_checkaux_okay),
55254- atomic_read(&fscache_n_checkaux_update),
55255- atomic_read(&fscache_n_checkaux_obsolete));
55256+ atomic_read_unchecked(&fscache_n_checkaux_none),
55257+ atomic_read_unchecked(&fscache_n_checkaux_okay),
55258+ atomic_read_unchecked(&fscache_n_checkaux_update),
55259+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
55260
55261 seq_printf(m, "Pages : mrk=%u unc=%u\n",
55262- atomic_read(&fscache_n_marks),
55263- atomic_read(&fscache_n_uncaches));
55264+ atomic_read_unchecked(&fscache_n_marks),
55265+ atomic_read_unchecked(&fscache_n_uncaches));
55266
55267 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
55268 " oom=%u\n",
55269- atomic_read(&fscache_n_acquires),
55270- atomic_read(&fscache_n_acquires_null),
55271- atomic_read(&fscache_n_acquires_no_cache),
55272- atomic_read(&fscache_n_acquires_ok),
55273- atomic_read(&fscache_n_acquires_nobufs),
55274- atomic_read(&fscache_n_acquires_oom));
55275+ atomic_read_unchecked(&fscache_n_acquires),
55276+ atomic_read_unchecked(&fscache_n_acquires_null),
55277+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
55278+ atomic_read_unchecked(&fscache_n_acquires_ok),
55279+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
55280+ atomic_read_unchecked(&fscache_n_acquires_oom));
55281
55282 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
55283- atomic_read(&fscache_n_object_lookups),
55284- atomic_read(&fscache_n_object_lookups_negative),
55285- atomic_read(&fscache_n_object_lookups_positive),
55286- atomic_read(&fscache_n_object_created),
55287- atomic_read(&fscache_n_object_lookups_timed_out));
55288+ atomic_read_unchecked(&fscache_n_object_lookups),
55289+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
55290+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
55291+ atomic_read_unchecked(&fscache_n_object_created),
55292+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
55293
55294 seq_printf(m, "Invals : n=%u run=%u\n",
55295- atomic_read(&fscache_n_invalidates),
55296- atomic_read(&fscache_n_invalidates_run));
55297+ atomic_read_unchecked(&fscache_n_invalidates),
55298+ atomic_read_unchecked(&fscache_n_invalidates_run));
55299
55300 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
55301- atomic_read(&fscache_n_updates),
55302- atomic_read(&fscache_n_updates_null),
55303- atomic_read(&fscache_n_updates_run));
55304+ atomic_read_unchecked(&fscache_n_updates),
55305+ atomic_read_unchecked(&fscache_n_updates_null),
55306+ atomic_read_unchecked(&fscache_n_updates_run));
55307
55308 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
55309- atomic_read(&fscache_n_relinquishes),
55310- atomic_read(&fscache_n_relinquishes_null),
55311- atomic_read(&fscache_n_relinquishes_waitcrt),
55312- atomic_read(&fscache_n_relinquishes_retire));
55313+ atomic_read_unchecked(&fscache_n_relinquishes),
55314+ atomic_read_unchecked(&fscache_n_relinquishes_null),
55315+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
55316+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
55317
55318 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
55319- atomic_read(&fscache_n_attr_changed),
55320- atomic_read(&fscache_n_attr_changed_ok),
55321- atomic_read(&fscache_n_attr_changed_nobufs),
55322- atomic_read(&fscache_n_attr_changed_nomem),
55323- atomic_read(&fscache_n_attr_changed_calls));
55324+ atomic_read_unchecked(&fscache_n_attr_changed),
55325+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
55326+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
55327+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
55328+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
55329
55330 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
55331- atomic_read(&fscache_n_allocs),
55332- atomic_read(&fscache_n_allocs_ok),
55333- atomic_read(&fscache_n_allocs_wait),
55334- atomic_read(&fscache_n_allocs_nobufs),
55335- atomic_read(&fscache_n_allocs_intr));
55336+ atomic_read_unchecked(&fscache_n_allocs),
55337+ atomic_read_unchecked(&fscache_n_allocs_ok),
55338+ atomic_read_unchecked(&fscache_n_allocs_wait),
55339+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
55340+ atomic_read_unchecked(&fscache_n_allocs_intr));
55341 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
55342- atomic_read(&fscache_n_alloc_ops),
55343- atomic_read(&fscache_n_alloc_op_waits),
55344- atomic_read(&fscache_n_allocs_object_dead));
55345+ atomic_read_unchecked(&fscache_n_alloc_ops),
55346+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
55347+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
55348
55349 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
55350 " int=%u oom=%u\n",
55351- atomic_read(&fscache_n_retrievals),
55352- atomic_read(&fscache_n_retrievals_ok),
55353- atomic_read(&fscache_n_retrievals_wait),
55354- atomic_read(&fscache_n_retrievals_nodata),
55355- atomic_read(&fscache_n_retrievals_nobufs),
55356- atomic_read(&fscache_n_retrievals_intr),
55357- atomic_read(&fscache_n_retrievals_nomem));
55358+ atomic_read_unchecked(&fscache_n_retrievals),
55359+ atomic_read_unchecked(&fscache_n_retrievals_ok),
55360+ atomic_read_unchecked(&fscache_n_retrievals_wait),
55361+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
55362+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
55363+ atomic_read_unchecked(&fscache_n_retrievals_intr),
55364+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
55365 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
55366- atomic_read(&fscache_n_retrieval_ops),
55367- atomic_read(&fscache_n_retrieval_op_waits),
55368- atomic_read(&fscache_n_retrievals_object_dead));
55369+ atomic_read_unchecked(&fscache_n_retrieval_ops),
55370+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
55371+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
55372
55373 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
55374- atomic_read(&fscache_n_stores),
55375- atomic_read(&fscache_n_stores_ok),
55376- atomic_read(&fscache_n_stores_again),
55377- atomic_read(&fscache_n_stores_nobufs),
55378- atomic_read(&fscache_n_stores_oom));
55379+ atomic_read_unchecked(&fscache_n_stores),
55380+ atomic_read_unchecked(&fscache_n_stores_ok),
55381+ atomic_read_unchecked(&fscache_n_stores_again),
55382+ atomic_read_unchecked(&fscache_n_stores_nobufs),
55383+ atomic_read_unchecked(&fscache_n_stores_oom));
55384 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
55385- atomic_read(&fscache_n_store_ops),
55386- atomic_read(&fscache_n_store_calls),
55387- atomic_read(&fscache_n_store_pages),
55388- atomic_read(&fscache_n_store_radix_deletes),
55389- atomic_read(&fscache_n_store_pages_over_limit));
55390+ atomic_read_unchecked(&fscache_n_store_ops),
55391+ atomic_read_unchecked(&fscache_n_store_calls),
55392+ atomic_read_unchecked(&fscache_n_store_pages),
55393+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
55394+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
55395
55396 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
55397- atomic_read(&fscache_n_store_vmscan_not_storing),
55398- atomic_read(&fscache_n_store_vmscan_gone),
55399- atomic_read(&fscache_n_store_vmscan_busy),
55400- atomic_read(&fscache_n_store_vmscan_cancelled),
55401- atomic_read(&fscache_n_store_vmscan_wait));
55402+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
55403+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
55404+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
55405+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled),
55406+ atomic_read_unchecked(&fscache_n_store_vmscan_wait));
55407
55408 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
55409- atomic_read(&fscache_n_op_pend),
55410- atomic_read(&fscache_n_op_run),
55411- atomic_read(&fscache_n_op_enqueue),
55412- atomic_read(&fscache_n_op_cancelled),
55413- atomic_read(&fscache_n_op_rejected));
55414+ atomic_read_unchecked(&fscache_n_op_pend),
55415+ atomic_read_unchecked(&fscache_n_op_run),
55416+ atomic_read_unchecked(&fscache_n_op_enqueue),
55417+ atomic_read_unchecked(&fscache_n_op_cancelled),
55418+ atomic_read_unchecked(&fscache_n_op_rejected));
55419 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
55420- atomic_read(&fscache_n_op_deferred_release),
55421- atomic_read(&fscache_n_op_release),
55422- atomic_read(&fscache_n_op_gc));
55423+ atomic_read_unchecked(&fscache_n_op_deferred_release),
55424+ atomic_read_unchecked(&fscache_n_op_release),
55425+ atomic_read_unchecked(&fscache_n_op_gc));
55426
55427 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
55428 atomic_read(&fscache_n_cop_alloc_object),
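
The hunks above convert every fscache statistics counter from atomic_t to the _unchecked variants. Under PaX's REFCOUNT hardening, plain atomic_t arithmetic is instrumented to detect reference-count overflows; counters that are pure statistics are expected to wrap, so they are opted out via atomic_read_unchecked()/atomic_inc_unchecked() and friends. A minimal userspace model of the split, assuming a simplified check (the real feature works at the instruction level and reports overflows through the kernel, not assert()):

#include <assert.h>
#include <limits.h>
#include <stdio.h>

typedef struct { volatile int counter; } atomic_t;            /* overflow-checked refcounts */
typedef struct { volatile int counter; } atomic_unchecked_t;  /* statistics, free to wrap */

static void atomic_inc(atomic_t *v)
{
	/* model: refuse to wrap silently, as the REFCOUNT check would */
	assert(v->counter != INT_MAX);
	__sync_fetch_and_add(&v->counter, 1);
}

static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	/* statistics may wrap; wrapping is harmless and goes unreported */
	__sync_fetch_and_add(&v->counter, 1);
}

int main(void)
{
	atomic_t ref = { 1 };
	atomic_unchecked_t stat = { INT_MAX };

	atomic_inc(&ref);             /* fine: nowhere near the limit */
	atomic_inc_unchecked(&stat);  /* wraps to INT_MIN, by design */
	printf("ref=%d stat=%d\n", ref.counter, stat.counter);
	return 0;
}

The same conversion recurs throughout this patch (lockd's nlm_cookie, fsnotify's sync cookie, the ocfs2 alloc_stats below) wherever a counter's only job is labelling rather than object lifetime.
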
55429diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
55430index aef34b1..59bfd7b 100644
55431--- a/fs/fuse/cuse.c
55432+++ b/fs/fuse/cuse.c
55433@@ -600,10 +600,12 @@ static int __init cuse_init(void)
55434 INIT_LIST_HEAD(&cuse_conntbl[i]);
55435
55436 /* inherit and extend fuse_dev_operations */
55437- cuse_channel_fops = fuse_dev_operations;
55438- cuse_channel_fops.owner = THIS_MODULE;
55439- cuse_channel_fops.open = cuse_channel_open;
55440- cuse_channel_fops.release = cuse_channel_release;
55441+ pax_open_kernel();
55442+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
55443+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
55444+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
55445+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
55446+ pax_close_kernel();
55447
55448 cuse_class = class_create(THIS_MODULE, "cuse");
55449 if (IS_ERR(cuse_class))
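
With the constify plugin active, file_operations instances are read-only at runtime, so cuse_init() can no longer assign to cuse_channel_fops field by field; the patch instead opens a write window with pax_open_kernel(), copies and patches the structure through casts, and closes the window again. A rough userspace analogue using mprotect(), offered as illustration only (the kernel implementation toggles write protection on kernel pages, not user mappings):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

struct file_operations { void *owner, *open, *release; };

static struct file_operations *ops;   /* lives in a normally read-only page */

static void pax_open_kernel(void)  { mprotect(ops, getpagesize(), PROT_READ | PROT_WRITE); }
static void pax_close_kernel(void) { mprotect(ops, getpagesize(), PROT_READ); }

int main(void)
{
	struct file_operations base = { 0 };

	ops = mmap(NULL, getpagesize(), PROT_READ,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	pax_open_kernel();
	memcpy(ops, &base, sizeof(*ops));     /* inherit fuse_dev_operations... */
	*(void **)&ops->open = (void *)0x1;   /* ...and extend, as cuse does */
	pax_close_kernel();

	/* from here on, any stray write to *ops faults */
	printf("patched ops at %p\n", (void *)ops);
	return 0;
}
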
55450diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
55451index 1d55f94..088da65 100644
55452--- a/fs/fuse/dev.c
55453+++ b/fs/fuse/dev.c
55454@@ -1339,7 +1339,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
55455 ret = 0;
55456 pipe_lock(pipe);
55457
55458- if (!pipe->readers) {
55459+ if (!atomic_read(&pipe->readers)) {
55460 send_sig(SIGPIPE, current, 0);
55461 if (!ret)
55462 ret = -EPIPE;
55463@@ -1364,7 +1364,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
55464 page_nr++;
55465 ret += buf->len;
55466
55467- if (pipe->files)
55468+ if (atomic_read(&pipe->files))
55469 do_wakeup = 1;
55470 }
55471
55472diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
55473index 5b12746..b481b03 100644
55474--- a/fs/fuse/dir.c
55475+++ b/fs/fuse/dir.c
55476@@ -1437,7 +1437,7 @@ static char *read_link(struct dentry *dentry)
55477 return link;
55478 }
55479
55480-static void free_link(char *link)
55481+static void free_link(const char *link)
55482 {
55483 if (!IS_ERR(link))
55484 free_page((unsigned long) link);
55485diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
55486index 62b484e..0f9a140 100644
55487--- a/fs/gfs2/inode.c
55488+++ b/fs/gfs2/inode.c
55489@@ -1441,7 +1441,7 @@ out:
55490
55491 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
55492 {
55493- char *s = nd_get_link(nd);
55494+ const char *s = nd_get_link(nd);
55495 if (!IS_ERR(s))
55496 kfree(s);
55497 }
55498diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
55499index a3f868a..bb308ae 100644
55500--- a/fs/hugetlbfs/inode.c
55501+++ b/fs/hugetlbfs/inode.c
55502@@ -152,6 +152,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
55503 struct mm_struct *mm = current->mm;
55504 struct vm_area_struct *vma;
55505 struct hstate *h = hstate_file(file);
55506+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
55507 struct vm_unmapped_area_info info;
55508
55509 if (len & ~huge_page_mask(h))
55510@@ -165,17 +166,26 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
55511 return addr;
55512 }
55513
55514+#ifdef CONFIG_PAX_RANDMMAP
55515+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
55516+#endif
55517+
55518 if (addr) {
55519 addr = ALIGN(addr, huge_page_size(h));
55520 vma = find_vma(mm, addr);
55521- if (TASK_SIZE - len >= addr &&
55522- (!vma || addr + len <= vma->vm_start))
55523+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
55524 return addr;
55525 }
55526
55527 info.flags = 0;
55528 info.length = len;
55529 info.low_limit = TASK_UNMAPPED_BASE;
55530+
55531+#ifdef CONFIG_PAX_RANDMMAP
55532+ if (mm->pax_flags & MF_PAX_RANDMMAP)
55533+ info.low_limit += mm->delta_mmap;
55534+#endif
55535+
55536 info.high_limit = TASK_SIZE;
55537 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
55538 info.align_offset = 0;
55539@@ -898,7 +908,7 @@ static struct file_system_type hugetlbfs_fs_type = {
55540 };
55541 MODULE_ALIAS_FS("hugetlbfs");
55542
55543-static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
55544+struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
55545
55546 static int can_do_hugetlb_shm(void)
55547 {
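
The first hugetlbfs hunk folds the mapping search into grsecurity/PaX ASLR: the caller's address hint is ignored when MF_PAX_RANDMMAP is set, the neighbour check goes through check_heap_stack_gap() with a randomized thread-stack offset, and the search floor is raised by the per-mm delta_mmap. A toy sketch of the shifted floor, assuming simplified constants and alignment:

#include <stdio.h>
#include <stdlib.h>

#define TASK_UNMAPPED_BASE 0x40000000UL

int main(void)
{
	srand(42);  /* stands in for boot-time entropy */
	unsigned long delta_mmap =
		((unsigned long)rand() & 0xfffUL) << 21;  /* hugepage-aligned delta */
	unsigned long low_limit = TASK_UNMAPPED_BASE + delta_mmap;

	printf("unmapped-area search starts at %#lx (delta %#lx)\n",
	       low_limit, delta_mmap);
	return 0;
}
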
55548diff --git a/fs/inode.c b/fs/inode.c
55549index 00d5fc3..98ce7d7 100644
55550--- a/fs/inode.c
55551+++ b/fs/inode.c
55552@@ -878,8 +878,8 @@ unsigned int get_next_ino(void)
55553
55554 #ifdef CONFIG_SMP
55555 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
55556- static atomic_t shared_last_ino;
55557- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
55558+ static atomic_unchecked_t shared_last_ino;
55559+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
55560
55561 res = next - LAST_INO_BATCH;
55562 }
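
get_next_ino() grabs inode numbers in chunks of LAST_INO_BATCH from one shared counter; the value only needs to be distinct, so wrapping is acceptable and the unchecked atomic keeps REFCOUNT from flagging it. A single-threaded model of the batching, assuming one global cursor where the kernel uses per-cpu variables:

#include <stdio.h>

#define LAST_INO_BATCH 1024

static unsigned int shared_last_ino;  /* models atomic_unchecked_t */
static unsigned int res;              /* models this cpu's cursor  */

static unsigned int get_next_ino(void)
{
	if ((res & (LAST_INO_BATCH - 1)) == 0) {
		unsigned int next =
			__sync_add_and_fetch(&shared_last_ino, LAST_INO_BATCH);
		res = next - LAST_INO_BATCH;
	}
	return ++res;
}

int main(void)
{
	for (int i = 0; i < 3; i++)
		printf("ino %u\n", get_next_ino());
	return 0;
}
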
55563diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
55564index 4a6cf28..d3a29d3 100644
55565--- a/fs/jffs2/erase.c
55566+++ b/fs/jffs2/erase.c
55567@@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
55568 struct jffs2_unknown_node marker = {
55569 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
55570 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
55571- .totlen = cpu_to_je32(c->cleanmarker_size)
55572+ .totlen = cpu_to_je32(c->cleanmarker_size),
55573+ .hdr_crc = cpu_to_je32(0)
55574 };
55575
55576 jffs2_prealloc_raw_node_refs(c, jeb, 1);
55577diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
55578index a6597d6..41b30ec 100644
55579--- a/fs/jffs2/wbuf.c
55580+++ b/fs/jffs2/wbuf.c
55581@@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
55582 {
55583 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
55584 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
55585- .totlen = constant_cpu_to_je32(8)
55586+ .totlen = constant_cpu_to_je32(8),
55587+ .hdr_crc = constant_cpu_to_je32(0)
55588 };
55589
55590 /*
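
Both jffs2 hunks add an explicit .hdr_crc initializer to the cleanmarker nodes. With designated initializers the member is zero-initialized anyway, so the change is behaviour-neutral; my assumption is that spelling it out documents the on-flash field and keeps initialization-analysis tooling quiet. A two-line refresher:

#include <stdint.h>
#include <stdio.h>

struct node { uint16_t magic, nodetype; uint32_t totlen, hdr_crc; };

int main(void)
{
	struct node a = { .magic = 0x1985, .totlen = 8 };                /* hdr_crc implicitly 0 */
	struct node b = { .magic = 0x1985, .totlen = 8, .hdr_crc = 0 };  /* explicitly 0 */

	printf("%u %u\n", a.hdr_crc, b.hdr_crc);  /* prints: 0 0 */
	return 0;
}
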
55591diff --git a/fs/jfs/super.c b/fs/jfs/super.c
55592index 788e0a9..8433098 100644
55593--- a/fs/jfs/super.c
55594+++ b/fs/jfs/super.c
55595@@ -878,7 +878,7 @@ static int __init init_jfs_fs(void)
55596
55597 jfs_inode_cachep =
55598 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
55599- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
55600+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
55601 init_once);
55602 if (jfs_inode_cachep == NULL)
55603 return -ENOMEM;
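
SLAB_USERCOPY whitelists the jfs_ip cache for bulk copies to and from user buffers: PAX_USERCOPY rejects copy_to_user()/copy_from_user() sourced from slab caches that are not explicitly marked. A greatly condensed model of the whitelist check, with made-up structures in place of the real slab internals:

#include <stdbool.h>
#include <stdio.h>

struct kmem_cache { const char *name; bool usercopy; };

static bool check_usercopy(const struct kmem_cache *c)
{
	return c->usercopy;  /* SLAB_USERCOPY was set at cache creation */
}

int main(void)
{
	struct kmem_cache jfs_ip = { "jfs_ip", true };  /* whitelisted above */
	struct kmem_cache cred   = { "cred", false };

	printf("%s: %s\n", jfs_ip.name, check_usercopy(&jfs_ip) ? "copy ok" : "denied");
	printf("%s: %s\n", cred.name,   check_usercopy(&cred)   ? "copy ok" : "denied");
	return 0;
}
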
55604diff --git a/fs/libfs.c b/fs/libfs.c
55605index 916da8c..1588998 100644
55606--- a/fs/libfs.c
55607+++ b/fs/libfs.c
55608@@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
55609
55610 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
55611 struct dentry *next;
55612+ char d_name[sizeof(next->d_iname)];
55613+ const unsigned char *name;
55614+
55615 next = list_entry(p, struct dentry, d_u.d_child);
55616 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
55617 if (!simple_positive(next)) {
55618@@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
55619
55620 spin_unlock(&next->d_lock);
55621 spin_unlock(&dentry->d_lock);
55622- if (filldir(dirent, next->d_name.name,
55623+ name = next->d_name.name;
55624+ if (name == next->d_iname) {
55625+ memcpy(d_name, name, next->d_name.len);
55626+ name = d_name;
55627+ }
55628+ if (filldir(dirent, name,
55629 next->d_name.len, filp->f_pos,
55630 next->d_inode->i_ino,
55631 dt_type(next->d_inode)) < 0)
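
dcache_readdir() now snapshots short inline names (next->d_iname) into a stack buffer before calling filldir(): the callback may block, and a concurrent rename can rewrite the inline buffer once the locks are dropped. A compact model of the snapshot-then-callback pattern, assuming a pthread mutex stands in for d_lock (build with -pthread):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define DNAME_INLINE_LEN 32

struct dentry {
	pthread_mutex_t d_lock;
	char d_iname[DNAME_INLINE_LEN];
};

static void emit(int (*filldir)(const char *), struct dentry *d)
{
	char d_name[DNAME_INLINE_LEN];

	pthread_mutex_lock(&d->d_lock);
	memcpy(d_name, d->d_iname, sizeof(d_name));  /* stable snapshot */
	pthread_mutex_unlock(&d->d_lock);

	filldir(d_name);  /* may block; no longer touches d_iname */
}

static int show(const char *n) { printf("%s\n", n); return 0; }

int main(void)
{
	struct dentry d = { PTHREAD_MUTEX_INITIALIZER, "hello" };
	emit(show, &d);
	return 0;
}
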
55632diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
55633index acd3947..1f896e2 100644
55634--- a/fs/lockd/clntproc.c
55635+++ b/fs/lockd/clntproc.c
55636@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
55637 /*
55638 * Cookie counter for NLM requests
55639 */
55640-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
55641+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
55642
55643 void nlmclnt_next_cookie(struct nlm_cookie *c)
55644 {
55645- u32 cookie = atomic_inc_return(&nlm_cookie);
55646+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
55647
55648 memcpy(c->data, &cookie, 4);
55649 c->len=4;
55650diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
55651index a2aa97d..10d6c41 100644
55652--- a/fs/lockd/svc.c
55653+++ b/fs/lockd/svc.c
55654@@ -305,7 +305,7 @@ static int lockd_start_svc(struct svc_serv *serv)
55655 svc_sock_update_bufs(serv);
55656 serv->sv_maxconn = nlm_max_connections;
55657
55658- nlmsvc_task = kthread_run(lockd, nlmsvc_rqst, serv->sv_name);
55659+ nlmsvc_task = kthread_run(lockd, nlmsvc_rqst, "%s", serv->sv_name);
55660 if (IS_ERR(nlmsvc_task)) {
55661 error = PTR_ERR(nlmsvc_task);
55662 printk(KERN_WARNING
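
This is the first of several identical fixes in this patch (see also fs/nfs/callback.c and fs/nfs/nfs4state.c below): a thread name built from runtime data must be passed to kthread_run() as an argument to a "%s" format, never as the format string itself, or embedded %-sequences would be interpreted. The same bug class in two lines of userspace C:

#include <stdio.h>

int main(void)
{
	const char *sv_name = "lockd-%n-oops";  /* name influenced by runtime data */

	/* printf(sv_name);      BAD: %n and friends would be interpreted */
	printf("%s\n", sv_name); /* GOOD: the name is data, not a format  */
	return 0;
}
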
55663diff --git a/fs/locks.c b/fs/locks.c
55664index cb424a4..850e4dd 100644
55665--- a/fs/locks.c
55666+++ b/fs/locks.c
55667@@ -2064,16 +2064,16 @@ void locks_remove_flock(struct file *filp)
55668 return;
55669
55670 if (filp->f_op && filp->f_op->flock) {
55671- struct file_lock fl = {
55672+ struct file_lock flock = {
55673 .fl_pid = current->tgid,
55674 .fl_file = filp,
55675 .fl_flags = FL_FLOCK,
55676 .fl_type = F_UNLCK,
55677 .fl_end = OFFSET_MAX,
55678 };
55679- filp->f_op->flock(filp, F_SETLKW, &fl);
55680- if (fl.fl_ops && fl.fl_ops->fl_release_private)
55681- fl.fl_ops->fl_release_private(&fl);
55682+ filp->f_op->flock(filp, F_SETLKW, &flock);
55683+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
55684+ flock.fl_ops->fl_release_private(&flock);
55685 }
55686
55687 lock_flocks();
55688diff --git a/fs/namei.c b/fs/namei.c
55689index 9ed9361..2b72db1 100644
55690--- a/fs/namei.c
55691+++ b/fs/namei.c
55692@@ -319,16 +319,32 @@ int generic_permission(struct inode *inode, int mask)
55693 if (ret != -EACCES)
55694 return ret;
55695
55696+#ifdef CONFIG_GRKERNSEC
55697+ /* we'll block if we have to log due to a denied capability use */
55698+ if (mask & MAY_NOT_BLOCK)
55699+ return -ECHILD;
55700+#endif
55701+
55702 if (S_ISDIR(inode->i_mode)) {
55703 /* DACs are overridable for directories */
55704- if (inode_capable(inode, CAP_DAC_OVERRIDE))
55705- return 0;
55706 if (!(mask & MAY_WRITE))
55707- if (inode_capable(inode, CAP_DAC_READ_SEARCH))
55708+ if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
55709+ inode_capable(inode, CAP_DAC_READ_SEARCH))
55710 return 0;
55711+ if (inode_capable(inode, CAP_DAC_OVERRIDE))
55712+ return 0;
55713 return -EACCES;
55714 }
55715 /*
55716+ * Searching includes executable on directories, else just read.
55717+ */
55718+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
55719+ if (mask == MAY_READ)
55720+ if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
55721+ inode_capable(inode, CAP_DAC_READ_SEARCH))
55722+ return 0;
55723+
55724+ /*
55725 * Read/write DACs are always overridable.
55726 * Executable DACs are overridable when there is
55727 * at least one exec bit set.
55728@@ -337,14 +353,6 @@ int generic_permission(struct inode *inode, int mask)
55729 if (inode_capable(inode, CAP_DAC_OVERRIDE))
55730 return 0;
55731
55732- /*
55733- * Searching includes executable on directories, else just read.
55734- */
55735- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
55736- if (mask == MAY_READ)
55737- if (inode_capable(inode, CAP_DAC_READ_SEARCH))
55738- return 0;
55739-
55740 return -EACCES;
55741 }
55742
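
The reordering above makes generic_permission() try the narrow CAP_DAC_READ_SEARCH before the broad CAP_DAC_OVERRIDE, preferring the _nolog variants first, so an ordinary read-only lookup neither exercises nor logs the stronger capability (and under CONFIG_GRKERNSEC, RCU-walk bails out early because logging may block). A boolean sketch of the new ordering, assuming capabilities reduced to flags and the logging side effects omitted:

#include <stdbool.h>
#include <stdio.h>

#define MAY_READ  0x1
#define MAY_WRITE 0x2
#define MAY_EXEC  0x4

static bool capable_read_search = true;  /* narrow capability */
static bool capable_override    = true;  /* broad capability  */

static int generic_permission(int mask, bool is_dir)
{
	if (is_dir) {
		if (!(mask & MAY_WRITE) && capable_read_search)
			return 0;          /* the narrow capability suffices */
		if (capable_override)
			return 0;          /* only now reach for the big hammer */
		return -13;                /* -EACCES */
	}
	mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
	if (mask == MAY_READ && capable_read_search)
		return 0;
	return capable_override ? 0 : -13;
}

int main(void)
{
	printf("dir read: %d\n", generic_permission(MAY_READ, true));
	return 0;
}
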
55743@@ -820,7 +828,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
55744 {
55745 struct dentry *dentry = link->dentry;
55746 int error;
55747- char *s;
55748+ const char *s;
55749
55750 BUG_ON(nd->flags & LOOKUP_RCU);
55751
55752@@ -841,6 +849,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
55753 if (error)
55754 goto out_put_nd_path;
55755
55756+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
55757+ dentry->d_inode, dentry, nd->path.mnt)) {
55758+ error = -EACCES;
55759+ goto out_put_nd_path;
55760+ }
55761+
55762 nd->last_type = LAST_BIND;
55763 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
55764 error = PTR_ERR(*p);
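
gr_handle_follow_link() applies grsecurity's symlink restriction at the moment a link is about to be followed. As I understand the rule (treat the details as an assumption), a symlink in a world-writable sticky directory such as /tmp is only followed when it is owned by the follower or by the directory owner:

#include <stdbool.h>
#include <stdio.h>
#include <sys/stat.h>

struct node { unsigned mode; unsigned uid; };

static bool may_follow(const struct node *dir, const struct node *link,
		       unsigned fsuid)
{
	bool sticky_ww = (dir->mode & (S_ISVTX | S_IWOTH)) == (S_ISVTX | S_IWOTH);

	if (!sticky_ww)
		return true;
	return link->uid == fsuid || link->uid == dir->uid;
}

int main(void)
{
	struct node tmp  = { 01777, 0 };    /* /tmp: sticky, world-writable   */
	struct node evil = { 0777, 1000 };  /* link planted there by uid 1000 */

	printf("uid 0 follows: %s\n", may_follow(&tmp, &evil, 0) ? "yes" : "no");
	return 0;
}

Mainline later adopted a close relative of this check as fs.protected_symlinks.
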
55765@@ -1588,6 +1602,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
55766 if (res)
55767 break;
55768 res = walk_component(nd, path, LOOKUP_FOLLOW);
55769+ if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
55770+ res = -EACCES;
55771 put_link(nd, &link, cookie);
55772 } while (res > 0);
55773
55774@@ -1686,7 +1702,7 @@ EXPORT_SYMBOL(full_name_hash);
55775 static inline unsigned long hash_name(const char *name, unsigned int *hashp)
55776 {
55777 unsigned long a, b, adata, bdata, mask, hash, len;
55778- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
55779+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
55780
55781 hash = a = 0;
55782 len = -sizeof(unsigned long);
55783@@ -1968,6 +1984,8 @@ static int path_lookupat(int dfd, const char *name,
55784 if (err)
55785 break;
55786 err = lookup_last(nd, &path);
55787+ if (!err && gr_handle_symlink_owner(&link, nd->inode))
55788+ err = -EACCES;
55789 put_link(nd, &link, cookie);
55790 }
55791 }
55792@@ -1975,6 +1993,13 @@ static int path_lookupat(int dfd, const char *name,
55793 if (!err)
55794 err = complete_walk(nd);
55795
55796+ if (!err && !(nd->flags & LOOKUP_PARENT)) {
55797+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
55798+ path_put(&nd->path);
55799+ err = -ENOENT;
55800+ }
55801+ }
55802+
55803 if (!err && nd->flags & LOOKUP_DIRECTORY) {
55804 if (!can_lookup(nd->inode)) {
55805 path_put(&nd->path);
55806@@ -2002,8 +2027,15 @@ static int filename_lookup(int dfd, struct filename *name,
55807 retval = path_lookupat(dfd, name->name,
55808 flags | LOOKUP_REVAL, nd);
55809
55810- if (likely(!retval))
55811+ if (likely(!retval)) {
55812 audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
55813+ if (name->name[0] != '/' && nd->path.dentry && nd->inode) {
55814+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt)) {
55815+ path_put(&nd->path);
55816+ return -ENOENT;
55817+ }
55818+ }
55819+ }
55820 return retval;
55821 }
55822
55823@@ -2381,6 +2413,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
55824 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
55825 return -EPERM;
55826
55827+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
55828+ return -EPERM;
55829+ if (gr_handle_rawio(inode))
55830+ return -EPERM;
55831+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
55832+ return -EACCES;
55833+
55834 return 0;
55835 }
55836
55837@@ -2602,7 +2641,7 @@ looked_up:
55838 * cleared otherwise prior to returning.
55839 */
55840 static int lookup_open(struct nameidata *nd, struct path *path,
55841- struct file *file,
55842+ struct path *link, struct file *file,
55843 const struct open_flags *op,
55844 bool got_write, int *opened)
55845 {
55846@@ -2637,6 +2676,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
55847 /* Negative dentry, just create the file */
55848 if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
55849 umode_t mode = op->mode;
55850+
55851+ if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
55852+ error = -EACCES;
55853+ goto out_dput;
55854+ }
55855+
55856+ if (!gr_acl_handle_creat(dentry, dir, nd->path.mnt, op->open_flag, op->acc_mode, mode)) {
55857+ error = -EACCES;
55858+ goto out_dput;
55859+ }
55860+
55861 if (!IS_POSIXACL(dir->d_inode))
55862 mode &= ~current_umask();
55863 /*
55864@@ -2658,6 +2708,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
55865 nd->flags & LOOKUP_EXCL);
55866 if (error)
55867 goto out_dput;
55868+ else
55869+ gr_handle_create(dentry, nd->path.mnt);
55870 }
55871 out_no_open:
55872 path->dentry = dentry;
55873@@ -2672,7 +2724,7 @@ out_dput:
55874 /*
55875 * Handle the last step of open()
55876 */
55877-static int do_last(struct nameidata *nd, struct path *path,
55878+static int do_last(struct nameidata *nd, struct path *path, struct path *link,
55879 struct file *file, const struct open_flags *op,
55880 int *opened, struct filename *name)
55881 {
55882@@ -2701,16 +2753,32 @@ static int do_last(struct nameidata *nd, struct path *path,
55883 error = complete_walk(nd);
55884 if (error)
55885 return error;
55886+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
55887+ error = -ENOENT;
55888+ goto out;
55889+ }
55890 audit_inode(name, nd->path.dentry, 0);
55891 if (open_flag & O_CREAT) {
55892 error = -EISDIR;
55893 goto out;
55894 }
55895+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
55896+ error = -EACCES;
55897+ goto out;
55898+ }
55899 goto finish_open;
55900 case LAST_BIND:
55901 error = complete_walk(nd);
55902 if (error)
55903 return error;
55904+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
55905+ error = -ENOENT;
55906+ goto out;
55907+ }
55908+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
55909+ error = -EACCES;
55910+ goto out;
55911+ }
55912 audit_inode(name, dir, 0);
55913 goto finish_open;
55914 }
55915@@ -2759,7 +2827,7 @@ retry_lookup:
55916 */
55917 }
55918 mutex_lock(&dir->d_inode->i_mutex);
55919- error = lookup_open(nd, path, file, op, got_write, opened);
55920+ error = lookup_open(nd, path, link, file, op, got_write, opened);
55921 mutex_unlock(&dir->d_inode->i_mutex);
55922
55923 if (error <= 0) {
55924@@ -2783,11 +2851,28 @@ retry_lookup:
55925 goto finish_open_created;
55926 }
55927
55928+ if (!gr_acl_handle_hidden_file(path->dentry, nd->path.mnt)) {
55929+ error = -ENOENT;
55930+ goto exit_dput;
55931+ }
55932+ if (link && gr_handle_symlink_owner(link, path->dentry->d_inode)) {
55933+ error = -EACCES;
55934+ goto exit_dput;
55935+ }
55936+
55937 /*
55938 * create/update audit record if it already exists.
55939 */
55940- if (path->dentry->d_inode)
55941+ if (path->dentry->d_inode) {
55942+ /* only check if O_CREAT is specified, all other checks need to go
55943+ into may_open */
55944+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
55945+ error = -EACCES;
55946+ goto exit_dput;
55947+ }
55948+
55949 audit_inode(name, path->dentry, 0);
55950+ }
55951
55952 /*
55953 * If atomic_open() acquired write access it is dropped now due to
55954@@ -2828,6 +2913,11 @@ finish_lookup:
55955 }
55956 }
55957 BUG_ON(inode != path->dentry->d_inode);
55958+ /* if we're resolving a symlink to another symlink */
55959+ if (link && gr_handle_symlink_owner(link, inode)) {
55960+ error = -EACCES;
55961+ goto out;
55962+ }
55963 return 1;
55964 }
55965
55966@@ -2837,7 +2927,6 @@ finish_lookup:
55967 save_parent.dentry = nd->path.dentry;
55968 save_parent.mnt = mntget(path->mnt);
55969 nd->path.dentry = path->dentry;
55970-
55971 }
55972 nd->inode = inode;
55973 /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
55974@@ -2846,6 +2935,16 @@ finish_lookup:
55975 path_put(&save_parent);
55976 return error;
55977 }
55978+
55979+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
55980+ error = -ENOENT;
55981+ goto out;
55982+ }
55983+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
55984+ error = -EACCES;
55985+ goto out;
55986+ }
55987+
55988 error = -EISDIR;
55989 if ((open_flag & O_CREAT) && S_ISDIR(nd->inode->i_mode))
55990 goto out;
55991@@ -2944,7 +3043,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
55992 if (unlikely(error))
55993 goto out;
55994
55995- error = do_last(nd, &path, file, op, &opened, pathname);
55996+ error = do_last(nd, &path, NULL, file, op, &opened, pathname);
55997 while (unlikely(error > 0)) { /* trailing symlink */
55998 struct path link = path;
55999 void *cookie;
56000@@ -2962,7 +3061,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
56001 error = follow_link(&link, nd, &cookie);
56002 if (unlikely(error))
56003 break;
56004- error = do_last(nd, &path, file, op, &opened, pathname);
56005+ error = do_last(nd, &path, &link, file, op, &opened, pathname);
56006 put_link(nd, &link, cookie);
56007 }
56008 out:
56009@@ -3062,8 +3161,12 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
56010 goto unlock;
56011
56012 error = -EEXIST;
56013- if (dentry->d_inode)
56014+ if (dentry->d_inode) {
56015+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
56016+ error = -ENOENT;
56017+ }
56018 goto fail;
56019+ }
56020 /*
56021 * Special case - lookup gave negative, but... we had foo/bar/
56022 * From the vfs_mknod() POV we just have a negative dentry -
56023@@ -3115,6 +3218,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
56024 }
56025 EXPORT_SYMBOL(user_path_create);
56026
56027+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, struct filename **to, unsigned int lookup_flags)
56028+{
56029+ struct filename *tmp = getname(pathname);
56030+ struct dentry *res;
56031+ if (IS_ERR(tmp))
56032+ return ERR_CAST(tmp);
56033+ res = kern_path_create(dfd, tmp->name, path, lookup_flags);
56034+ if (IS_ERR(res))
56035+ putname(tmp);
56036+ else
56037+ *to = tmp;
56038+ return res;
56039+}
56040+
56041 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
56042 {
56043 int error = may_create(dir, dentry);
56044@@ -3177,6 +3294,17 @@ retry:
56045
56046 if (!IS_POSIXACL(path.dentry->d_inode))
56047 mode &= ~current_umask();
56048+
56049+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
56050+ error = -EPERM;
56051+ goto out;
56052+ }
56053+
56054+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
56055+ error = -EACCES;
56056+ goto out;
56057+ }
56058+
56059 error = security_path_mknod(&path, dentry, mode, dev);
56060 if (error)
56061 goto out;
56062@@ -3193,6 +3321,8 @@ retry:
56063 break;
56064 }
56065 out:
56066+ if (!error)
56067+ gr_handle_create(dentry, path.mnt);
56068 done_path_create(&path, dentry);
56069 if (retry_estale(error, lookup_flags)) {
56070 lookup_flags |= LOOKUP_REVAL;
56071@@ -3245,9 +3375,16 @@ retry:
56072
56073 if (!IS_POSIXACL(path.dentry->d_inode))
56074 mode &= ~current_umask();
56075+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
56076+ error = -EACCES;
56077+ goto out;
56078+ }
56079 error = security_path_mkdir(&path, dentry, mode);
56080 if (!error)
56081 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
56082+ if (!error)
56083+ gr_handle_create(dentry, path.mnt);
56084+out:
56085 done_path_create(&path, dentry);
56086 if (retry_estale(error, lookup_flags)) {
56087 lookup_flags |= LOOKUP_REVAL;
56088@@ -3328,6 +3465,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
56089 struct filename *name;
56090 struct dentry *dentry;
56091 struct nameidata nd;
56092+ ino_t saved_ino = 0;
56093+ dev_t saved_dev = 0;
56094 unsigned int lookup_flags = 0;
56095 retry:
56096 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
56097@@ -3360,10 +3499,21 @@ retry:
56098 error = -ENOENT;
56099 goto exit3;
56100 }
56101+
56102+ saved_ino = dentry->d_inode->i_ino;
56103+ saved_dev = gr_get_dev_from_dentry(dentry);
56104+
56105+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
56106+ error = -EACCES;
56107+ goto exit3;
56108+ }
56109+
56110 error = security_path_rmdir(&nd.path, dentry);
56111 if (error)
56112 goto exit3;
56113 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
56114+ if (!error && (saved_dev || saved_ino))
56115+ gr_handle_delete(saved_ino, saved_dev);
56116 exit3:
56117 dput(dentry);
56118 exit2:
56119@@ -3429,6 +3579,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
56120 struct dentry *dentry;
56121 struct nameidata nd;
56122 struct inode *inode = NULL;
56123+ ino_t saved_ino = 0;
56124+ dev_t saved_dev = 0;
56125 unsigned int lookup_flags = 0;
56126 retry:
56127 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
56128@@ -3455,10 +3607,22 @@ retry:
56129 if (!inode)
56130 goto slashes;
56131 ihold(inode);
56132+
56133+ if (inode->i_nlink <= 1) {
56134+ saved_ino = inode->i_ino;
56135+ saved_dev = gr_get_dev_from_dentry(dentry);
56136+ }
56137+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
56138+ error = -EACCES;
56139+ goto exit2;
56140+ }
56141+
56142 error = security_path_unlink(&nd.path, dentry);
56143 if (error)
56144 goto exit2;
56145 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
56146+ if (!error && (saved_ino || saved_dev))
56147+ gr_handle_delete(saved_ino, saved_dev);
56148 exit2:
56149 dput(dentry);
56150 }
56151@@ -3536,9 +3700,17 @@ retry:
56152 if (IS_ERR(dentry))
56153 goto out_putname;
56154
56155+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
56156+ error = -EACCES;
56157+ goto out;
56158+ }
56159+
56160 error = security_path_symlink(&path, dentry, from->name);
56161 if (!error)
56162 error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
56163+ if (!error)
56164+ gr_handle_create(dentry, path.mnt);
56165+out:
56166 done_path_create(&path, dentry);
56167 if (retry_estale(error, lookup_flags)) {
56168 lookup_flags |= LOOKUP_REVAL;
56169@@ -3612,6 +3784,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
56170 {
56171 struct dentry *new_dentry;
56172 struct path old_path, new_path;
56173+ struct filename *to = NULL;
56174 int how = 0;
56175 int error;
56176
56177@@ -3635,7 +3808,7 @@ retry:
56178 if (error)
56179 return error;
56180
56181- new_dentry = user_path_create(newdfd, newname, &new_path,
56182+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to,
56183 (how & LOOKUP_REVAL));
56184 error = PTR_ERR(new_dentry);
56185 if (IS_ERR(new_dentry))
56186@@ -3647,11 +3820,28 @@ retry:
56187 error = may_linkat(&old_path);
56188 if (unlikely(error))
56189 goto out_dput;
56190+
56191+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
56192+ old_path.dentry->d_inode,
56193+ old_path.dentry->d_inode->i_mode, to)) {
56194+ error = -EACCES;
56195+ goto out_dput;
56196+ }
56197+
56198+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
56199+ old_path.dentry, old_path.mnt, to)) {
56200+ error = -EACCES;
56201+ goto out_dput;
56202+ }
56203+
56204 error = security_path_link(old_path.dentry, &new_path, new_dentry);
56205 if (error)
56206 goto out_dput;
56207 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
56208+ if (!error)
56209+ gr_handle_create(new_dentry, new_path.mnt);
56210 out_dput:
56211+ putname(to);
56212 done_path_create(&new_path, new_dentry);
56213 if (retry_estale(error, how)) {
56214 how |= LOOKUP_REVAL;
56215@@ -3897,12 +4087,21 @@ retry:
56216 if (new_dentry == trap)
56217 goto exit5;
56218
56219+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
56220+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
56221+ to);
56222+ if (error)
56223+ goto exit5;
56224+
56225 error = security_path_rename(&oldnd.path, old_dentry,
56226 &newnd.path, new_dentry);
56227 if (error)
56228 goto exit5;
56229 error = vfs_rename(old_dir->d_inode, old_dentry,
56230 new_dir->d_inode, new_dentry);
56231+ if (!error)
56232+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
56233+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
56234 exit5:
56235 dput(new_dentry);
56236 exit4:
56237@@ -3934,6 +4133,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
56238
56239 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
56240 {
56241+ char tmpbuf[64];
56242+ const char *newlink;
56243 int len;
56244
56245 len = PTR_ERR(link);
56246@@ -3943,7 +4144,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
56247 len = strlen(link);
56248 if (len > (unsigned) buflen)
56249 len = buflen;
56250- if (copy_to_user(buffer, link, len))
56251+
56252+ if (len < sizeof(tmpbuf)) {
56253+ memcpy(tmpbuf, link, len);
56254+ newlink = tmpbuf;
56255+ } else
56256+ newlink = link;
56257+
56258+ if (copy_to_user(buffer, newlink, len))
56259 len = -EFAULT;
56260 out:
56261 return len;
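
vfs_readlink() now bounces link bodies shorter than 64 bytes through a stack buffer before copy_to_user(). My reading, offered as an assumption, is that this keeps PAX_USERCOPY satisfied when the string lives inside a kernel object that is not whitelisted for user copies; staging it on the stack makes the copy source unambiguous. A userspace stand-in for the bounce:

#include <stdio.h>
#include <string.h>

static long copy_to_user(char *dst, const char *src, size_t n)
{
	memcpy(dst, src, n);  /* userspace stand-in */
	return 0;
}

static long do_readlink(char *ubuf, const char *link, size_t len)
{
	char tmpbuf[64];
	const char *newlink = link;

	if (len < sizeof(tmpbuf)) {
		memcpy(tmpbuf, link, len);
		newlink = tmpbuf;  /* stack source: always an accepted copy origin */
	}
	return copy_to_user(ubuf, newlink, len);
}

int main(void)
{
	char out[64];

	do_readlink(out, "target", 6);
	out[6] = '\0';
	printf("%s\n", out);
	return 0;
}
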
56262diff --git a/fs/namespace.c b/fs/namespace.c
56263index 7b1ca9b..6faeccf 100644
56264--- a/fs/namespace.c
56265+++ b/fs/namespace.c
56266@@ -1265,6 +1265,9 @@ static int do_umount(struct mount *mnt, int flags)
56267 if (!(sb->s_flags & MS_RDONLY))
56268 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
56269 up_write(&sb->s_umount);
56270+
56271+ gr_log_remount(mnt->mnt_devname, retval);
56272+
56273 return retval;
56274 }
56275
56276@@ -1283,6 +1286,9 @@ static int do_umount(struct mount *mnt, int flags)
56277 }
56278 br_write_unlock(&vfsmount_lock);
56279 namespace_unlock();
56280+
56281+ gr_log_unmount(mnt->mnt_devname, retval);
56282+
56283 return retval;
56284 }
56285
56286@@ -1302,7 +1308,7 @@ static inline bool may_mount(void)
56287 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
56288 */
56289
56290-SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
56291+SYSCALL_DEFINE2(umount, const char __user *, name, int, flags)
56292 {
56293 struct path path;
56294 struct mount *mnt;
56295@@ -1342,7 +1348,7 @@ out:
56296 /*
56297 * The 2.0 compatible umount. No flags.
56298 */
56299-SYSCALL_DEFINE1(oldumount, char __user *, name)
56300+SYSCALL_DEFINE1(oldumount, const char __user *, name)
56301 {
56302 return sys_umount(name, 0);
56303 }
56304@@ -2313,6 +2319,16 @@ long do_mount(const char *dev_name, const char *dir_name,
56305 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
56306 MS_STRICTATIME);
56307
56308+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
56309+ retval = -EPERM;
56310+ goto dput_out;
56311+ }
56312+
56313+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
56314+ retval = -EPERM;
56315+ goto dput_out;
56316+ }
56317+
56318 if (flags & MS_REMOUNT)
56319 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
56320 data_page);
56321@@ -2327,6 +2343,9 @@ long do_mount(const char *dev_name, const char *dir_name,
56322 dev_name, data_page);
56323 dput_out:
56324 path_put(&path);
56325+
56326+ gr_log_mount(dev_name, dir_name, retval);
56327+
56328 return retval;
56329 }
56330
56331@@ -2500,8 +2519,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
56332 }
56333 EXPORT_SYMBOL(mount_subtree);
56334
56335-SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
56336- char __user *, type, unsigned long, flags, void __user *, data)
56337+SYSCALL_DEFINE5(mount, const char __user *, dev_name, const char __user *, dir_name,
56338+ const char __user *, type, unsigned long, flags, void __user *, data)
56339 {
56340 int ret;
56341 char *kernel_type;
56342@@ -2614,6 +2633,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
56343 if (error)
56344 goto out2;
56345
56346+ if (gr_handle_chroot_pivot()) {
56347+ error = -EPERM;
56348+ goto out2;
56349+ }
56350+
56351 get_fs_root(current->fs, &root);
56352 old_mp = lock_mount(&old);
56353 error = PTR_ERR(old_mp);
56354@@ -2864,7 +2888,7 @@ static int mntns_install(struct nsproxy *nsproxy, void *ns)
56355 !nsown_capable(CAP_SYS_ADMIN))
56356 return -EPERM;
56357
56358- if (fs->users != 1)
56359+ if (atomic_read(&fs->users) != 1)
56360 return -EINVAL;
56361
56362 get_mnt_ns(mnt_ns);
56363diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
56364index cff089a..4c3d57a 100644
56365--- a/fs/nfs/callback.c
56366+++ b/fs/nfs/callback.c
56367@@ -211,7 +211,6 @@ static int nfs_callback_start_svc(int minorversion, struct rpc_xprt *xprt,
56368 struct svc_rqst *rqstp;
56369 int (*callback_svc)(void *vrqstp);
56370 struct nfs_callback_data *cb_info = &nfs_callback_info[minorversion];
56371- char svc_name[12];
56372 int ret;
56373
56374 nfs_callback_bc_serv(minorversion, xprt, serv);
56375@@ -235,10 +234,9 @@ static int nfs_callback_start_svc(int minorversion, struct rpc_xprt *xprt,
56376
56377 svc_sock_update_bufs(serv);
56378
56379- sprintf(svc_name, "nfsv4.%u-svc", minorversion);
56380 cb_info->serv = serv;
56381 cb_info->rqst = rqstp;
56382- cb_info->task = kthread_run(callback_svc, cb_info->rqst, svc_name);
56383+ cb_info->task = kthread_run(callback_svc, cb_info->rqst, "nfsv4.%u-svc", minorversion);
56384 if (IS_ERR(cb_info->task)) {
56385 ret = PTR_ERR(cb_info->task);
56386 svc_exit_thread(cb_info->rqst);
56387diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
56388index a35582c..ebbdcd5 100644
56389--- a/fs/nfs/callback_xdr.c
56390+++ b/fs/nfs/callback_xdr.c
56391@@ -51,7 +51,7 @@ struct callback_op {
56392 callback_decode_arg_t decode_args;
56393 callback_encode_res_t encode_res;
56394 long res_maxsize;
56395-};
56396+} __do_const;
56397
56398 static struct callback_op callback_ops[];
56399
56400diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
56401index c1c7a9d..7afa0b8 100644
56402--- a/fs/nfs/inode.c
56403+++ b/fs/nfs/inode.c
56404@@ -1043,16 +1043,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
56405 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
56406 }
56407
56408-static atomic_long_t nfs_attr_generation_counter;
56409+static atomic_long_unchecked_t nfs_attr_generation_counter;
56410
56411 static unsigned long nfs_read_attr_generation_counter(void)
56412 {
56413- return atomic_long_read(&nfs_attr_generation_counter);
56414+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
56415 }
56416
56417 unsigned long nfs_inc_attr_generation_counter(void)
56418 {
56419- return atomic_long_inc_return(&nfs_attr_generation_counter);
56420+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
56421 }
56422
56423 void nfs_fattr_init(struct nfs_fattr *fattr)
56424diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
56425index 2c37442..9b9538b 100644
56426--- a/fs/nfs/nfs4state.c
56427+++ b/fs/nfs/nfs4state.c
56428@@ -1193,7 +1193,7 @@ void nfs4_schedule_state_manager(struct nfs_client *clp)
56429 snprintf(buf, sizeof(buf), "%s-manager",
56430 rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR));
56431 rcu_read_unlock();
56432- task = kthread_run(nfs4_run_state_manager, clp, buf);
56433+ task = kthread_run(nfs4_run_state_manager, clp, "%s", buf);
56434 if (IS_ERR(task)) {
56435 printk(KERN_ERR "%s: kthread_run: %ld\n",
56436 __func__, PTR_ERR(task));
56437diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
56438index 27d74a2..c4c2a73 100644
56439--- a/fs/nfsd/nfs4proc.c
56440+++ b/fs/nfsd/nfs4proc.c
56441@@ -1126,7 +1126,7 @@ struct nfsd4_operation {
56442 nfsd4op_rsize op_rsize_bop;
56443 stateid_getter op_get_currentstateid;
56444 stateid_setter op_set_currentstateid;
56445-};
56446+} __do_const;
56447
56448 static struct nfsd4_operation nfsd4_ops[];
56449
56450diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
56451index 582321a..0224663 100644
56452--- a/fs/nfsd/nfs4xdr.c
56453+++ b/fs/nfsd/nfs4xdr.c
56454@@ -1458,7 +1458,7 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
56455
56456 typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);
56457
56458-static nfsd4_dec nfsd4_dec_ops[] = {
56459+static const nfsd4_dec nfsd4_dec_ops[] = {
56460 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
56461 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
56462 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
56463@@ -1498,7 +1498,7 @@ static nfsd4_dec nfsd4_dec_ops[] = {
56464 [OP_RELEASE_LOCKOWNER] = (nfsd4_dec)nfsd4_decode_release_lockowner,
56465 };
56466
56467-static nfsd4_dec nfsd41_dec_ops[] = {
56468+static const nfsd4_dec nfsd41_dec_ops[] = {
56469 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
56470 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
56471 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
56472@@ -1560,7 +1560,7 @@ static nfsd4_dec nfsd41_dec_ops[] = {
56473 };
56474
56475 struct nfsd4_minorversion_ops {
56476- nfsd4_dec *decoders;
56477+ const nfsd4_dec *decoders;
56478 int nops;
56479 };
56480
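
Making nfsd4_dec_ops and nfsd41_dec_ops const (and tagging struct callback_op and struct nfsd4_operation __do_const above) is the constification pattern again: tables of function pointers move into read-only data so an attacker with a kernel write primitive cannot retarget them. The same effect in plain C:

#include <stdio.h>

typedef int (*dec_fn)(const char *);

static int dec_access(const char *p) { (void)p; return 0; }
static int dec_close(const char *p)  { (void)p; return 1; }

static const dec_fn dec_ops[] = {  /* placed in .rodata */
	dec_access,
	dec_close,
};

int main(void)
{
	/* dec_ops[0] = dec_close;   would not compile: read-only location */
	printf("%d\n", dec_ops[1]("x"));
	return 0;
}
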
56481diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
56482index e76244e..9fe8f2f1 100644
56483--- a/fs/nfsd/nfscache.c
56484+++ b/fs/nfsd/nfscache.c
56485@@ -526,14 +526,17 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
56486 {
56487 struct svc_cacherep *rp = rqstp->rq_cacherep;
56488 struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
56489- int len;
56490+ long len;
56491 size_t bufsize = 0;
56492
56493 if (!rp)
56494 return;
56495
56496- len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
56497- len >>= 2;
56498+ if (statp) {
56499+ len = (char*)statp - (char*)resv->iov_base;
56500+ len = resv->iov_len - len;
56501+ len >>= 2;
56502+ }
56503
56504 /* Don't cache excessive amounts of data and XDR failures */
56505 if (!statp || len > (256 >> 2)) {
56506diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
56507index baf149a..76b86ad 100644
56508--- a/fs/nfsd/vfs.c
56509+++ b/fs/nfsd/vfs.c
56510@@ -940,7 +940,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
56511 } else {
56512 oldfs = get_fs();
56513 set_fs(KERNEL_DS);
56514- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
56515+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
56516 set_fs(oldfs);
56517 }
56518
56519@@ -1027,7 +1027,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
56520
56521 /* Write the data. */
56522 oldfs = get_fs(); set_fs(KERNEL_DS);
56523- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos);
56524+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &pos);
56525 set_fs(oldfs);
56526 if (host_err < 0)
56527 goto out_nfserr;
56528@@ -1573,7 +1573,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
56529 */
56530
56531 oldfs = get_fs(); set_fs(KERNEL_DS);
56532- host_err = inode->i_op->readlink(path.dentry, (char __user *)buf, *lenp);
56533+ host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
56534 set_fs(oldfs);
56535
56536 if (host_err < 0)
56537diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
56538index fea6bd5..8ee9d81 100644
56539--- a/fs/nls/nls_base.c
56540+++ b/fs/nls/nls_base.c
56541@@ -234,20 +234,22 @@ EXPORT_SYMBOL(utf16s_to_utf8s);
56542
56543 int register_nls(struct nls_table * nls)
56544 {
56545- struct nls_table ** tmp = &tables;
56546+ struct nls_table *tmp = tables;
56547
56548 if (nls->next)
56549 return -EBUSY;
56550
56551 spin_lock(&nls_lock);
56552- while (*tmp) {
56553- if (nls == *tmp) {
56554+ while (tmp) {
56555+ if (nls == tmp) {
56556 spin_unlock(&nls_lock);
56557 return -EBUSY;
56558 }
56559- tmp = &(*tmp)->next;
56560+ tmp = tmp->next;
56561 }
56562- nls->next = tables;
56563+ pax_open_kernel();
56564+ *(struct nls_table **)&nls->next = tables;
56565+ pax_close_kernel();
56566 tables = nls;
56567 spin_unlock(&nls_lock);
56568 return 0;
56569@@ -255,12 +257,14 @@ int register_nls(struct nls_table * nls)
56570
56571 int unregister_nls(struct nls_table * nls)
56572 {
56573- struct nls_table ** tmp = &tables;
56574+ struct nls_table * const * tmp = &tables;
56575
56576 spin_lock(&nls_lock);
56577 while (*tmp) {
56578 if (nls == *tmp) {
56579- *tmp = nls->next;
56580+ pax_open_kernel();
56581+ *(struct nls_table **)tmp = nls->next;
56582+ pax_close_kernel();
56583 spin_unlock(&nls_lock);
56584 return 0;
56585 }
56586diff --git a/fs/nls/nls_euc-jp.c b/fs/nls/nls_euc-jp.c
56587index 7424929..35f6be5 100644
56588--- a/fs/nls/nls_euc-jp.c
56589+++ b/fs/nls/nls_euc-jp.c
56590@@ -561,8 +561,10 @@ static int __init init_nls_euc_jp(void)
56591 p_nls = load_nls("cp932");
56592
56593 if (p_nls) {
56594- table.charset2upper = p_nls->charset2upper;
56595- table.charset2lower = p_nls->charset2lower;
56596+ pax_open_kernel();
56597+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
56598+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
56599+ pax_close_kernel();
56600 return register_nls(&table);
56601 }
56602
56603diff --git a/fs/nls/nls_koi8-ru.c b/fs/nls/nls_koi8-ru.c
56604index e7bc1d7..06bd4bb 100644
56605--- a/fs/nls/nls_koi8-ru.c
56606+++ b/fs/nls/nls_koi8-ru.c
56607@@ -63,8 +63,10 @@ static int __init init_nls_koi8_ru(void)
56608 p_nls = load_nls("koi8-u");
56609
56610 if (p_nls) {
56611- table.charset2upper = p_nls->charset2upper;
56612- table.charset2lower = p_nls->charset2lower;
56613+ pax_open_kernel();
56614+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
56615+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
56616+ pax_close_kernel();
56617 return register_nls(&table);
56618 }
56619
56620diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
56621index 77cc85d..a1e6299 100644
56622--- a/fs/notify/fanotify/fanotify_user.c
56623+++ b/fs/notify/fanotify/fanotify_user.c
56624@@ -253,8 +253,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
56625
56626 fd = fanotify_event_metadata.fd;
56627 ret = -EFAULT;
56628- if (copy_to_user(buf, &fanotify_event_metadata,
56629- fanotify_event_metadata.event_len))
56630+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
56631+ copy_to_user(buf, &fanotify_event_metadata, fanotify_event_metadata.event_len))
56632 goto out_close_fd;
56633
56634 ret = prepare_for_access_response(group, event, fd);
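
copy_event_to_user() now refuses any event_len larger than the on-stack metadata structure before copying, so a wrong or future-extended length field can never leak adjacent kernel stack to userspace. A userspace stand-in for the clamp, with memcpy modelling copy_to_user():

#include <stdio.h>
#include <string.h>

struct metadata { unsigned event_len; int fd; char payload[16]; };

static long copy_event(char *ubuf, const struct metadata *m)
{
	if (m->event_len > sizeof(*m))
		return -14;             /* -EFAULT: refuse the over-read */
	memcpy(ubuf, m, m->event_len);  /* stand-in for copy_to_user()   */
	return m->event_len;
}

int main(void)
{
	struct metadata m = { sizeof(m), 3, "ok" };
	char buf[sizeof(m)];

	printf("%ld\n", copy_event(buf, &m));
	return 0;
}
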
56635diff --git a/fs/notify/notification.c b/fs/notify/notification.c
56636index 7b51b05..5ea5ef6 100644
56637--- a/fs/notify/notification.c
56638+++ b/fs/notify/notification.c
56639@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
56640 * get set to 0 so it will never get 'freed'
56641 */
56642 static struct fsnotify_event *q_overflow_event;
56643-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
56644+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
56645
56646 /**
56647 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
56648@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
56649 */
56650 u32 fsnotify_get_cookie(void)
56651 {
56652- return atomic_inc_return(&fsnotify_sync_cookie);
56653+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
56654 }
56655 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
56656
56657diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
56658index aa411c3..c260a84 100644
56659--- a/fs/ntfs/dir.c
56660+++ b/fs/ntfs/dir.c
56661@@ -1329,7 +1329,7 @@ find_next_index_buffer:
56662 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
56663 ~(s64)(ndir->itype.index.block_size - 1)));
56664 /* Bounds checks. */
56665- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
56666+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
56667 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
56668 "inode 0x%lx or driver bug.", vdir->i_ino);
56669 goto err_out;
56670diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
56671index c5670b8..01a3656 100644
56672--- a/fs/ntfs/file.c
56673+++ b/fs/ntfs/file.c
56674@@ -2241,6 +2241,6 @@ const struct inode_operations ntfs_file_inode_ops = {
56675 #endif /* NTFS_RW */
56676 };
56677
56678-const struct file_operations ntfs_empty_file_ops = {};
56679+const struct file_operations ntfs_empty_file_ops __read_only;
56680
56681-const struct inode_operations ntfs_empty_inode_ops = {};
56682+const struct inode_operations ntfs_empty_inode_ops __read_only;
56683diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
56684index 20dfec7..e238cb7 100644
56685--- a/fs/ocfs2/aops.c
56686+++ b/fs/ocfs2/aops.c
56687@@ -1756,7 +1756,7 @@ try_again:
56688 goto out;
56689 } else if (ret == 1) {
56690 clusters_need = wc->w_clen;
56691- ret = ocfs2_refcount_cow(inode, filp, di_bh,
56692+ ret = ocfs2_refcount_cow(inode, di_bh,
56693 wc->w_cpos, wc->w_clen, UINT_MAX);
56694 if (ret) {
56695 mlog_errno(ret);
56696diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
56697index ff54014..ff125fd 100644
56698--- a/fs/ocfs2/file.c
56699+++ b/fs/ocfs2/file.c
56700@@ -370,7 +370,7 @@ static int ocfs2_cow_file_pos(struct inode *inode,
56701 if (!(ext_flags & OCFS2_EXT_REFCOUNTED))
56702 goto out;
56703
56704- return ocfs2_refcount_cow(inode, NULL, fe_bh, cpos, 1, cpos+1);
56705+ return ocfs2_refcount_cow(inode, fe_bh, cpos, 1, cpos+1);
56706
56707 out:
56708 return status;
56709@@ -899,7 +899,7 @@ static int ocfs2_zero_extend_get_range(struct inode *inode,
56710 zero_clusters = last_cpos - zero_cpos;
56711
56712 if (needs_cow) {
56713- rc = ocfs2_refcount_cow(inode, NULL, di_bh, zero_cpos,
56714+ rc = ocfs2_refcount_cow(inode, di_bh, zero_cpos,
56715 zero_clusters, UINT_MAX);
56716 if (rc) {
56717 mlog_errno(rc);
56718@@ -2078,7 +2078,7 @@ static int ocfs2_prepare_inode_for_refcount(struct inode *inode,
56719
56720 *meta_level = 1;
56721
56722- ret = ocfs2_refcount_cow(inode, file, di_bh, cpos, clusters, UINT_MAX);
56723+ ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX);
56724 if (ret)
56725 mlog_errno(ret);
56726 out:
56727diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
56728index aebeacd..0dcdd26 100644
56729--- a/fs/ocfs2/localalloc.c
56730+++ b/fs/ocfs2/localalloc.c
56731@@ -1278,7 +1278,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
56732 goto bail;
56733 }
56734
56735- atomic_inc(&osb->alloc_stats.moves);
56736+ atomic_inc_unchecked(&osb->alloc_stats.moves);
56737
56738 bail:
56739 if (handle)
56740diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c
56741index f1fc172..452068b 100644
56742--- a/fs/ocfs2/move_extents.c
56743+++ b/fs/ocfs2/move_extents.c
56744@@ -69,7 +69,7 @@ static int __ocfs2_move_extent(handle_t *handle,
56745 u64 ino = ocfs2_metadata_cache_owner(context->et.et_ci);
56746 u64 old_blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cpos);
56747
56748- ret = ocfs2_duplicate_clusters_by_page(handle, context->file, cpos,
56749+ ret = ocfs2_duplicate_clusters_by_page(handle, inode, cpos,
56750 p_cpos, new_p_cpos, len);
56751 if (ret) {
56752 mlog_errno(ret);
56753diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
56754index d355e6e..578d905 100644
56755--- a/fs/ocfs2/ocfs2.h
56756+++ b/fs/ocfs2/ocfs2.h
56757@@ -235,11 +235,11 @@ enum ocfs2_vol_state
56758
56759 struct ocfs2_alloc_stats
56760 {
56761- atomic_t moves;
56762- atomic_t local_data;
56763- atomic_t bitmap_data;
56764- atomic_t bg_allocs;
56765- atomic_t bg_extends;
56766+ atomic_unchecked_t moves;
56767+ atomic_unchecked_t local_data;
56768+ atomic_unchecked_t bitmap_data;
56769+ atomic_unchecked_t bg_allocs;
56770+ atomic_unchecked_t bg_extends;
56771 };
56772
56773 enum ocfs2_local_alloc_state
56774diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
56775index 998b17e..aefe414 100644
56776--- a/fs/ocfs2/refcounttree.c
56777+++ b/fs/ocfs2/refcounttree.c
56778@@ -49,7 +49,6 @@
56779
56780 struct ocfs2_cow_context {
56781 struct inode *inode;
56782- struct file *file;
56783 u32 cow_start;
56784 u32 cow_len;
56785 struct ocfs2_extent_tree data_et;
56786@@ -66,7 +65,7 @@ struct ocfs2_cow_context {
56787 u32 *num_clusters,
56788 unsigned int *extent_flags);
56789 int (*cow_duplicate_clusters)(handle_t *handle,
56790- struct file *file,
56791+ struct inode *inode,
56792 u32 cpos, u32 old_cluster,
56793 u32 new_cluster, u32 new_len);
56794 };
56795@@ -2922,14 +2921,12 @@ static int ocfs2_clear_cow_buffer(handle_t *handle, struct buffer_head *bh)
56796 }
56797
56798 int ocfs2_duplicate_clusters_by_page(handle_t *handle,
56799- struct file *file,
56800+ struct inode *inode,
56801 u32 cpos, u32 old_cluster,
56802 u32 new_cluster, u32 new_len)
56803 {
56804 int ret = 0, partial;
56805- struct inode *inode = file_inode(file);
56806- struct ocfs2_caching_info *ci = INODE_CACHE(inode);
56807- struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
56808+ struct super_block *sb = inode->i_sb;
56809 u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster);
56810 struct page *page;
56811 pgoff_t page_index;
56812@@ -2973,13 +2970,6 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
56813 if (PAGE_CACHE_SIZE <= OCFS2_SB(sb)->s_clustersize)
56814 BUG_ON(PageDirty(page));
56815
56816- if (PageReadahead(page)) {
56817- page_cache_async_readahead(mapping,
56818- &file->f_ra, file,
56819- page, page_index,
56820- readahead_pages);
56821- }
56822-
56823 if (!PageUptodate(page)) {
56824 ret = block_read_full_page(page, ocfs2_get_block);
56825 if (ret) {
56826@@ -2999,7 +2989,8 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
56827 }
56828 }
56829
56830- ocfs2_map_and_dirty_page(inode, handle, from, to,
56831+ ocfs2_map_and_dirty_page(inode,
56832+ handle, from, to,
56833 page, 0, &new_block);
56834 mark_page_accessed(page);
56835 unlock:
56836@@ -3015,12 +3006,11 @@ unlock:
56837 }
56838
56839 int ocfs2_duplicate_clusters_by_jbd(handle_t *handle,
56840- struct file *file,
56841+ struct inode *inode,
56842 u32 cpos, u32 old_cluster,
56843 u32 new_cluster, u32 new_len)
56844 {
56845 int ret = 0;
56846- struct inode *inode = file_inode(file);
56847 struct super_block *sb = inode->i_sb;
56848 struct ocfs2_caching_info *ci = INODE_CACHE(inode);
56849 int i, blocks = ocfs2_clusters_to_blocks(sb, new_len);
56850@@ -3145,7 +3135,7 @@ static int ocfs2_replace_clusters(handle_t *handle,
56851
56852 /*If the old clusters is unwritten, no need to duplicate. */
56853 if (!(ext_flags & OCFS2_EXT_UNWRITTEN)) {
56854- ret = context->cow_duplicate_clusters(handle, context->file,
56855+ ret = context->cow_duplicate_clusters(handle, context->inode,
56856 cpos, old, new, len);
56857 if (ret) {
56858 mlog_errno(ret);
56859@@ -3423,35 +3413,12 @@ static int ocfs2_replace_cow(struct ocfs2_cow_context *context)
56860 return ret;
56861 }
56862
56863-static void ocfs2_readahead_for_cow(struct inode *inode,
56864- struct file *file,
56865- u32 start, u32 len)
56866-{
56867- struct address_space *mapping;
56868- pgoff_t index;
56869- unsigned long num_pages;
56870- int cs_bits = OCFS2_SB(inode->i_sb)->s_clustersize_bits;
56871-
56872- if (!file)
56873- return;
56874-
56875- mapping = file->f_mapping;
56876- num_pages = (len << cs_bits) >> PAGE_CACHE_SHIFT;
56877- if (!num_pages)
56878- num_pages = 1;
56879-
56880- index = ((loff_t)start << cs_bits) >> PAGE_CACHE_SHIFT;
56881- page_cache_sync_readahead(mapping, &file->f_ra, file,
56882- index, num_pages);
56883-}
56884-
56885 /*
56886 * Starting at cpos, try to CoW write_len clusters. Don't CoW
56887 * past max_cpos. This will stop when it runs into a hole or an
56888 * unrefcounted extent.
56889 */
56890 static int ocfs2_refcount_cow_hunk(struct inode *inode,
56891- struct file *file,
56892 struct buffer_head *di_bh,
56893 u32 cpos, u32 write_len, u32 max_cpos)
56894 {
56895@@ -3480,8 +3447,6 @@ static int ocfs2_refcount_cow_hunk(struct inode *inode,
56896
56897 BUG_ON(cow_len == 0);
56898
56899- ocfs2_readahead_for_cow(inode, file, cow_start, cow_len);
56900-
56901 context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS);
56902 if (!context) {
56903 ret = -ENOMEM;
56904@@ -3503,7 +3468,6 @@ static int ocfs2_refcount_cow_hunk(struct inode *inode,
56905 context->ref_root_bh = ref_root_bh;
56906 context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_page;
56907 context->get_clusters = ocfs2_di_get_clusters;
56908- context->file = file;
56909
56910 ocfs2_init_dinode_extent_tree(&context->data_et,
56911 INODE_CACHE(inode), di_bh);
56912@@ -3532,7 +3496,6 @@ out:
56913 * clusters between cpos and cpos+write_len are safe to modify.
56914 */
56915 int ocfs2_refcount_cow(struct inode *inode,
56916- struct file *file,
56917 struct buffer_head *di_bh,
56918 u32 cpos, u32 write_len, u32 max_cpos)
56919 {
56920@@ -3552,7 +3515,7 @@ int ocfs2_refcount_cow(struct inode *inode,
56921 num_clusters = write_len;
56922
56923 if (ext_flags & OCFS2_EXT_REFCOUNTED) {
56924- ret = ocfs2_refcount_cow_hunk(inode, file, di_bh, cpos,
56925+ ret = ocfs2_refcount_cow_hunk(inode, di_bh, cpos,
56926 num_clusters, max_cpos);
56927 if (ret) {
56928 mlog_errno(ret);
56929diff --git a/fs/ocfs2/refcounttree.h b/fs/ocfs2/refcounttree.h
56930index 7754608..6422bbcdb 100644
56931--- a/fs/ocfs2/refcounttree.h
56932+++ b/fs/ocfs2/refcounttree.h
56933@@ -53,7 +53,7 @@ int ocfs2_prepare_refcount_change_for_del(struct inode *inode,
56934 int *credits,
56935 int *ref_blocks);
56936 int ocfs2_refcount_cow(struct inode *inode,
56937- struct file *filep, struct buffer_head *di_bh,
56938+ struct buffer_head *di_bh,
56939 u32 cpos, u32 write_len, u32 max_cpos);
56940
56941 typedef int (ocfs2_post_refcount_func)(struct inode *inode,
56942@@ -85,11 +85,11 @@ int ocfs2_refcount_cow_xattr(struct inode *inode,
56943 u32 cpos, u32 write_len,
56944 struct ocfs2_post_refcount *post);
56945 int ocfs2_duplicate_clusters_by_page(handle_t *handle,
56946- struct file *file,
56947+ struct inode *inode,
56948 u32 cpos, u32 old_cluster,
56949 u32 new_cluster, u32 new_len);
56950 int ocfs2_duplicate_clusters_by_jbd(handle_t *handle,
56951- struct file *file,
56952+ struct inode *inode,
56953 u32 cpos, u32 old_cluster,
56954 u32 new_cluster, u32 new_len);
56955 int ocfs2_cow_sync_writeback(struct super_block *sb,
56956diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
56957index b7e74b5..19c6536 100644
56958--- a/fs/ocfs2/suballoc.c
56959+++ b/fs/ocfs2/suballoc.c
56960@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
56961 mlog_errno(status);
56962 goto bail;
56963 }
56964- atomic_inc(&osb->alloc_stats.bg_extends);
56965+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
56966
56967 /* You should never ask for this much metadata */
56968 BUG_ON(bits_wanted >
56969@@ -2007,7 +2007,7 @@ int ocfs2_claim_metadata(handle_t *handle,
56970 mlog_errno(status);
56971 goto bail;
56972 }
56973- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
56974+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
56975
56976 *suballoc_loc = res.sr_bg_blkno;
56977 *suballoc_bit_start = res.sr_bit_offset;
56978@@ -2171,7 +2171,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
56979 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
56980 res->sr_bits);
56981
56982- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
56983+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
56984
56985 BUG_ON(res->sr_bits != 1);
56986
56987@@ -2213,7 +2213,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
56988 mlog_errno(status);
56989 goto bail;
56990 }
56991- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
56992+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
56993
56994 BUG_ON(res.sr_bits != 1);
56995
56996@@ -2317,7 +2317,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
56997 cluster_start,
56998 num_clusters);
56999 if (!status)
57000- atomic_inc(&osb->alloc_stats.local_data);
57001+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
57002 } else {
57003 if (min_clusters > (osb->bitmap_cpg - 1)) {
57004 /* The only paths asking for contiguousness
57005@@ -2343,7 +2343,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
57006 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
57007 res.sr_bg_blkno,
57008 res.sr_bit_offset);
57009- atomic_inc(&osb->alloc_stats.bitmap_data);
57010+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
57011 *num_clusters = res.sr_bits;
57012 }
57013 }
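
The alloc_stats fields touched in suballoc.c are pure statistics, so the patch moves them from atomic_t to the PaX atomic_unchecked_t flavor: under the REFCOUNT hardening, plain atomic_t arithmetic is instrumented to trap on overflow (treated as a potential reference-count exploit), and the *_unchecked variants opt benign counters out of that check. A simplified sketch of the x86 helpers, not the verbatim definitions from the rest of this patch:

	typedef struct { int counter; } atomic_unchecked_t;

	static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
	{
		return (*(volatile const int *)&v->counter);
	}

	static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
	{
		/* plain "incl", without the REFCOUNT overflow-trap sequence */
		asm volatile(LOCK_PREFIX "incl %0" : "+m" (v->counter));
	}

	static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
	{
		v->counter = i;
	}
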
57014diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
57015index 01b8516..579c4df 100644
57016--- a/fs/ocfs2/super.c
57017+++ b/fs/ocfs2/super.c
57018@@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
57019 "%10s => GlobalAllocs: %d LocalAllocs: %d "
57020 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
57021 "Stats",
57022- atomic_read(&osb->alloc_stats.bitmap_data),
57023- atomic_read(&osb->alloc_stats.local_data),
57024- atomic_read(&osb->alloc_stats.bg_allocs),
57025- atomic_read(&osb->alloc_stats.moves),
57026- atomic_read(&osb->alloc_stats.bg_extends));
57027+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
57028+ atomic_read_unchecked(&osb->alloc_stats.local_data),
57029+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
57030+ atomic_read_unchecked(&osb->alloc_stats.moves),
57031+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
57032
57033 out += snprintf(buf + out, len - out,
57034 "%10s => State: %u Descriptor: %llu Size: %u bits "
57035@@ -2122,11 +2122,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
57036 spin_lock_init(&osb->osb_xattr_lock);
57037 ocfs2_init_steal_slots(osb);
57038
57039- atomic_set(&osb->alloc_stats.moves, 0);
57040- atomic_set(&osb->alloc_stats.local_data, 0);
57041- atomic_set(&osb->alloc_stats.bitmap_data, 0);
57042- atomic_set(&osb->alloc_stats.bg_allocs, 0);
57043- atomic_set(&osb->alloc_stats.bg_extends, 0);
57044+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
57045+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
57046+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
57047+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
57048+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
57049
57050 /* Copy the blockcheck stats from the superblock probe */
57051 osb->osb_ecc_stats = *stats;
57052diff --git a/fs/open.c b/fs/open.c
57053index 8c74100..4239c48 100644
57054--- a/fs/open.c
57055+++ b/fs/open.c
57056@@ -32,6 +32,8 @@
57057 #include <linux/dnotify.h>
57058 #include <linux/compat.h>
57059
57060+#define CREATE_TRACE_POINTS
57061+#include <trace/events/fs.h>
57062 #include "internal.h"
57063
57064 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
57065@@ -102,6 +104,8 @@ long vfs_truncate(struct path *path, loff_t length)
57066 error = locks_verify_truncate(inode, NULL, length);
57067 if (!error)
57068 error = security_path_truncate(path);
57069+ if (!error && !gr_acl_handle_truncate(path->dentry, path->mnt))
57070+ error = -EACCES;
57071 if (!error)
57072 error = do_truncate(path->dentry, length, 0, NULL);
57073
57074@@ -186,6 +190,8 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
57075 error = locks_verify_truncate(inode, f.file, length);
57076 if (!error)
57077 error = security_path_truncate(&f.file->f_path);
57078+ if (!error && !gr_acl_handle_truncate(f.file->f_path.dentry, f.file->f_path.mnt))
57079+ error = -EACCES;
57080 if (!error)
57081 error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
57082 sb_end_write(inode->i_sb);
57083@@ -360,6 +366,9 @@ retry:
57084 if (__mnt_is_readonly(path.mnt))
57085 res = -EROFS;
57086
57087+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
57088+ res = -EACCES;
57089+
57090 out_path_release:
57091 path_put(&path);
57092 if (retry_estale(res, lookup_flags)) {
57093@@ -391,6 +400,8 @@ retry:
57094 if (error)
57095 goto dput_and_out;
57096
57097+ gr_log_chdir(path.dentry, path.mnt);
57098+
57099 set_fs_pwd(current->fs, &path);
57100
57101 dput_and_out:
57102@@ -420,6 +431,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
57103 goto out_putf;
57104
57105 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
57106+
57107+ if (!error && !gr_chroot_fchdir(f.file->f_path.dentry, f.file->f_path.mnt))
57108+ error = -EPERM;
57109+
57110+ if (!error)
57111+ gr_log_chdir(f.file->f_path.dentry, f.file->f_path.mnt);
57112+
57113 if (!error)
57114 set_fs_pwd(current->fs, &f.file->f_path);
57115 out_putf:
57116@@ -449,7 +467,13 @@ retry:
57117 if (error)
57118 goto dput_and_out;
57119
57120+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
57121+ goto dput_and_out;
57122+
57123 set_fs_root(current->fs, &path);
57124+
57125+ gr_handle_chroot_chdir(&path);
57126+
57127 error = 0;
57128 dput_and_out:
57129 path_put(&path);
57130@@ -471,6 +495,16 @@ static int chmod_common(struct path *path, umode_t mode)
57131 if (error)
57132 return error;
57133 mutex_lock(&inode->i_mutex);
57134+
57135+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
57136+ error = -EACCES;
57137+ goto out_unlock;
57138+ }
57139+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
57140+ error = -EACCES;
57141+ goto out_unlock;
57142+ }
57143+
57144 error = security_path_chmod(path, mode);
57145 if (error)
57146 goto out_unlock;
57147@@ -531,6 +565,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
57148 uid = make_kuid(current_user_ns(), user);
57149 gid = make_kgid(current_user_ns(), group);
57150
57151+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
57152+ return -EACCES;
57153+
57154 newattrs.ia_valid = ATTR_CTIME;
57155 if (user != (uid_t) -1) {
57156 if (!uid_valid(uid))
57157@@ -946,6 +983,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
57158 } else {
57159 fsnotify_open(f);
57160 fd_install(fd, f);
57161+ trace_do_sys_open(tmp->name, flags, mode);
57162 }
57163 }
57164 putname(tmp);
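
The fs/open.c hooks follow one convention throughout grsecurity, visible from the call sites above: gr_acl_handle_*() returns nonzero to allow and 0 to deny, so callers map a zero result to -EACCES, while gr_handle_*() returns nonzero on a violation (the chroot checks). When the feature is compiled out, the hooks collapse to constant stubs, roughly like this sketch (grsecurity ships equivalents in a grsec_disabled.c; the exact file layout is an assumption):

	static inline __u32 gr_acl_handle_truncate(const struct dentry *dentry,
						   const struct vfsmount *mnt)
	{
		return 1;	/* nonzero == allowed */
	}

	static inline int gr_handle_chroot_chroot(const struct dentry *dentry,
						  const struct vfsmount *mnt)
	{
		return 0;	/* zero == no violation */
	}

The CREATE_TRACE_POINTS include plus the trace_do_sys_open() call additionally wires up a tracepoint reporting filename, flags and mode on every successful open.
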
57165diff --git a/fs/pipe.c b/fs/pipe.c
57166index d2c45e1..009fe1c 100644
57167--- a/fs/pipe.c
57168+++ b/fs/pipe.c
57169@@ -56,7 +56,7 @@ unsigned int pipe_min_size = PAGE_SIZE;
57170
57171 static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
57172 {
57173- if (pipe->files)
57174+ if (atomic_read(&pipe->files))
57175 mutex_lock_nested(&pipe->mutex, subclass);
57176 }
57177
57178@@ -71,7 +71,7 @@ EXPORT_SYMBOL(pipe_lock);
57179
57180 void pipe_unlock(struct pipe_inode_info *pipe)
57181 {
57182- if (pipe->files)
57183+ if (atomic_read(&pipe->files))
57184 mutex_unlock(&pipe->mutex);
57185 }
57186 EXPORT_SYMBOL(pipe_unlock);
57187@@ -449,9 +449,9 @@ redo:
57188 }
57189 if (bufs) /* More to do? */
57190 continue;
57191- if (!pipe->writers)
57192+ if (!atomic_read(&pipe->writers))
57193 break;
57194- if (!pipe->waiting_writers) {
57195+ if (!atomic_read(&pipe->waiting_writers)) {
57196 /* syscall merging: Usually we must not sleep
57197 * if O_NONBLOCK is set, or if we got some data.
57198 * But if a writer sleeps in kernel space, then
57199@@ -513,7 +513,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
57200 ret = 0;
57201 __pipe_lock(pipe);
57202
57203- if (!pipe->readers) {
57204+ if (!atomic_read(&pipe->readers)) {
57205 send_sig(SIGPIPE, current, 0);
57206 ret = -EPIPE;
57207 goto out;
57208@@ -562,7 +562,7 @@ redo1:
57209 for (;;) {
57210 int bufs;
57211
57212- if (!pipe->readers) {
57213+ if (!atomic_read(&pipe->readers)) {
57214 send_sig(SIGPIPE, current, 0);
57215 if (!ret)
57216 ret = -EPIPE;
57217@@ -653,9 +653,9 @@ redo2:
57218 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
57219 do_wakeup = 0;
57220 }
57221- pipe->waiting_writers++;
57222+ atomic_inc(&pipe->waiting_writers);
57223 pipe_wait(pipe);
57224- pipe->waiting_writers--;
57225+ atomic_dec(&pipe->waiting_writers);
57226 }
57227 out:
57228 __pipe_unlock(pipe);
57229@@ -709,7 +709,7 @@ pipe_poll(struct file *filp, poll_table *wait)
57230 mask = 0;
57231 if (filp->f_mode & FMODE_READ) {
57232 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
57233- if (!pipe->writers && filp->f_version != pipe->w_counter)
57234+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
57235 mask |= POLLHUP;
57236 }
57237
57238@@ -719,7 +719,7 @@ pipe_poll(struct file *filp, poll_table *wait)
57239 * Most Unices do not set POLLERR for FIFOs but on Linux they
57240 * behave exactly like pipes for poll().
57241 */
57242- if (!pipe->readers)
57243+ if (!atomic_read(&pipe->readers))
57244 mask |= POLLERR;
57245 }
57246
57247@@ -734,17 +734,17 @@ pipe_release(struct inode *inode, struct file *file)
57248
57249 __pipe_lock(pipe);
57250 if (file->f_mode & FMODE_READ)
57251- pipe->readers--;
57252+ atomic_dec(&pipe->readers);
57253 if (file->f_mode & FMODE_WRITE)
57254- pipe->writers--;
57255+ atomic_dec(&pipe->writers);
57256
57257- if (pipe->readers || pipe->writers) {
57258+ if (atomic_read(&pipe->readers) || atomic_read(&pipe->writers)) {
57259 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
57260 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
57261 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
57262 }
57263 spin_lock(&inode->i_lock);
57264- if (!--pipe->files) {
57265+ if (atomic_dec_and_test(&pipe->files)) {
57266 inode->i_pipe = NULL;
57267 kill = 1;
57268 }
57269@@ -811,7 +811,7 @@ void free_pipe_info(struct pipe_inode_info *pipe)
57270 kfree(pipe);
57271 }
57272
57273-static struct vfsmount *pipe_mnt __read_mostly;
57274+struct vfsmount *pipe_mnt __read_mostly;
57275
57276 /*
57277 * pipefs_dname() is called from d_path().
57278@@ -841,8 +841,9 @@ static struct inode * get_pipe_inode(void)
57279 goto fail_iput;
57280
57281 inode->i_pipe = pipe;
57282- pipe->files = 2;
57283- pipe->readers = pipe->writers = 1;
57284+ atomic_set(&pipe->files, 2);
57285+ atomic_set(&pipe->readers, 1);
57286+ atomic_set(&pipe->writers, 1);
57287 inode->i_fop = &pipefifo_fops;
57288
57289 /*
57290@@ -1022,17 +1023,17 @@ static int fifo_open(struct inode *inode, struct file *filp)
57291 spin_lock(&inode->i_lock);
57292 if (inode->i_pipe) {
57293 pipe = inode->i_pipe;
57294- pipe->files++;
57295+ atomic_inc(&pipe->files);
57296 spin_unlock(&inode->i_lock);
57297 } else {
57298 spin_unlock(&inode->i_lock);
57299 pipe = alloc_pipe_info();
57300 if (!pipe)
57301 return -ENOMEM;
57302- pipe->files = 1;
57303+ atomic_set(&pipe->files, 1);
57304 spin_lock(&inode->i_lock);
57305 if (unlikely(inode->i_pipe)) {
57306- inode->i_pipe->files++;
57307+ atomic_inc(&inode->i_pipe->files);
57308 spin_unlock(&inode->i_lock);
57309 free_pipe_info(pipe);
57310 pipe = inode->i_pipe;
57311@@ -1057,10 +1058,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
57312 * opened, even when there is no process writing the FIFO.
57313 */
57314 pipe->r_counter++;
57315- if (pipe->readers++ == 0)
57316+ if (atomic_inc_return(&pipe->readers) == 1)
57317 wake_up_partner(pipe);
57318
57319- if (!is_pipe && !pipe->writers) {
57320+ if (!is_pipe && !atomic_read(&pipe->writers)) {
57321 if ((filp->f_flags & O_NONBLOCK)) {
57322 /* suppress POLLHUP until we have
57323 * seen a writer */
57324@@ -1079,14 +1080,14 @@ static int fifo_open(struct inode *inode, struct file *filp)
57325 * errno=ENXIO when there is no process reading the FIFO.
57326 */
57327 ret = -ENXIO;
57328- if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
57329+ if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
57330 goto err;
57331
57332 pipe->w_counter++;
57333- if (!pipe->writers++)
57334+ if (atomic_inc_return(&pipe->writers) == 1)
57335 wake_up_partner(pipe);
57336
57337- if (!is_pipe && !pipe->readers) {
57338+ if (!is_pipe && !atomic_read(&pipe->readers)) {
57339 if (wait_for_partner(pipe, &pipe->r_counter))
57340 goto err_wr;
57341 }
57342@@ -1100,11 +1101,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
57343 * the process can at least talk to itself.
57344 */
57345
57346- pipe->readers++;
57347- pipe->writers++;
57348+ atomic_inc(&pipe->readers);
57349+ atomic_inc(&pipe->writers);
57350 pipe->r_counter++;
57351 pipe->w_counter++;
57352- if (pipe->readers == 1 || pipe->writers == 1)
57353+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
57354 wake_up_partner(pipe);
57355 break;
57356
57357@@ -1118,20 +1119,20 @@ static int fifo_open(struct inode *inode, struct file *filp)
57358 return 0;
57359
57360 err_rd:
57361- if (!--pipe->readers)
57362+ if (atomic_dec_and_test(&pipe->readers))
57363 wake_up_interruptible(&pipe->wait);
57364 ret = -ERESTARTSYS;
57365 goto err;
57366
57367 err_wr:
57368- if (!--pipe->writers)
57369+ if (atomic_dec_and_test(&pipe->writers))
57370 wake_up_interruptible(&pipe->wait);
57371 ret = -ERESTARTSYS;
57372 goto err;
57373
57374 err:
57375 spin_lock(&inode->i_lock);
57376- if (!--pipe->files) {
57377+ if (atomic_dec_and_test(&pipe->files)) {
57378 inode->i_pipe = NULL;
57379 kill = 1;
57380 }
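
Converting pipe->files/readers/writers/waiting_writers to atomic_t is mostly mechanical, but the wakeup conditions deserve care: the old code tested the counter's previous value via post-increment, while atomic_inc_return() yields the new value, so the comparison constant shifts by one. The translations used above:

	/* before: wake the partner when the old count was 0 */
	if (pipe->readers++ == 0)
		wake_up_partner(pipe);

	/* after: atomic_inc_return() reports the value *after* the increment */
	if (atomic_inc_return(&pipe->readers) == 1)
		wake_up_partner(pipe);

	/* likewise, "if (!--pipe->files)" becomes the standard dec-and-test idiom */
	if (atomic_dec_and_test(&pipe->files)) {
		inode->i_pipe = NULL;
		kill = 1;
	}
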
57381diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
57382index 15af622..0e9f4467 100644
57383--- a/fs/proc/Kconfig
57384+++ b/fs/proc/Kconfig
57385@@ -30,12 +30,12 @@ config PROC_FS
57386
57387 config PROC_KCORE
57388 bool "/proc/kcore support" if !ARM
57389- depends on PROC_FS && MMU
57390+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
57391
57392 config PROC_VMCORE
57393 bool "/proc/vmcore support"
57394- depends on PROC_FS && CRASH_DUMP
57395- default y
57396+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
57397+ default n
57398 help
57399 Exports the dump image of crashed kernel in ELF format.
57400
57401@@ -59,8 +59,8 @@ config PROC_SYSCTL
57402 limited in memory.
57403
57404 config PROC_PAGE_MONITOR
57405- default y
57406- depends on PROC_FS && MMU
57407+ default n
57408+ depends on PROC_FS && MMU && !GRKERNSEC
57409 bool "Enable /proc page monitoring" if EXPERT
57410 help
57411 Various /proc files exist to monitor process memory utilization:
57412diff --git a/fs/proc/array.c b/fs/proc/array.c
57413index cbd0f1b..adec3f0 100644
57414--- a/fs/proc/array.c
57415+++ b/fs/proc/array.c
57416@@ -60,6 +60,7 @@
57417 #include <linux/tty.h>
57418 #include <linux/string.h>
57419 #include <linux/mman.h>
57420+#include <linux/grsecurity.h>
57421 #include <linux/proc_fs.h>
57422 #include <linux/ioport.h>
57423 #include <linux/uaccess.h>
57424@@ -363,6 +364,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
57425 seq_putc(m, '\n');
57426 }
57427
57428+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
57429+static inline void task_pax(struct seq_file *m, struct task_struct *p)
57430+{
57431+ if (p->mm)
57432+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
57433+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
57434+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
57435+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
57436+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
57437+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
57438+ else
57439+ seq_printf(m, "PaX:\t-----\n");
57440+}
57441+#endif
57442+
57443 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
57444 struct pid *pid, struct task_struct *task)
57445 {
57446@@ -381,9 +397,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
57447 task_cpus_allowed(m, task);
57448 cpuset_task_status_allowed(m, task);
57449 task_context_switch_counts(m, task);
57450+
57451+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
57452+ task_pax(m, task);
57453+#endif
57454+
57455+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
57456+ task_grsec_rbac(m, task);
57457+#endif
57458+
57459 return 0;
57460 }
57461
57462+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
57463+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
57464+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
57465+ _mm->pax_flags & MF_PAX_SEGMEXEC))
57466+#endif
57467+
57468 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
57469 struct pid *pid, struct task_struct *task, int whole)
57470 {
57471@@ -405,6 +436,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
57472 char tcomm[sizeof(task->comm)];
57473 unsigned long flags;
57474
57475+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
57476+ if (current->exec_id != m->exec_id) {
57477+ gr_log_badprocpid("stat");
57478+ return 0;
57479+ }
57480+#endif
57481+
57482 state = *get_task_state(task);
57483 vsize = eip = esp = 0;
57484 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
57485@@ -476,6 +514,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
57486 gtime = task_gtime(task);
57487 }
57488
57489+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
57490+ if (PAX_RAND_FLAGS(mm)) {
57491+ eip = 0;
57492+ esp = 0;
57493+ wchan = 0;
57494+ }
57495+#endif
57496+#ifdef CONFIG_GRKERNSEC_HIDESYM
57497+ wchan = 0;
57498+	eip = 0;
57499+	esp = 0;

57500+#endif
57501+
57502 /* scale priority and nice values from timeslices to -20..20 */
57503 /* to make it look like a "normal" Unix priority/nice value */
57504 priority = task_prio(task);
57505@@ -512,9 +563,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
57506 seq_put_decimal_ull(m, ' ', vsize);
57507 seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
57508 seq_put_decimal_ull(m, ' ', rsslim);
57509+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
57510+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
57511+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
57512+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
57513+#else
57514 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
57515 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
57516 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
57517+#endif
57518 seq_put_decimal_ull(m, ' ', esp);
57519 seq_put_decimal_ull(m, ' ', eip);
57520 /* The signal information here is obsolete.
57521@@ -536,7 +593,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
57522 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
57523 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
57524
57525- if (mm && permitted) {
57526+ if (mm && permitted
57527+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
57528+ && !PAX_RAND_FLAGS(mm)
57529+#endif
57530+ ) {
57531 seq_put_decimal_ull(m, ' ', mm->start_data);
57532 seq_put_decimal_ull(m, ' ', mm->end_data);
57533 seq_put_decimal_ull(m, ' ', mm->start_brk);
57534@@ -574,8 +635,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
57535 struct pid *pid, struct task_struct *task)
57536 {
57537 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
57538- struct mm_struct *mm = get_task_mm(task);
57539+ struct mm_struct *mm;
57540
57541+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
57542+ if (current->exec_id != m->exec_id) {
57543+ gr_log_badprocpid("statm");
57544+ return 0;
57545+ }
57546+#endif
57547+ mm = get_task_mm(task);
57548 if (mm) {
57549 size = task_statm(mm, &shared, &text, &data, &resident);
57550 mmput(mm);
57551@@ -598,6 +666,13 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
57552 return 0;
57553 }
57554
57555+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
57556+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
57557+{
57558+ return sprintf(buffer, "%pI4\n", &task->signal->curr_ip);
57559+}
57560+#endif
57561+
57562 #ifdef CONFIG_CHECKPOINT_RESTORE
57563 static struct pid *
57564 get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
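
The exec_id checks in do_task_stat() and proc_pid_statm() defend against a /proc file descriptor that is opened before an execve() and read afterwards, for example one inherited across an exec of a setuid binary. exec_id is a per-task generation counter bumped on exec; the seq_file snapshots it at open time. The open-side half lives elsewhere in this patch and is sketched here in simplified form under CONFIG_GRKERNSEC_PROC_MEMMAP:

	/* at open: remember which exec generation created this seq_file */
	m->exec_id = current->exec_id;

	/* at read: a mismatch means an execve() happened in between */
	if (current->exec_id != m->exec_id) {
		gr_log_badprocpid("stat");
		return 0;		/* present an empty file instead */
	}
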
57565diff --git a/fs/proc/base.c b/fs/proc/base.c
57566index c3834da..b402b2b 100644
57567--- a/fs/proc/base.c
57568+++ b/fs/proc/base.c
57569@@ -113,6 +113,14 @@ struct pid_entry {
57570 union proc_op op;
57571 };
57572
57573+struct getdents_callback {
57574+ struct linux_dirent __user * current_dir;
57575+ struct linux_dirent __user * previous;
57576+ struct file * file;
57577+ int count;
57578+ int error;
57579+};
57580+
57581 #define NOD(NAME, MODE, IOP, FOP, OP) { \
57582 .name = (NAME), \
57583 .len = sizeof(NAME) - 1, \
57584@@ -210,6 +218,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
57585 if (!mm->arg_end)
57586 goto out_mm; /* Shh! No looking before we're done */
57587
57588+ if (gr_acl_handle_procpidmem(task))
57589+ goto out_mm;
57590+
57591 len = mm->arg_end - mm->arg_start;
57592
57593 if (len > PAGE_SIZE)
57594@@ -237,12 +248,28 @@ out:
57595 return res;
57596 }
57597
57598+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
57599+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
57600+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
57601+ _mm->pax_flags & MF_PAX_SEGMEXEC))
57602+#endif
57603+
57604 static int proc_pid_auxv(struct task_struct *task, char *buffer)
57605 {
57606 struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
57607 int res = PTR_ERR(mm);
57608 if (mm && !IS_ERR(mm)) {
57609 unsigned int nwords = 0;
57610+
57611+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
57612+ /* allow if we're currently ptracing this task */
57613+ if (PAX_RAND_FLAGS(mm) &&
57614+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
57615+ mmput(mm);
57616+ return 0;
57617+ }
57618+#endif
57619+
57620 do {
57621 nwords += 2;
57622 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
57623@@ -256,7 +283,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
57624 }
57625
57626
57627-#ifdef CONFIG_KALLSYMS
57628+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
57629 /*
57630 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
57631 * Returns the resolved symbol. If that fails, simply return the address.
57632@@ -295,7 +322,7 @@ static void unlock_trace(struct task_struct *task)
57633 mutex_unlock(&task->signal->cred_guard_mutex);
57634 }
57635
57636-#ifdef CONFIG_STACKTRACE
57637+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
57638
57639 #define MAX_STACK_TRACE_DEPTH 64
57640
57641@@ -518,7 +545,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
57642 return count;
57643 }
57644
57645-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
57646+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
57647 static int proc_pid_syscall(struct task_struct *task, char *buffer)
57648 {
57649 long nr;
57650@@ -547,7 +574,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
57651 /************************************************************************/
57652
57653 /* permission checks */
57654-static int proc_fd_access_allowed(struct inode *inode)
57655+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
57656 {
57657 struct task_struct *task;
57658 int allowed = 0;
57659@@ -557,7 +584,10 @@ static int proc_fd_access_allowed(struct inode *inode)
57660 */
57661 task = get_proc_task(inode);
57662 if (task) {
57663- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
57664+ if (log)
57665+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
57666+ else
57667+ allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
57668 put_task_struct(task);
57669 }
57670 return allowed;
57671@@ -588,10 +618,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
57672 struct task_struct *task,
57673 int hide_pid_min)
57674 {
57675+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
57676+ return false;
57677+
57678+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
57679+ rcu_read_lock();
57680+ {
57681+ const struct cred *tmpcred = current_cred();
57682+ const struct cred *cred = __task_cred(task);
57683+
57684+ if (uid_eq(tmpcred->uid, GLOBAL_ROOT_UID) || uid_eq(tmpcred->uid, cred->uid)
57685+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
57686+ || in_group_p(grsec_proc_gid)
57687+#endif
57688+ ) {
57689+ rcu_read_unlock();
57690+ return true;
57691+ }
57692+ }
57693+ rcu_read_unlock();
57694+
57695+ if (!pid->hide_pid)
57696+ return false;
57697+#endif
57698+
57699 if (pid->hide_pid < hide_pid_min)
57700 return true;
57701 if (in_group_p(pid->pid_gid))
57702 return true;
57703+
57704 return ptrace_may_access(task, PTRACE_MODE_READ);
57705 }
57706
57707@@ -609,7 +664,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
57708 put_task_struct(task);
57709
57710 if (!has_perms) {
57711+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
57712+ {
57713+#else
57714 if (pid->hide_pid == 2) {
57715+#endif
57716 /*
57717 * Let's make getdents(), stat(), and open()
57718 * consistent with each other. If a process
57719@@ -707,6 +766,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
57720 if (!task)
57721 return -ESRCH;
57722
57723+ if (gr_acl_handle_procpidmem(task)) {
57724+ put_task_struct(task);
57725+ return -EPERM;
57726+ }
57727+
57728 mm = mm_access(task, mode);
57729 put_task_struct(task);
57730
57731@@ -722,6 +786,10 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
57732
57733 file->private_data = mm;
57734
57735+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
57736+ file->f_version = current->exec_id;
57737+#endif
57738+
57739 return 0;
57740 }
57741
57742@@ -743,6 +811,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
57743 ssize_t copied;
57744 char *page;
57745
57746+#ifdef CONFIG_GRKERNSEC
57747+ if (write)
57748+ return -EPERM;
57749+#endif
57750+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
57751+ if (file->f_version != current->exec_id) {
57752+ gr_log_badprocpid("mem");
57753+ return 0;
57754+ }
57755+#endif
57756+
57757 if (!mm)
57758 return 0;
57759
57760@@ -755,7 +834,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
57761 goto free;
57762
57763 while (count > 0) {
57764- int this_len = min_t(int, count, PAGE_SIZE);
57765+ ssize_t this_len = min_t(ssize_t, count, PAGE_SIZE);
57766
57767 if (write && copy_from_user(page, buf, this_len)) {
57768 copied = -EFAULT;
57769@@ -847,6 +926,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
57770 if (!mm)
57771 return 0;
57772
57773+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
57774+ if (file->f_version != current->exec_id) {
57775+ gr_log_badprocpid("environ");
57776+ return 0;
57777+ }
57778+#endif
57779+
57780 page = (char *)__get_free_page(GFP_TEMPORARY);
57781 if (!page)
57782 return -ENOMEM;
57783@@ -856,7 +942,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
57784 goto free;
57785 while (count > 0) {
57786 size_t this_len, max_len;
57787- int retval;
57788+ ssize_t retval;
57789
57790 if (src >= (mm->env_end - mm->env_start))
57791 break;
57792@@ -1461,7 +1547,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
57793 int error = -EACCES;
57794
57795 /* Are we allowed to snoop on the tasks file descriptors? */
57796- if (!proc_fd_access_allowed(inode))
57797+ if (!proc_fd_access_allowed(inode, 0))
57798 goto out;
57799
57800 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
57801@@ -1505,8 +1591,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
57802 struct path path;
57803
57804 /* Are we allowed to snoop on the tasks file descriptors? */
57805- if (!proc_fd_access_allowed(inode))
57806- goto out;
57807+ /* logging this is needed for learning on chromium to work properly,
57808+ but we don't want to flood the logs from 'ps' which does a readlink
57809+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
57810+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
57811+ */
57812+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
57813+	if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
57814+		if (!proc_fd_access_allowed(inode, 0))
57815+			goto out;
57816+	} else {
57817+		if (!proc_fd_access_allowed(inode, 1))
57818+ }
57819
57820 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
57821 if (error)
57822@@ -1556,7 +1652,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
57823 rcu_read_lock();
57824 cred = __task_cred(task);
57825 inode->i_uid = cred->euid;
57826+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
57827+ inode->i_gid = grsec_proc_gid;
57828+#else
57829 inode->i_gid = cred->egid;
57830+#endif
57831 rcu_read_unlock();
57832 }
57833 security_task_to_inode(task, inode);
57834@@ -1592,10 +1692,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
57835 return -ENOENT;
57836 }
57837 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
57838+#ifdef CONFIG_GRKERNSEC_PROC_USER
57839+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
57840+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
57841+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
57842+#endif
57843 task_dumpable(task)) {
57844 cred = __task_cred(task);
57845 stat->uid = cred->euid;
57846+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
57847+ stat->gid = grsec_proc_gid;
57848+#else
57849 stat->gid = cred->egid;
57850+#endif
57851 }
57852 }
57853 rcu_read_unlock();
57854@@ -1633,11 +1742,20 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
57855
57856 if (task) {
57857 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
57858+#ifdef CONFIG_GRKERNSEC_PROC_USER
57859+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
57860+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
57861+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
57862+#endif
57863 task_dumpable(task)) {
57864 rcu_read_lock();
57865 cred = __task_cred(task);
57866 inode->i_uid = cred->euid;
57867+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
57868+ inode->i_gid = grsec_proc_gid;
57869+#else
57870 inode->i_gid = cred->egid;
57871+#endif
57872 rcu_read_unlock();
57873 } else {
57874 inode->i_uid = GLOBAL_ROOT_UID;
57875@@ -2196,6 +2314,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
57876 if (!task)
57877 goto out_no_task;
57878
57879+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
57880+ goto out;
57881+
57882 /*
57883 * Yes, it does not scale. And it should not. Don't add
57884 * new entries into /proc/<tgid>/ without very good reasons.
57885@@ -2240,6 +2361,9 @@ static int proc_pident_readdir(struct file *filp,
57886 if (!task)
57887 goto out_no_task;
57888
57889+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
57890+ goto out;
57891+
57892 ret = 0;
57893 i = filp->f_pos;
57894 switch (i) {
57895@@ -2653,7 +2777,7 @@ static const struct pid_entry tgid_base_stuff[] = {
57896 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
57897 #endif
57898 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
57899-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
57900+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
57901 INF("syscall", S_IRUGO, proc_pid_syscall),
57902 #endif
57903 INF("cmdline", S_IRUGO, proc_pid_cmdline),
57904@@ -2678,10 +2802,10 @@ static const struct pid_entry tgid_base_stuff[] = {
57905 #ifdef CONFIG_SECURITY
57906 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
57907 #endif
57908-#ifdef CONFIG_KALLSYMS
57909+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
57910 INF("wchan", S_IRUGO, proc_pid_wchan),
57911 #endif
57912-#ifdef CONFIG_STACKTRACE
57913+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
57914 ONE("stack", S_IRUGO, proc_pid_stack),
57915 #endif
57916 #ifdef CONFIG_SCHEDSTATS
57917@@ -2715,6 +2839,9 @@ static const struct pid_entry tgid_base_stuff[] = {
57918 #ifdef CONFIG_HARDWALL
57919 INF("hardwall", S_IRUGO, proc_pid_hardwall),
57920 #endif
57921+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
57922+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
57923+#endif
57924 #ifdef CONFIG_USER_NS
57925 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
57926 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
57927@@ -2847,7 +2974,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
57928 if (!inode)
57929 goto out;
57930
57931+#ifdef CONFIG_GRKERNSEC_PROC_USER
57932+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
57933+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
57934+ inode->i_gid = grsec_proc_gid;
57935+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
57936+#else
57937 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
57938+#endif
57939 inode->i_op = &proc_tgid_base_inode_operations;
57940 inode->i_fop = &proc_tgid_base_operations;
57941 inode->i_flags|=S_IMMUTABLE;
57942@@ -2885,7 +3019,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsign
57943 if (!task)
57944 goto out;
57945
57946+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
57947+ goto out_put_task;
57948+
57949 result = proc_pid_instantiate(dir, dentry, task, NULL);
57950+out_put_task:
57951 put_task_struct(task);
57952 out:
57953 return result;
57954@@ -2948,6 +3086,8 @@ static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldi
57955 static int fake_filldir(void *buf, const char *name, int namelen,
57956 loff_t offset, u64 ino, unsigned d_type)
57957 {
57958+ struct getdents_callback * __buf = (struct getdents_callback *) buf;
57959+ __buf->error = -EINVAL;
57960 return 0;
57961 }
57962
57963@@ -3007,7 +3147,7 @@ static const struct pid_entry tid_base_stuff[] = {
57964 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
57965 #endif
57966 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
57967-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
57968+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
57969 INF("syscall", S_IRUGO, proc_pid_syscall),
57970 #endif
57971 INF("cmdline", S_IRUGO, proc_pid_cmdline),
57972@@ -3034,10 +3174,10 @@ static const struct pid_entry tid_base_stuff[] = {
57973 #ifdef CONFIG_SECURITY
57974 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
57975 #endif
57976-#ifdef CONFIG_KALLSYMS
57977+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
57978 INF("wchan", S_IRUGO, proc_pid_wchan),
57979 #endif
57980-#ifdef CONFIG_STACKTRACE
57981+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
57982 ONE("stack", S_IRUGO, proc_pid_stack),
57983 #endif
57984 #ifdef CONFIG_SCHEDSTATS
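
The getdents_callback definition added at the top of base.c duplicates the private layout used by fs/readdir.c (as extended by this patch with a struct file * member), so that fake_filldir() can report an error through the opaque buf pointer: when a PID is hidden, getdents() then fails with -EINVAL instead of silently returning a truncated listing. The cast is a layout coupling, so the two definitions have to stay in sync; annotated, the mechanism is:

	static int fake_filldir(void *buf, const char *name, int namelen,
				loff_t offset, u64 ino, unsigned d_type)
	{
		/* assumes buf really is fs/readdir.c's getdents_callback */
		struct getdents_callback *__buf = buf;

		__buf->error = -EINVAL;	/* surfaces as getdents() == -EINVAL */
		return 0;		/* and the entry itself is swallowed */
	}
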
57985diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
57986index 82676e3..5f8518a 100644
57987--- a/fs/proc/cmdline.c
57988+++ b/fs/proc/cmdline.c
57989@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
57990
57991 static int __init proc_cmdline_init(void)
57992 {
57993+#ifdef CONFIG_GRKERNSEC_PROC_ADD
57994+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
57995+#else
57996 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
57997+#endif
57998 return 0;
57999 }
58000 module_init(proc_cmdline_init);
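
proc_create_grsec() is the GRKERNSEC_PROC_ADD counterpart of proc_create(): instead of a world-readable entry it creates one restricted to root, or to root plus the configured grsec_proc_gid group. Its real definition is elsewhere in the patch; a plausible sketch, with the mode/gid handling marked as an assumption:

	/* illustrative only -- the requested mode is overridden here, which is
	 * an assumption about the helper, not its verbatim definition */
	static struct proc_dir_entry *proc_create_grsec(const char *name,
			umode_t mode, struct proc_dir_entry *parent,
			const struct file_operations *fops)
	{
	#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
		struct proc_dir_entry *pde = proc_create(name, S_IRUSR | S_IRGRP,
							 parent, fops);
		if (pde)
			pde->gid = grsec_proc_gid;
		return pde;
	#else
		return proc_create(name, S_IRUSR, parent, fops);
	#endif
	}
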
58001diff --git a/fs/proc/devices.c b/fs/proc/devices.c
58002index b143471..bb105e5 100644
58003--- a/fs/proc/devices.c
58004+++ b/fs/proc/devices.c
58005@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
58006
58007 static int __init proc_devices_init(void)
58008 {
58009+#ifdef CONFIG_GRKERNSEC_PROC_ADD
58010+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
58011+#else
58012 proc_create("devices", 0, NULL, &proc_devinfo_operations);
58013+#endif
58014 return 0;
58015 }
58016 module_init(proc_devices_init);
58017diff --git a/fs/proc/fd.c b/fs/proc/fd.c
58018index d7a4a28..0201742 100644
58019--- a/fs/proc/fd.c
58020+++ b/fs/proc/fd.c
58021@@ -25,7 +25,8 @@ static int seq_show(struct seq_file *m, void *v)
58022 if (!task)
58023 return -ENOENT;
58024
58025- files = get_files_struct(task);
58026+ if (!gr_acl_handle_procpidmem(task))
58027+ files = get_files_struct(task);
58028 put_task_struct(task);
58029
58030 if (files) {
58031@@ -302,11 +303,21 @@ static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
58032 */
58033 int proc_fd_permission(struct inode *inode, int mask)
58034 {
58035+ struct task_struct *task;
58036 int rv = generic_permission(inode, mask);
58037- if (rv == 0)
58038- return 0;
58039+
58040 if (task_pid(current) == proc_pid(inode))
58041 rv = 0;
58042+
58043+ task = get_proc_task(inode);
58044+ if (task == NULL)
58045+ return rv;
58046+
58047+ if (gr_acl_handle_procpidmem(task))
58048+ rv = -EACCES;
58049+
58050+ put_task_struct(task);
58051+
58052 return rv;
58053 }
58054
58055diff --git a/fs/proc/inode.c b/fs/proc/inode.c
58056index 073aea6..0630370 100644
58057--- a/fs/proc/inode.c
58058+++ b/fs/proc/inode.c
58059@@ -23,11 +23,17 @@
58060 #include <linux/slab.h>
58061 #include <linux/mount.h>
58062 #include <linux/magic.h>
58063+#include <linux/grsecurity.h>
58064
58065 #include <asm/uaccess.h>
58066
58067 #include "internal.h"
58068
58069+#ifdef CONFIG_PROC_SYSCTL
58070+extern const struct inode_operations proc_sys_inode_operations;
58071+extern const struct inode_operations proc_sys_dir_operations;
58072+#endif
58073+
58074 static void proc_evict_inode(struct inode *inode)
58075 {
58076 struct proc_dir_entry *de;
58077@@ -55,6 +61,13 @@ static void proc_evict_inode(struct inode *inode)
58078 ns = PROC_I(inode)->ns.ns;
58079 if (ns_ops && ns)
58080 ns_ops->put(ns);
58081+
58082+#ifdef CONFIG_PROC_SYSCTL
58083+ if (inode->i_op == &proc_sys_inode_operations ||
58084+ inode->i_op == &proc_sys_dir_operations)
58085+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
58086+#endif
58087+
58088 }
58089
58090 static struct kmem_cache * proc_inode_cachep;
58091@@ -385,7 +398,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
58092 if (de->mode) {
58093 inode->i_mode = de->mode;
58094 inode->i_uid = de->uid;
58095+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
58096+ inode->i_gid = grsec_proc_gid;
58097+#else
58098 inode->i_gid = de->gid;
58099+#endif
58100 }
58101 if (de->size)
58102 inode->i_size = de->size;
58103diff --git a/fs/proc/internal.h b/fs/proc/internal.h
58104index d600fb0..3b495fe 100644
58105--- a/fs/proc/internal.h
58106+++ b/fs/proc/internal.h
58107@@ -155,6 +155,9 @@ extern int proc_pid_status(struct seq_file *, struct pid_namespace *,
58108 struct pid *, struct task_struct *);
58109 extern int proc_pid_statm(struct seq_file *, struct pid_namespace *,
58110 struct pid *, struct task_struct *);
58111+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
58112+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
58113+#endif
58114
58115 /*
58116 * base.c
58117diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
58118index 0a22194..a9fc8c1 100644
58119--- a/fs/proc/kcore.c
58120+++ b/fs/proc/kcore.c
58121@@ -484,9 +484,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
58122 * the addresses in the elf_phdr on our list.
58123 */
58124 start = kc_offset_to_vaddr(*fpos - elf_buflen);
58125- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
58126+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
58127+ if (tsz > buflen)
58128 tsz = buflen;
58129-
58130+
58131 while (buflen) {
58132 struct kcore_list *m;
58133
58134@@ -515,20 +516,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
58135 kfree(elf_buf);
58136 } else {
58137 if (kern_addr_valid(start)) {
58138- unsigned long n;
58139+ char *elf_buf;
58140+ mm_segment_t oldfs;
58141
58142- n = copy_to_user(buffer, (char *)start, tsz);
58143- /*
58144- * We cannot distinguish between fault on source
58145- * and fault on destination. When this happens
58146- * we clear too and hope it will trigger the
58147- * EFAULT again.
58148- */
58149- if (n) {
58150- if (clear_user(buffer + tsz - n,
58151- n))
58152+ elf_buf = kmalloc(tsz, GFP_KERNEL);
58153+ if (!elf_buf)
58154+ return -ENOMEM;
58155+ oldfs = get_fs();
58156+ set_fs(KERNEL_DS);
58157+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
58158+ set_fs(oldfs);
58159+ if (copy_to_user(buffer, elf_buf, tsz)) {
58160+ kfree(elf_buf);
58161 return -EFAULT;
58162+ }
58163 }
58164+ set_fs(oldfs);
58165+ kfree(elf_buf);
58166 } else {
58167 if (clear_user(buffer, tsz))
58168 return -EFAULT;
58169@@ -548,6 +552,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
58170
58171 static int open_kcore(struct inode *inode, struct file *filp)
58172 {
58173+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
58174+ return -EPERM;
58175+#endif
58176 if (!capable(CAP_SYS_RAWIO))
58177 return -EPERM;
58178 if (kcore_need_update)
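
The read_kcore() rewrite replaces the direct copy_to_user() from an arbitrary kernel virtual address with a kmalloc'd bounce buffer: the data is first pulled in with __copy_from_user() under set_fs(KERNEL_DS), so the read goes through the fault-tolerant user-copy machinery, and only the heap buffer is ever exposed to userspace; under PaX's usercopy hardening, copying straight out of kernel text or rodata would otherwise be refused. The general idiom, with a zero-fill fallback that is illustrative rather than part of the hunk above:

	char *bounce = kmalloc(tsz, GFP_KERNEL);
	mm_segment_t oldfs = get_fs();

	if (!bounce)
		return -ENOMEM;
	set_fs(KERNEL_DS);	/* let the user-copy helpers accept kernel addresses */
	if (__copy_from_user(bounce, (const void __user *)start, tsz))
		memset(bounce, 0, tsz);		/* unreadable range: substitute zeroes */
	set_fs(oldfs);
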
58179diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
58180index 5aa847a..f77c8d4 100644
58181--- a/fs/proc/meminfo.c
58182+++ b/fs/proc/meminfo.c
58183@@ -159,7 +159,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
58184 vmi.used >> 10,
58185 vmi.largest_chunk >> 10
58186 #ifdef CONFIG_MEMORY_FAILURE
58187- ,atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10)
58188+ ,atomic_long_read_unchecked(&num_poisoned_pages) << (PAGE_SHIFT - 10)
58189 #endif
58190 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
58191 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
58192diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
58193index ccfd99b..1b7e255 100644
58194--- a/fs/proc/nommu.c
58195+++ b/fs/proc/nommu.c
58196@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
58197 if (len < 1)
58198 len = 1;
58199 seq_printf(m, "%*c", len, ' ');
58200- seq_path(m, &file->f_path, "");
58201+ seq_path(m, &file->f_path, "\n\\");
58202 }
58203
58204 seq_putc(m, '\n');
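
Passing "\n\\" as seq_path()'s escape set, here and again in the task_mmu.c hunks below, closes an output-spoofing hole: seq_path() octal-escapes every character listed in its third argument, so a dentry name containing a newline can no longer fabricate extra lines in /proc/<pid>/maps-style listings, and escaping the backslash itself keeps the encoding unambiguous.

	/* a file named "evil\n08048000-08049000 r-xp ..." now renders as
	 * "evil\01208048000-08049000 r-xp ...": one line, octal-escaped */
	seq_path(m, &file->f_path, "\n\\");
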
58205diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
58206index 986e832..6e8e859 100644
58207--- a/fs/proc/proc_net.c
58208+++ b/fs/proc/proc_net.c
58209@@ -23,6 +23,7 @@
58210 #include <linux/nsproxy.h>
58211 #include <net/net_namespace.h>
58212 #include <linux/seq_file.h>
58213+#include <linux/grsecurity.h>
58214
58215 #include "internal.h"
58216
58217@@ -109,6 +110,17 @@ static struct net *get_proc_task_net(struct inode *dir)
58218 struct task_struct *task;
58219 struct nsproxy *ns;
58220 struct net *net = NULL;
58221+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
58222+ const struct cred *cred = current_cred();
58223+#endif
58224+
58225+#ifdef CONFIG_GRKERNSEC_PROC_USER
58226+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID))
58227+ return net;
58228+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
58229+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID) && !in_group_p(grsec_proc_gid))
58230+ return net;
58231+#endif
58232
58233 rcu_read_lock();
58234 task = pid_task(proc_pid(dir), PIDTYPE_PID);
58235diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
58236index ac05f33..1e6dc7e 100644
58237--- a/fs/proc/proc_sysctl.c
58238+++ b/fs/proc/proc_sysctl.c
58239@@ -13,11 +13,15 @@
58240 #include <linux/module.h>
58241 #include "internal.h"
58242
58243+extern int gr_handle_chroot_sysctl(const int op);
58244+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
58245+ const int op);
58246+
58247 static const struct dentry_operations proc_sys_dentry_operations;
58248 static const struct file_operations proc_sys_file_operations;
58249-static const struct inode_operations proc_sys_inode_operations;
58250+const struct inode_operations proc_sys_inode_operations;
58251 static const struct file_operations proc_sys_dir_file_operations;
58252-static const struct inode_operations proc_sys_dir_operations;
58253+const struct inode_operations proc_sys_dir_operations;
58254
58255 void proc_sys_poll_notify(struct ctl_table_poll *poll)
58256 {
58257@@ -467,6 +471,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
58258
58259 err = NULL;
58260 d_set_d_op(dentry, &proc_sys_dentry_operations);
58261+
58262+ gr_handle_proc_create(dentry, inode);
58263+
58264 d_add(dentry, inode);
58265
58266 out:
58267@@ -482,6 +489,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
58268 struct inode *inode = file_inode(filp);
58269 struct ctl_table_header *head = grab_header(inode);
58270 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
58271+ int op = write ? MAY_WRITE : MAY_READ;
58272 ssize_t error;
58273 size_t res;
58274
58275@@ -493,7 +501,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
58276 * and won't be until we finish.
58277 */
58278 error = -EPERM;
58279- if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ))
58280+ if (sysctl_perm(head, table, op))
58281 goto out;
58282
58283 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
58284@@ -501,6 +509,22 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
58285 if (!table->proc_handler)
58286 goto out;
58287
58288+#ifdef CONFIG_GRKERNSEC
58289+ error = -EPERM;
58290+ if (gr_handle_chroot_sysctl(op))
58291+ goto out;
58292+ dget(filp->f_path.dentry);
58293+ if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
58294+ dput(filp->f_path.dentry);
58295+ goto out;
58296+ }
58297+ dput(filp->f_path.dentry);
58298+ if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
58299+ goto out;
58300+ if (write && !capable(CAP_SYS_ADMIN))
58301+ goto out;
58302+#endif
58303+
58304 /* careful: calling conventions are nasty here */
58305 res = count;
58306 error = table->proc_handler(table, write, buf, &res, ppos);
58307@@ -598,6 +622,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
58308 return -ENOMEM;
58309 } else {
58310 d_set_d_op(child, &proc_sys_dentry_operations);
58311+
58312+ gr_handle_proc_create(child, inode);
58313+
58314 d_add(child, inode);
58315 }
58316 } else {
58317@@ -641,6 +668,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
58318 if ((*pos)++ < file->f_pos)
58319 return 0;
58320
58321+ if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
58322+ return 0;
58323+
58324 if (unlikely(S_ISLNK(table->mode)))
58325 res = proc_sys_link_fill_cache(file, dirent, filldir, head, table);
58326 else
58327@@ -751,6 +781,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
58328 if (IS_ERR(head))
58329 return PTR_ERR(head);
58330
58331+ if (table && !gr_acl_handle_hidden_file(dentry, mnt))
58332+ return -ENOENT;
58333+
58334 generic_fillattr(inode, stat);
58335 if (table)
58336 stat->mode = (stat->mode & S_IFMT) | table->mode;
58337@@ -773,13 +806,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
58338 .llseek = generic_file_llseek,
58339 };
58340
58341-static const struct inode_operations proc_sys_inode_operations = {
58342+const struct inode_operations proc_sys_inode_operations = {
58343 .permission = proc_sys_permission,
58344 .setattr = proc_sys_setattr,
58345 .getattr = proc_sys_getattr,
58346 };
58347
58348-static const struct inode_operations proc_sys_dir_operations = {
58349+const struct inode_operations proc_sys_dir_operations = {
58350 .lookup = proc_sys_lookup,
58351 .permission = proc_sys_permission,
58352 .setattr = proc_sys_setattr,
58353@@ -855,7 +888,7 @@ static struct ctl_dir *find_subdir(struct ctl_dir *dir,
58354 static struct ctl_dir *new_dir(struct ctl_table_set *set,
58355 const char *name, int namelen)
58356 {
58357- struct ctl_table *table;
58358+ ctl_table_no_const *table;
58359 struct ctl_dir *new;
58360 struct ctl_node *node;
58361 char *new_name;
58362@@ -867,7 +900,7 @@ static struct ctl_dir *new_dir(struct ctl_table_set *set,
58363 return NULL;
58364
58365 node = (struct ctl_node *)(new + 1);
58366- table = (struct ctl_table *)(node + 1);
58367+ table = (ctl_table_no_const *)(node + 1);
58368 new_name = (char *)(table + 2);
58369 memcpy(new_name, name, namelen);
58370 new_name[namelen] = '\0';
58371@@ -1036,7 +1069,8 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
58372 static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table *table,
58373 struct ctl_table_root *link_root)
58374 {
58375- struct ctl_table *link_table, *entry, *link;
58376+ ctl_table_no_const *link_table, *link;
58377+ struct ctl_table *entry;
58378 struct ctl_table_header *links;
58379 struct ctl_node *node;
58380 char *link_name;
58381@@ -1059,7 +1093,7 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table
58382 return NULL;
58383
58384 node = (struct ctl_node *)(links + 1);
58385- link_table = (struct ctl_table *)(node + nr_entries);
58386+ link_table = (ctl_table_no_const *)(node + nr_entries);
58387 link_name = (char *)&link_table[nr_entries + 1];
58388
58389 for (link = link_table, entry = table; entry->procname; link++, entry++) {
58390@@ -1307,8 +1341,8 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
58391 struct ctl_table_header ***subheader, struct ctl_table_set *set,
58392 struct ctl_table *table)
58393 {
58394- struct ctl_table *ctl_table_arg = NULL;
58395- struct ctl_table *entry, *files;
58396+ ctl_table_no_const *ctl_table_arg = NULL, *files = NULL;
58397+ struct ctl_table *entry;
58398 int nr_files = 0;
58399 int nr_dirs = 0;
58400 int err = -ENOMEM;
58401@@ -1320,10 +1354,9 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
58402 nr_files++;
58403 }
58404
58405- files = table;
58406 /* If there are mixed files and directories we need a new table */
58407 if (nr_dirs && nr_files) {
58408- struct ctl_table *new;
58409+ ctl_table_no_const *new;
58410 files = kzalloc(sizeof(struct ctl_table) * (nr_files + 1),
58411 GFP_KERNEL);
58412 if (!files)
58413@@ -1341,7 +1374,7 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
58414 /* Register everything except a directory full of subdirectories */
58415 if (nr_files || !nr_dirs) {
58416 struct ctl_table_header *header;
58417- header = __register_sysctl_table(set, path, files);
58418+ header = __register_sysctl_table(set, path, files ? files : table);
58419 if (!header) {
58420 kfree(ctl_table_arg);
58421 goto out;
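
ctl_table_no_const exists because the PaX constify plugin makes struct ctl_table const by default; the handful of places that legitimately build sysctl tables at runtime, such as new_dir(), new_links() and register_leaf_sysctl_tables() above, must use the non-constified alias. Elsewhere in the patch it is defined along these lines (simplified):

	#ifdef CONFIG_PAX_CONSTIFY_PLUGIN
	typedef struct ctl_table __no_const ctl_table_no_const;
	#else
	typedef struct ctl_table ctl_table_no_const;
	#endif
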
58422diff --git a/fs/proc/root.c b/fs/proc/root.c
58423index 41a6ea9..23eaa92 100644
58424--- a/fs/proc/root.c
58425+++ b/fs/proc/root.c
58426@@ -182,7 +182,15 @@ void __init proc_root_init(void)
58427 #ifdef CONFIG_PROC_DEVICETREE
58428 proc_device_tree_init();
58429 #endif
58430+#ifdef CONFIG_GRKERNSEC_PROC_ADD
58431+#ifdef CONFIG_GRKERNSEC_PROC_USER
58432+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
58433+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
58434+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
58435+#endif
58436+#else
58437 proc_mkdir("bus", NULL);
58438+#endif
58439 proc_sys_init();
58440 }
58441
58442diff --git a/fs/proc/self.c b/fs/proc/self.c
58443index 6b6a993..807cccc 100644
58444--- a/fs/proc/self.c
58445+++ b/fs/proc/self.c
58446@@ -39,7 +39,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
58447 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
58448 void *cookie)
58449 {
58450- char *s = nd_get_link(nd);
58451+ const char *s = nd_get_link(nd);
58452 if (!IS_ERR(s))
58453 kfree(s);
58454 }
58455diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
58456index 3e636d8..350cc48 100644
58457--- a/fs/proc/task_mmu.c
58458+++ b/fs/proc/task_mmu.c
58459@@ -11,12 +11,19 @@
58460 #include <linux/rmap.h>
58461 #include <linux/swap.h>
58462 #include <linux/swapops.h>
58463+#include <linux/grsecurity.h>
58464
58465 #include <asm/elf.h>
58466 #include <asm/uaccess.h>
58467 #include <asm/tlbflush.h>
58468 #include "internal.h"
58469
58470+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
58471+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
58472+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
58473+ _mm->pax_flags & MF_PAX_SEGMEXEC))
58474+#endif
58475+
58476 void task_mem(struct seq_file *m, struct mm_struct *mm)
58477 {
58478 unsigned long data, text, lib, swap;
58479@@ -52,8 +59,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
58480 "VmExe:\t%8lu kB\n"
58481 "VmLib:\t%8lu kB\n"
58482 "VmPTE:\t%8lu kB\n"
58483- "VmSwap:\t%8lu kB\n",
58484- hiwater_vm << (PAGE_SHIFT-10),
58485+ "VmSwap:\t%8lu kB\n"
58486+
58487+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
58488+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
58489+#endif
58490+
58491+ ,hiwater_vm << (PAGE_SHIFT-10),
58492 total_vm << (PAGE_SHIFT-10),
58493 mm->locked_vm << (PAGE_SHIFT-10),
58494 mm->pinned_vm << (PAGE_SHIFT-10),
58495@@ -62,7 +74,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
58496 data << (PAGE_SHIFT-10),
58497 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
58498 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
58499- swap << (PAGE_SHIFT-10));
58500+ swap << (PAGE_SHIFT-10)
58501+
58502+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
58503+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
58504+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
58505+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
58506+#else
58507+ , mm->context.user_cs_base
58508+ , mm->context.user_cs_limit
58509+#endif
58510+#endif
58511+
58512+ );
58513 }
58514
58515 unsigned long task_vsize(struct mm_struct *mm)
58516@@ -277,13 +301,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
58517 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
58518 }
58519
58520- /* We don't show the stack guard page in /proc/maps */
58521+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
58522+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
58523+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
58524+#else
58525 start = vma->vm_start;
58526- if (stack_guard_page_start(vma, start))
58527- start += PAGE_SIZE;
58528 end = vma->vm_end;
58529- if (stack_guard_page_end(vma, end))
58530- end -= PAGE_SIZE;
58531+#endif
58532
58533 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
58534 start,
58535@@ -292,7 +316,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
58536 flags & VM_WRITE ? 'w' : '-',
58537 flags & VM_EXEC ? 'x' : '-',
58538 flags & VM_MAYSHARE ? 's' : 'p',
58539+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
58540+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
58541+#else
58542 pgoff,
58543+#endif
58544 MAJOR(dev), MINOR(dev), ino, &len);
58545
58546 /*
58547@@ -301,7 +329,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
58548 */
58549 if (file) {
58550 pad_len_spaces(m, len);
58551- seq_path(m, &file->f_path, "\n");
58552+ seq_path(m, &file->f_path, "\n\\");
58553 goto done;
58554 }
58555
58556@@ -327,8 +355,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
58557 * Thread stack in /proc/PID/task/TID/maps or
58558 * the main process stack.
58559 */
58560- if (!is_pid || (vma->vm_start <= mm->start_stack &&
58561- vma->vm_end >= mm->start_stack)) {
58562+ if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
58563+ (vma->vm_start <= mm->start_stack &&
58564+ vma->vm_end >= mm->start_stack)) {
58565 name = "[stack]";
58566 } else {
58567 /* Thread stack in /proc/PID/maps */
58568@@ -352,6 +381,13 @@ static int show_map(struct seq_file *m, void *v, int is_pid)
58569 struct proc_maps_private *priv = m->private;
58570 struct task_struct *task = priv->task;
58571
58572+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
58573+ if (current->exec_id != m->exec_id) {
58574+ gr_log_badprocpid("maps");
58575+ return 0;
58576+ }
58577+#endif
58578+
58579 show_map_vma(m, vma, is_pid);
58580
58581 if (m->count < m->size) /* vma is copied successfully */
58582@@ -589,12 +625,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
58583 .private = &mss,
58584 };
58585
58586+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
58587+ if (current->exec_id != m->exec_id) {
58588+ gr_log_badprocpid("smaps");
58589+ return 0;
58590+ }
58591+#endif
58592 memset(&mss, 0, sizeof mss);
58593- mss.vma = vma;
58594- /* mmap_sem is held in m_start */
58595- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
58596- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
58597-
58598+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
58599+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
58600+#endif
58601+ mss.vma = vma;
58602+ /* mmap_sem is held in m_start */
58603+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
58604+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
58605+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
58606+ }
58607+#endif
58608 show_map_vma(m, vma, is_pid);
58609
58610 seq_printf(m,
58611@@ -612,7 +659,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
58612 "KernelPageSize: %8lu kB\n"
58613 "MMUPageSize: %8lu kB\n"
58614 "Locked: %8lu kB\n",
58615+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
58616+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
58617+#else
58618 (vma->vm_end - vma->vm_start) >> 10,
58619+#endif
58620 mss.resident >> 10,
58621 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
58622 mss.shared_clean >> 10,
58623@@ -792,14 +843,14 @@ typedef struct {
58624 } pagemap_entry_t;
58625
58626 struct pagemapread {
58627- int pos, len;
58628+ int pos, len; /* units: PM_ENTRY_BYTES, not bytes */
58629 pagemap_entry_t *buffer;
58630 };
58631
58632 #define PAGEMAP_WALK_SIZE (PMD_SIZE)
58633 #define PAGEMAP_WALK_MASK (PMD_MASK)
58634
58635-#define PM_ENTRY_BYTES sizeof(u64)
58636+#define PM_ENTRY_BYTES sizeof(pagemap_entry_t)
58637 #define PM_STATUS_BITS 3
58638 #define PM_STATUS_OFFSET (64 - PM_STATUS_BITS)
58639 #define PM_STATUS_MASK (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
58640@@ -1038,8 +1089,8 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
58641 if (!count)
58642 goto out_task;
58643
58644- pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
58645- pm.buffer = kmalloc(pm.len, GFP_TEMPORARY);
58646+ pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
58647+ pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY);
58648 ret = -ENOMEM;
58649 if (!pm.buffer)
58650 goto out_task;
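The PM_ENTRY_BYTES rework above fixes a units mismatch: pm.len was computed in bytes but consumed elsewhere as an entry count, which was only coincidentally harmless while an entry was exactly sizeof(u64). After wrapping entries in pagemap_entry_t, len counts entries and the allocation multiplies by the real entry size. A stand-alone check of the arithmetic, assuming typical x86-64 constants:

#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t pme; } pagemap_entry_t;

#define PAGE_SHIFT        12
#define PMD_SIZE          (1UL << 21)       /* assumed: 2 MiB, typical x86-64 */
#define PAGEMAP_WALK_SIZE PMD_SIZE
#define PM_ENTRY_BYTES    sizeof(pagemap_entry_t)

int main(void)
{
    /* After the fix: len is an entry count... */
    size_t len = PAGEMAP_WALK_SIZE >> PAGE_SHIFT;   /* 512 entries */
    /* ...and the buffer is sized as entries * entry size. */
    size_t bytes = len * PM_ENTRY_BYTES;            /* 4096 bytes */

    printf("entries=%zu buffer=%zu bytes\n", len, bytes);
    return 0;
}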
58651@@ -1264,6 +1315,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
58652 int n;
58653 char buffer[50];
58654
58655+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
58656+ if (current->exec_id != m->exec_id) {
58657+ gr_log_badprocpid("numa_maps");
58658+ return 0;
58659+ }
58660+#endif
58661+
58662 if (!mm)
58663 return 0;
58664
58665@@ -1281,11 +1339,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
58666 mpol_to_str(buffer, sizeof(buffer), pol);
58667 mpol_cond_put(pol);
58668
58669+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
58670+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
58671+#else
58672 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
58673+#endif
58674
58675 if (file) {
58676 seq_printf(m, " file=");
58677- seq_path(m, &file->f_path, "\n\t= ");
58678+ seq_path(m, &file->f_path, "\n\t\\= ");
58679 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
58680 seq_printf(m, " heap");
58681 } else {
58682diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
58683index 56123a6..5a2f6ec 100644
58684--- a/fs/proc/task_nommu.c
58685+++ b/fs/proc/task_nommu.c
58686@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
58687 else
58688 bytes += kobjsize(mm);
58689
58690- if (current->fs && current->fs->users > 1)
58691+ if (current->fs && atomic_read(&current->fs->users) > 1)
58692 sbytes += kobjsize(current->fs);
58693 else
58694 bytes += kobjsize(current->fs);
58695@@ -168,7 +168,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
58696
58697 if (file) {
58698 pad_len_spaces(m, len);
58699- seq_path(m, &file->f_path, "");
58700+ seq_path(m, &file->f_path, "\n\\");
58701 } else if (mm) {
58702 pid_t tid = vm_is_stack(priv->task, vma, is_pid);
58703
58704diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
58705index 17f7e08..e4b1529 100644
58706--- a/fs/proc/vmcore.c
58707+++ b/fs/proc/vmcore.c
58708@@ -99,9 +99,13 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
58709 nr_bytes = count;
58710
58711 /* If pfn is not ram, return zeros for sparse dump files */
58712- if (pfn_is_ram(pfn) == 0)
58713- memset(buf, 0, nr_bytes);
58714- else {
58715+ if (pfn_is_ram(pfn) == 0) {
58716+ if (userbuf) {
58717+ if (clear_user((char __force_user *)buf, nr_bytes))
58718+ return -EFAULT;
58719+ } else
58720+ memset(buf, 0, nr_bytes);
58721+ } else {
58722 tmp = copy_oldmem_page(pfn, buf, nr_bytes,
58723 offset, userbuf);
58724 if (tmp < 0)
58725@@ -186,7 +190,7 @@ static ssize_t read_vmcore(struct file *file, char __user *buffer,
58726 if (tsz > nr_bytes)
58727 tsz = nr_bytes;
58728
58729- tmp = read_from_oldmem(buffer, tsz, &start, 1);
58730+ tmp = read_from_oldmem((char __force_kernel *)buffer, tsz, &start, 1);
58731 if (tmp < 0)
58732 return tmp;
58733 buflen -= tsz;
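read_from_oldmem() can be handed either a kernel or a user buffer, selected by the userbuf flag. Zeroing a user pointer with memset() would skip the access checks (and fault outright under PaX's UDEREF), so the non-RAM path now branches: clear_user() for user memory, memset() for kernel memory. A user-space sketch of the same dual-path shape; clear_user_stub() below is a stand-in, not the kernel helper:

#include <string.h>
#include <stdio.h>

/* Stand-in for the kernel's checked user-memory zeroing helper;
 * the real one validates the range and returns bytes NOT cleared. */
static unsigned long clear_user_stub(void *ubuf, size_t n)
{
    memset(ubuf, 0, n);
    return 0;
}

static int zero_fill(char *buf, size_t n, int userbuf)
{
    if (userbuf) {
        if (clear_user_stub(buf, n))
            return -14;          /* -EFAULT */
    } else {
        memset(buf, 0, n);
    }
    return 0;
}

int main(void)
{
    char kbuf[8] = "junkjun";

    zero_fill(kbuf, sizeof(kbuf), 0);
    printf("first byte after fill: %d\n", kbuf[0]);   /* 0 */
    return 0;
}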
58734diff --git a/fs/qnx6/qnx6.h b/fs/qnx6/qnx6.h
58735index b00fcc9..e0c6381 100644
58736--- a/fs/qnx6/qnx6.h
58737+++ b/fs/qnx6/qnx6.h
58738@@ -74,7 +74,7 @@ enum {
58739 BYTESEX_BE,
58740 };
58741
58742-static inline __u64 fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
58743+static inline __u64 __intentional_overflow(-1) fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
58744 {
58745 if (sbi->s_bytesex == BYTESEX_LE)
58746 return le64_to_cpu((__force __le64)n);
58747@@ -90,7 +90,7 @@ static inline __fs64 cpu_to_fs64(struct qnx6_sb_info *sbi, __u64 n)
58748 return (__force __fs64)cpu_to_be64(n);
58749 }
58750
58751-static inline __u32 fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
58752+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
58753 {
58754 if (sbi->s_bytesex == BYTESEX_LE)
58755 return le32_to_cpu((__force __le32)n);
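__intentional_overflow(-1) is an annotation for PaX's size_overflow GCC plugin: byte-order conversion helpers legitimately reinterpret every bit of the value, so the plugin's arithmetic-overflow instrumentation must be told to leave them alone. On a compiler without the plugin the marker has to expand to nothing. A sketch of that guard; the SIZE_OVERFLOW_PLUGIN symbol and attribute spelling are assumptions made for illustration:

/* With the size_overflow plugin: emit the marker attribute.
 * Without it: expand to nothing so ordinary builds are unaffected. */
#ifdef SIZE_OVERFLOW_PLUGIN
# define __intentional_overflow(...) \
    __attribute__((intentional_overflow(__VA_ARGS__)))
#else
# define __intentional_overflow(...)
#endif

/* Usage mirrors the patch: exempt whole-value byte swaps. */
static inline unsigned int __intentional_overflow(-1)
swab32_sketch(unsigned int x)
{
    return (x >> 24) | ((x >> 8) & 0xff00u) |
           ((x << 8) & 0xff0000u) | (x << 24);
}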
58756diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
58757index 16e8abb..2dcf914 100644
58758--- a/fs/quota/netlink.c
58759+++ b/fs/quota/netlink.c
58760@@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
58761 void quota_send_warning(struct kqid qid, dev_t dev,
58762 const char warntype)
58763 {
58764- static atomic_t seq;
58765+ static atomic_unchecked_t seq;
58766 struct sk_buff *skb;
58767 void *msg_head;
58768 int ret;
58769@@ -49,7 +49,7 @@ void quota_send_warning(struct kqid qid, dev_t dev,
58770 "VFS: Not enough memory to send quota warning.\n");
58771 return;
58772 }
58773- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
58774+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
58775 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
58776 if (!msg_head) {
58777 printk(KERN_ERR
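atomic_unchecked_t is the opt-out type for PaX's REFCOUNT hardening: ordinary atomic_t operations are instrumented to trap on overflow, defeating reference-count-overflow exploits, so counters that are allowed to wrap, such as this netlink sequence number, are moved to the _unchecked variants. A sketch of the non-PaX fallback mapping using C11 atomics; the degenerate typedef is an assumption for the sketch, not the PaX implementation:

#include <stdatomic.h>

/* Sketch: on a non-PaX build the unchecked type degenerates to a
 * plain atomic counter with no overflow trap. */
typedef struct { atomic_int counter; } atomic_unchecked_t;

static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
{
    return atomic_fetch_add(&v->counter, i) + i;   /* new value */
}

/* Message sequence number: wrapping here is harmless by design. */
static atomic_unchecked_t seq;

int next_seq(void)
{
    return atomic_add_return_unchecked(1, &seq);
}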
58778diff --git a/fs/read_write.c b/fs/read_write.c
58779index 2cefa41..c7e2fe0 100644
58780--- a/fs/read_write.c
58781+++ b/fs/read_write.c
58782@@ -411,7 +411,7 @@ ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t
58783
58784 old_fs = get_fs();
58785 set_fs(get_ds());
58786- p = (__force const char __user *)buf;
58787+ p = (const char __force_user *)buf;
58788 if (count > MAX_RW_COUNT)
58789 count = MAX_RW_COUNT;
58790 if (file->f_op->write)
58791diff --git a/fs/readdir.c b/fs/readdir.c
58792index fee38e0..12fdf47 100644
58793--- a/fs/readdir.c
58794+++ b/fs/readdir.c
58795@@ -17,6 +17,7 @@
58796 #include <linux/security.h>
58797 #include <linux/syscalls.h>
58798 #include <linux/unistd.h>
58799+#include <linux/namei.h>
58800
58801 #include <asm/uaccess.h>
58802
58803@@ -67,6 +68,7 @@ struct old_linux_dirent {
58804
58805 struct readdir_callback {
58806 struct old_linux_dirent __user * dirent;
58807+ struct file * file;
58808 int result;
58809 };
58810
58811@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
58812 buf->result = -EOVERFLOW;
58813 return -EOVERFLOW;
58814 }
58815+
58816+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
58817+ return 0;
58818+
58819 buf->result++;
58820 dirent = buf->dirent;
58821 if (!access_ok(VERIFY_WRITE, dirent,
58822@@ -114,6 +120,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
58823
58824 buf.result = 0;
58825 buf.dirent = dirent;
58826+ buf.file = f.file;
58827
58828 error = vfs_readdir(f.file, fillonedir, &buf);
58829 if (buf.result)
58830@@ -139,6 +146,7 @@ struct linux_dirent {
58831 struct getdents_callback {
58832 struct linux_dirent __user * current_dir;
58833 struct linux_dirent __user * previous;
58834+ struct file * file;
58835 int count;
58836 int error;
58837 };
58838@@ -160,6 +168,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
58839 buf->error = -EOVERFLOW;
58840 return -EOVERFLOW;
58841 }
58842+
58843+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
58844+ return 0;
58845+
58846 dirent = buf->previous;
58847 if (dirent) {
58848 if (__put_user(offset, &dirent->d_off))
58849@@ -205,6 +217,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
58850 buf.previous = NULL;
58851 buf.count = count;
58852 buf.error = 0;
58853+ buf.file = f.file;
58854
58855 error = vfs_readdir(f.file, filldir, &buf);
58856 if (error >= 0)
58857@@ -223,6 +236,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
58858 struct getdents_callback64 {
58859 struct linux_dirent64 __user * current_dir;
58860 struct linux_dirent64 __user * previous;
58861+ struct file *file;
58862 int count;
58863 int error;
58864 };
58865@@ -238,6 +252,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
58866 buf->error = -EINVAL; /* only used if we fail.. */
58867 if (reclen > buf->count)
58868 return -EINVAL;
58869+
58870+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
58871+ return 0;
58872+
58873 dirent = buf->previous;
58874 if (dirent) {
58875 if (__put_user(offset, &dirent->d_off))
58876@@ -283,6 +301,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
58877
58878 buf.current_dir = dirent;
58879 buf.previous = NULL;
58880+ buf.file = f.file;
58881 buf.count = count;
58882 buf.error = 0;
58883
58884@@ -291,7 +310,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
58885 error = buf.error;
58886 lastdirent = buf.previous;
58887 if (lastdirent) {
58888- typeof(lastdirent->d_off) d_off = f.file->f_pos;
58889+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = f.file->f_pos;
58890 if (__put_user(d_off, &lastdirent->d_off))
58891 error = -EFAULT;
58892 else
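Each readdir callback buffer gains a struct file pointer so gr_acl_handle_filldir() can consult the RBAC policy for the directory being listed and silently skip entries the policy hides; returning 0 from the callback drops one entry without failing the whole getdents call. The shape of the pattern in a user-space sketch, with a stand-in predicate instead of the real policy check:

#include <stdio.h>
#include <string.h>

struct file;                     /* opaque, as in the kernel */

struct getdents_ctx {
    struct file *file;           /* threaded in so the hook has context */
    int emitted;
};

/* Stand-in for the RBAC check: hide names starting with "secret". */
static int policy_allows(struct file *dir, const char *name, int namlen)
{
    (void)dir;
    return !(namlen >= 6 && memcmp(name, "secret", 6) == 0);
}

static int filldir_sketch(struct getdents_ctx *buf, const char *name)
{
    int namlen = (int)strlen(name);

    if (!policy_allows(buf->file, name, namlen))
        return 0;                /* skip this entry, keep iterating */
    printf("%s\n", name);
    buf->emitted++;
    return 0;
}

int main(void)
{
    struct getdents_ctx ctx = { 0 };
    const char *entries[] = { "a.txt", "secret.key", "b.txt" };

    for (unsigned i = 0; i < 3; i++)
        filldir_sketch(&ctx, entries[i]);
    return 0;                    /* prints a.txt and b.txt only */
}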
58893diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
58894index 2b7882b..1c5ef48 100644
58895--- a/fs/reiserfs/do_balan.c
58896+++ b/fs/reiserfs/do_balan.c
58897@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
58898 return;
58899 }
58900
58901- atomic_inc(&(fs_generation(tb->tb_sb)));
58902+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
58903 do_balance_starts(tb);
58904
58905 /* balance leaf returns 0 except if combining L R and S into
58906diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
58907index 1d48974..2f8f4e0 100644
58908--- a/fs/reiserfs/procfs.c
58909+++ b/fs/reiserfs/procfs.c
58910@@ -114,7 +114,7 @@ static int show_super(struct seq_file *m, void *unused)
58911 "SMALL_TAILS " : "NO_TAILS ",
58912 replay_only(sb) ? "REPLAY_ONLY " : "",
58913 convert_reiserfs(sb) ? "CONV " : "",
58914- atomic_read(&r->s_generation_counter),
58915+ atomic_read_unchecked(&r->s_generation_counter),
58916 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
58917 SF(s_do_balance), SF(s_unneeded_left_neighbor),
58918 SF(s_good_search_by_key_reada), SF(s_bmaps),
58919diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
58920index 157e474..65a6114 100644
58921--- a/fs/reiserfs/reiserfs.h
58922+++ b/fs/reiserfs/reiserfs.h
58923@@ -453,7 +453,7 @@ struct reiserfs_sb_info {
58924 /* Comment? -Hans */
58925 wait_queue_head_t s_wait;
58926 /* To be obsoleted soon by per buffer seals.. -Hans */
58927- atomic_t s_generation_counter; // increased by one every time the
58928+ atomic_unchecked_t s_generation_counter; // increased by one every time the
58929 // tree gets re-balanced
58930 unsigned long s_properties; /* File system properties. Currently holds
58931 on-disk FS format */
58932@@ -1978,7 +1978,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
58933 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
58934
58935 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
58936-#define get_generation(s) atomic_read (&fs_generation(s))
58937+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
58938 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
58939 #define __fs_changed(gen,s) (gen != get_generation (s))
58940 #define fs_changed(gen,s) \
58941diff --git a/fs/select.c b/fs/select.c
58942index 8c1c96c..a0f9b6d 100644
58943--- a/fs/select.c
58944+++ b/fs/select.c
58945@@ -20,6 +20,7 @@
58946 #include <linux/export.h>
58947 #include <linux/slab.h>
58948 #include <linux/poll.h>
58949+#include <linux/security.h>
58950 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
58951 #include <linux/file.h>
58952 #include <linux/fdtable.h>
58953@@ -827,6 +828,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
58954 struct poll_list *walk = head;
58955 unsigned long todo = nfds;
58956
58957+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
58958 if (nfds > rlimit(RLIMIT_NOFILE))
58959 return -EINVAL;
58960
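gr_learn_resource() feeds grsecurity's RBAC learning mode: before the hard RLIMIT_NOFILE check, the hook records how many descriptors the process actually asked to poll, so a generated policy can later grant a tight, observed limit instead of a guess. A toy high-water-mark recorder in the same spirit; everything here is illustrative, since the real hook logs through the grsecurity subsystem:

#include <stdio.h>

/* Remember the largest value each resource has been asked for. */
static unsigned long observed_max[16];

static void learn_resource(unsigned res, unsigned long wanted)
{
    if (res < 16 && wanted > observed_max[res])
        observed_max[res] = wanted;
}

int main(void)
{
    enum { NOFILE = 7 };                 /* illustrative index */

    learn_resource(NOFILE, 64);
    learn_resource(NOFILE, 512);         /* a busier poll() call */
    printf("suggested nofile limit: %lu\n", observed_max[NOFILE]);
    return 0;
}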
58961diff --git a/fs/seq_file.c b/fs/seq_file.c
58962index 774c1eb..b67582a 100644
58963--- a/fs/seq_file.c
58964+++ b/fs/seq_file.c
58965@@ -10,6 +10,7 @@
58966 #include <linux/seq_file.h>
58967 #include <linux/slab.h>
58968 #include <linux/cred.h>
58969+#include <linux/sched.h>
58970
58971 #include <asm/uaccess.h>
58972 #include <asm/page.h>
58973@@ -60,6 +61,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
58974 #ifdef CONFIG_USER_NS
58975 p->user_ns = file->f_cred->user_ns;
58976 #endif
58977+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
58978+ p->exec_id = current->exec_id;
58979+#endif
58980
58981 /*
58982 * Wrappers around seq_open(e.g. swaps_open) need to be
58983@@ -96,7 +100,7 @@ static int traverse(struct seq_file *m, loff_t offset)
58984 return 0;
58985 }
58986 if (!m->buf) {
58987- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
58988+ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
58989 if (!m->buf)
58990 return -ENOMEM;
58991 }
58992@@ -136,7 +140,7 @@ static int traverse(struct seq_file *m, loff_t offset)
58993 Eoverflow:
58994 m->op->stop(m, p);
58995 kfree(m->buf);
58996- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
58997+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
58998 return !m->buf ? -ENOMEM : -EAGAIN;
58999 }
59000
59001@@ -191,7 +195,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
59002
59003 /* grab buffer if we didn't have one */
59004 if (!m->buf) {
59005- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
59006+ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
59007 if (!m->buf)
59008 goto Enomem;
59009 }
59010@@ -232,7 +236,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
59011 goto Fill;
59012 m->op->stop(m, p);
59013 kfree(m->buf);
59014- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
59015+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
59016 if (!m->buf)
59017 goto Enomem;
59018 m->count = 0;
59019@@ -581,7 +585,7 @@ static void single_stop(struct seq_file *p, void *v)
59020 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
59021 void *data)
59022 {
59023- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
59024+ seq_operations_no_const *op = kzalloc(sizeof(*op), GFP_KERNEL);
59025 int res = -ENOMEM;
59026
59027 if (op) {
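Two themes run through the seq_file hunks: buffers destined for wholesale copy_to_user() are now allocated with GFP_USERCOPY, steering them into slabs that PAX_USERCOPY whitelists as copy sources, and single_open() switches to seq_operations_no_const (plus kzalloc) because the constify plugin makes struct seq_operations read-only, which a heap-built ops table cannot be. A toy model of the usercopy whitelist idea; the flag bit and bookkeeping are invented for this sketch:

#include <stdlib.h>
#include <stdio.h>

#define GFP_KERNEL   0x01
#define GFP_USERCOPY 0x02      /* invented flag bit for this sketch */

/* Track whether the last allocation may be copied to user space. */
static struct { void *p; int usercopy_ok; } last;

static void *kmalloc_sketch(size_t n, int flags)
{
    last.p = malloc(n);
    last.usercopy_ok = !!(flags & GFP_USERCOPY);
    return last.p;
}

/* A PAX_USERCOPY-style gate: only whitelisted buffers may leave. */
static int copy_to_user_sketch(void *dst, const void *src, size_t n)
{
    (void)dst; (void)n;
    if (src == last.p && !last.usercopy_ok) {
        fprintf(stderr, "usercopy violation blocked\n");
        return -1;
    }
    return 0;                  /* the copy would proceed here */
}

int main(void)
{
    char out[16];
    void *buf = kmalloc_sketch(16, GFP_KERNEL | GFP_USERCOPY);

    return copy_to_user_sketch(out, buf, 16) ? 1 : 0;
}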
59028diff --git a/fs/splice.c b/fs/splice.c
59029index d37431d..81c3044 100644
59030--- a/fs/splice.c
59031+++ b/fs/splice.c
59032@@ -196,7 +196,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
59033 pipe_lock(pipe);
59034
59035 for (;;) {
59036- if (!pipe->readers) {
59037+ if (!atomic_read(&pipe->readers)) {
59038 send_sig(SIGPIPE, current, 0);
59039 if (!ret)
59040 ret = -EPIPE;
59041@@ -219,7 +219,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
59042 page_nr++;
59043 ret += buf->len;
59044
59045- if (pipe->files)
59046+ if (atomic_read(&pipe->files))
59047 do_wakeup = 1;
59048
59049 if (!--spd->nr_pages)
59050@@ -250,9 +250,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
59051 do_wakeup = 0;
59052 }
59053
59054- pipe->waiting_writers++;
59055+ atomic_inc(&pipe->waiting_writers);
59056 pipe_wait(pipe);
59057- pipe->waiting_writers--;
59058+ atomic_dec(&pipe->waiting_writers);
59059 }
59060
59061 pipe_unlock(pipe);
59062@@ -565,7 +565,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
59063 old_fs = get_fs();
59064 set_fs(get_ds());
59065 /* The cast to a user pointer is valid due to the set_fs() */
59066- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
59067+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
59068 set_fs(old_fs);
59069
59070 return res;
59071@@ -580,7 +580,7 @@ ssize_t kernel_write(struct file *file, const char *buf, size_t count,
59072 old_fs = get_fs();
59073 set_fs(get_ds());
59074 /* The cast to a user pointer is valid due to the set_fs() */
59075- res = vfs_write(file, (__force const char __user *)buf, count, &pos);
59076+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
59077 set_fs(old_fs);
59078
59079 return res;
59080@@ -633,7 +633,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
59081 goto err;
59082
59083 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
59084- vec[i].iov_base = (void __user *) page_address(page);
59085+ vec[i].iov_base = (void __force_user *) page_address(page);
59086 vec[i].iov_len = this_len;
59087 spd.pages[i] = page;
59088 spd.nr_pages++;
59089@@ -829,7 +829,7 @@ int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_desc *sd,
59090 ops->release(pipe, buf);
59091 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
59092 pipe->nrbufs--;
59093- if (pipe->files)
59094+ if (atomic_read(&pipe->files))
59095 sd->need_wakeup = true;
59096 }
59097
59098@@ -854,10 +854,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
59099 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
59100 {
59101 while (!pipe->nrbufs) {
59102- if (!pipe->writers)
59103+ if (!atomic_read(&pipe->writers))
59104 return 0;
59105
59106- if (!pipe->waiting_writers && sd->num_spliced)
59107+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
59108 return 0;
59109
59110 if (sd->flags & SPLICE_F_NONBLOCK)
59111@@ -1193,7 +1193,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
59112 * out of the pipe right after the splice_to_pipe(). So set
59113 * PIPE_READERS appropriately.
59114 */
59115- pipe->readers = 1;
59116+ atomic_set(&pipe->readers, 1);
59117
59118 current->splice_pipe = pipe;
59119 }
59120@@ -1769,9 +1769,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
59121 ret = -ERESTARTSYS;
59122 break;
59123 }
59124- if (!pipe->writers)
59125+ if (!atomic_read(&pipe->writers))
59126 break;
59127- if (!pipe->waiting_writers) {
59128+ if (!atomic_read(&pipe->waiting_writers)) {
59129 if (flags & SPLICE_F_NONBLOCK) {
59130 ret = -EAGAIN;
59131 break;
59132@@ -1803,7 +1803,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
59133 pipe_lock(pipe);
59134
59135 while (pipe->nrbufs >= pipe->buffers) {
59136- if (!pipe->readers) {
59137+ if (!atomic_read(&pipe->readers)) {
59138 send_sig(SIGPIPE, current, 0);
59139 ret = -EPIPE;
59140 break;
59141@@ -1816,9 +1816,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
59142 ret = -ERESTARTSYS;
59143 break;
59144 }
59145- pipe->waiting_writers++;
59146+ atomic_inc(&pipe->waiting_writers);
59147 pipe_wait(pipe);
59148- pipe->waiting_writers--;
59149+ atomic_dec(&pipe->waiting_writers);
59150 }
59151
59152 pipe_unlock(pipe);
59153@@ -1854,14 +1854,14 @@ retry:
59154 pipe_double_lock(ipipe, opipe);
59155
59156 do {
59157- if (!opipe->readers) {
59158+ if (!atomic_read(&opipe->readers)) {
59159 send_sig(SIGPIPE, current, 0);
59160 if (!ret)
59161 ret = -EPIPE;
59162 break;
59163 }
59164
59165- if (!ipipe->nrbufs && !ipipe->writers)
59166+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
59167 break;
59168
59169 /*
59170@@ -1958,7 +1958,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
59171 pipe_double_lock(ipipe, opipe);
59172
59173 do {
59174- if (!opipe->readers) {
59175+ if (!atomic_read(&opipe->readers)) {
59176 send_sig(SIGPIPE, current, 0);
59177 if (!ret)
59178 ret = -EPIPE;
59179@@ -2003,7 +2003,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
59180 * return EAGAIN if we have the potential of some data in the
59181 * future, otherwise just return 0
59182 */
59183- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
59184+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
59185 ret = -EAGAIN;
59186
59187 pipe_unlock(ipipe);
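The splice.c churn is the consumer side of a pipe_inode_info change made elsewhere in this patch: readers, writers, files and waiting_writers become atomic_t (overflow-checked under PaX REFCOUNT), so every bare load, increment and decrement is rewritten through atomic_read()/atomic_inc()/atomic_dec(). The access pattern in miniature, with C11 atomics standing in for the kernel's:

#include <stdatomic.h>
#include <stdio.h>

struct pipe_sketch {
    atomic_int readers;
    atomic_int waiting_writers;
};

static int writer_should_wait(struct pipe_sketch *p)
{
    if (!atomic_load(&p->readers))
        return -32;                       /* -EPIPE: no readers left */
    atomic_fetch_add(&p->waiting_writers, 1);
    /* ...block until woken, then... */
    atomic_fetch_sub(&p->waiting_writers, 1);
    return 0;
}

int main(void)
{
    struct pipe_sketch p;

    atomic_init(&p.readers, 1);
    atomic_init(&p.waiting_writers, 0);
    printf("wait result: %d\n", writer_should_wait(&p));
    return 0;
}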
59188diff --git a/fs/stat.c b/fs/stat.c
59189index 04ce1ac..a13dd1e 100644
59190--- a/fs/stat.c
59191+++ b/fs/stat.c
59192@@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
59193 stat->gid = inode->i_gid;
59194 stat->rdev = inode->i_rdev;
59195 stat->size = i_size_read(inode);
59196- stat->atime = inode->i_atime;
59197- stat->mtime = inode->i_mtime;
59198+ if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
59199+ stat->atime = inode->i_ctime;
59200+ stat->mtime = inode->i_ctime;
59201+ } else {
59202+ stat->atime = inode->i_atime;
59203+ stat->mtime = inode->i_mtime;
59204+ }
59205 stat->ctime = inode->i_ctime;
59206 stat->blksize = (1 << inode->i_blkbits);
59207 stat->blocks = inode->i_blocks;
59208@@ -46,8 +51,14 @@ int vfs_getattr(struct path *path, struct kstat *stat)
59209 if (retval)
59210 return retval;
59211
59212- if (inode->i_op->getattr)
59213- return inode->i_op->getattr(path->mnt, path->dentry, stat);
59214+ if (inode->i_op->getattr) {
59215+ retval = inode->i_op->getattr(path->mnt, path->dentry, stat);
59216+ if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
59217+ stat->atime = stat->ctime;
59218+ stat->mtime = stat->ctime;
59219+ }
59220+ return retval;
59221+ }
59222
59223 generic_fillattr(inode, stat);
59224 return 0;
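These stat() changes implement the GRKERNSEC_DEVICE_SIDECHANNEL option described in the Kconfig section later in this patch: for device nodes, a caller without CAP_MKNOD sees ctime substituted for atime and mtime, so watching /dev/ptmx timestamps no longer reveals when, or how much, someone typed. The substitution in isolation, with types reduced to what the sketch needs:

#include <time.h>
#include <stdio.h>

struct kstat_sketch { time_t atime, mtime, ctime; };

static void sanitize_device_times(struct kstat_sketch *st, int privileged)
{
    if (!privileged) {
        st->atime = st->ctime;    /* hide the real access time */
        st->mtime = st->ctime;    /* hide the real modify time */
    }
}

int main(void)
{
    struct kstat_sketch st = { .atime = 1000, .mtime = 2000, .ctime = 500 };

    sanitize_device_times(&st, 0);
    printf("atime=%ld mtime=%ld\n", (long)st.atime, (long)st.mtime);
    return 0;
}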
59225diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c
59226index 15c68f9..36a8b3e 100644
59227--- a/fs/sysfs/bin.c
59228+++ b/fs/sysfs/bin.c
59229@@ -235,13 +235,13 @@ static int bin_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
59230 return ret;
59231 }
59232
59233-static int bin_access(struct vm_area_struct *vma, unsigned long addr,
59234- void *buf, int len, int write)
59235+static ssize_t bin_access(struct vm_area_struct *vma, unsigned long addr,
59236+ void *buf, size_t len, int write)
59237 {
59238 struct file *file = vma->vm_file;
59239 struct bin_buffer *bb = file->private_data;
59240 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
59241- int ret;
59242+ ssize_t ret;
59243
59244 if (!bb->vm_ops)
59245 return -EINVAL;
59246diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
59247index e8e0e71..79c28ac5 100644
59248--- a/fs/sysfs/dir.c
59249+++ b/fs/sysfs/dir.c
59250@@ -40,7 +40,7 @@ static DEFINE_IDA(sysfs_ino_ida);
59251 *
59252 * Returns 31 bit hash of ns + name (so it fits in an off_t )
59253 */
59254-static unsigned int sysfs_name_hash(const void *ns, const char *name)
59255+static unsigned int sysfs_name_hash(const void *ns, const unsigned char *name)
59256 {
59257 unsigned long hash = init_name_hash();
59258 unsigned int len = strlen(name);
59259@@ -679,6 +679,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
59260 struct sysfs_dirent *sd;
59261 int rc;
59262
59263+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
59264+ const char *parent_name = parent_sd->s_name;
59265+
59266+ mode = S_IFDIR | S_IRWXU;
59267+
59268+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
59269+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
59270+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse") || !strcmp(name, "ecryptfs"))) ||
59271+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
59272+ mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
59273+#endif
59274+
59275 /* allocate */
59276 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
59277 if (!sd)
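Under GRKERNSEC_SYSFS_RESTRICT every sysfs directory defaults to root-only (S_IRWXU), and the strcmp chain above re-opens exactly the compatibility paths the option's Kconfig help lists (/sys/fs/selinux, /sys/fs/fuse, /sys/fs/ecryptfs, /sys/devices/system/cpu, plus their parents). Read as a hypothetical helper, the condition is a two-level (parent, name) whitelist:

#include <string.h>

/* Hypothetical refactor of the patch's open-coded condition: is
 * parent/name on the whitelist that stays world-readable? */
static int sysfs_whitelisted(const char *parent, const char *name)
{
    static const struct { const char *parent, *name; } wl[] = {
        { "",        "devices"  }, { "",       "fs"   },
        { "devices", "system"   }, { "system", "cpu"  },
        { "fs",      "selinux"  }, { "fs",     "fuse" },
        { "fs",      "ecryptfs" },
    };

    for (unsigned i = 0; i < sizeof(wl) / sizeof(wl[0]); i++)
        if (!strcmp(parent, wl[i].parent) && !strcmp(name, wl[i].name))
            return 1;
    return 0;
}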
59278diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
59279index 602f56d..6853db8 100644
59280--- a/fs/sysfs/file.c
59281+++ b/fs/sysfs/file.c
59282@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
59283
59284 struct sysfs_open_dirent {
59285 atomic_t refcnt;
59286- atomic_t event;
59287+ atomic_unchecked_t event;
59288 wait_queue_head_t poll;
59289 struct list_head buffers; /* goes through sysfs_buffer.list */
59290 };
59291@@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
59292 if (!sysfs_get_active(attr_sd))
59293 return -ENODEV;
59294
59295- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
59296+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
59297 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
59298
59299 sysfs_put_active(attr_sd);
59300@@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
59301 return -ENOMEM;
59302
59303 atomic_set(&new_od->refcnt, 0);
59304- atomic_set(&new_od->event, 1);
59305+ atomic_set_unchecked(&new_od->event, 1);
59306 init_waitqueue_head(&new_od->poll);
59307 INIT_LIST_HEAD(&new_od->buffers);
59308 goto retry;
59309@@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
59310
59311 sysfs_put_active(attr_sd);
59312
59313- if (buffer->event != atomic_read(&od->event))
59314+ if (buffer->event != atomic_read_unchecked(&od->event))
59315 goto trigger;
59316
59317 return DEFAULT_POLLMASK;
59318@@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
59319
59320 od = sd->s_attr.open;
59321 if (od) {
59322- atomic_inc(&od->event);
59323+ atomic_inc_unchecked(&od->event);
59324 wake_up_interruptible(&od->poll);
59325 }
59326
59327diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
59328index 8c940df..25b733e 100644
59329--- a/fs/sysfs/symlink.c
59330+++ b/fs/sysfs/symlink.c
59331@@ -305,7 +305,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
59332
59333 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
59334 {
59335- char *page = nd_get_link(nd);
59336+ const char *page = nd_get_link(nd);
59337 if (!IS_ERR(page))
59338 free_page((unsigned long)page);
59339 }
59340diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
59341index 69d4889..a810bd4 100644
59342--- a/fs/sysv/sysv.h
59343+++ b/fs/sysv/sysv.h
59344@@ -188,7 +188,7 @@ static inline u32 PDP_swab(u32 x)
59345 #endif
59346 }
59347
59348-static inline __u32 fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
59349+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
59350 {
59351 if (sbi->s_bytesex == BYTESEX_PDP)
59352 return PDP_swab((__force __u32)n);
59353diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
59354index e18b988..f1d4ad0f 100644
59355--- a/fs/ubifs/io.c
59356+++ b/fs/ubifs/io.c
59357@@ -155,7 +155,7 @@ int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
59358 return err;
59359 }
59360
59361-int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
59362+int __intentional_overflow(-1) ubifs_leb_unmap(struct ubifs_info *c, int lnum)
59363 {
59364 int err;
59365
59366diff --git a/fs/udf/misc.c b/fs/udf/misc.c
59367index c175b4d..8f36a16 100644
59368--- a/fs/udf/misc.c
59369+++ b/fs/udf/misc.c
59370@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
59371
59372 u8 udf_tag_checksum(const struct tag *t)
59373 {
59374- u8 *data = (u8 *)t;
59375+ const u8 *data = (const u8 *)t;
59376 u8 checksum = 0;
59377 int i;
59378 for (i = 0; i < sizeof(struct tag); ++i)
59379diff --git a/fs/ufs/swab.h b/fs/ufs/swab.h
59380index 8d974c4..b82f6ec 100644
59381--- a/fs/ufs/swab.h
59382+++ b/fs/ufs/swab.h
59383@@ -22,7 +22,7 @@ enum {
59384 BYTESEX_BE
59385 };
59386
59387-static inline u64
59388+static inline u64 __intentional_overflow(-1)
59389 fs64_to_cpu(struct super_block *sbp, __fs64 n)
59390 {
59391 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
59392@@ -40,7 +40,7 @@ cpu_to_fs64(struct super_block *sbp, u64 n)
59393 return (__force __fs64)cpu_to_be64(n);
59394 }
59395
59396-static inline u32
59397+static inline u32 __intentional_overflow(-1)
59398 fs32_to_cpu(struct super_block *sbp, __fs32 n)
59399 {
59400 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
59401diff --git a/fs/utimes.c b/fs/utimes.c
59402index f4fb7ec..3fe03c0 100644
59403--- a/fs/utimes.c
59404+++ b/fs/utimes.c
59405@@ -1,6 +1,7 @@
59406 #include <linux/compiler.h>
59407 #include <linux/file.h>
59408 #include <linux/fs.h>
59409+#include <linux/security.h>
59410 #include <linux/linkage.h>
59411 #include <linux/mount.h>
59412 #include <linux/namei.h>
59413@@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
59414 goto mnt_drop_write_and_out;
59415 }
59416 }
59417+
59418+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
59419+ error = -EACCES;
59420+ goto mnt_drop_write_and_out;
59421+ }
59422+
59423 mutex_lock(&inode->i_mutex);
59424 error = notify_change(path->dentry, &newattrs);
59425 mutex_unlock(&inode->i_mutex);
59426diff --git a/fs/xattr.c b/fs/xattr.c
59427index 3377dff..4d074d9 100644
59428--- a/fs/xattr.c
59429+++ b/fs/xattr.c
59430@@ -227,6 +227,27 @@ int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name,
59431 return rc;
59432 }
59433
59434+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
59435+ssize_t
59436+pax_getxattr(struct dentry *dentry, void *value, size_t size)
59437+{
59438+ struct inode *inode = dentry->d_inode;
59439+ ssize_t error;
59440+
59441+ error = inode_permission(inode, MAY_EXEC);
59442+ if (error)
59443+ return error;
59444+
59445+ if (inode->i_op->getxattr)
59446+ error = inode->i_op->getxattr(dentry, XATTR_NAME_PAX_FLAGS, value, size);
59447+ else
59448+ error = -EOPNOTSUPP;
59449+
59450+ return error;
59451+}
59452+EXPORT_SYMBOL(pax_getxattr);
59453+#endif
59454+
59455 ssize_t
59456 vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
59457 {
59458@@ -319,7 +340,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
59459 * Extended attribute SET operations
59460 */
59461 static long
59462-setxattr(struct dentry *d, const char __user *name, const void __user *value,
59463+setxattr(struct path *path, const char __user *name, const void __user *value,
59464 size_t size, int flags)
59465 {
59466 int error;
59467@@ -355,7 +376,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
59468 posix_acl_fix_xattr_from_user(kvalue, size);
59469 }
59470
59471- error = vfs_setxattr(d, kname, kvalue, size, flags);
59472+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
59473+ error = -EACCES;
59474+ goto out;
59475+ }
59476+
59477+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
59478 out:
59479 if (vvalue)
59480 vfree(vvalue);
59481@@ -377,7 +403,7 @@ retry:
59482 return error;
59483 error = mnt_want_write(path.mnt);
59484 if (!error) {
59485- error = setxattr(path.dentry, name, value, size, flags);
59486+ error = setxattr(&path, name, value, size, flags);
59487 mnt_drop_write(path.mnt);
59488 }
59489 path_put(&path);
59490@@ -401,7 +427,7 @@ retry:
59491 return error;
59492 error = mnt_want_write(path.mnt);
59493 if (!error) {
59494- error = setxattr(path.dentry, name, value, size, flags);
59495+ error = setxattr(&path, name, value, size, flags);
59496 mnt_drop_write(path.mnt);
59497 }
59498 path_put(&path);
59499@@ -416,16 +442,14 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
59500 const void __user *,value, size_t, size, int, flags)
59501 {
59502 struct fd f = fdget(fd);
59503- struct dentry *dentry;
59504 int error = -EBADF;
59505
59506 if (!f.file)
59507 return error;
59508- dentry = f.file->f_path.dentry;
59509- audit_inode(NULL, dentry, 0);
59510+ audit_inode(NULL, f.file->f_path.dentry, 0);
59511 error = mnt_want_write_file(f.file);
59512 if (!error) {
59513- error = setxattr(dentry, name, value, size, flags);
59514+ error = setxattr(&f.file->f_path, name, value, size, flags);
59515 mnt_drop_write_file(f.file);
59516 }
59517 fdput(f);
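pax_getxattr() gives the loader a dedicated path for reading the PaX flags extended attribute off a binary, gated on MAY_EXEC rather than full read permission. From user space the same attribute is reachable through the ordinary getxattr(2) call; note that spelling the name as "user.pax.flags" below is an assumption about what XATTR_NAME_PAX_FLAGS expands to in this tree:

#include <sys/types.h>
#include <sys/xattr.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    char flags[32];
    const char *path = argc > 1 ? argv[1] : "/bin/true";

    /* "user.pax.flags" is the conventional expansion of
     * XATTR_NAME_PAX_FLAGS; adjust if this tree differs. */
    ssize_t n = getxattr(path, "user.pax.flags", flags, sizeof(flags) - 1);
    if (n < 0) {
        perror("getxattr");
        return 1;
    }
    flags[n] = '\0';
    printf("%s: pax flags \"%s\"\n", path, flags);
    return 0;
}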
59518diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
59519index 9fbea87..6b19972 100644
59520--- a/fs/xattr_acl.c
59521+++ b/fs/xattr_acl.c
59522@@ -76,8 +76,8 @@ struct posix_acl *
59523 posix_acl_from_xattr(struct user_namespace *user_ns,
59524 const void *value, size_t size)
59525 {
59526- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
59527- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
59528+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
59529+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
59530 int count;
59531 struct posix_acl *acl;
59532 struct posix_acl_entry *acl_e;
59533diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
59534index 8904284..ee0e14b 100644
59535--- a/fs/xfs/xfs_bmap.c
59536+++ b/fs/xfs/xfs_bmap.c
59537@@ -765,7 +765,7 @@ xfs_bmap_validate_ret(
59538
59539 #else
59540 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0)
59541-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
59542+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do { } while (0)
59543 #endif /* DEBUG */
59544
59545 /*
59546diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
59547index 6157424..ac98f6d 100644
59548--- a/fs/xfs/xfs_dir2_sf.c
59549+++ b/fs/xfs/xfs_dir2_sf.c
59550@@ -851,7 +851,15 @@ xfs_dir2_sf_getdents(
59551 }
59552
59553 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
59554- if (filldir(dirent, (char *)sfep->name, sfep->namelen,
59555+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
59556+ char name[sfep->namelen];
59557+ memcpy(name, sfep->name, sfep->namelen);
59558+ if (filldir(dirent, name, sfep->namelen,
59559+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
59560+ *offset = off & 0x7fffffff;
59561+ return 0;
59562+ }
59563+ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
59564 off & 0x7fffffff, ino, DT_UNKNOWN)) {
59565 *offset = off & 0x7fffffff;
59566 return 0;
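For short-form XFS directories the entry names live inside the in-core inode's inline data, so handing filldir a pointer into that object means the eventual copy_to_user() sources from the middle of an inode, which hardened user-copy checks such as PAX_USERCOPY reject; the patch bounces such names through a stack buffer first (a C99 variable-length array, as above). The bounce in isolation, as a minimal sketch:

#include <string.h>
#include <stdio.h>

typedef int (*filldir_t)(void *ctx, const char *name, int namlen);

/* Copy an inline-resident name to the stack before the callback so
 * any downstream user copy sources from stack memory instead. */
static int emit_inline_name(void *ctx, const unsigned char *inline_name,
                            int namlen, filldir_t cb)
{
    char name[namlen];                 /* VLA, mirroring the patch */

    memcpy(name, inline_name, namlen);
    return cb(ctx, name, namlen);
}

static int print_cb(void *ctx, const char *name, int namlen)
{
    (void)ctx;
    printf("%.*s\n", namlen, name);
    return 0;
}

int main(void)
{
    const unsigned char fake_inline[] = "hello";

    return emit_inline_name(NULL, fake_inline, 5, print_cb);
}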
59567diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
59568index 5e99968..45bd327 100644
59569--- a/fs/xfs/xfs_ioctl.c
59570+++ b/fs/xfs/xfs_ioctl.c
59571@@ -127,7 +127,7 @@ xfs_find_handle(
59572 }
59573
59574 error = -EFAULT;
59575- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
59576+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
59577 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
59578 goto out_put;
59579
59580diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
59581index ca9ecaa..60100c7 100644
59582--- a/fs/xfs/xfs_iops.c
59583+++ b/fs/xfs/xfs_iops.c
59584@@ -395,7 +395,7 @@ xfs_vn_put_link(
59585 struct nameidata *nd,
59586 void *p)
59587 {
59588- char *s = nd_get_link(nd);
59589+ const char *s = nd_get_link(nd);
59590
59591 if (!IS_ERR(s))
59592 kfree(s);
59593diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
59594new file mode 100644
59595index 0000000..712a85d
59596--- /dev/null
59597+++ b/grsecurity/Kconfig
59598@@ -0,0 +1,1043 @@
59599+#
59600+# grsecurity configuration
59601+#
59602+menu "Memory Protections"
59603+depends on GRKERNSEC
59604+
59605+config GRKERNSEC_KMEM
59606+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
59607+ default y if GRKERNSEC_CONFIG_AUTO
59608+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
59609+ help
59610+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
59611+ be written to or read from, preventing modification or leakage of the
59612+ running kernel's contents. /dev/port will also not be allowed to be opened, and support
59613+ for /dev/cpu/*/msr will be removed. If you have module
59614+ support disabled, enabling this will close up five ways that are
59615+ currently used to insert malicious code into the running kernel.
59616+
59617+ Even with all these features enabled, we still highly recommend that
59618+ you use the RBAC system, as it is still possible for an attacker to
59619+ modify the running kernel through privileged I/O granted by ioperm/iopl.
59620+
59621+ If you are not using XFree86, you may be able to stop this additional
59622+ case by enabling the 'Disable privileged I/O' option. Though nothing
59623+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
59624+ but only to video memory, which is the only writing we allow in this
59625+ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
59626+ not be allowed to mprotect it with PROT_WRITE later.
59627+ Enabling this feature will prevent the "cpupower" and "powertop" tools
59628+ from working.
59629+
59630+ It is highly recommended that you say Y here if you meet all the
59631+ conditions above.
59632+
59633+config GRKERNSEC_VM86
59634+ bool "Restrict VM86 mode"
59635+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
59636+ depends on X86_32
59637+
59638+ help
59639+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
59640+ make use of a special execution mode on 32bit x86 processors called
59641+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
59642+ video cards and will still work with this option enabled. The purpose
59643+ of the option is to prevent exploitation of emulation errors in
59644+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
59645+ Nearly all users should be able to enable this option.
59646+
59647+config GRKERNSEC_IO
59648+ bool "Disable privileged I/O"
59649+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
59650+ depends on X86
59651+ select RTC_CLASS
59652+ select RTC_INTF_DEV
59653+ select RTC_DRV_CMOS
59654+
59655+ help
59656+ If you say Y here, all ioperm and iopl calls will return an error.
59657+ Ioperm and iopl can be used to modify the running kernel.
59658+ Unfortunately, some programs need this access to operate properly,
59659+ the most notable of which are XFree86 and hwclock. The hwclock case
59660+ can be remedied by building RTC support into the kernel, so real-time
59661+ clock support is enabled if this option is enabled, to ensure
59662+ that hwclock operates correctly. XFree86 still will not
59663+ operate correctly with this option enabled, so DO NOT CHOOSE Y
59664+ IF YOU USE XFree86. If you use XFree86 and you still want to
59665+ protect your kernel against modification, use the RBAC system.
59666+
59667+config GRKERNSEC_JIT_HARDEN
59668+ bool "Harden BPF JIT against spray attacks"
59669+ default y if GRKERNSEC_CONFIG_AUTO
59670+ depends on BPF_JIT
59671+ help
59672+ If you say Y here, the native code generated by the kernel's Berkeley
59673+ Packet Filter (BPF) JIT engine will be hardened against JIT-spraying
59674+ attacks that attempt to fit attacker-beneficial instructions in
59675+ 32bit immediate fields of JIT-generated native instructions. The
59676+ attacker will generally aim to cause an unintended instruction sequence
59677+ of JIT-generated native code to execute by jumping into the middle of
59678+ a generated instruction. This feature effectively randomizes the 32bit
59679+ immediate constants present in the generated code to thwart such attacks.
59680+
59681+ If you're using KERNEXEC, it's recommended that you enable this option
59682+ to supplement the hardening of the kernel.
59683+
59684+config GRKERNSEC_PERF_HARDEN
59685+ bool "Disable unprivileged PERF_EVENTS usage by default"
59686+ default y if GRKERNSEC_CONFIG_AUTO
59687+ depends on PERF_EVENTS
59688+ help
59689+ If you say Y here, the range of acceptable values for the
59690+ /proc/sys/kernel/perf_event_paranoid sysctl will be expanded to allow and
59691+ default to a new value: 3. When the sysctl is set to this value, no
59692+ unprivileged use of the PERF_EVENTS syscall interface will be permitted.
59693+
59694+ Though PERF_EVENTS can be used legitimately for performance monitoring
59695+ and low-level application profiling, it is forced on regardless of
59696+ configuration, has been at fault for several vulnerabilities, and
59697+ creates new opportunities for side channels and other information leaks.
59698+
59699+ This feature puts PERF_EVENTS into a secure default state and permits
59700+ the administrator to change out of it temporarily if unprivileged
59701+ application profiling is needed.
59702+
59703+config GRKERNSEC_RAND_THREADSTACK
59704+ bool "Insert random gaps between thread stacks"
59705+ default y if GRKERNSEC_CONFIG_AUTO
59706+ depends on PAX_RANDMMAP && !PPC
59707+ help
59708+ If you say Y here, a random-sized gap will be enforced between allocated
59709+ thread stacks. Glibc's NPTL and other threading libraries that
59710+ pass MAP_STACK to the kernel for thread stack allocation are supported.
59711+ The implementation currently provides 8 bits of entropy for the gap.
59712+
59713+ Many distributions do not compile threaded remote services with the
59714+ -fstack-check argument to GCC, causing the variable-sized stack-based
59715+ allocator, alloca(), to not probe the stack on allocation. This
59716+ permits an unbounded alloca() to skip over any guard page and potentially
59717+ modify another thread's stack reliably. An enforced random gap
59718+ reduces the reliability of such an attack and increases the chance
59719+ that such a read/write to another thread's stack instead lands in
59720+ an unmapped area, causing a crash and triggering grsecurity's
59721+ anti-bruteforcing logic.
59722+
59723+config GRKERNSEC_PROC_MEMMAP
59724+ bool "Harden ASLR against information leaks and entropy reduction"
59725+ default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
59726+ depends on PAX_NOEXEC || PAX_ASLR
59727+ help
59728+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
59729+ give no information about the addresses of the task's mappings if
59730+ PaX features that rely on random addresses are enabled on the task.
59731+ In addition to sanitizing this information and disabling other
59732+ dangerous sources of information, this option causes reads of sensitive
59733+ /proc/<pid> entries to be denied when the file descriptor was opened in
59734+ a different task than the one performing the read. Such attempts are logged.
59735+ This option also limits argv/env strings for suid/sgid binaries
59736+ to 512KB to prevent a complete exhaustion of the stack entropy provided
59737+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
59738+ binaries to prevent alternative mmap layouts from being abused.
59739+
59740+ If you use PaX it is essential that you say Y here as it closes up
59741+ several holes that make full ASLR useless locally.
59742+
59743+config GRKERNSEC_BRUTE
59744+ bool "Deter exploit bruteforcing"
59745+ default y if GRKERNSEC_CONFIG_AUTO
59746+ help
59747+ If you say Y here, attempts to bruteforce exploits against forking
59748+ daemons such as apache or sshd, as well as against suid/sgid binaries
59749+ will be deterred. When a child of a forking daemon is killed by PaX
59750+ or crashes due to an illegal instruction or other suspicious signal,
59751+ the parent process will be delayed 30 seconds upon every subsequent
59752+ fork until the administrator is able to assess the situation and
59753+ restart the daemon.
59754+ In the suid/sgid case, the attempt is logged, the user has all their
59755+ existing instances of the suid/sgid binary terminated and will
59756+ be unable to execute any suid/sgid binaries for 15 minutes.
59757+
59758+ It is recommended that you also enable signal logging in the auditing
59759+ section so that logs are generated when a process triggers a suspicious
59760+ signal.
59761+ If the sysctl option is enabled, a sysctl option with name
59762+ "deter_bruteforce" is created.
59763+
59765+config GRKERNSEC_MODHARDEN
59766+ bool "Harden module auto-loading"
59767+ default y if GRKERNSEC_CONFIG_AUTO
59768+ depends on MODULES
59769+ help
59770+ If you say Y here, module auto-loading in response to use of some
59771+ feature implemented by an unloaded module will be restricted to
59772+ root users. Enabling this option helps defend against attacks
59773+ by unprivileged users who abuse the auto-loading behavior to
59774+ cause a vulnerable module to load that is then exploited.
59775+
59776+ If this option prevents a legitimate use of auto-loading for a
59777+ non-root user, the administrator can execute modprobe manually
59778+ with the exact name of the module mentioned in the alert log.
59779+ Alternatively, the administrator can add the module to the list
59780+ of modules loaded at boot by modifying init scripts.
59781+
59782+ Modification of init scripts will most likely be needed on
59783+ Ubuntu servers with encrypted home directory support enabled,
59784+ as the first non-root user logging in will cause the ecb(aes),
59785+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
59786+
59787+config GRKERNSEC_HIDESYM
59788+ bool "Hide kernel symbols"
59789+ default y if GRKERNSEC_CONFIG_AUTO
59790+ select PAX_USERCOPY_SLABS
59791+ help
59792+ If you say Y here, getting information on loaded modules, and
59793+ displaying all kernel symbols through a syscall will be restricted
59794+ to users with CAP_SYS_MODULE. For software compatibility reasons,
59795+ /proc/kallsyms will be restricted to the root user. The RBAC
59796+ system can hide that entry even from root.
59797+
59798+ This option also prevents leaking of kernel addresses through
59799+ several /proc entries.
59800+
59801+ Note that this option is only effective provided the following
59802+ conditions are met:
59803+ 1) The kernel using grsecurity is not precompiled by some distribution
59804+ 2) You have also enabled GRKERNSEC_DMESG
59805+ 3) You are using the RBAC system and hiding other files such as your
59806+ kernel image and System.map. Alternatively, enabling this option
59807+ causes the permissions on /boot, /lib/modules, and the kernel
59808+ source directory to change at compile time to prevent
59809+ reading by non-root users.
59810+ If the above conditions are met, this option will aid in providing a
59811+ useful protection against local kernel exploitation of overflows
59812+ and arbitrary read/write vulnerabilities.
59813+
59814+ It is highly recommended that you enable GRKERNSEC_PERF_HARDEN
59815+ in addition to this feature.
59816+
59817+config GRKERNSEC_KERN_LOCKOUT
59818+ bool "Active kernel exploit response"
59819+ default y if GRKERNSEC_CONFIG_AUTO
59820+ depends on X86 || ARM || PPC || SPARC
59821+ help
59822+ If you say Y here, when a PaX alert is triggered due to suspicious
59823+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
59824+ or an OOPS occurs due to bad memory accesses, instead of just
59825+ terminating the offending process (and potentially allowing
59826+ a subsequent exploit from the same user), we will take one of two
59827+ actions:
59828+ If the user was root, we will panic the system
59829+ If the user was non-root, we will log the attempt, terminate
59830+ all processes owned by the user, then prevent them from creating
59831+ any new processes until the system is restarted
59832+ This deters repeated kernel exploitation/bruteforcing attempts
59833+ and is useful for later forensics.
59834+
59835+endmenu
59836+menu "Role Based Access Control Options"
59837+depends on GRKERNSEC
59838+
59839+config GRKERNSEC_RBAC_DEBUG
59840+ bool
59841+
59842+config GRKERNSEC_NO_RBAC
59843+ bool "Disable RBAC system"
59844+ help
59845+ If you say Y here, the /dev/grsec device will be removed from the kernel,
59846+ preventing the RBAC system from being enabled. You should only say Y
59847+ here if you have no intention of using the RBAC system, so as to prevent
59848+ an attacker with root access from misusing the RBAC system to hide files
59849+ and processes when loadable module support and /dev/[k]mem have been
59850+ locked down.
59851+
59852+config GRKERNSEC_ACL_HIDEKERN
59853+ bool "Hide kernel processes"
59854+ help
59855+ If you say Y here, all kernel threads will be hidden to all
59856+ processes but those whose subject has the "view hidden processes"
59857+ flag.
59858+
59859+config GRKERNSEC_ACL_MAXTRIES
59860+ int "Maximum tries before password lockout"
59861+ default 3
59862+ help
59863+ This option enforces the maximum number of times a user can attempt
59864+ to authorize themselves with the grsecurity RBAC system before being
59865+ denied the ability to attempt authorization again for a specified time.
59866+ The lower the number, the harder it will be to brute-force a password.
59867+
59868+config GRKERNSEC_ACL_TIMEOUT
59869+ int "Time to wait after max password tries, in seconds"
59870+ default 30
59871+ help
59872+ This option specifies the time the user must wait after attempting to
59873+ authorize to the RBAC system with the maximum number of invalid
59874+ passwords. The higher the number, the harder it will be to brute-force
59875+ a password.
59876+
59877+endmenu
59878+menu "Filesystem Protections"
59879+depends on GRKERNSEC
59880+
59881+config GRKERNSEC_PROC
59882+ bool "Proc restrictions"
59883+ default y if GRKERNSEC_CONFIG_AUTO
59884+ help
59885+ If you say Y here, the permissions of the /proc filesystem
59886+ will be altered to enhance system security and privacy. You MUST
59887+ choose either a user only restriction or a user and group restriction.
59888+ With the "restrict to user only" option, users can see only the
59889+ processes they themselves run; with the group restriction, members
59890+ of the group you select can additionally view all processes and
59891+ files normally restricted to root. NOTE: If you're running identd or
59892+ ntpd as a non-root user, you will have to run it as the group you
59893+ specify here.
59894+
59895+config GRKERNSEC_PROC_USER
59896+ bool "Restrict /proc to user only"
59897+ depends on GRKERNSEC_PROC
59898+ help
59899+ If you say Y here, non-root users will only be able to view their own
59900+ processes, and will be restricted from viewing network-related
59901+ information as well as kernel symbol and module information.
59902+
59903+config GRKERNSEC_PROC_USERGROUP
59904+ bool "Allow special group"
59905+ default y if GRKERNSEC_CONFIG_AUTO
59906+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
59907+ help
59908+ If you say Y here, you will be able to select a group that will be
59909+ able to view all processes and network-related information. If you've
59910+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
59911+ remain hidden. This option is useful if you want to run identd as
59912+ a non-root user. The group you select may also be chosen at boot time
59913+ via "grsec_proc_gid=" on the kernel commandline.
59914+
59915+config GRKERNSEC_PROC_GID
59916+ int "GID for special group"
59917+ depends on GRKERNSEC_PROC_USERGROUP
59918+ default 1001
59919+
59920+config GRKERNSEC_PROC_ADD
59921+ bool "Additional restrictions"
59922+ default y if GRKERNSEC_CONFIG_AUTO
59923+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
59924+ help
59925+ If you say Y here, additional restrictions will be placed on
59926+ /proc that keep normal users from viewing device information and
59927+ slabinfo information that could be useful for exploits.
59928+
59929+config GRKERNSEC_LINK
59930+ bool "Linking restrictions"
59931+ default y if GRKERNSEC_CONFIG_AUTO
59932+ help
59933+ If you say Y here, /tmp race exploits will be prevented, since users
59934+ will no longer be able to follow symlinks owned by other users in
59935+ world-writable +t directories (e.g. /tmp), unless the owner of the
59936+ symlink is the owner of the directory. Users will also not be
59937+ able to hardlink to files they do not own. If the sysctl option is
59938+ enabled, a sysctl option with name "linking_restrictions" is created.
59939+
59940+config GRKERNSEC_SYMLINKOWN
59941+ bool "Kernel-enforced SymlinksIfOwnerMatch"
59942+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
59943+ help
59944+ Apache's SymlinksIfOwnerMatch option has an inherent race condition
59945+ that prevents it from being used as a security feature. As Apache
59946+ verifies the symlink by performing a stat() against the target of
59947+ the symlink before it is followed, an attacker can set up a symlink
59948+ to point to a same-owned file, then replace the symlink with one
59949+ that targets another user's file just after Apache "validates" the
59950+ symlink -- a classic TOCTOU race. If you say Y here, a complete,
59951+ race-free replacement for Apache's "SymlinksIfOwnerMatch" option
59952+ will be in place for the group you specify. If the sysctl option
59953+ is enabled, a sysctl option with name "enforce_symlinksifowner" is
59954+ created.
59955+
59956+config GRKERNSEC_SYMLINKOWN_GID
59957+ int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
59958+ depends on GRKERNSEC_SYMLINKOWN
59959+ default 1006
59960+ help
59961+ Setting this GID determines what group kernel-enforced
59962+ SymlinksIfOwnerMatch will be enabled for. If the sysctl option
59963+ is enabled, a sysctl option with name "symlinkown_gid" is created.
59964+
59965+config GRKERNSEC_FIFO
59966+ bool "FIFO restrictions"
59967+ default y if GRKERNSEC_CONFIG_AUTO
59968+ help
59969+ If you say Y here, users will not be able to write to FIFOs they don't
59970+ own in world-writable +t directories (e.g. /tmp), unless the owner of
59971+ the FIFO is also the owner of the directory it resides in. If the sysctl
59972+ option is enabled, a sysctl option with name "fifo_restrictions" is
59973+ created.
59974+
59975+config GRKERNSEC_SYSFS_RESTRICT
59976+ bool "Sysfs/debugfs restriction"
59977+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
59978+ depends on SYSFS
59979+ help
59980+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
59981+ any filesystem normally mounted under it (e.g. debugfs) will be
59982+ mostly accessible only by root. These filesystems generally provide access
59983+ to hardware and debug information that isn't appropriate for unprivileged
59984+ users of the system. Sysfs and debugfs have also become a large source
59985+ of new vulnerabilities, ranging from infoleaks to local compromise.
59986+ There has been very little oversight with an eye toward security involved
59987+ in adding new exporters of information to these filesystems, so their
59988+ use is discouraged.
59989+ For reasons of compatibility, a few directories have been whitelisted
59990+ for access by non-root users:
59991+ /sys/fs/selinux
59992+ /sys/fs/fuse
59993+ /sys/devices/system/cpu
59994+
59995+config GRKERNSEC_ROFS
59996+ bool "Runtime read-only mount protection"
59997+ help
59998+ If you say Y here, a sysctl option with name "romount_protect" will
59999+ be created. By setting this option to 1 at runtime, filesystems
60000+ will be protected in the following ways:
60001+ * No new writable mounts will be allowed
60002+ * Existing read-only mounts won't be able to be remounted read/write
60003+ * Write operations will be denied on all block devices
60004+ This option acts independently of grsec_lock: once it is set to 1,
60005+ it cannot be turned off. Therefore, please be mindful of the resulting
60006+ behavior if this option is enabled in an init script on a read-only
60007+ filesystem. This feature is mainly intended for secure embedded systems.
60008+
60009+config GRKERNSEC_DEVICE_SIDECHANNEL
60010+ bool "Eliminate stat/notify-based device sidechannels"
60011+ default y if GRKERNSEC_CONFIG_AUTO
60012+ help
60013+ If you say Y here, timing analyses on block or character
60014+ devices like /dev/ptmx using stat or inotify/dnotify/fanotify
60015+ will be thwarted for unprivileged users. If a process without
60016+ CAP_MKNOD stats such a device, the last access and last modify times
60017+ will match the device's create time. No access or modify events
60018+ will be triggered through inotify/dnotify/fanotify for such devices.
60019+ This feature will prevent attacks that may at a minimum
60020+ allow an attacker to determine the administrator's password length.
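+
+ The kind of probe being thwarted can be sketched as a watcher that
+ polls the device's timestamps to observe terminal activity (an
+ illustrative, hypothetical example):
+
+   /* sidechannel.c -- poll /dev/ptmx timestamps; with this option
+      enabled an unprivileged caller only ever sees the create time */
+   #include <stdio.h>
+   #include <sys/stat.h>
+   #include <unistd.h>
+
+   int main(void)
+   {
+           struct stat st;
+           time_t last = 0;
+
+           for (;;) {
+                   if (stat("/dev/ptmx", &st))
+                           return 1;
+                   if (st.st_atime != last) {
+                           last = st.st_atime;
+                           printf("tty activity at %ld\n", (long)last);
+                   }
+                   usleep(10000); /* 10ms polling resolution */
+           }
+   }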
60021+
60022+config GRKERNSEC_CHROOT
60023+ bool "Chroot jail restrictions"
60024+ default y if GRKERNSEC_CONFIG_AUTO
60025+ help
60026+ If you say Y here, you will be able to choose several options that will
60027+ make breaking out of a chrooted jail much more difficult. If you
60028+ encounter no software incompatibilities with the following options, it
60029+ is recommended that you enable each one.
60030+
60031+config GRKERNSEC_CHROOT_MOUNT
60032+ bool "Deny mounts"
60033+ default y if GRKERNSEC_CONFIG_AUTO
60034+ depends on GRKERNSEC_CHROOT
60035+ help
60036+ If you say Y here, processes inside a chroot will not be able to
60037+ mount or remount filesystems. If the sysctl option is enabled, a
60038+ sysctl option with name "chroot_deny_mount" is created.
60039+
60040+config GRKERNSEC_CHROOT_DOUBLE
60041+ bool "Deny double-chroots"
60042+ default y if GRKERNSEC_CONFIG_AUTO
60043+ depends on GRKERNSEC_CHROOT
60044+ help
60045+ If you say Y here, processes inside a chroot will not be able to chroot
60046+ again outside the chroot. This is a widely used method of breaking
60047+ out of a chroot jail and should not be allowed. If the sysctl
60048+ option is enabled, a sysctl option with name
60049+ "chroot_deny_chroot" is created.
60050+
60051+config GRKERNSEC_CHROOT_PIVOT
60052+ bool "Deny pivot_root in chroot"
60053+ default y if GRKERNSEC_CONFIG_AUTO
60054+ depends on GRKERNSEC_CHROOT
60055+ help
60056+ If you say Y here, processes inside a chroot will not be able to use
60057+ a function called pivot_root() that was introduced in Linux 2.3.41. It
60058+ works similarly to chroot in that it changes the root filesystem. This
60059+ function could be misused in a chrooted process to attempt to break out
60060+ of the chroot, and therefore should not be allowed. If the sysctl
60061+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
60062+ created.
60063+
60064+config GRKERNSEC_CHROOT_CHDIR
60065+ bool "Enforce chdir(\"/\") on all chroots"
60066+ default y if GRKERNSEC_CONFIG_AUTO
60067+ depends on GRKERNSEC_CHROOT
60068+ help
60069+ If you say Y here, the current working directory of all newly-chrooted
60070+ applications will be set to the root directory of the chroot.
60071+ The man page on chroot(2) states:
60072+ Note that this call does not change the current working
60073+ directory, so that `.' can be outside the tree rooted at
60074+ `/'. In particular, the super-user can escape from a
60075+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
60076+
60077+ It is recommended that you say Y here, since it's not known to break
60078+ any software. If the sysctl option is enabled, a sysctl option with
60079+ name "chroot_enforce_chdir" is created.
60080+
60081+config GRKERNSEC_CHROOT_CHMOD
60082+ bool "Deny (f)chmod +s"
60083+ default y if GRKERNSEC_CONFIG_AUTO
60084+ depends on GRKERNSEC_CHROOT
60085+ help
60086+ If you say Y here, processes inside a chroot will not be able to chmod
60087+ or fchmod files to make them have suid or sgid bits. This protects
60088+ against another published method of breaking a chroot. If the sysctl
60089+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
60090+ created.
60091+
60092+config GRKERNSEC_CHROOT_FCHDIR
60093+ bool "Deny fchdir out of chroot"
60094+ default y if GRKERNSEC_CONFIG_AUTO
60095+ depends on GRKERNSEC_CHROOT
60096+ help
60097+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
60098+ to a file descriptor of the chrooting process that points to a directory
60099+ outside the filesystem will be stopped. If the sysctl option
60100+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
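+
+ The technique being stopped is illustrated below: a directory file
+ descriptor taken before the chroot() remains valid afterward (a
+ sketch; the "jail" path is hypothetical and root privileges inside
+ the jail are assumed):
+
+   /* fchdir_escape.c -- step back through a saved directory fd */
+   #include <fcntl.h>
+   #include <unistd.h>
+
+   int main(void)
+   {
+           int i, fd = open(".", O_RDONLY); /* taken before the chroot */
+
+           chroot("jail");         /* fd still references the old cwd */
+           fchdir(fd);             /* move back outside the new root */
+           for (i = 0; i < 64; i++)
+                   chdir("..");
+           chroot(".");
+           return execl("/bin/sh", "sh", (char *)NULL);
+   }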
60101+
60102+config GRKERNSEC_CHROOT_MKNOD
60103+ bool "Deny mknod"
60104+ default y if GRKERNSEC_CONFIG_AUTO
60105+ depends on GRKERNSEC_CHROOT
60106+ help
60107+ If you say Y here, processes inside a chroot will not be allowed to
60108+ mknod. The problem with using mknod inside a chroot is that it
60109+ would allow an attacker to create a device entry that is the same
60110+ as one on the physical root of your system, which could be anything
60111+ from the console device to a device for your hard drive (which
60112+ they could then use to wipe the drive or steal data). It is recommended
60113+ that you say Y here, unless you run into software incompatibilities.
60114+ If the sysctl option is enabled, a sysctl option with name
60115+ "chroot_deny_mknod" is created.
60116+
60117+config GRKERNSEC_CHROOT_SHMAT
60118+ bool "Deny shmat() out of chroot"
60119+ default y if GRKERNSEC_CONFIG_AUTO
60120+ depends on GRKERNSEC_CHROOT
60121+ help
60122+ If you say Y here, processes inside a chroot will not be able to attach
60123+ to shared memory segments that were created outside of the chroot jail.
60124+ It is recommended that you say Y here. If the sysctl option is enabled,
60125+ a sysctl option with name "chroot_deny_shmat" is created.
60126+
60127+config GRKERNSEC_CHROOT_UNIX
60128+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
60129+ default y if GRKERNSEC_CONFIG_AUTO
60130+ depends on GRKERNSEC_CHROOT
60131+ help
60132+ If you say Y here, processes inside a chroot will not be able to
60133+ connect to abstract (meaning not belonging to a filesystem) Unix
60134+ domain sockets that were bound outside of a chroot. It is recommended
60135+ that you say Y here. If the sysctl option is enabled, a sysctl option
60136+ with name "chroot_deny_unix" is created.
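+
+ Abstract sockets are named by a leading NUL byte instead of a
+ filesystem path, which is why ordinary chroot path containment never
+ applied to them. A sketch of such a connection (the socket name
+ "grsec-demo" is hypothetical):
+
+   /* abstract.c -- connect to an abstract AF_UNIX socket */
+   #include <stddef.h>
+   #include <string.h>
+   #include <sys/socket.h>
+   #include <sys/un.h>
+   #include <unistd.h>
+
+   int main(void)
+   {
+           struct sockaddr_un sun;
+           int fd = socket(AF_UNIX, SOCK_STREAM, 0);
+
+           memset(&sun, 0, sizeof(sun));
+           sun.sun_family = AF_UNIX;
+           /* sun_path[0] == '\0' selects the abstract namespace */
+           memcpy(sun.sun_path + 1, "grsec-demo", 10);
+           if (connect(fd, (struct sockaddr *)&sun,
+                       offsetof(struct sockaddr_un, sun_path) + 1 + 10) == 0)
+                   write(fd, "hi", 2);
+           close(fd);
+           return 0;
+   }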
60137+
60138+config GRKERNSEC_CHROOT_FINDTASK
60139+ bool "Protect outside processes"
60140+ default y if GRKERNSEC_CONFIG_AUTO
60141+ depends on GRKERNSEC_CHROOT
60142+ help
60143+ If you say Y here, processes inside a chroot will not be able to
60144+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
60145+ getsid, or view any process outside of the chroot. If the sysctl
60146+ option is enabled, a sysctl option with name "chroot_findtask" is
60147+ created.
60148+
60149+config GRKERNSEC_CHROOT_NICE
60150+ bool "Restrict priority changes"
60151+ default y if GRKERNSEC_CONFIG_AUTO
60152+ depends on GRKERNSEC_CHROOT
60153+ help
60154+ If you say Y here, processes inside a chroot will not be able to raise
60155+ the priority of processes in the chroot, or alter the priority of
60156+ processes outside the chroot. This provides more security than simply
60157+ removing CAP_SYS_NICE from the process' capability set. If the
60158+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
60159+ is created.
60160+
60161+config GRKERNSEC_CHROOT_SYSCTL
60162+ bool "Deny sysctl writes"
60163+ default y if GRKERNSEC_CONFIG_AUTO
60164+ depends on GRKERNSEC_CHROOT
60165+ help
60166+ If you say Y here, an attacker in a chroot will not be able to
60167+ write to sysctl entries, either by sysctl(2) or through a /proc
60168+ interface. It is strongly recommended that you say Y here. If the
60169+ sysctl option is enabled, a sysctl option with name
60170+ "chroot_deny_sysctl" is created.
60171+
60172+config GRKERNSEC_CHROOT_CAPS
60173+ bool "Capability restrictions"
60174+ default y if GRKERNSEC_CONFIG_AUTO
60175+ depends on GRKERNSEC_CHROOT
60176+ help
60177+ If you say Y here, the capabilities on all processes within a
60178+ chroot jail will be lowered to stop module insertion, raw I/O,
60179+ system and net admin tasks, rebooting the system, modifying immutable
60180+ files, modifying IPC owned by another, and changing the system time.
60181+ This is left as an option because it can break some apps. Disable this
60182+ if your chrooted apps are having problems performing those kinds of
60183+ tasks. If the sysctl option is enabled, a sysctl option with
60184+ name "chroot_caps" is created.
60185+
60186+config GRKERNSEC_CHROOT_INITRD
60187+ bool "Exempt initrd tasks from restrictions"
60188+ default y if GRKERNSEC_CONFIG_AUTO
60189+ depends on GRKERNSEC_CHROOT && BLK_DEV_RAM
60190+ help
60191+ If you say Y here, tasks started prior to init will be exempted from
60192+ grsecurity's chroot restrictions. This option is mainly meant to
60193+ resolve Plymouth's performing privileged operations unnecessarily
60194+ in a chroot.
60195+
60196+endmenu
60197+menu "Kernel Auditing"
60198+depends on GRKERNSEC
60199+
60200+config GRKERNSEC_AUDIT_GROUP
60201+ bool "Single group for auditing"
60202+ help
60203+ If you say Y here, the exec and chdir logging features will only operate
60204+ on a group you specify. This option is recommended if you only want to
60205+ watch certain users instead of having a large amount of logs from the
60206+ entire system. If the sysctl option is enabled, a sysctl option with
60207+ name "audit_group" is created.
60208+
60209+config GRKERNSEC_AUDIT_GID
60210+ int "GID for auditing"
60211+ depends on GRKERNSEC_AUDIT_GROUP
60212+ default 1007
60213+
60214+config GRKERNSEC_EXECLOG
60215+ bool "Exec logging"
60216+ help
60217+ If you say Y here, all execve() calls will be logged (since the
60218+ other exec*() calls are frontends to execve(), all execution
60219+ will be logged). Useful for shell-servers that like to keep track
60220+ of their users. If the sysctl option is enabled, a sysctl option with
60221+ name "exec_logging" is created.
60222+ WARNING: When enabled, this option will produce a LOT of logs, especially
60223+ on an active system.
60224+
60225+config GRKERNSEC_RESLOG
60226+ bool "Resource logging"
60227+ default y if GRKERNSEC_CONFIG_AUTO
60228+ help
60229+ If you say Y here, all attempts to overstep resource limits will
60230+ be logged with the resource name, the requested size, and the current
60231+ limit. It is highly recommended that you say Y here. If the sysctl
60232+ option is enabled, a sysctl option with name "resource_logging" is
60233+ created. If the RBAC system is enabled, the sysctl value is ignored.
60234+
60235+config GRKERNSEC_CHROOT_EXECLOG
60236+ bool "Log execs within chroot"
60237+ help
60238+ If you say Y here, all executions inside a chroot jail will be logged
60239+ to syslog. This can cause a large amount of logs if certain
60240+ applications (e.g. djb's daemontools) are installed on the system, and
60241+ is therefore left as an option. If the sysctl option is enabled, a
60242+ sysctl option with name "chroot_execlog" is created.
60243+
60244+config GRKERNSEC_AUDIT_PTRACE
60245+ bool "Ptrace logging"
60246+ help
60247+ If you say Y here, all attempts to attach to a process via ptrace
60248+ will be logged. If the sysctl option is enabled, a sysctl option
60249+ with name "audit_ptrace" is created.
60250+
60251+config GRKERNSEC_AUDIT_CHDIR
60252+ bool "Chdir logging"
60253+ help
60254+ If you say Y here, all chdir() calls will be logged. If the sysctl
60255+ option is enabled, a sysctl option with name "audit_chdir" is created.
60256+
60257+config GRKERNSEC_AUDIT_MOUNT
60258+ bool "(Un)Mount logging"
60259+ help
60260+ If you say Y here, all mounts and unmounts will be logged. If the
60261+ sysctl option is enabled, a sysctl option with name "audit_mount" is
60262+ created.
60263+
60264+config GRKERNSEC_SIGNAL
60265+ bool "Signal logging"
60266+ default y if GRKERNSEC_CONFIG_AUTO
60267+ help
60268+ If you say Y here, certain important signals will be logged, such as
60269+ SIGSEGV, which will as a result inform you when an error in a program
60270+ occurred, which in some cases could indicate a possible exploit attempt.
60271+ If the sysctl option is enabled, a sysctl option with name
60272+ "signal_logging" is created.
60273+
60274+config GRKERNSEC_FORKFAIL
60275+ bool "Fork failure logging"
60276+ help
60277+ If you say Y here, all failed fork() attempts will be logged.
60278+ This could suggest a fork bomb, or someone attempting to overstep
60279+ their process limit. If the sysctl option is enabled, a sysctl option
60280+ with name "forkfail_logging" is created.
60281+
60282+config GRKERNSEC_TIME
60283+ bool "Time change logging"
60284+ default y if GRKERNSEC_CONFIG_AUTO
60285+ help
60286+ If you say Y here, any changes of the system clock will be logged.
60287+ If the sysctl option is enabled, a sysctl option with name
60288+ "timechange_logging" is created.
60289+
60290+config GRKERNSEC_PROC_IPADDR
60291+ bool "/proc/<pid>/ipaddr support"
60292+ default y if GRKERNSEC_CONFIG_AUTO
60293+ help
60294+ If you say Y here, a new entry containing the IP address of the
60295+ person using the task will be added to each /proc/<pid> directory.
60296+ The IP is carried across local TCP and AF_UNIX stream sockets.
60297+ This information can be useful for IDS/IPSes to perform remote response
60298+ to a local attack. The entry is readable by only the owner of the
60299+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
60300+ the RBAC system), and thus does not create privacy concerns.
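+
+ An IDS/IPS (or the task's owner) can simply read the new file; a
+ minimal sketch, with the pid supplied on the command line:
+
+   /* ipaddr.c -- print /proc/<pid>/ipaddr for a task you own */
+   #include <stdio.h>
+
+   int main(int argc, char **argv)
+   {
+           char path[64], addr[64];
+           FILE *f;
+
+           if (argc < 2)
+                   return 1;
+           snprintf(path, sizeof(path), "/proc/%s/ipaddr", argv[1]);
+           f = fopen(path, "r");
+           if (!f)
+                   return 1;
+           if (fgets(addr, sizeof(addr), f))
+                   printf("task %s last seen from %s\n", argv[1], addr);
+           fclose(f);
+           return 0;
+   }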
60301+
60302+config GRKERNSEC_RWXMAP_LOG
60303+ bool 'Denied RWX mmap/mprotect logging'
60304+ default y if GRKERNSEC_CONFIG_AUTO
60305+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
60306+ help
60307+ If you say Y here, calls to mmap() and mprotect() with explicit
60308+ usage of PROT_WRITE and PROT_EXEC together will be logged when
60309+ denied by the PAX_MPROTECT feature. This feature will also
60310+ log other problematic scenarios that can occur when PAX_MPROTECT
60311+ is enabled on a binary, like textrels and PT_GNU_STACK. If the
60312+ sysctl option is enabled, a sysctl option with name "rwxmap_logging"
60313+ is created.
60314+
60315+endmenu
60316+
60317+menu "Executable Protections"
60318+depends on GRKERNSEC
60319+
60320+config GRKERNSEC_DMESG
60321+ bool "Dmesg(8) restriction"
60322+ default y if GRKERNSEC_CONFIG_AUTO
60323+ help
60324+ If you say Y here, non-root users will not be able to use dmesg(8)
60325+ to view the contents of the kernel's circular log buffer.
60326+ The kernel's log buffer often contains kernel addresses and other
60327+ identifying information useful to an attacker in fingerprinting a
60328+ system for a targeted exploit.
60329+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
60330+ created.
60331+
60332+config GRKERNSEC_HARDEN_PTRACE
60333+ bool "Deter ptrace-based process snooping"
60334+ default y if GRKERNSEC_CONFIG_AUTO
60335+ help
60336+ If you say Y here, TTY sniffers and other malicious monitoring
60337+ programs implemented through ptrace will be defeated. If you
60338+ have been using the RBAC system, this option has already been
60339+ enabled for several years for all users, with the ability to make
60340+ fine-grained exceptions.
60341+
60342+ This option only affects the ability of non-root users to ptrace
60343+ processes that are not a descendant of the ptracing process.
60344+ This means that strace ./binary and gdb ./binary will still work,
60345+ but attaching to arbitrary processes will not. If the sysctl
60346+ option is enabled, a sysctl option with name "harden_ptrace" is
60347+ created.
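+
+ A quick probe of the restriction (the target pid is supplied on the
+ command line and must not be a descendant of the probe; the denial
+ errno is conventionally EPERM, though the help text above does not
+ specify it):
+
+   /* attach.c -- PTRACE_ATTACH to an arbitrary pid; expected to fail
+      for non-root users when harden_ptrace is enabled */
+   #include <stdio.h>
+   #include <stdlib.h>
+   #include <sys/ptrace.h>
+   #include <sys/types.h>
+   #include <sys/wait.h>
+
+   int main(int argc, char **argv)
+   {
+           pid_t pid;
+
+           if (argc < 2)
+                   return 1;
+           pid = (pid_t)atoi(argv[1]);
+           if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) == -1) {
+                   perror("PTRACE_ATTACH"); /* denied under harden_ptrace */
+           } else {
+                   waitpid(pid, NULL, 0);   /* wait for the stop... */
+                   ptrace(PTRACE_DETACH, pid, NULL, NULL); /* ...then detach */
+           }
+           return 0;
+   }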
60348+
60349+config GRKERNSEC_PTRACE_READEXEC
60350+ bool "Require read access to ptrace sensitive binaries"
60351+ default y if GRKERNSEC_CONFIG_AUTO
60352+ help
60353+ If you say Y here, unprivileged users will not be able to ptrace unreadable
60354+ binaries. This option is useful in environments that
60355+ remove the read bits (e.g. file mode 4711) from suid binaries to
60356+ prevent infoleaking of their contents. This option adds
60357+ consistency to the use of that file mode, as the binary's contents
60358+ could otherwise be read out by an unprivileged user ptracing it as it runs.
60359+
60360+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
60361+ is created.
60362+
60363+config GRKERNSEC_SETXID
60364+ bool "Enforce consistent multithreaded privileges"
60365+ default y if GRKERNSEC_CONFIG_AUTO
60366+ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
60367+ help
60368+ If you say Y here, a change from a root uid to a non-root uid
60369+ in a multithreaded application will cause the resulting uids,
60370+ gids, supplementary groups, and capabilities in that thread
60371+ to be propagated to the other threads of the process. In most
60372+ cases this is unnecessary, as glibc will emulate this behavior
60373+ on behalf of the application. Other libcs do not act in the
60374+ same way, allowing the other threads of the process to continue
60375+ running with root privileges. If the sysctl option is enabled,
60376+ a sysctl option with name "consistent_setxid" is created.
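+
+ A sketch of the inconsistency this option removes on libcs that do
+ not emulate it themselves (uid 65534 stands in for the conventional
+ "nobody"; compile with -pthread and run as root):
+
+   /* setxid.c -- after setuid() in one thread, no sibling thread
+      should still report euid 0 */
+   #include <pthread.h>
+   #include <stdio.h>
+   #include <unistd.h>
+
+   static void *worker(void *arg)
+   {
+           sleep(2); /* check after main() has dropped privileges */
+           printf("worker euid: %d\n", (int)geteuid()); /* must not be 0 */
+           return NULL;
+   }
+
+   int main(void)
+   {
+           pthread_t t;
+
+           pthread_create(&t, NULL, worker, NULL);
+           sleep(1);
+           setuid(65534); /* drop privileges; all threads must follow */
+           pthread_join(t, NULL);
+           return 0;
+   }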
60377+
60378+config GRKERNSEC_TPE
60379+ bool "Trusted Path Execution (TPE)"
60380+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
60381+ help
60382+ If you say Y here, you will be able to choose a GID to add to the
60383+ supplementary groups of users you want to mark as "untrusted."
60384+ These users will not be able to execute any files that are not in
60385+ root-owned directories writable only by root. If the sysctl option
60386+ is enabled, a sysctl option with name "tpe" is created.
60387+
60388+config GRKERNSEC_TPE_ALL
60389+ bool "Partially restrict all non-root users"
60390+ depends on GRKERNSEC_TPE
60391+ help
60392+ If you say Y here, all non-root users will be covered under
60393+ a weaker TPE restriction. This is separate from, and in addition to,
60394+ the main TPE options that you have selected elsewhere. Thus, if a
60395+ "trusted" GID is chosen, this restriction applies to even that GID.
60396+ Under this restriction, all non-root users will only be allowed to
60397+ execute files in directories they own that are not group or
60398+ world-writable, or in directories owned by root and writable only by
60399+ root. If the sysctl option is enabled, a sysctl option with name
60400+ "tpe_restrict_all" is created.
60401+
60402+config GRKERNSEC_TPE_INVERT
60403+ bool "Invert GID option"
60404+ depends on GRKERNSEC_TPE
60405+ help
60406+ If you say Y here, the group you specify in the TPE configuration will
60407+ decide what group TPE restrictions will be *disabled* for. This
60408+ option is useful if you want TPE restrictions to be applied to most
60409+ users on the system. If the sysctl option is enabled, a sysctl option
60410+ with name "tpe_invert" is created. Unlike other sysctl options, this
60411+ entry will default to on for backward-compatibility.
60412+
60413+config GRKERNSEC_TPE_GID
60414+ int
60415+ default GRKERNSEC_TPE_UNTRUSTED_GID if (GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT)
60416+ default GRKERNSEC_TPE_TRUSTED_GID if (GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT)
60417+
60418+config GRKERNSEC_TPE_UNTRUSTED_GID
60419+ int "GID for TPE-untrusted users"
60420+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
60421+ default 1005
60422+ help
60423+ Setting this GID determines what group TPE restrictions will be
60424+ *enabled* for. If the sysctl option is enabled, a sysctl option
60425+ with name "tpe_gid" is created.
60426+
60427+config GRKERNSEC_TPE_TRUSTED_GID
60428+ int "GID for TPE-trusted users"
60429+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
60430+ default 1005
60431+ help
60432+ Setting this GID determines what group TPE restrictions will be
60433+ *disabled* for. If the sysctl option is enabled, a sysctl option
60434+ with name "tpe_gid" is created.
60435+
60436+endmenu
60437+menu "Network Protections"
60438+depends on GRKERNSEC
60439+
60440+config GRKERNSEC_RANDNET
60441+ bool "Larger entropy pools"
60442+ default y if GRKERNSEC_CONFIG_AUTO
60443+ help
60444+ If you say Y here, the entropy pools used for many features of Linux
60445+ and grsecurity will be doubled in size. Since several grsecurity
60446+ features use additional randomness, it is recommended that you say Y
60447+ here. Saying Y here has a similar effect to modifying
60448+ /proc/sys/kernel/random/poolsize.
60449+
60450+config GRKERNSEC_BLACKHOLE
60451+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
60452+ default y if GRKERNSEC_CONFIG_AUTO
60453+ depends on NET
60454+ help
60455+ If you say Y here, neither TCP resets nor ICMP
60456+ destination-unreachable packets will be sent in response to packets
60457+ sent to ports for which no associated listening process exists.
60458+ This feature supports both IPv4 and IPv6 and exempts the
60459+ loopback interface from blackholing. Enabling this feature
60460+ makes a host more resilient to DoS attacks and reduces network
60461+ visibility against scanners.
60462+
60463+ The blackhole feature as-implemented is equivalent to the FreeBSD
60464+ blackhole feature, as it prevents RST responses to all packets, not
60465+ just SYNs. Under most application behavior this causes no
60466+ problems, but some applications (like haproxy) may not close certain
60467+ connections in a way that cleanly terminates them on the remote
60468+ end, leaving the remote host in LAST_ACK state. Because of this
60469+ side-effect and to prevent intentional LAST_ACK DoSes, this
60470+ feature also adds automatic mitigation against such attacks.
60471+ The mitigation drastically reduces the amount of time a socket
60472+ can spend in LAST_ACK state. If you're using haproxy and not
60473+ all servers it connects to have this option enabled, consider
60474+ disabling this feature on the haproxy host.
60475+
60476+ If the sysctl option is enabled, two sysctl options with names
60477+ "ip_blackhole" and "lastack_retries" will be created.
60478+ While "ip_blackhole" takes the standard zero/non-zero on/off
60479+ toggle, "lastack_retries" uses the same kinds of values as
60480+ "tcp_retries1" and "tcp_retries2". The default value of 4
60481+ prevents a socket from lasting more than 45 seconds in LAST_ACK
60482+ state.
60483+
60484+config GRKERNSEC_NO_SIMULT_CONNECT
60485+ bool "Disable TCP Simultaneous Connect"
60486+ default y if GRKERNSEC_CONFIG_AUTO
60487+ depends on NET
60488+ help
60489+ If you say Y here, a feature by Willy Tarreau will be enabled that
60490+ removes a weakness in Linux's strict implementation of TCP that
60491+ allows two clients to connect to each other without either entering
60492+ a listening state. The weakness allows an attacker to easily prevent
60493+ a client from connecting to a known server provided the source port
60494+ for the connection is guessed correctly.
60495+
60496+ As the weakness could be used to prevent an antivirus or IPS from
60497+ fetching updates, or prevent an SSL gateway from fetching a CRL,
60498+ it should be eliminated by enabling this option. Though Linux is
60499+ one of few operating systems supporting simultaneous connect, it
60500+ has no legitimate use in practice and is rarely supported by firewalls.
60501+
60502+config GRKERNSEC_SOCKET
60503+ bool "Socket restrictions"
60504+ depends on NET
60505+ help
60506+ If you say Y here, you will be able to choose from several options.
60507+ If you assign a GID on your system and add it to the supplementary
60508+ groups of users you want to restrict socket access to, this patch
60509+ will enforce up to three restrictions, based on the option(s) you choose.
60510+
60511+config GRKERNSEC_SOCKET_ALL
60512+ bool "Deny any sockets to group"
60513+ depends on GRKERNSEC_SOCKET
60514+ help
60515+ If you say Y here, you will be able to choose a GID whose users will
60516+ be unable to connect to other hosts from your machine or run server
60517+ applications from your machine. If the sysctl option is enabled, a
60518+ sysctl option with name "socket_all" is created.
60519+
60520+config GRKERNSEC_SOCKET_ALL_GID
60521+ int "GID to deny all sockets for"
60522+ depends on GRKERNSEC_SOCKET_ALL
60523+ default 1004
60524+ help
60525+ Here you can choose the GID to disable socket access for. Remember to
60526+ add the users you want socket access disabled for to the GID
60527+ specified here. If the sysctl option is enabled, a sysctl option
60528+ with name "socket_all_gid" is created.
60529+
60530+config GRKERNSEC_SOCKET_CLIENT
60531+ bool "Deny client sockets to group"
60532+ depends on GRKERNSEC_SOCKET
60533+ help
60534+ If you say Y here, you will be able to choose a GID whose users will
60535+ be unable to connect to other hosts from your machine, but will be
60536+ able to run servers. If this option is enabled, all users in the group
60537+ you specify will have to use passive mode when initiating ftp transfers
60538+ from the shell on your machine. If the sysctl option is enabled, a
60539+ sysctl option with name "socket_client" is created.
60540+
60541+config GRKERNSEC_SOCKET_CLIENT_GID
60542+ int "GID to deny client sockets for"
60543+ depends on GRKERNSEC_SOCKET_CLIENT
60544+ default 1003
60545+ help
60546+ Here you can choose the GID to disable client socket access for.
60547+ Remember to add the users you want client socket access disabled for to
60548+ the GID specified here. If the sysctl option is enabled, a sysctl
60549+ option with name "socket_client_gid" is created.
60550+
60551+config GRKERNSEC_SOCKET_SERVER
60552+ bool "Deny server sockets to group"
60553+ depends on GRKERNSEC_SOCKET
60554+ help
60555+ If you say Y here, you will be able to choose a GID whose users will
60556+ be unable to run server applications from your machine. If the sysctl
60557+ option is enabled, a sysctl option with name "socket_server" is created.
60558+
60559+config GRKERNSEC_SOCKET_SERVER_GID
60560+ int "GID to deny server sockets for"
60561+ depends on GRKERNSEC_SOCKET_SERVER
60562+ default 1002
60563+ help
60564+ Here you can choose the GID to disable server socket access for.
60565+ Remember to add the users you want server socket access disabled for to
60566+ the GID specified here. If the sysctl option is enabled, a sysctl
60567+ option with name "socket_server_gid" is created.
60568+
60569+endmenu
60570+menu "Sysctl Support"
60571+depends on GRKERNSEC && SYSCTL
60572+
60573+config GRKERNSEC_SYSCTL
60574+ bool "Sysctl support"
60575+ default y if GRKERNSEC_CONFIG_AUTO
60576+ help
60577+ If you say Y here, you will be able to change the options that
60578+ grsecurity runs with at bootup, without having to recompile your
60579+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
60580+ to enable (1) or disable (0) various features. All the sysctl entries
60581+ are mutable until the "grsec_lock" entry is set to a non-zero value.
60582+ All features enabled in the kernel configuration are disabled at boot
60583+ if you do not say Y to the "Turn on features by default" option.
60584+ All options should be set at startup, and the grsec_lock entry should
60585+ be set to a non-zero value after all the options are set.
60586+ *THIS IS EXTREMELY IMPORTANT*
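+
+ A minimal sketch of the intended startup sequence (the two feature
+ names are examples taken from this menu; any file under
+ /proc/sys/kernel/grsecurity works the same way, and grsec_lock must
+ come last):
+
+   /* grsec_setup.c -- set grsecurity sysctls at boot, then lock them */
+   #include <stdio.h>
+
+   static int set1(const char *name)
+   {
+           char path[128];
+           FILE *f;
+
+           snprintf(path, sizeof(path),
+                    "/proc/sys/kernel/grsecurity/%s", name);
+           f = fopen(path, "w");
+           if (!f)
+                   return -1;
+           fputs("1", f);
+           return fclose(f);
+   }
+
+   int main(void)
+   {
+           set1("chroot_deny_mount");
+           set1("exec_logging");
+           return set1("grsec_lock"); /* last: freeze all entries */
+   }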
60587+
60588+config GRKERNSEC_SYSCTL_DISTRO
60589+ bool "Extra sysctl support for distro makers (READ HELP)"
60590+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
60591+ help
60592+ If you say Y here, additional sysctl options will be created
60593+ for features that affect processes running as root. Therefore,
60594+ it is critical when using this option that the grsec_lock entry be
60595+ enabled after boot. Only distros that ship prebuilt kernel packages
60596+ with this option enabled and that can ensure grsec_lock is enabled
60597+ after boot should use this option.
60598+ *Failure to set grsec_lock after boot makes all grsec features
60599+ this option covers useless*
60600+
60601+ Currently this option creates the following sysctl entries:
60602+ "Disable Privileged I/O": "disable_priv_io"
60603+
60604+config GRKERNSEC_SYSCTL_ON
60605+ bool "Turn on features by default"
60606+ default y if GRKERNSEC_CONFIG_AUTO
60607+ depends on GRKERNSEC_SYSCTL
60608+ help
60609+ If you say Y here, instead of having all features enabled in the
60610+ kernel configuration disabled at boot time, the features will be
60611+ enabled at boot time. It is recommended you say Y here unless
60612+ there is some reason you would want all sysctl-tunable features to
60613+ be disabled by default. As mentioned elsewhere, it is important
60614+ to enable the grsec_lock entry once you have finished modifying
60615+ the sysctl entries.
60616+
60617+endmenu
60618+menu "Logging Options"
60619+depends on GRKERNSEC
60620+
60621+config GRKERNSEC_FLOODTIME
60622+ int "Seconds in between log messages (minimum)"
60623+ default 10
60624+ help
60625+ This option allows you to enforce a minimum number of seconds between
60626+ grsecurity log messages. The default should be suitable for most
60627+ people; however, if you choose to change it, choose a value small enough
60628+ to allow informative logs to be produced, but large enough to
60629+ prevent flooding.
60630+
60631+config GRKERNSEC_FLOODBURST
60632+ int "Number of messages in a burst (maximum)"
60633+ default 6
60634+ help
60635+ This option allows you to choose the maximum number of messages allowed
60636+ within the flood time interval you chose in a separate option. The
60637+ default should be suitable for most people; however, if you find that
60638+ many of your logs are being interpreted as flooding, you may want to
60639+ raise this value.
60640+
60641+endmenu
60642diff --git a/grsecurity/Makefile b/grsecurity/Makefile
60643new file mode 100644
60644index 0000000..36845aa
60645--- /dev/null
60646+++ b/grsecurity/Makefile
60647@@ -0,0 +1,42 @@
60648+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
60649+# during 2001-2009 it was completely redesigned by Brad Spengler
60650+# into an RBAC system
60651+#
60652+# All code in this directory and various hooks inserted throughout the kernel
60653+# are copyright Brad Spengler - Open Source Security, Inc., and released
60654+# under the GPL v2 or higher
60655+
60656+KBUILD_CFLAGS += -Werror
60657+
60658+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
60659+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
60660+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
60661+
60662+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
60663+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
60664+ gracl_learn.o grsec_log.o
60665+ifdef CONFIG_COMPAT
60666+obj-$(CONFIG_GRKERNSEC) += gracl_compat.o
60667+endif
60668+
60669+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
60670+
60671+ifdef CONFIG_NET
60672+obj-y += grsec_sock.o
60673+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
60674+endif
60675+
60676+ifndef CONFIG_GRKERNSEC
60677+obj-y += grsec_disabled.o
60678+endif
60679+
60680+ifdef CONFIG_GRKERNSEC_HIDESYM
60681+extra-y := grsec_hidesym.o
60682+$(obj)/grsec_hidesym.o:
60683+ @-chmod -f 500 /boot
60684+ @-chmod -f 500 /lib/modules
60685+ @-chmod -f 500 /lib64/modules
60686+ @-chmod -f 500 /lib32/modules
60687+ @-chmod -f 700 .
60688+ @echo ' grsec: protected kernel image paths'
60689+endif
60690diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
60691new file mode 100644
60692index 0000000..c0793fd
60693--- /dev/null
60694+++ b/grsecurity/gracl.c
60695@@ -0,0 +1,4178 @@
60696+#include <linux/kernel.h>
60697+#include <linux/module.h>
60698+#include <linux/sched.h>
60699+#include <linux/mm.h>
60700+#include <linux/file.h>
60701+#include <linux/fs.h>
60702+#include <linux/namei.h>
60703+#include <linux/mount.h>
60704+#include <linux/tty.h>
60705+#include <linux/proc_fs.h>
60706+#include <linux/lglock.h>
60707+#include <linux/slab.h>
60708+#include <linux/vmalloc.h>
60709+#include <linux/types.h>
60710+#include <linux/sysctl.h>
60711+#include <linux/netdevice.h>
60712+#include <linux/ptrace.h>
60713+#include <linux/gracl.h>
60714+#include <linux/gralloc.h>
60715+#include <linux/security.h>
60716+#include <linux/grinternal.h>
60717+#include <linux/pid_namespace.h>
60718+#include <linux/stop_machine.h>
60719+#include <linux/fdtable.h>
60720+#include <linux/percpu.h>
60722+#include <linux/hugetlb.h>
60723+#include <linux/posix-timers.h>
60724+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
60725+#include <linux/magic.h>
60726+#include <linux/pagemap.h>
60727+#include "../fs/btrfs/async-thread.h"
60728+#include "../fs/btrfs/ctree.h"
60729+#include "../fs/btrfs/btrfs_inode.h"
60730+#endif
60731+#include "../fs/mount.h"
60732+
60733+#include <asm/uaccess.h>
60734+#include <asm/errno.h>
60735+#include <asm/mman.h>
60736+
60737+extern struct lglock vfsmount_lock;
60738+
60739+static struct acl_role_db acl_role_set;
60740+static struct name_db name_set;
60741+static struct inodev_db inodev_set;
60742+
60743+/* for keeping track of userspace pointers used for subjects, so we
60744+ can share references in the kernel as well
60745+*/
60746+
60747+static struct path real_root;
60748+
60749+static struct acl_subj_map_db subj_map_set;
60750+
60751+static struct acl_role_label *default_role;
60752+
60753+static struct acl_role_label *role_list;
60754+
60755+static u16 acl_sp_role_value;
60756+
60757+extern char *gr_shared_page[4];
60758+static DEFINE_MUTEX(gr_dev_mutex);
60759+DEFINE_RWLOCK(gr_inode_lock);
60760+
60761+struct gr_arg *gr_usermode;
60762+
60763+static unsigned int gr_status __read_only = GR_STATUS_INIT;
60764+
60765+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
60766+extern void gr_clear_learn_entries(void);
60767+
60768+unsigned char *gr_system_salt;
60769+unsigned char *gr_system_sum;
60770+
60771+static struct sprole_pw **acl_special_roles = NULL;
60772+static __u16 num_sprole_pws = 0;
60773+
60774+static struct acl_role_label *kernel_role = NULL;
60775+
60776+static unsigned int gr_auth_attempts = 0;
60777+static unsigned long gr_auth_expires = 0UL;
60778+
60779+#ifdef CONFIG_NET
60780+extern struct vfsmount *sock_mnt;
60781+#endif
60782+
60783+extern struct vfsmount *pipe_mnt;
60784+extern struct vfsmount *shm_mnt;
60785+
60786+#ifdef CONFIG_HUGETLBFS
60787+extern struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
60788+#endif
60789+
60790+static struct acl_object_label *fakefs_obj_rw;
60791+static struct acl_object_label *fakefs_obj_rwx;
60792+
60793+extern int gr_init_uidset(void);
60794+extern void gr_free_uidset(void);
60795+extern void gr_remove_uid(uid_t uid);
60796+extern int gr_find_uid(uid_t uid);
60797+
60798+static int copy_acl_object_label_normal(struct acl_object_label *obj, const struct acl_object_label *userp)
60799+{
60800+ if (copy_from_user(obj, userp, sizeof(struct acl_object_label)))
60801+ return -EFAULT;
60802+
60803+ return 0;
60804+}
60805+
60806+static int copy_acl_ip_label_normal(struct acl_ip_label *ip, const struct acl_ip_label *userp)
60807+{
60808+ if (copy_from_user(ip, userp, sizeof(struct acl_ip_label)))
60809+ return -EFAULT;
60810+
60811+ return 0;
60812+}
60813+
60814+static int copy_acl_subject_label_normal(struct acl_subject_label *subj, const struct acl_subject_label *userp)
60815+{
60816+ if (copy_from_user(subj, userp, sizeof(struct acl_subject_label)))
60817+ return -EFAULT;
60818+
60819+ return 0;
60820+}
60821+
60822+static int copy_acl_role_label_normal(struct acl_role_label *role, const struct acl_role_label *userp)
60823+{
60824+ if (copy_from_user(role, userp, sizeof(struct acl_role_label)))
60825+ return -EFAULT;
60826+
60827+ return 0;
60828+}
60829+
60830+static int copy_role_allowed_ip_normal(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
60831+{
60832+ if (copy_from_user(roleip, userp, sizeof(struct role_allowed_ip)))
60833+ return -EFAULT;
60834+
60835+ return 0;
60836+}
60837+
60838+static int copy_sprole_pw_normal(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
60839+{
60840+ if (copy_from_user(pw, userp + idx, sizeof(struct sprole_pw)))
60841+ return -EFAULT;
60842+
60843+ return 0;
60844+}
60845+
60846+static int copy_gr_hash_struct_normal(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
60847+{
60848+ if (copy_from_user(hash, userp, sizeof(struct gr_hash_struct)))
60849+ return -EFAULT;
60850+
60851+ return 0;
60852+}
60853+
60854+static int copy_role_transition_normal(struct role_transition *trans, const struct role_transition *userp)
60855+{
60856+ if (copy_from_user(trans, userp, sizeof(struct role_transition)))
60857+ return -EFAULT;
60858+
60859+ return 0;
60860+}
60861+
60862+int copy_pointer_from_array_normal(void *ptr, unsigned long idx, const void *userp)
60863+{
60864+ if (copy_from_user(ptr, userp + (idx * sizeof(void *)), sizeof(void *)))
60865+ return -EFAULT;
60866+
60867+ return 0;
60868+}
60869+
60870+static int copy_gr_arg_wrapper_normal(const char __user *buf, struct gr_arg_wrapper *uwrap)
60871+{
60872+ if (copy_from_user(uwrap, buf, sizeof (struct gr_arg_wrapper)))
60873+ return -EFAULT;
60874+
60875+ if ((uwrap->version != GRSECURITY_VERSION) || (uwrap->size != sizeof(struct gr_arg)))
60876+ return -EINVAL;
60877+
60878+ return 0;
60879+}
60880+
60881+static int copy_gr_arg_normal(const struct gr_arg __user *buf, struct gr_arg *arg)
60882+{
60883+ if (copy_from_user(arg, buf, sizeof (struct gr_arg)))
60884+ return -EFAULT;
60885+
60886+ return 0;
60887+}
60888+
60889+static size_t get_gr_arg_wrapper_size_normal(void)
60890+{
60891+ return sizeof(struct gr_arg_wrapper);
60892+}
60893+
60894+#ifdef CONFIG_COMPAT
60895+extern int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap);
60896+extern int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg);
60897+extern int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp);
60898+extern int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp);
60899+extern int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp);
60900+extern int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp);
60901+extern int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp);
60902+extern int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp);
60903+extern int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp);
60904+extern int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp);
60905+extern int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp);
60906+extern size_t get_gr_arg_wrapper_size_compat(void);
60907+
60908+int (* copy_gr_arg_wrapper)(const char *buf, struct gr_arg_wrapper *uwrap) __read_only;
60909+int (* copy_gr_arg)(const struct gr_arg *buf, struct gr_arg *arg) __read_only;
60910+int (* copy_acl_object_label)(struct acl_object_label *obj, const struct acl_object_label *userp) __read_only;
60911+int (* copy_acl_subject_label)(struct acl_subject_label *subj, const struct acl_subject_label *userp) __read_only;
60912+int (* copy_acl_role_label)(struct acl_role_label *role, const struct acl_role_label *userp) __read_only;
60913+int (* copy_acl_ip_label)(struct acl_ip_label *ip, const struct acl_ip_label *userp) __read_only;
60914+int (* copy_pointer_from_array)(void *ptr, unsigned long idx, const void *userp) __read_only;
60915+int (* copy_sprole_pw)(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp) __read_only;
60916+int (* copy_gr_hash_struct)(struct gr_hash_struct *hash, const struct gr_hash_struct *userp) __read_only;
60917+int (* copy_role_transition)(struct role_transition *trans, const struct role_transition *userp) __read_only;
60918+int (* copy_role_allowed_ip)(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp) __read_only;
60919+size_t (* get_gr_arg_wrapper_size)(void) __read_only;
60920+
60921+#else
60922+#define copy_gr_arg_wrapper copy_gr_arg_wrapper_normal
60923+#define copy_gr_arg copy_gr_arg_normal
60924+#define copy_gr_hash_struct copy_gr_hash_struct_normal
60925+#define copy_acl_object_label copy_acl_object_label_normal
60926+#define copy_acl_subject_label copy_acl_subject_label_normal
60927+#define copy_acl_role_label copy_acl_role_label_normal
60928+#define copy_acl_ip_label copy_acl_ip_label_normal
60929+#define copy_pointer_from_array copy_pointer_from_array_normal
60930+#define copy_sprole_pw copy_sprole_pw_normal
60931+#define copy_role_transition copy_role_transition_normal
60932+#define copy_role_allowed_ip copy_role_allowed_ip_normal
60933+#define get_gr_arg_wrapper_size get_gr_arg_wrapper_size_normal
60934+#endif
60935+
60936+__inline__ int
60937+gr_acl_is_enabled(void)
60938+{
60939+ return (gr_status & GR_READY);
60940+}
60941+
60942+static inline dev_t __get_dev(const struct dentry *dentry)
60943+{
60944+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
60945+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
60946+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
60947+ else
60948+#endif
60949+ return dentry->d_sb->s_dev;
60950+}
60951+
60952+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
60953+{
60954+ return __get_dev(dentry);
60955+}
60956+
60957+static char gr_task_roletype_to_char(struct task_struct *task)
60958+{
60959+ switch (task->role->roletype &
60960+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
60961+ GR_ROLE_SPECIAL)) {
60962+ case GR_ROLE_DEFAULT:
60963+ return 'D';
60964+ case GR_ROLE_USER:
60965+ return 'U';
60966+ case GR_ROLE_GROUP:
60967+ return 'G';
60968+ case GR_ROLE_SPECIAL:
60969+ return 'S';
60970+ }
60971+
60972+ return 'X';
60973+}
60974+
60975+char gr_roletype_to_char(void)
60976+{
60977+ return gr_task_roletype_to_char(current);
60978+}
60979+
60980+__inline__ int
60981+gr_acl_tpe_check(void)
60982+{
60983+ if (unlikely(!(gr_status & GR_READY)))
60984+ return 0;
60985+ if (current->role->roletype & GR_ROLE_TPE)
60986+ return 1;
60987+ else
60988+ return 0;
60989+}
60990+
60991+int
60992+gr_handle_rawio(const struct inode *inode)
60993+{
60994+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
60995+ if (inode && S_ISBLK(inode->i_mode) &&
60996+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
60997+ !capable(CAP_SYS_RAWIO))
60998+ return 1;
60999+#endif
61000+ return 0;
61001+}
61002+
61003+static int
61004+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
61005+{
61006+ if (likely(lena != lenb))
61007+ return 0;
61008+
61009+ return !memcmp(a, b, lena);
61010+}
61011+
61012+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
61013+{
61014+ *buflen -= namelen;
61015+ if (*buflen < 0)
61016+ return -ENAMETOOLONG;
61017+ *buffer -= namelen;
61018+ memcpy(*buffer, str, namelen);
61019+ return 0;
61020+}
61021+
61022+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
61023+{
61024+ return prepend(buffer, buflen, name->name, name->len);
61025+}
61026+
61027+static int prepend_path(const struct path *path, struct path *root,
61028+ char **buffer, int *buflen)
61029+{
61030+ struct dentry *dentry = path->dentry;
61031+ struct vfsmount *vfsmnt = path->mnt;
61032+ struct mount *mnt = real_mount(vfsmnt);
61033+ bool slash = false;
61034+ int error = 0;
61035+
61036+ while (dentry != root->dentry || vfsmnt != root->mnt) {
61037+ struct dentry * parent;
61038+
61039+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
61040+ /* Global root? */
61041+ if (!mnt_has_parent(mnt)) {
61042+ goto out;
61043+ }
61044+ dentry = mnt->mnt_mountpoint;
61045+ mnt = mnt->mnt_parent;
61046+ vfsmnt = &mnt->mnt;
61047+ continue;
61048+ }
61049+ parent = dentry->d_parent;
61050+ prefetch(parent);
61051+ spin_lock(&dentry->d_lock);
61052+ error = prepend_name(buffer, buflen, &dentry->d_name);
61053+ spin_unlock(&dentry->d_lock);
61054+ if (!error)
61055+ error = prepend(buffer, buflen, "/", 1);
61056+ if (error)
61057+ break;
61058+
61059+ slash = true;
61060+ dentry = parent;
61061+ }
61062+
61063+out:
61064+ if (!error && !slash)
61065+ error = prepend(buffer, buflen, "/", 1);
61066+
61067+ return error;
61068+}
61069+
61070+/* this must be called with vfsmount_lock and rename_lock held */
61071+
61072+static char *__our_d_path(const struct path *path, struct path *root,
61073+ char *buf, int buflen)
61074+{
61075+ char *res = buf + buflen;
61076+ int error;
61077+
61078+ prepend(&res, &buflen, "\0", 1);
61079+ error = prepend_path(path, root, &res, &buflen);
61080+ if (error)
61081+ return ERR_PTR(error);
61082+
61083+ return res;
61084+}
61085+
61086+static char *
61087+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
61088+{
61089+ char *retval;
61090+
61091+ retval = __our_d_path(path, root, buf, buflen);
61092+ if (unlikely(IS_ERR(retval)))
61093+ retval = strcpy(buf, "<path too long>");
61094+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
61095+ retval[1] = '\0';
61096+
61097+ return retval;
61098+}
61099+
61100+static char *
61101+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
61102+ char *buf, int buflen)
61103+{
61104+ struct path path;
61105+ char *res;
61106+
61107+ path.dentry = (struct dentry *)dentry;
61108+ path.mnt = (struct vfsmount *)vfsmnt;
61109+
61110+ /* we can use real_root.dentry, real_root.mnt, because this is only called
61111+ by the RBAC system */
61112+ res = gen_full_path(&path, &real_root, buf, buflen);
61113+
61114+ return res;
61115+}
61116+
61117+static char *
61118+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
61119+ char *buf, int buflen)
61120+{
61121+ char *res;
61122+ struct path path;
61123+ struct path root;
61124+ struct task_struct *reaper = init_pid_ns.child_reaper;
61125+
61126+ path.dentry = (struct dentry *)dentry;
61127+ path.mnt = (struct vfsmount *)vfsmnt;
61128+
61129+ /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
61130+ get_fs_root(reaper->fs, &root);
61131+
61132+ br_read_lock(&vfsmount_lock);
61133+ write_seqlock(&rename_lock);
61134+ res = gen_full_path(&path, &root, buf, buflen);
61135+ write_sequnlock(&rename_lock);
61136+ br_read_unlock(&vfsmount_lock);
61137+
61138+ path_put(&root);
61139+ return res;
61140+}
61141+
61142+static char *
61143+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
61144+{
61145+ char *ret;
61146+ br_read_lock(&vfsmount_lock);
61147+ write_seqlock(&rename_lock);
61148+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
61149+ PAGE_SIZE);
61150+ write_sequnlock(&rename_lock);
61151+ br_read_unlock(&vfsmount_lock);
61152+ return ret;
61153+}
61154+
61155+static char *
61156+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
61157+{
61158+ char *ret;
61159+ char *buf;
61160+ int buflen;
61161+
61162+ br_read_lock(&vfsmount_lock);
61163+ write_seqlock(&rename_lock);
61164+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
61165+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
61166+ buflen = (int)(ret - buf);
61167+ if (buflen >= 5)
61168+ prepend(&ret, &buflen, "/proc", 5);
61169+ else
61170+ ret = strcpy(buf, "<path too long>");
61171+ write_sequnlock(&rename_lock);
61172+ br_read_unlock(&vfsmount_lock);
61173+ return ret;
61174+}
61175+
61176+char *
61177+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
61178+{
61179+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
61180+ PAGE_SIZE);
61181+}
61182+
61183+char *
61184+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
61185+{
61186+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
61187+ PAGE_SIZE);
61188+}
61189+
61190+char *
61191+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
61192+{
61193+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
61194+ PAGE_SIZE);
61195+}
61196+
61197+char *
61198+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
61199+{
61200+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
61201+ PAGE_SIZE);
61202+}
61203+
61204+char *
61205+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
61206+{
61207+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
61208+ PAGE_SIZE);
61209+}
61210+
61211+__inline__ __u32
61212+to_gr_audit(const __u32 reqmode)
61213+{
61214+ /* masks off auditable permission flags, then shifts them to create
61215+ auditing flags, and adds the special case of append auditing if
61216+ we're requesting write */
61217+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
61218+}
61219+
61220+struct acl_subject_label *
61221+lookup_subject_map(const struct acl_subject_label *userp)
61222+{
61223+ unsigned int index = gr_shash(userp, subj_map_set.s_size);
61224+ struct subject_map *match;
61225+
61226+ match = subj_map_set.s_hash[index];
61227+
61228+ while (match && match->user != userp)
61229+ match = match->next;
61230+
61231+ if (match != NULL)
61232+ return match->kernel;
61233+ else
61234+ return NULL;
61235+}
61236+
61237+static void
61238+insert_subj_map_entry(struct subject_map *subjmap)
61239+{
61240+ unsigned int index = gr_shash(subjmap->user, subj_map_set.s_size);
61241+ struct subject_map **curr;
61242+
61243+ subjmap->prev = NULL;
61244+
61245+ curr = &subj_map_set.s_hash[index];
61246+ if (*curr != NULL)
61247+ (*curr)->prev = subjmap;
61248+
61249+ subjmap->next = *curr;
61250+ *curr = subjmap;
61251+
61252+ return;
61253+}
61254+
61255+static struct acl_role_label *
61256+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
61257+ const gid_t gid)
61258+{
61259+ unsigned int index = gr_rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
61260+ struct acl_role_label *match;
61261+ struct role_allowed_ip *ipp;
61262+ unsigned int x;
61263+ u32 curr_ip = task->signal->curr_ip;
61264+
61265+ task->signal->saved_ip = curr_ip;
61266+
61267+ match = acl_role_set.r_hash[index];
61268+
61269+ while (match) {
61270+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
61271+ for (x = 0; x < match->domain_child_num; x++) {
61272+ if (match->domain_children[x] == uid)
61273+ goto found;
61274+ }
61275+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
61276+ break;
61277+ match = match->next;
61278+ }
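61278+ /* fall back to a group role (and finally the default role) if no
61278+ usable user role is found */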
61279+found:
61280+ if (match == NULL) {
61281+ try_group:
61282+ index = gr_rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
61283+ match = acl_role_set.r_hash[index];
61284+
61285+ while (match) {
61286+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
61287+ for (x = 0; x < match->domain_child_num; x++) {
61288+ if (match->domain_children[x] == gid)
61289+ goto found2;
61290+ }
61291+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
61292+ break;
61293+ match = match->next;
61294+ }
61295+found2:
61296+ if (match == NULL)
61297+ match = default_role;
61298+ if (match->allowed_ips == NULL)
61299+ return match;
61300+ else {
61301+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
61302+ if (likely
61303+ ((ntohl(curr_ip) & ipp->netmask) ==
61304+ (ntohl(ipp->addr) & ipp->netmask)))
61305+ return match;
61306+ }
61307+ match = default_role;
61308+ }
61309+ } else if (match->allowed_ips == NULL) {
61310+ return match;
61311+ } else {
61312+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
61313+ if (likely
61314+ ((ntohl(curr_ip) & ipp->netmask) ==
61315+ (ntohl(ipp->addr) & ipp->netmask)))
61316+ return match;
61317+ }
61318+ goto try_group;
61319+ }
61320+
61321+ return match;
61322+}
61323+
61324+struct acl_subject_label *
61325+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
61326+ const struct acl_role_label *role)
61327+{
61328+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
61329+ struct acl_subject_label *match;
61330+
61331+ match = role->subj_hash[index];
61332+
61333+ while (match && (match->inode != ino || match->device != dev ||
61334+ (match->mode & GR_DELETED))) {
61335+ match = match->next;
61336+ }
61337+
61338+ if (match && !(match->mode & GR_DELETED))
61339+ return match;
61340+ else
61341+ return NULL;
61342+}
61343+
61344+struct acl_subject_label *
61345+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
61346+ const struct acl_role_label *role)
61347+{
61348+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
61349+ struct acl_subject_label *match;
61350+
61351+ match = role->subj_hash[index];
61352+
61353+ while (match && (match->inode != ino || match->device != dev ||
61354+ !(match->mode & GR_DELETED))) {
61355+ match = match->next;
61356+ }
61357+
61358+ if (match && (match->mode & GR_DELETED))
61359+ return match;
61360+ else
61361+ return NULL;
61362+}
61363+
61364+static struct acl_object_label *
61365+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
61366+ const struct acl_subject_label *subj)
61367+{
61368+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
61369+ struct acl_object_label *match;
61370+
61371+ match = subj->obj_hash[index];
61372+
61373+ while (match && (match->inode != ino || match->device != dev ||
61374+ (match->mode & GR_DELETED))) {
61375+ match = match->next;
61376+ }
61377+
61378+ if (match && !(match->mode & GR_DELETED))
61379+ return match;
61380+ else
61381+ return NULL;
61382+}
61383+
61384+static struct acl_object_label *
61385+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
61386+ const struct acl_subject_label *subj)
61387+{
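61387+ /* prefer a matching entry marked GR_DELETED (its path was deleted
61387+ and is being re-created); otherwise fall back to a live entry */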
61388+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
61389+ struct acl_object_label *match;
61390+
61391+ match = subj->obj_hash[index];
61392+
61393+ while (match && (match->inode != ino || match->device != dev ||
61394+ !(match->mode & GR_DELETED))) {
61395+ match = match->next;
61396+ }
61397+
61398+ if (match && (match->mode & GR_DELETED))
61399+ return match;
61400+
61401+ match = subj->obj_hash[index];
61402+
61403+ while (match && (match->inode != ino || match->device != dev ||
61404+ (match->mode & GR_DELETED))) {
61405+ match = match->next;
61406+ }
61407+
61408+ if (match && !(match->mode & GR_DELETED))
61409+ return match;
61410+ else
61411+ return NULL;
61412+}
61413+
61414+static struct name_entry *
61415+lookup_name_entry(const char *name)
61416+{
61417+ unsigned int len = strlen(name);
61418+ unsigned int key = full_name_hash(name, len);
61419+ unsigned int index = key % name_set.n_size;
61420+ struct name_entry *match;
61421+
61422+ match = name_set.n_hash[index];
61423+
61424+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
61425+ match = match->next;
61426+
61427+ return match;
61428+}
61429+
61430+static struct name_entry *
61431+lookup_name_entry_create(const char *name)
61432+{
61433+ unsigned int len = strlen(name);
61434+ unsigned int key = full_name_hash(name, len);
61435+ unsigned int index = key % name_set.n_size;
61436+ struct name_entry *match;
61437+
61438+ match = name_set.n_hash[index];
61439+
61440+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
61441+ !match->deleted))
61442+ match = match->next;
61443+
61444+ if (match && match->deleted)
61445+ return match;
61446+
61447+ match = name_set.n_hash[index];
61448+
61449+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
61450+ match->deleted))
61451+ match = match->next;
61452+
61453+ if (match && !match->deleted)
61454+ return match;
61455+ else
61456+ return NULL;
61457+}
61458+
61459+static struct inodev_entry *
61460+lookup_inodev_entry(const ino_t ino, const dev_t dev)
61461+{
61462+ unsigned int index = gr_fhash(ino, dev, inodev_set.i_size);
61463+ struct inodev_entry *match;
61464+
61465+ match = inodev_set.i_hash[index];
61466+
61467+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
61468+ match = match->next;
61469+
61470+ return match;
61471+}
61472+
61473+static void
61474+insert_inodev_entry(struct inodev_entry *entry)
61475+{
61476+ unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
61477+ inodev_set.i_size);
61478+ struct inodev_entry **curr;
61479+
61480+ entry->prev = NULL;
61481+
61482+ curr = &inodev_set.i_hash[index];
61483+ if (*curr != NULL)
61484+ (*curr)->prev = entry;
61485+
61486+ entry->next = *curr;
61487+ *curr = entry;
61488+
61489+ return;
61490+}
61491+
61492+static void
61493+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
61494+{
61495+ unsigned int index =
61496+ gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
61497+ struct acl_role_label **curr;
61498+ struct acl_role_label *tmp, *tmp2;
61499+
61500+ curr = &acl_role_set.r_hash[index];
61501+
61502+ /* simple case, slot is empty, just set it to our role */
61503+ if (*curr == NULL) {
61504+ *curr = role;
61505+ } else {
61506+ /* example:
61507+ 1 -> 2 -> 3 (adding 2 -> 3 to here)
61508+ 2 -> 3
61509+ */
61510+ /* first check to see if we can already be reached via this slot */
61511+ tmp = *curr;
61512+ while (tmp && tmp != role)
61513+ tmp = tmp->next;
61514+ if (tmp == role) {
61515+ /* we don't need to add ourselves to this slot's chain */
61516+ return;
61517+ }
61518+ /* we need to add ourselves to this chain, two cases */
61519+ if (role->next == NULL) {
61520+ /* simple case, append the current chain to our role */
61521+ role->next = *curr;
61522+ *curr = role;
61523+ } else {
61524+ /* 1 -> 2 -> 3 -> 4
61525+ 2 -> 3 -> 4
61526+ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
61527+ */
61528+ /* trickier case: walk our role's chain until we find
61529+ the role for the start of the current slot's chain */
61530+ tmp = role;
61531+ tmp2 = *curr;
61532+ while (tmp->next && tmp->next != tmp2)
61533+ tmp = tmp->next;
61534+ if (tmp->next == tmp2) {
61535+ /* from example above, we found 3, so just
61536+ replace this slot's chain with ours */
61537+ *curr = role;
61538+ } else {
61539+ /* we didn't find a subset of our role's chain
61540+ in the current slot's chain, so append their
61541+ chain to ours, and set us as the first role in
61542+ the slot's chain
61543+
61544+ we could fold this case with the case above,
61545+ but making it explicit for clarity
61546+ */
61547+ tmp->next = tmp2;
61548+ *curr = role;
61549+ }
61550+ }
61551+ }
61552+
61553+ return;
61554+}
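+
+/*
+ * Worked example for the chain handling above, using the numbering from the
+ * inline comments: if the slot currently heads the chain 3 -> 4 and we
+ * insert role 1 whose own chain is 1 -> 2 -> 3 -> 4, the walk stops at
+ * tmp == 2 (tmp->next == 3 == *curr), so setting *curr = role yields
+ *
+ *	before:	slot -> 3 -> 4
+ *	after:	slot -> 1 -> 2 -> 3 -> 4
+ *
+ * and every role reachable before is still reachable.  Domain roles are
+ * inserted once per domain child (see insert_acl_role_label() below), which
+ * is why several slots may share one tail, and why the first walk must
+ * check whether the role is already reachable from this slot.
+ */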
61555+
61556+static void
61557+insert_acl_role_label(struct acl_role_label *role)
61558+{
61559+ int i;
61560+
61561+ if (role_list == NULL) {
61562+ role_list = role;
61563+ role->prev = NULL;
61564+ } else {
61565+ role->prev = role_list;
61566+ role_list = role;
61567+ }
61568+
61569+ /* used for hash chains */
61570+ role->next = NULL;
61571+
61572+ if (role->roletype & GR_ROLE_DOMAIN) {
61573+ for (i = 0; i < role->domain_child_num; i++)
61574+ __insert_acl_role_label(role, role->domain_children[i]);
61575+ } else
61576+ __insert_acl_role_label(role, role->uidgid);
61577+}
61578+
61579+static int
61580+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
61581+{
61582+ struct name_entry **curr, *nentry;
61583+ struct inodev_entry *ientry;
61584+ unsigned int len = strlen(name);
61585+ unsigned int key = full_name_hash(name, len);
61586+ unsigned int index = key % name_set.n_size;
61587+
61588+ curr = &name_set.n_hash[index];
61589+
61590+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
61591+ curr = &((*curr)->next);
61592+
61593+ if (*curr != NULL)
61594+ return 1;
61595+
61596+ nentry = acl_alloc(sizeof (struct name_entry));
61597+ if (nentry == NULL)
61598+ return 0;
61599+ ientry = acl_alloc(sizeof (struct inodev_entry));
61600+ if (ientry == NULL)
61601+ return 0;
61602+ ientry->nentry = nentry;
61603+
61604+ nentry->key = key;
61605+ nentry->name = name;
61606+ nentry->inode = inode;
61607+ nentry->device = device;
61608+ nentry->len = len;
61609+ nentry->deleted = deleted;
61610+
61611+ nentry->prev = NULL;
61612+ curr = &name_set.n_hash[index];
61613+ if (*curr != NULL)
61614+ (*curr)->prev = nentry;
61615+ nentry->next = *curr;
61616+ *curr = nentry;
61617+
61618+ /* insert us into the table searchable by inode/dev */
61619+ insert_inodev_entry(ientry);
61620+
61621+ return 1;
61622+}
61623+
61624+static void
61625+insert_acl_obj_label(struct acl_object_label *obj,
61626+ struct acl_subject_label *subj)
61627+{
61628+ unsigned int index =
61629+ gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
61630+ struct acl_object_label **curr;
61631+
61632+
61633+ obj->prev = NULL;
61634+
61635+ curr = &subj->obj_hash[index];
61636+ if (*curr != NULL)
61637+ (*curr)->prev = obj;
61638+
61639+ obj->next = *curr;
61640+ *curr = obj;
61641+
61642+ return;
61643+}
61644+
61645+static void
61646+insert_acl_subj_label(struct acl_subject_label *obj,
61647+ struct acl_role_label *role)
61648+{
61649+ unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
61650+ struct acl_subject_label **curr;
61651+
61652+ obj->prev = NULL;
61653+
61654+ curr = &role->subj_hash[index];
61655+ if (*curr != NULL)
61656+ (*curr)->prev = obj;
61657+
61658+ obj->next = *curr;
61659+ *curr = obj;
61660+
61661+ return;
61662+}
61663+
61664+/* allocating chained hash tables, so the optimal size is where the load factor (lambda) is ~1 */
61665+
61666+static void *
61667+create_table(__u32 * len, int elementsize)
61668+{
61669+ unsigned int table_sizes[] = {
61670+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
61671+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
61672+ 4194301, 8388593, 16777213, 33554393, 67108859
61673+ };
61674+ void *newtable = NULL;
61675+ unsigned int pwr = 0;
61676+
61677+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
61678+ table_sizes[pwr] <= *len)
61679+ pwr++;
61680+
61681+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
61682+ return newtable;
61683+
61684+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
61685+ newtable =
61686+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
61687+ else
61688+ newtable = vmalloc(table_sizes[pwr] * elementsize);
61689+
61690+ *len = table_sizes[pwr];
61691+
61692+ return newtable;
61693+}
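+
+/*
+ * Usage sketch for create_table() above, illustrative only: the requested
+ * count is rounded up to the next prime in table_sizes[], keeping the
+ * chained hash near a load factor of 1, and allocations larger than a page
+ * fall back to vmalloc().  Callers must zero the table themselves:
+ *
+ *	__u32 size = 1000;
+ *	struct name_entry **tbl;
+ *
+ *	tbl = (struct name_entry **) create_table(&size, sizeof(void *));
+ *	if (tbl != NULL)
+ *		memset(tbl, 0, size * sizeof(struct name_entry *));
+ *
+ * On success, size has been updated to 1021, the next prime above 1000.
+ */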
61694+
61695+static int
61696+init_variables(const struct gr_arg *arg)
61697+{
61698+ struct task_struct *reaper = init_pid_ns.child_reaper;
61699+ unsigned int stacksize;
61700+
61701+ subj_map_set.s_size = arg->role_db.num_subjects;
61702+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
61703+ name_set.n_size = arg->role_db.num_objects;
61704+ inodev_set.i_size = arg->role_db.num_objects;
61705+
61706+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
61707+ !name_set.n_size || !inodev_set.i_size)
61708+ return 1;
61709+
61710+ if (!gr_init_uidset())
61711+ return 1;
61712+
61713+ /* set up the stack that holds allocation info */
61714+
61715+ stacksize = arg->role_db.num_pointers + 5;
61716+
61717+ if (!acl_alloc_stack_init(stacksize))
61718+ return 1;
61719+
61720+ /* grab reference for the real root dentry and vfsmount */
61721+ get_fs_root(reaper->fs, &real_root);
61722+
61723+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
61724+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
61725+#endif
61726+
61727+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
61728+ if (fakefs_obj_rw == NULL)
61729+ return 1;
61730+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
61731+
61732+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
61733+ if (fakefs_obj_rwx == NULL)
61734+ return 1;
61735+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
61736+
61737+ subj_map_set.s_hash =
61738+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
61739+ acl_role_set.r_hash =
61740+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
61741+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
61742+ inodev_set.i_hash =
61743+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
61744+
61745+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
61746+ !name_set.n_hash || !inodev_set.i_hash)
61747+ return 1;
61748+
61749+ memset(subj_map_set.s_hash, 0,
61750+ sizeof(struct subject_map *) * subj_map_set.s_size);
61751+ memset(acl_role_set.r_hash, 0,
61752+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
61753+ memset(name_set.n_hash, 0,
61754+ sizeof (struct name_entry *) * name_set.n_size);
61755+ memset(inodev_set.i_hash, 0,
61756+ sizeof (struct inodev_entry *) * inodev_set.i_size);
61757+
61758+ return 0;
61759+}
61760+
61761+/* free information that is not needed after startup;
61762+ currently this is only the user->kernel pointer mappings for subjects
61763+*/
61764+
61765+static void
61766+free_init_variables(void)
61767+{
61768+ __u32 i;
61769+
61770+ if (subj_map_set.s_hash) {
61771+ for (i = 0; i < subj_map_set.s_size; i++) {
61772+ if (subj_map_set.s_hash[i]) {
61773+ kfree(subj_map_set.s_hash[i]);
61774+ subj_map_set.s_hash[i] = NULL;
61775+ }
61776+ }
61777+
61778+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
61779+ PAGE_SIZE)
61780+ kfree(subj_map_set.s_hash);
61781+ else
61782+ vfree(subj_map_set.s_hash);
61783+ }
61784+
61785+ return;
61786+}
61787+
61788+static void
61789+free_variables(void)
61790+{
61791+ struct acl_subject_label *s;
61792+ struct acl_role_label *r;
61793+ struct task_struct *task, *task2;
61794+ unsigned int x;
61795+
61796+ gr_clear_learn_entries();
61797+
61798+ read_lock(&tasklist_lock);
61799+ do_each_thread(task2, task) {
61800+ task->acl_sp_role = 0;
61801+ task->acl_role_id = 0;
61802+ task->acl = NULL;
61803+ task->role = NULL;
61804+ } while_each_thread(task2, task);
61805+ read_unlock(&tasklist_lock);
61806+
61807+ /* release the reference to the real root dentry and vfsmount */
61808+ path_put(&real_root);
61809+ memset(&real_root, 0, sizeof(real_root));
61810+
61811+ /* free all object hash tables */
61812+
61813+ FOR_EACH_ROLE_START(r)
61814+ if (r->subj_hash == NULL)
61815+ goto next_role;
61816+ FOR_EACH_SUBJECT_START(r, s, x)
61817+ if (s->obj_hash == NULL)
61818+ break;
61819+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
61820+ kfree(s->obj_hash);
61821+ else
61822+ vfree(s->obj_hash);
61823+ FOR_EACH_SUBJECT_END(s, x)
61824+ FOR_EACH_NESTED_SUBJECT_START(r, s)
61825+ if (s->obj_hash == NULL)
61826+ break;
61827+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
61828+ kfree(s->obj_hash);
61829+ else
61830+ vfree(s->obj_hash);
61831+ FOR_EACH_NESTED_SUBJECT_END(s)
61832+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
61833+ kfree(r->subj_hash);
61834+ else
61835+ vfree(r->subj_hash);
61836+ r->subj_hash = NULL;
61837+next_role:
61838+ FOR_EACH_ROLE_END(r)
61839+
61840+ acl_free_all();
61841+
61842+ if (acl_role_set.r_hash) {
61843+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
61844+ PAGE_SIZE)
61845+ kfree(acl_role_set.r_hash);
61846+ else
61847+ vfree(acl_role_set.r_hash);
61848+ }
61849+ if (name_set.n_hash) {
61850+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
61851+ PAGE_SIZE)
61852+ kfree(name_set.n_hash);
61853+ else
61854+ vfree(name_set.n_hash);
61855+ }
61856+
61857+ if (inodev_set.i_hash) {
61858+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
61859+ PAGE_SIZE)
61860+ kfree(inodev_set.i_hash);
61861+ else
61862+ vfree(inodev_set.i_hash);
61863+ }
61864+
61865+ gr_free_uidset();
61866+
61867+ memset(&name_set, 0, sizeof (struct name_db));
61868+ memset(&inodev_set, 0, sizeof (struct inodev_db));
61869+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
61870+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
61871+
61872+ default_role = NULL;
61873+ kernel_role = NULL;
61874+ role_list = NULL;
61875+
61876+ return;
61877+}
61878+
61879+static struct acl_subject_label *
61880+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
61881+
61882+static int alloc_and_copy_string(char **name, unsigned int maxlen)
61883+{
61884+ unsigned int len = strnlen_user(*name, maxlen);
61885+ char *tmp;
61886+
61887+ if (!len || len >= maxlen)
61888+ return -EINVAL;
61889+
61890+ if ((tmp = (char *) acl_alloc(len)) == NULL)
61891+ return -ENOMEM;
61892+
61893+ if (copy_from_user(tmp, *name, len))
61894+ return -EFAULT;
61895+
61896+ tmp[len-1] = '\0';
61897+ *name = tmp;
61898+
61899+ return 0;
61900+}
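+
+/*
+ * Usage sketch for alloc_and_copy_string() above: it replaces a userland
+ * string pointer with a kernel copy drawn from the ACL allocator.  Note
+ * that strnlen_user() returns the length including the trailing NUL, and
+ * the copy is re-terminated afterwards in case userland modified the
+ * buffer between the two calls.  As used by copy_user_glob() below:
+ *
+ *	error = alloc_and_copy_string(&g_tmp->filename, PATH_MAX);
+ *	if (error)
+ *		return error;	(-EINVAL, -ENOMEM, or -EFAULT)
+ */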
61901+
61902+static int
61903+copy_user_glob(struct acl_object_label *obj)
61904+{
61905+ struct acl_object_label *g_tmp, **guser;
61906+ int error;
61907+
61908+ if (obj->globbed == NULL)
61909+ return 0;
61910+
61911+ guser = &obj->globbed;
61912+ while (*guser) {
61913+ g_tmp = (struct acl_object_label *)
61914+ acl_alloc(sizeof (struct acl_object_label));
61915+ if (g_tmp == NULL)
61916+ return -ENOMEM;
61917+
61918+ if (copy_acl_object_label(g_tmp, *guser))
61919+ return -EFAULT;
61920+
61921+ error = alloc_and_copy_string(&g_tmp->filename, PATH_MAX);
61922+ if (error)
61923+ return error;
61924+
61925+ *guser = g_tmp;
61926+ guser = &(g_tmp->next);
61927+ }
61928+
61929+ return 0;
61930+}
61931+
61932+static int
61933+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
61934+ struct acl_role_label *role)
61935+{
61936+ struct acl_object_label *o_tmp;
61937+ int ret;
61938+
61939+ while (userp) {
61940+ if ((o_tmp = (struct acl_object_label *)
61941+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
61942+ return -ENOMEM;
61943+
61944+ if (copy_acl_object_label(o_tmp, userp))
61945+ return -EFAULT;
61946+
61947+ userp = o_tmp->prev;
61948+
61949+ ret = alloc_and_copy_string(&o_tmp->filename, PATH_MAX);
61950+ if (ret)
61951+ return ret;
61952+
61953+ insert_acl_obj_label(o_tmp, subj);
61954+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
61955+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
61956+ return -ENOMEM;
61957+
61958+ ret = copy_user_glob(o_tmp);
61959+ if (ret)
61960+ return ret;
61961+
61962+ if (o_tmp->nested) {
61963+ int already_copied;
61964+
61965+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role, &already_copied);
61966+ if (IS_ERR(o_tmp->nested))
61967+ return PTR_ERR(o_tmp->nested);
61968+
61969+ /* insert into nested subject list if we haven't copied this one yet
61970+ to prevent duplicate entries */
61971+ if (!already_copied) {
61972+ o_tmp->nested->next = role->hash->first;
61973+ role->hash->first = o_tmp->nested;
61974+ }
61975+ }
61976+ }
61977+
61978+ return 0;
61979+}
61980+
61981+static __u32
61982+count_user_subjs(struct acl_subject_label *userp)
61983+{
61984+ struct acl_subject_label s_tmp;
61985+ __u32 num = 0;
61986+
61987+ while (userp) {
61988+ if (copy_acl_subject_label(&s_tmp, userp))
61989+ break;
61990+
61991+ userp = s_tmp.prev;
+ num++; /* mirrors count_user_objs(); the count sizes the role's subj_hash */
61992+ }
61993+
61994+ return num;
61995+}
61996+
61997+static int
61998+copy_user_allowedips(struct acl_role_label *rolep)
61999+{
62000+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
62001+
62002+ ruserip = rolep->allowed_ips;
62003+
62004+ while (ruserip) {
62005+ rlast = rtmp;
62006+
62007+ if ((rtmp = (struct role_allowed_ip *)
62008+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
62009+ return -ENOMEM;
62010+
62011+ if (copy_role_allowed_ip(rtmp, ruserip))
62012+ return -EFAULT;
62013+
62014+ ruserip = rtmp->prev;
62015+
62016+ if (!rlast) {
62017+ rtmp->prev = NULL;
62018+ rolep->allowed_ips = rtmp;
62019+ } else {
62020+ rlast->next = rtmp;
62021+ rtmp->prev = rlast;
62022+ }
62023+
62024+ if (!ruserip)
62025+ rtmp->next = NULL;
62026+ }
62027+
62028+ return 0;
62029+}
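+
+/*
+ * A subtlety in copy_user_allowedips() above (and in
+ * copy_user_transitions() below, which follows the same pattern): the
+ * userland list is chained through ->prev, so right after the copy
+ * rtmp->prev still holds a userland pointer.  It is consumed as the loop
+ * iterator and then immediately rewritten to stitch together the
+ * kernel-side doubly linked list.
+ */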
62030+
62031+static int
62032+copy_user_transitions(struct acl_role_label *rolep)
62033+{
62034+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
62035+ int error;
62036+
62037+ rusertp = rolep->transitions;
62038+
62039+ while (rusertp) {
62040+ rlast = rtmp;
62041+
62042+ if ((rtmp = (struct role_transition *)
62043+ acl_alloc(sizeof (struct role_transition))) == NULL)
62044+ return -ENOMEM;
62045+
62046+ if (copy_role_transition(rtmp, rusertp))
62047+ return -EFAULT;
62048+
62049+ rusertp = rtmp->prev;
62050+
62051+ error = alloc_and_copy_string(&rtmp->rolename, GR_SPROLE_LEN);
62052+ if (error)
62053+ return error;
62054+
62055+ if (!rlast) {
62056+ rtmp->prev = NULL;
62057+ rolep->transitions = rtmp;
62058+ } else {
62059+ rlast->next = rtmp;
62060+ rtmp->prev = rlast;
62061+ }
62062+
62063+ if (!rusertp)
62064+ rtmp->next = NULL;
62065+ }
62066+
62067+ return 0;
62068+}
62069+
62070+static __u32 count_user_objs(const struct acl_object_label __user *userp)
62071+{
62072+ struct acl_object_label o_tmp;
62073+ __u32 num = 0;
62074+
62075+ while (userp) {
62076+ if (copy_acl_object_label(&o_tmp, userp))
62077+ break;
62078+
62079+ userp = o_tmp.prev;
62080+ num++;
62081+ }
62082+
62083+ return num;
62084+}
62085+
62086+static struct acl_subject_label *
62087+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied)
62088+{
62089+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
62090+ __u32 num_objs;
62091+ struct acl_ip_label **i_tmp, *i_utmp2;
62092+ struct gr_hash_struct ghash;
62093+ struct subject_map *subjmap;
62094+ unsigned int i_num;
62095+ int err;
62096+
62097+ if (already_copied != NULL)
62098+ *already_copied = 0;
62099+
62100+ s_tmp = lookup_subject_map(userp);
62101+
62102+ /* we've already copied this subject into the kernel, just return
62103+ the reference to it, and don't copy it over again
62104+ */
62105+ if (s_tmp) {
62106+ if (already_copied != NULL)
62107+ *already_copied = 1;
62108+ return s_tmp;
62109+ }
62110+
62111+ if ((s_tmp = (struct acl_subject_label *)
62112+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
62113+ return ERR_PTR(-ENOMEM);
62114+
62115+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
62116+ if (subjmap == NULL)
62117+ return ERR_PTR(-ENOMEM);
62118+
62119+ subjmap->user = userp;
62120+ subjmap->kernel = s_tmp;
62121+ insert_subj_map_entry(subjmap);
62122+
62123+ if (copy_acl_subject_label(s_tmp, userp))
62124+ return ERR_PTR(-EFAULT);
62125+
62126+ err = alloc_and_copy_string(&s_tmp->filename, PATH_MAX);
62127+ if (err)
62128+ return ERR_PTR(err);
62129+
62130+ if (!strcmp(s_tmp->filename, "/"))
62131+ role->root_label = s_tmp;
62132+
62133+ if (copy_gr_hash_struct(&ghash, s_tmp->hash))
62134+ return ERR_PTR(-EFAULT);
62135+
62136+ /* copy user and group transition tables */
62137+
62138+ if (s_tmp->user_trans_num) {
62139+ uid_t *uidlist;
62140+
62141+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
62142+ if (uidlist == NULL)
62143+ return ERR_PTR(-ENOMEM);
62144+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
62145+ return ERR_PTR(-EFAULT);
62146+
62147+ s_tmp->user_transitions = uidlist;
62148+ }
62149+
62150+ if (s_tmp->group_trans_num) {
62151+ gid_t *gidlist;
62152+
62153+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
62154+ if (gidlist == NULL)
62155+ return ERR_PTR(-ENOMEM);
62156+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
62157+ return ERR_PTR(-EFAULT);
62158+
62159+ s_tmp->group_transitions = gidlist;
62160+ }
62161+
62162+ /* set up object hash table */
62163+ num_objs = count_user_objs(ghash.first);
62164+
62165+ s_tmp->obj_hash_size = num_objs;
62166+ s_tmp->obj_hash =
62167+ (struct acl_object_label **)
62168+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
62169+
62170+ if (!s_tmp->obj_hash)
62171+ return ERR_PTR(-ENOMEM);
62172+
62173+ memset(s_tmp->obj_hash, 0,
62174+ s_tmp->obj_hash_size *
62175+ sizeof (struct acl_object_label *));
62176+
62177+ /* add in objects */
62178+ err = copy_user_objs(ghash.first, s_tmp, role);
62179+
62180+ if (err)
62181+ return ERR_PTR(err);
62182+
62183+ /* set pointer for parent subject */
62184+ if (s_tmp->parent_subject) {
62185+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role, NULL);
62186+
62187+ if (IS_ERR(s_tmp2))
62188+ return s_tmp2;
62189+
62190+ s_tmp->parent_subject = s_tmp2;
62191+ }
62192+
62193+ /* add in ip acls */
62194+
62195+ if (!s_tmp->ip_num) {
62196+ s_tmp->ips = NULL;
62197+ goto insert;
62198+ }
62199+
62200+ i_tmp =
62201+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
62202+ sizeof (struct acl_ip_label *));
62203+
62204+ if (!i_tmp)
62205+ return ERR_PTR(-ENOMEM);
62206+
62207+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
62208+ *(i_tmp + i_num) =
62209+ (struct acl_ip_label *)
62210+ acl_alloc(sizeof (struct acl_ip_label));
62211+ if (!*(i_tmp + i_num))
62212+ return ERR_PTR(-ENOMEM);
62213+
62214+ if (copy_pointer_from_array(&i_utmp2, i_num, s_tmp->ips))
62215+ return ERR_PTR(-EFAULT);
62216+
62217+ if (copy_acl_ip_label(*(i_tmp + i_num), i_utmp2))
62218+ return ERR_PTR(-EFAULT);
62219+
62220+ if ((*(i_tmp + i_num))->iface == NULL)
62221+ continue;
62222+
62223+ err = alloc_and_copy_string(&(*(i_tmp + i_num))->iface, IFNAMSIZ);
62224+ if (err)
62225+ return ERR_PTR(err);
62226+ }
62227+
62228+ s_tmp->ips = i_tmp;
62229+
62230+insert:
62231+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
62232+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
62233+ return ERR_PTR(-ENOMEM);
62234+
62235+ return s_tmp;
62236+}
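+
+/*
+ * Ordering note for do_copy_user_subj() above: the subject_map entry is
+ * inserted *before* the subject body is copied in, so recursive calls made
+ * while copying (nested objects via copy_user_objs(), and parent_subject)
+ * that reference the same userland subject find it in lookup_subject_map()
+ * and reuse the partially constructed kernel copy instead of recursing
+ * forever or duplicating it.
+ */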
62237+
62238+static int
62239+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
62240+{
62241+ struct acl_subject_label s_pre;
62242+ struct acl_subject_label * ret;
62243+ int err;
62244+
62245+ while (userp) {
62246+ if (copy_acl_subject_label(&s_pre, userp))
62247+ return -EFAULT;
62248+
62249+ ret = do_copy_user_subj(userp, role, NULL);
62250+
62251+ err = PTR_ERR(ret);
62252+ if (IS_ERR(ret))
62253+ return err;
62254+
62255+ insert_acl_subj_label(ret, role);
62256+
62257+ userp = s_pre.prev;
62258+ }
62259+
62260+ return 0;
62261+}
62262+
62263+static int
62264+copy_user_acl(struct gr_arg *arg)
62265+{
62266+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
62267+ struct acl_subject_label *subj_list;
62268+ struct sprole_pw *sptmp;
62269+ struct gr_hash_struct *ghash;
62270+ uid_t *domainlist;
62271+ unsigned int r_num;
62272+ int err = 0;
62273+ __u16 i;
62274+ __u32 num_subjs;
62275+
62276+ /* we need a default and kernel role */
62277+ if (arg->role_db.num_roles < 2)
62278+ return -EINVAL;
62279+
62280+ /* copy special role authentication info from userspace */
62281+
62282+ num_sprole_pws = arg->num_sprole_pws;
62283+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
62284+
62285+ if (!acl_special_roles && num_sprole_pws)
62286+ return -ENOMEM;
62287+
62288+ for (i = 0; i < num_sprole_pws; i++) {
62289+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
62290+ if (!sptmp)
62291+ return -ENOMEM;
62292+ if (copy_sprole_pw(sptmp, i, arg->sprole_pws))
62293+ return -EFAULT;
62294+
62295+ err = alloc_and_copy_string((char **)&sptmp->rolename, GR_SPROLE_LEN);
62296+ if (err)
62297+ return err;
62298+
62299+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
62300+ printk(KERN_ALERT "Copying special role %s\n", sptmp->rolename);
62301+#endif
62302+
62303+ acl_special_roles[i] = sptmp;
62304+ }
62305+
62306+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
62307+
62308+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
62309+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
62310+
62311+ if (!r_tmp)
62312+ return -ENOMEM;
62313+
62314+ if (copy_pointer_from_array(&r_utmp2, r_num, r_utmp))
62315+ return -EFAULT;
62316+
62317+ if (copy_acl_role_label(r_tmp, r_utmp2))
62318+ return -EFAULT;
62319+
62320+ err = alloc_and_copy_string(&r_tmp->rolename, GR_SPROLE_LEN);
62321+ if (err)
62322+ return err;
62323+
62324+ if (!strcmp(r_tmp->rolename, "default")
62325+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
62326+ default_role = r_tmp;
62327+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
62328+ kernel_role = r_tmp;
62329+ }
62330+
62331+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
62332+ return -ENOMEM;
62333+
62334+ if (copy_gr_hash_struct(ghash, r_tmp->hash))
62335+ return -EFAULT;
62336+
62337+ r_tmp->hash = ghash;
62338+
62339+ num_subjs = count_user_subjs(r_tmp->hash->first);
62340+
62341+ r_tmp->subj_hash_size = num_subjs;
62342+ r_tmp->subj_hash =
62343+ (struct acl_subject_label **)
62344+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
62345+
62346+ if (!r_tmp->subj_hash)
62347+ return -ENOMEM;
62348+
62349+ err = copy_user_allowedips(r_tmp);
62350+ if (err)
62351+ return err;
62352+
62353+ /* copy domain info */
62354+ if (r_tmp->domain_children != NULL) {
62355+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
62356+ if (domainlist == NULL)
62357+ return -ENOMEM;
62358+
62359+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
62360+ return -EFAULT;
62361+
62362+ r_tmp->domain_children = domainlist;
62363+ }
62364+
62365+ err = copy_user_transitions(r_tmp);
62366+ if (err)
62367+ return err;
62368+
62369+ memset(r_tmp->subj_hash, 0,
62370+ r_tmp->subj_hash_size *
62371+ sizeof (struct acl_subject_label *));
62372+
62373+ /* acquire the list of subjects, then NULL out
62374+ the list prior to parsing the subjects for this role,
62375+ as during this parsing the list is replaced with a list
62376+ of *nested* subjects for the role
62377+ */
62378+ subj_list = r_tmp->hash->first;
62379+
62380+ /* set nested subject list to null */
62381+ r_tmp->hash->first = NULL;
62382+
62383+ err = copy_user_subjs(subj_list, r_tmp);
62384+
62385+ if (err)
62386+ return err;
62387+
62388+ insert_acl_role_label(r_tmp);
62389+ }
62390+
62391+ if (default_role == NULL || kernel_role == NULL)
62392+ return -EINVAL;
62393+
62394+ return err;
62395+}
62396+
62397+static int
62398+gracl_init(struct gr_arg *args)
62399+{
62400+ int error = 0;
62401+
62402+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
62403+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
62404+
62405+ if (init_variables(args)) {
62406+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
62407+ error = -ENOMEM;
62408+ free_variables();
62409+ goto out;
62410+ }
62411+
62412+ error = copy_user_acl(args);
62413+ free_init_variables();
62414+ if (error) {
62415+ free_variables();
62416+ goto out;
62417+ }
62418+
62419+ if ((error = gr_set_acls(0))) {
62420+ free_variables();
62421+ goto out;
62422+ }
62423+
62424+ pax_open_kernel();
62425+ gr_status |= GR_READY;
62426+ pax_close_kernel();
62427+
62428+ out:
62429+ return error;
62430+}
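+
+/*
+ * Error handling above: every failure path runs free_variables(), which
+ * copes with partially initialized state, while free_init_variables()
+ * runs unconditionally, because the user->kernel subject map it frees is
+ * only needed while copy_user_acl() is resolving userland pointers.
+ */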
62431+
62432+/* derived from glibc fnmatch(); returns 0 on match, 1 on mismatch */
62433+
62434+static int
62435+glob_match(const char *p, const char *n)
62436+{
62437+ char c;
62438+
62439+ while ((c = *p++) != '\0') {
62440+ switch (c) {
62441+ case '?':
62442+ if (*n == '\0')
62443+ return 1;
62444+ else if (*n == '/')
62445+ return 1;
62446+ break;
62447+ case '\\':
62448+ if (*n != c)
62449+ return 1;
62450+ break;
62451+ case '*':
62452+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
62453+ if (*n == '/')
62454+ return 1;
62455+ else if (c == '?') {
62456+ if (*n == '\0')
62457+ return 1;
62458+ else
62459+ ++n;
62460+ }
62461+ }
62462+ if (c == '\0') {
62463+ return 0;
62464+ } else {
62465+ const char *endp;
62466+
62467+ if ((endp = strchr(n, '/')) == NULL)
62468+ endp = n + strlen(n);
62469+
62470+ if (c == '[') {
62471+ for (--p; n < endp; ++n)
62472+ if (!glob_match(p, n))
62473+ return 0;
62474+ } else if (c == '/') {
62475+ while (*n != '\0' && *n != '/')
62476+ ++n;
62477+ if (*n == '/' && !glob_match(p, n + 1))
62478+ return 0;
62479+ } else {
62480+ for (--p; n < endp; ++n)
62481+ if (*n == c && !glob_match(p, n))
62482+ return 0;
62483+ }
62484+
62485+ return 1;
62486+ }
62487+ case '[':
62488+ {
62489+ int not;
62490+ char cold;
62491+
62492+ if (*n == '\0' || *n == '/')
62493+ return 1;
62494+
62495+ not = (*p == '!' || *p == '^');
62496+ if (not)
62497+ ++p;
62498+
62499+ c = *p++;
62500+ for (;;) {
62501+ unsigned char fn = (unsigned char)*n;
62502+
62503+ if (c == '\0')
62504+ return 1;
62505+ else {
62506+ if (c == fn)
62507+ goto matched;
62508+ cold = c;
62509+ c = *p++;
62510+
62511+ if (c == '-' && *p != ']') {
62512+ unsigned char cend = *p++;
62513+
62514+ if (cend == '\0')
62515+ return 1;
62516+
62517+ if (cold <= fn && fn <= cend)
62518+ goto matched;
62519+
62520+ c = *p++;
62521+ }
62522+ }
62523+
62524+ if (c == ']')
62525+ break;
62526+ }
62527+ if (!not)
62528+ return 1;
62529+ break;
62530+ matched:
62531+ while (c != ']') {
62532+ if (c == '\0')
62533+ return 1;
62534+
62535+ c = *p++;
62536+ }
62537+ if (not)
62538+ return 1;
62539+ }
62540+ break;
62541+ default:
62542+ if (c != *n)
62543+ return 1;
62544+ }
62545+
62546+ ++n;
62547+ }
62548+
62549+ if (*n == '\0')
62550+ return 0;
62551+
62552+ if (*n == '/')
62553+ return 0;
62554+
62555+ return 1;
62556+}
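+
+/*
+ * Illustrative behavior of glob_match() above (0 means match), derived
+ * from the code itself rather than from any external spec:
+ *
+ *	glob_match("/home/*", "/home/user")	returns 0
+ *	glob_match("*.txt", "b.txt")		returns 0
+ *	glob_match("*.txt", "dir/a.txt")	returns 1  ('*' stops at '/')
+ *	glob_match("?", "/")			returns 1  ('?' never matches '/')
+ *	glob_match("[ab]c", "bc")		returns 0
+ *
+ * One subtlety: a '*' in the middle of a pattern is confined to a single
+ * path component, but as written a trailing '*' matches the entire
+ * remainder of the name, '/' included.
+ */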
62557+
62558+static struct acl_object_label *
62559+chk_glob_label(struct acl_object_label *globbed,
62560+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
62561+{
62562+ struct acl_object_label *tmp;
62563+
62564+ if (*path == NULL)
62565+ *path = gr_to_filename_nolock(dentry, mnt);
62566+
62567+ tmp = globbed;
62568+
62569+ while (tmp) {
62570+ if (!glob_match(tmp->filename, *path))
62571+ return tmp;
62572+ tmp = tmp->next;
62573+ }
62574+
62575+ return NULL;
62576+}
62577+
62578+static struct acl_object_label *
62579+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
62580+ const ino_t curr_ino, const dev_t curr_dev,
62581+ const struct acl_subject_label *subj, char **path, const int checkglob)
62582+{
62583+ struct acl_subject_label *tmpsubj;
62584+ struct acl_object_label *retval;
62585+ struct acl_object_label *retval2;
62586+
62587+ tmpsubj = (struct acl_subject_label *) subj;
62588+ read_lock(&gr_inode_lock);
62589+ do {
62590+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
62591+ if (retval) {
62592+ if (checkglob && retval->globbed) {
62593+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
62594+ if (retval2)
62595+ retval = retval2;
62596+ }
62597+ break;
62598+ }
62599+ } while ((tmpsubj = tmpsubj->parent_subject));
62600+ read_unlock(&gr_inode_lock);
62601+
62602+ return retval;
62603+}
62604+
62605+static __inline__ struct acl_object_label *
62606+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
62607+ struct dentry *curr_dentry,
62608+ const struct acl_subject_label *subj, char **path, const int checkglob)
62609+{
62610+ int newglob = checkglob;
62611+ ino_t inode;
62612+ dev_t device;
62613+
62614+ /* if we aren't yet checking a subdirectory of the original path, skip glob
62615+ checking, as we don't want a "/ *" rule to match instead of the "/" object itself.
62616+ create lookups that call this function are the exception: they look up
62617+ on the parent and therefore need glob checks on all paths
62618+ */
62619+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
62620+ newglob = GR_NO_GLOB;
62621+
62622+ spin_lock(&curr_dentry->d_lock);
62623+ inode = curr_dentry->d_inode->i_ino;
62624+ device = __get_dev(curr_dentry);
62625+ spin_unlock(&curr_dentry->d_lock);
62626+
62627+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
62628+}
62629+
62630+#ifdef CONFIG_HUGETLBFS
62631+static inline bool
62632+is_hugetlbfs_mnt(const struct vfsmount *mnt)
62633+{
62634+ int i;
62635+ for (i = 0; i < HUGE_MAX_HSTATE; i++) {
62636+ if (unlikely(hugetlbfs_vfsmount[i] == mnt))
62637+ return true;
62638+ }
62639+
62640+ return false;
62641+}
62642+#endif
62643+
62644+static struct acl_object_label *
62645+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
62646+ const struct acl_subject_label *subj, char *path, const int checkglob)
62647+{
62648+ struct dentry *dentry = (struct dentry *) l_dentry;
62649+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
62650+ struct mount *real_mnt = real_mount(mnt);
62651+ struct acl_object_label *retval;
62652+ struct dentry *parent;
62653+
62654+ br_read_lock(&vfsmount_lock);
62655+ write_seqlock(&rename_lock);
62656+
62657+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
62658+#ifdef CONFIG_NET
62659+ mnt == sock_mnt ||
62660+#endif
62661+#ifdef CONFIG_HUGETLBFS
62662+ (is_hugetlbfs_mnt(mnt) && dentry->d_inode->i_nlink == 0) ||
62663+#endif
62664+ /* ignore Eric Biederman */
62665+ IS_PRIVATE(l_dentry->d_inode))) {
62666+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
62667+ goto out;
62668+ }
62669+
62670+ for (;;) {
62671+ if (dentry == real_root.dentry && mnt == real_root.mnt)
62672+ break;
62673+
62674+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
62675+ if (!mnt_has_parent(real_mnt))
62676+ break;
62677+
62678+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
62679+ if (retval != NULL)
62680+ goto out;
62681+
62682+ dentry = real_mnt->mnt_mountpoint;
62683+ real_mnt = real_mnt->mnt_parent;
62684+ mnt = &real_mnt->mnt;
62685+ continue;
62686+ }
62687+
62688+ parent = dentry->d_parent;
62689+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
62690+ if (retval != NULL)
62691+ goto out;
62692+
62693+ dentry = parent;
62694+ }
62695+
62696+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
62697+
62698+ /* real_root is pinned so we don't have to hold a reference */
62699+ if (retval == NULL)
62700+ retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
62701+out:
62702+ write_sequnlock(&rename_lock);
62703+ br_read_unlock(&vfsmount_lock);
62704+
62705+ BUG_ON(retval == NULL);
62706+
62707+ return retval;
62708+}
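+
+/*
+ * The walk in __chk_obj_label() above looks for the most specific label:
+ * it tries the target dentry itself, then each parent in turn, crossing
+ * mountpoints through real_mnt->mnt_mountpoint, and finally falls back to
+ * the label of the pinned real root.  A loaded policy is expected to
+ * label "/", so a NULL result indicates corruption, hence the BUG_ON().
+ */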
62709+
62710+static __inline__ struct acl_object_label *
62711+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
62712+ const struct acl_subject_label *subj)
62713+{
62714+ char *path = NULL;
62715+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
62716+}
62717+
62718+static __inline__ struct acl_object_label *
62719+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
62720+ const struct acl_subject_label *subj)
62721+{
62722+ char *path = NULL;
62723+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
62724+}
62725+
62726+static __inline__ struct acl_object_label *
62727+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
62728+ const struct acl_subject_label *subj, char *path)
62729+{
62730+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
62731+}
62732+
62733+static struct acl_subject_label *
62734+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
62735+ const struct acl_role_label *role)
62736+{
62737+ struct dentry *dentry = (struct dentry *) l_dentry;
62738+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
62739+ struct mount *real_mnt = real_mount(mnt);
62740+ struct acl_subject_label *retval;
62741+ struct dentry *parent;
62742+
62743+ br_read_lock(&vfsmount_lock);
62744+ write_seqlock(&rename_lock);
62745+
62746+ for (;;) {
62747+ if (dentry == real_root.dentry && mnt == real_root.mnt)
62748+ break;
62749+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
62750+ if (!mnt_has_parent(real_mnt))
62751+ break;
62752+
62753+ spin_lock(&dentry->d_lock);
62754+ read_lock(&gr_inode_lock);
62755+ retval =
62756+ lookup_acl_subj_label(dentry->d_inode->i_ino,
62757+ __get_dev(dentry), role);
62758+ read_unlock(&gr_inode_lock);
62759+ spin_unlock(&dentry->d_lock);
62760+ if (retval != NULL)
62761+ goto out;
62762+
62763+ dentry = real_mnt->mnt_mountpoint;
62764+ real_mnt = real_mnt->mnt_parent;
62765+ mnt = &real_mnt->mnt;
62766+ continue;
62767+ }
62768+
62769+ spin_lock(&dentry->d_lock);
62770+ read_lock(&gr_inode_lock);
62771+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
62772+ __get_dev(dentry), role);
62773+ read_unlock(&gr_inode_lock);
62774+ parent = dentry->d_parent;
62775+ spin_unlock(&dentry->d_lock);
62776+
62777+ if (retval != NULL)
62778+ goto out;
62779+
62780+ dentry = parent;
62781+ }
62782+
62783+ spin_lock(&dentry->d_lock);
62784+ read_lock(&gr_inode_lock);
62785+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
62786+ __get_dev(dentry), role);
62787+ read_unlock(&gr_inode_lock);
62788+ spin_unlock(&dentry->d_lock);
62789+
62790+ if (unlikely(retval == NULL)) {
62791+ /* real_root is pinned, we don't need to hold a reference */
62792+ read_lock(&gr_inode_lock);
62793+ retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
62794+ __get_dev(real_root.dentry), role);
62795+ read_unlock(&gr_inode_lock);
62796+ }
62797+out:
62798+ write_sequnlock(&rename_lock);
62799+ br_read_unlock(&vfsmount_lock);
62800+
62801+ BUG_ON(retval == NULL);
62802+
62803+ return retval;
62804+}
62805+
62806+static void
62807+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
62808+{
62809+ struct task_struct *task = current;
62810+ const struct cred *cred = current_cred();
62811+
62812+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
62813+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
62814+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
62815+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
62816+
62817+ return;
62818+}
62819+
62820+static void
62821+gr_log_learn_uid_change(const kuid_t real, const kuid_t effective, const kuid_t fs)
62822+{
62823+ struct task_struct *task = current;
62824+ const struct cred *cred = current_cred();
62825+
62826+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
62827+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
62828+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
62829+ 'u', GR_GLOBAL_UID(real), GR_GLOBAL_UID(effective), GR_GLOBAL_UID(fs), &task->signal->saved_ip);
62830+
62831+ return;
62832+}
62833+
62834+static void
62835+gr_log_learn_gid_change(const kgid_t real, const kgid_t effective, const kgid_t fs)
62836+{
62837+ struct task_struct *task = current;
62838+ const struct cred *cred = current_cred();
62839+
62840+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
62841+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
62842+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
62843+ 'g', GR_GLOBAL_GID(real), GR_GLOBAL_GID(effective), GR_GLOBAL_GID(fs), &task->signal->saved_ip);
62844+
62845+ return;
62846+}
62847+
62848+__u32
62849+gr_search_file(const struct dentry * dentry, const __u32 mode,
62850+ const struct vfsmount * mnt)
62851+{
62852+ __u32 retval = mode;
62853+ struct acl_subject_label *curracl;
62854+ struct acl_object_label *currobj;
62855+
62856+ if (unlikely(!(gr_status & GR_READY)))
62857+ return (mode & ~GR_AUDITS);
62858+
62859+ curracl = current->acl;
62860+
62861+ currobj = chk_obj_label(dentry, mnt, curracl);
62862+ retval = currobj->mode & mode;
62863+
62864+ /* if we're opening a specified transfer file for writing
62865+ (e.g. /dev/initctl), then transfer our role to init
62866+ */
62867+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
62868+ current->role->roletype & GR_ROLE_PERSIST)) {
62869+ struct task_struct *task = init_pid_ns.child_reaper;
62870+
62871+ if (task->role != current->role) {
62872+ task->acl_sp_role = 0;
62873+ task->acl_role_id = current->acl_role_id;
62874+ task->role = current->role;
62875+ rcu_read_lock();
62876+ read_lock(&grsec_exec_file_lock);
62877+ gr_apply_subject_to_task(task);
62878+ read_unlock(&grsec_exec_file_lock);
62879+ rcu_read_unlock();
62880+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
62881+ }
62882+ }
62883+
62884+ if (unlikely
62885+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
62886+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
62887+ __u32 new_mode = mode;
62888+
62889+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
62890+
62891+ retval = new_mode;
62892+
62893+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
62894+ new_mode |= GR_INHERIT;
62895+
62896+ if (!(mode & GR_NOLEARN))
62897+ gr_log_learn(dentry, mnt, new_mode);
62898+ }
62899+
62900+ return retval;
62901+}
62902+
62903+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
62904+ const struct dentry *parent,
62905+ const struct vfsmount *mnt)
62906+{
62907+ struct name_entry *match;
62908+ struct acl_object_label *matchpo;
62909+ struct acl_subject_label *curracl;
62910+ char *path;
62911+
62912+ if (unlikely(!(gr_status & GR_READY)))
62913+ return NULL;
62914+
62915+ preempt_disable();
62916+ path = gr_to_filename_rbac(new_dentry, mnt);
62917+ match = lookup_name_entry_create(path);
62918+
62919+ curracl = current->acl;
62920+
62921+ if (match) {
62922+ read_lock(&gr_inode_lock);
62923+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
62924+ read_unlock(&gr_inode_lock);
62925+
62926+ if (matchpo) {
62927+ preempt_enable();
62928+ return matchpo;
62929+ }
62930+ }
62931+
62932+ /* no match on the new path itself, so look up the parent's create label */
62933+
62934+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
62935+
62936+ preempt_enable();
62937+ return matchpo;
62938+}
62939+
62940+__u32
62941+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
62942+ const struct vfsmount * mnt, const __u32 mode)
62943+{
62944+ struct acl_object_label *matchpo;
62945+ __u32 retval;
62946+
62947+ if (unlikely(!(gr_status & GR_READY)))
62948+ return (mode & ~GR_AUDITS);
62949+
62950+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
62951+
62952+ retval = matchpo->mode & mode;
62953+
62954+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
62955+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
62956+ __u32 new_mode = mode;
62957+
62958+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
62959+
62960+ gr_log_learn(new_dentry, mnt, new_mode);
62961+ return new_mode;
62962+ }
62963+
62964+ return retval;
62965+}
62966+
62967+__u32
62968+gr_check_link(const struct dentry * new_dentry,
62969+ const struct dentry * parent_dentry,
62970+ const struct vfsmount * parent_mnt,
62971+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
62972+{
62973+ struct acl_object_label *obj;
62974+ __u32 oldmode, newmode;
62975+ __u32 needmode;
62976+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
62977+ GR_DELETE | GR_INHERIT;
62978+
62979+ if (unlikely(!(gr_status & GR_READY)))
62980+ return (GR_CREATE | GR_LINK);
62981+
62982+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
62983+ oldmode = obj->mode;
62984+
62985+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
62986+ newmode = obj->mode;
62987+
62988+ needmode = newmode & checkmodes;
62989+
62990+ /* the old name of a hardlink must have at least the permissions of the new name */
62991+ if ((oldmode & needmode) != needmode)
62992+ goto bad;
62993+
62994+ /* if the old name had restrictions/auditing, make sure the new name does as well */
62995+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
62996+
62997+ /* don't allow hardlinking suid/sgid/fcapped files without permission */
62998+ if (is_privileged_binary(old_dentry))
62999+ needmode |= GR_SETID;
63000+
63001+ if ((newmode & needmode) != needmode)
63002+ goto bad;
63003+
63004+ /* require both GR_CREATE and GR_LINK on the new name */
63005+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
63006+ return newmode;
63007+bad:
63008+ needmode = oldmode;
63009+ if (is_privileged_binary(old_dentry))
63010+ needmode |= GR_SETID;
63011+
63012+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
63013+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
63014+ return (GR_CREATE | GR_LINK);
63015+ } else if (newmode & GR_SUPPRESS)
63016+ return GR_SUPPRESS;
63017+ else
63018+ return 0;
63019+}
63020+
63021+int
63022+gr_check_hidden_task(const struct task_struct *task)
63023+{
63024+ if (unlikely(!(gr_status & GR_READY)))
63025+ return 0;
63026+
63027+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
63028+ return 1;
63029+
63030+ return 0;
63031+}
63032+
63033+int
63034+gr_check_protected_task(const struct task_struct *task)
63035+{
63036+ if (unlikely(!(gr_status & GR_READY) || !task))
63037+ return 0;
63038+
63039+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
63040+ task->acl != current->acl)
63041+ return 1;
63042+
63043+ return 0;
63044+}
63045+
63046+int
63047+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
63048+{
63049+ struct task_struct *p;
63050+ int ret = 0;
63051+
63052+ if (unlikely(!(gr_status & GR_READY) || !pid))
63053+ return ret;
63054+
63055+ read_lock(&tasklist_lock);
63056+ do_each_pid_task(pid, type, p) {
63057+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
63058+ p->acl != current->acl) {
63059+ ret = 1;
63060+ goto out;
63061+ }
63062+ } while_each_pid_task(pid, type, p);
63063+out:
63064+ read_unlock(&tasklist_lock);
63065+
63066+ return ret;
63067+}
63068+
63069+void
63070+gr_copy_label(struct task_struct *tsk)
63071+{
63072+ tsk->signal->used_accept = 0;
63073+ tsk->acl_sp_role = 0;
63074+ tsk->acl_role_id = current->acl_role_id;
63075+ tsk->acl = current->acl;
63076+ tsk->role = current->role;
63077+ tsk->signal->curr_ip = current->signal->curr_ip;
63078+ tsk->signal->saved_ip = current->signal->saved_ip;
63079+ if (current->exec_file)
63080+ get_file(current->exec_file);
63081+ tsk->exec_file = current->exec_file;
63082+ tsk->is_writable = current->is_writable;
63083+ if (unlikely(current->signal->used_accept)) {
63084+ current->signal->curr_ip = 0;
63085+ current->signal->saved_ip = 0;
63086+ }
63087+
63088+ return;
63089+}
63090+
63091+static void
63092+gr_set_proc_res(struct task_struct *task)
63093+{
63094+ struct acl_subject_label *proc;
63095+ unsigned short i;
63096+
63097+ proc = task->acl;
63098+
63099+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
63100+ return;
63101+
63102+ for (i = 0; i < RLIM_NLIMITS; i++) {
63103+ if (!(proc->resmask & (1U << i)))
63104+ continue;
63105+
63106+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
63107+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
63108+
63109+ if (i == RLIMIT_CPU)
63110+ update_rlimit_cpu(task, proc->res[i].rlim_cur);
63111+ }
63112+
63113+ return;
63114+}
63115+
63116+extern int gr_process_kernel_setuid_ban(struct user_struct *user);
63117+
63118+int
63119+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
63120+{
63121+ unsigned int i;
63122+ __u16 num;
63123+ uid_t *uidlist;
63124+ uid_t curuid;
63125+ int realok = 0;
63126+ int effectiveok = 0;
63127+ int fsok = 0;
63128+ uid_t globalreal, globaleffective, globalfs;
63129+
63130+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT)
63131+ struct user_struct *user;
63132+
63133+ if (!uid_valid(real))
63134+ goto skipit;
63135+
63136+ /* find user based on global namespace */
63137+
63138+ globalreal = GR_GLOBAL_UID(real);
63139+
63140+ user = find_user(make_kuid(&init_user_ns, globalreal));
63141+ if (user == NULL)
63142+ goto skipit;
63143+
63144+ if (gr_process_kernel_setuid_ban(user)) {
63145+ /* for find_user */
63146+ free_uid(user);
63147+ return 1;
63148+ }
63149+
63150+ /* for find_user */
63151+ free_uid(user);
63152+
63153+skipit:
63154+#endif
63155+
63156+ if (unlikely(!(gr_status & GR_READY)))
63157+ return 0;
63158+
63159+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
63160+ gr_log_learn_uid_change(real, effective, fs);
63161+
63162+ num = current->acl->user_trans_num;
63163+ uidlist = current->acl->user_transitions;
63164+
63165+ if (uidlist == NULL)
63166+ return 0;
63167+
63168+ if (!uid_valid(real)) {
63169+ realok = 1;
63170+ globalreal = (uid_t)-1;
63171+ } else {
63172+ globalreal = GR_GLOBAL_UID(real);
63173+ }
63174+ if (!uid_valid(effective)) {
63175+ effectiveok = 1;
63176+ globaleffective = (uid_t)-1;
63177+ } else {
63178+ globaleffective = GR_GLOBAL_UID(effective);
63179+ }
63180+ if (!uid_valid(fs)) {
63181+ fsok = 1;
63182+ globalfs = (uid_t)-1;
63183+ } else {
63184+ globalfs = GR_GLOBAL_UID(fs);
63185+ }
63186+
63187+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
63188+ for (i = 0; i < num; i++) {
63189+ curuid = uidlist[i];
63190+ if (globalreal == curuid)
63191+ realok = 1;
63192+ if (globaleffective == curuid)
63193+ effectiveok = 1;
63194+ if (globalfs == curuid)
63195+ fsok = 1;
63196+ }
63197+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
63198+ for (i = 0; i < num; i++) {
63199+ curuid = uidlist[i];
63200+ if (globalreal == curuid)
63201+ break;
63202+ if (globaleffective == curuid)
63203+ break;
63204+ if (globalfs == curuid)
63205+ break;
63206+ }
63207+ /* not in deny list */
63208+ if (i == num) {
63209+ realok = 1;
63210+ effectiveok = 1;
63211+ fsok = 1;
63212+ }
63213+ }
63214+
63215+ if (realok && effectiveok && fsok)
63216+ return 0;
63217+ else {
63218+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
63219+ return 1;
63220+ }
63221+}
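+
+/*
+ * Semantics of the transition check above: with GR_ID_ALLOW, each of the
+ * real, effective, and fs uids being changed must appear in the subject's
+ * user_transitions list; with GR_ID_DENY, any single hit in the list
+ * rejects the whole change.  Invalid (unset) ids pass automatically.
+ * Roughly, a policy line such as
+ *
+ *	user_transition_allow root www-data
+ *
+ * (assuming the documented RBAC policy syntax) limits setuid() for that
+ * subject to those two accounts; gr_check_group_change() below applies
+ * the same logic to gids.
+ */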
63222+
63223+int
63224+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
63225+{
63226+ unsigned int i;
63227+ __u16 num;
63228+ gid_t *gidlist;
63229+ gid_t curgid;
63230+ int realok = 0;
63231+ int effectiveok = 0;
63232+ int fsok = 0;
63233+ gid_t globalreal, globaleffective, globalfs;
63234+
63235+ if (unlikely(!(gr_status & GR_READY)))
63236+ return 0;
63237+
63238+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
63239+ gr_log_learn_gid_change(real, effective, fs);
63240+
63241+ num = current->acl->group_trans_num;
63242+ gidlist = current->acl->group_transitions;
63243+
63244+ if (gidlist == NULL)
63245+ return 0;
63246+
63247+ if (!gid_valid(real)) {
63248+ realok = 1;
63249+ globalreal = (gid_t)-1;
63250+ } else {
63251+ globalreal = GR_GLOBAL_GID(real);
63252+ }
63253+ if (!gid_valid(effective)) {
63254+ effectiveok = 1;
63255+ globaleffective = (gid_t)-1;
63256+ } else {
63257+ globaleffective = GR_GLOBAL_GID(effective);
63258+ }
63259+ if (!gid_valid(fs)) {
63260+ fsok = 1;
63261+ globalfs = (gid_t)-1;
63262+ } else {
63263+ globalfs = GR_GLOBAL_GID(fs);
63264+ }
63265+
63266+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
63267+ for (i = 0; i < num; i++) {
63268+ curgid = gidlist[i];
63269+ if (globalreal == curgid)
63270+ realok = 1;
63271+ if (globaleffective == curgid)
63272+ effectiveok = 1;
63273+ if (globalfs == curgid)
63274+ fsok = 1;
63275+ }
63276+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
63277+ for (i = 0; i < num; i++) {
63278+ curgid = gidlist[i];
63279+ if (globalreal == curgid)
63280+ break;
63281+ if (globaleffective == curgid)
63282+ break;
63283+ if (globalfs == curgid)
63284+ break;
63285+ }
63286+ /* not in deny list */
63287+ if (i == num) {
63288+ realok = 1;
63289+ effectiveok = 1;
63290+ fsok = 1;
63291+ }
63292+ }
63293+
63294+ if (realok && effectiveok && fsok)
63295+ return 0;
63296+ else {
63297+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
63298+ return 1;
63299+ }
63300+}
63301+
63302+extern int gr_acl_is_capable(const int cap);
63303+
63304+void
63305+gr_set_role_label(struct task_struct *task, const kuid_t kuid, const kgid_t kgid)
63306+{
63307+ struct acl_role_label *role = task->role;
63308+ struct acl_subject_label *subj = NULL;
63309+ struct acl_object_label *obj;
63310+ struct file *filp;
63311+ uid_t uid;
63312+ gid_t gid;
63313+
63314+ if (unlikely(!(gr_status & GR_READY)))
63315+ return;
63316+
63317+ uid = GR_GLOBAL_UID(kuid);
63318+ gid = GR_GLOBAL_GID(kgid);
63319+
63320+ filp = task->exec_file;
63321+
63322+ /* kernel process, we'll give them the kernel role */
63323+ if (unlikely(!filp)) {
63324+ task->role = kernel_role;
63325+ task->acl = kernel_role->root_label;
63326+ return;
63327+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
63328+ role = lookup_acl_role_label(task, uid, gid);
63329+
63330+ /* don't change the role if we're not a privileged process */
63331+ if (role && task->role != role &&
63332+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
63333+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
63334+ return;
63335+
63336+ /* perform subject lookup in possibly new role
63337+ we can use this result below in the case where role == task->role
63338+ */
63339+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
63340+
63341+ /* if the uid/gid change resolves to the same role and the
63342+ current subject differs from what a normal lookup would
63343+ return, then we arrived at the current subject via
63344+ inheritance; keep the inherited subject rather than
63345+ losing it
63346+ */
63347+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
63348+ (subj == task->acl)))
63349+ task->acl = subj;
63350+
63351+ task->role = role;
63352+
63353+ task->is_writable = 0;
63354+
63355+ /* ignore additional mmap checks for processes that are writable
63356+ by the default ACL */
63357+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
63358+ if (unlikely(obj->mode & GR_WRITE))
63359+ task->is_writable = 1;
63360+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
63361+ if (unlikely(obj->mode & GR_WRITE))
63362+ task->is_writable = 1;
63363+
63364+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
63365+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
63366+#endif
63367+
63368+ gr_set_proc_res(task);
63369+
63370+ return;
63371+}
63372+
63373+int
63374+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
63375+ const int unsafe_flags)
63376+{
63377+ struct task_struct *task = current;
63378+ struct acl_subject_label *newacl;
63379+ struct acl_object_label *obj;
63380+ __u32 retmode;
63381+
63382+ if (unlikely(!(gr_status & GR_READY)))
63383+ return 0;
63384+
63385+ newacl = chk_subj_label(dentry, mnt, task->role);
63386+
63387+ /* special handling for the case where an admin role ran strace -f -p <pid>,
63388+ and the traced pid then did an exec
63389+ */
63390+ rcu_read_lock();
63391+ read_lock(&tasklist_lock);
63392+ if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
63393+ (task->parent->acl->mode & GR_POVERRIDE))) {
63394+ read_unlock(&tasklist_lock);
63395+ rcu_read_unlock();
63396+ goto skip_check;
63397+ }
63398+ read_unlock(&tasklist_lock);
63399+ rcu_read_unlock();
63400+
63401+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
63402+ !(task->role->roletype & GR_ROLE_GOD) &&
63403+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
63404+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
63405+ if (unsafe_flags & LSM_UNSAFE_SHARE)
63406+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
63407+ else
63408+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
63409+ return -EACCES;
63410+ }
63411+
63412+skip_check:
63413+
63414+ obj = chk_obj_label(dentry, mnt, task->acl);
63415+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
63416+
63417+ if (!(task->acl->mode & GR_INHERITLEARN) &&
63418+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
63419+ if (obj->nested)
63420+ task->acl = obj->nested;
63421+ else
63422+ task->acl = newacl;
63423+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
63424+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
63425+
63426+ task->is_writable = 0;
63427+
63428+ /* ignore additional mmap checks for processes that are writable
63429+ by the default ACL */
63430+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
63431+ if (unlikely(obj->mode & GR_WRITE))
63432+ task->is_writable = 1;
63433+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
63434+ if (unlikely(obj->mode & GR_WRITE))
63435+ task->is_writable = 1;
63436+
63437+ gr_set_proc_res(task);
63438+
63439+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
63440+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
63441+#endif
63442+ return 0;
63443+}
63444+
63445+/* always called with valid inodev ptr */
63446+static void
63447+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
63448+{
63449+ struct acl_object_label *matchpo;
63450+ struct acl_subject_label *matchps;
63451+ struct acl_subject_label *subj;
63452+ struct acl_role_label *role;
63453+ unsigned int x;
63454+
63455+ FOR_EACH_ROLE_START(role)
63456+ FOR_EACH_SUBJECT_START(role, subj, x)
63457+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
63458+ matchpo->mode |= GR_DELETED;
63459+ FOR_EACH_SUBJECT_END(subj,x)
63460+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
63461+ /* nested subjects aren't in the role's subj_hash table */
63462+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
63463+ matchpo->mode |= GR_DELETED;
63464+ FOR_EACH_NESTED_SUBJECT_END(subj)
63465+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
63466+ matchps->mode |= GR_DELETED;
63467+ FOR_EACH_ROLE_END(role)
63468+
63469+ inodev->nentry->deleted = 1;
63470+
63471+ return;
63472+}
63473+
63474+void
63475+gr_handle_delete(const ino_t ino, const dev_t dev)
63476+{
63477+ struct inodev_entry *inodev;
63478+
63479+ if (unlikely(!(gr_status & GR_READY)))
63480+ return;
63481+
63482+ write_lock(&gr_inode_lock);
63483+ inodev = lookup_inodev_entry(ino, dev);
63484+ if (inodev != NULL)
63485+ do_handle_delete(inodev, ino, dev);
63486+ write_unlock(&gr_inode_lock);
63487+
63488+ return;
63489+}
63490+
63491+static void
63492+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
63493+ const ino_t newinode, const dev_t newdevice,
63494+ struct acl_subject_label *subj)
63495+{
63496+ unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
63497+ struct acl_object_label *match;
63498+
63499+ match = subj->obj_hash[index];
63500+
63501+ while (match && (match->inode != oldinode ||
63502+ match->device != olddevice ||
63503+ !(match->mode & GR_DELETED)))
63504+ match = match->next;
63505+
63506+ if (match && (match->inode == oldinode)
63507+ && (match->device == olddevice)
63508+ && (match->mode & GR_DELETED)) {
63509+ if (match->prev == NULL) {
63510+ subj->obj_hash[index] = match->next;
63511+ if (match->next != NULL)
63512+ match->next->prev = NULL;
63513+ } else {
63514+ match->prev->next = match->next;
63515+ if (match->next != NULL)
63516+ match->next->prev = match->prev;
63517+ }
63518+ match->prev = NULL;
63519+ match->next = NULL;
63520+ match->inode = newinode;
63521+ match->device = newdevice;
63522+ match->mode &= ~GR_DELETED;
63523+
63524+ insert_acl_obj_label(match, subj);
63525+ }
63526+
63527+ return;
63528+}
63529+
63530+static void
63531+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
63532+ const ino_t newinode, const dev_t newdevice,
63533+ struct acl_role_label *role)
63534+{
63535+ unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
63536+ struct acl_subject_label *match;
63537+
63538+ match = role->subj_hash[index];
63539+
63540+ while (match && (match->inode != oldinode ||
63541+ match->device != olddevice ||
63542+ !(match->mode & GR_DELETED)))
63543+ match = match->next;
63544+
63545+ if (match && (match->inode == oldinode)
63546+ && (match->device == olddevice)
63547+ && (match->mode & GR_DELETED)) {
63548+ if (match->prev == NULL) {
63549+ role->subj_hash[index] = match->next;
63550+ if (match->next != NULL)
63551+ match->next->prev = NULL;
63552+ } else {
63553+ match->prev->next = match->next;
63554+ if (match->next != NULL)
63555+ match->next->prev = match->prev;
63556+ }
63557+ match->prev = NULL;
63558+ match->next = NULL;
63559+ match->inode = newinode;
63560+ match->device = newdevice;
63561+ match->mode &= ~GR_DELETED;
63562+
63563+ insert_acl_subj_label(match, role);
63564+ }
63565+
63566+ return;
63567+}
63568+
63569+static void
63570+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
63571+ const ino_t newinode, const dev_t newdevice)
63572+{
63573+ unsigned int index = gr_fhash(oldinode, olddevice, inodev_set.i_size);
63574+ struct inodev_entry *match;
63575+
63576+ match = inodev_set.i_hash[index];
63577+
63578+ while (match && (match->nentry->inode != oldinode ||
63579+ match->nentry->device != olddevice || !match->nentry->deleted))
63580+ match = match->next;
63581+
63582+ if (match && (match->nentry->inode == oldinode)
63583+ && (match->nentry->device == olddevice) &&
63584+ match->nentry->deleted) {
63585+ if (match->prev == NULL) {
63586+ inodev_set.i_hash[index] = match->next;
63587+ if (match->next != NULL)
63588+ match->next->prev = NULL;
63589+ } else {
63590+ match->prev->next = match->next;
63591+ if (match->next != NULL)
63592+ match->next->prev = match->prev;
63593+ }
63594+ match->prev = NULL;
63595+ match->next = NULL;
63596+ match->nentry->inode = newinode;
63597+ match->nentry->device = newdevice;
63598+ match->nentry->deleted = 0;
63599+
63600+ insert_inodev_entry(match);
63601+ }
63602+
63603+ return;
63604+}
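
The three update_* helpers above repeat one pattern: find the GR_DELETED
entry in its hash bucket, unlink it from the doubly-linked chain, relabel
it with the new inode/device pair, and reinsert it. A self-contained
sketch of the unlink step with generic types (names are hypothetical):

#include <stddef.h>
#include <stdio.h>

struct node {
	unsigned long ino, dev;
	int deleted;
	struct node *prev, *next;
};

/* unlink n from the bucket chain headed at *head, exactly the pointer
   surgery performed by the update_* helpers before relabeling */
static void chain_unlink(struct node **head, struct node *n)
{
	if (n->prev == NULL) {
		*head = n->next;
		if (n->next)
			n->next->prev = NULL;
	} else {
		n->prev->next = n->next;
		if (n->next)
			n->next->prev = n->prev;
	}
	n->prev = n->next = NULL;
}

int main(void)
{
	struct node a = { 1, 8, 0, NULL, NULL };
	struct node b = { 2, 8, 1, NULL, NULL };
	struct node *head = &a;

	a.next = &b; b.prev = &a;

	chain_unlink(&head, &b);  /* remove the deleted entry */
	b.ino = 42;               /* relabel with the new identity... */
	b.deleted = 0;            /* ...then reinsert into the new bucket */

	printf("head=%lu relabeled=%lu\n", head->ino, b.ino);
	return 0;
}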
63605+
63606+static void
63607+__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
63608+{
63609+ struct acl_subject_label *subj;
63610+ struct acl_role_label *role;
63611+ unsigned int x;
63612+
63613+ FOR_EACH_ROLE_START(role)
63614+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
63615+
63616+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
63617+			if ((subj->inode == matchn->inode) && (subj->device == matchn->device)) {
63618+ subj->inode = ino;
63619+ subj->device = dev;
63620+ }
63621+ /* nested subjects aren't in the role's subj_hash table */
63622+ update_acl_obj_label(matchn->inode, matchn->device,
63623+ ino, dev, subj);
63624+ FOR_EACH_NESTED_SUBJECT_END(subj)
63625+ FOR_EACH_SUBJECT_START(role, subj, x)
63626+ update_acl_obj_label(matchn->inode, matchn->device,
63627+ ino, dev, subj);
63628+ FOR_EACH_SUBJECT_END(subj,x)
63629+ FOR_EACH_ROLE_END(role)
63630+
63631+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
63632+
63633+ return;
63634+}
63635+
63636+static void
63637+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
63638+ const struct vfsmount *mnt)
63639+{
63640+ ino_t ino = dentry->d_inode->i_ino;
63641+ dev_t dev = __get_dev(dentry);
63642+
63643+ __do_handle_create(matchn, ino, dev);
63644+
63645+ return;
63646+}
63647+
63648+void
63649+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
63650+{
63651+ struct name_entry *matchn;
63652+
63653+ if (unlikely(!(gr_status & GR_READY)))
63654+ return;
63655+
63656+ preempt_disable();
63657+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
63658+
63659+ if (unlikely((unsigned long)matchn)) {
63660+ write_lock(&gr_inode_lock);
63661+ do_handle_create(matchn, dentry, mnt);
63662+ write_unlock(&gr_inode_lock);
63663+ }
63664+ preempt_enable();
63665+
63666+ return;
63667+}
63668+
63669+void
63670+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
63671+{
63672+ struct name_entry *matchn;
63673+
63674+ if (unlikely(!(gr_status & GR_READY)))
63675+ return;
63676+
63677+ preempt_disable();
63678+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
63679+
63680+ if (unlikely((unsigned long)matchn)) {
63681+ write_lock(&gr_inode_lock);
63682+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
63683+ write_unlock(&gr_inode_lock);
63684+ }
63685+ preempt_enable();
63686+
63687+ return;
63688+}
63689+
63690+void
63691+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
63692+ struct dentry *old_dentry,
63693+ struct dentry *new_dentry,
63694+ struct vfsmount *mnt, const __u8 replace)
63695+{
63696+ struct name_entry *matchn;
63697+ struct inodev_entry *inodev;
63698+ struct inode *inode = new_dentry->d_inode;
63699+ ino_t old_ino = old_dentry->d_inode->i_ino;
63700+ dev_t old_dev = __get_dev(old_dentry);
63701+
63702+	/* vfs_rename swaps the name and parent link for old_dentry and
63703+	   new_dentry.
63704+	   At this point, old_dentry has the new name, parent link, and
63705+	   inode for the renamed file.
63706+	   If a file is being replaced by the rename, new_dentry has the
63707+	   inode and name of the replaced file.
63708+	*/
63709+
63710+ if (unlikely(!(gr_status & GR_READY)))
63711+ return;
63712+
63713+ preempt_disable();
63714+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
63715+
63716+ /* we wouldn't have to check d_inode if it weren't for
63717+ NFS silly-renaming
63718+ */
63719+
63720+ write_lock(&gr_inode_lock);
63721+ if (unlikely(replace && inode)) {
63722+ ino_t new_ino = inode->i_ino;
63723+ dev_t new_dev = __get_dev(new_dentry);
63724+
63725+ inodev = lookup_inodev_entry(new_ino, new_dev);
63726+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
63727+ do_handle_delete(inodev, new_ino, new_dev);
63728+ }
63729+
63730+ inodev = lookup_inodev_entry(old_ino, old_dev);
63731+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
63732+ do_handle_delete(inodev, old_ino, old_dev);
63733+
63734+ if (unlikely((unsigned long)matchn))
63735+ do_handle_create(matchn, old_dentry, mnt);
63736+
63737+ write_unlock(&gr_inode_lock);
63738+ preempt_enable();
63739+
63740+ return;
63741+}
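
Concretely, for a replacing rename such as "mv /tmp/a /tmp/b", the function
above deletes the tracking for the replaced target, then re-points the
policy's name entry at the inode that now carries the name. A minimal
model with assumed inode numbers (the struct and helpers are illustrative,
not kernel types):

#include <stdio.h>

struct tracked { unsigned long ino, dev; int deleted; };

/* stand-in for do_handle_delete(): the tracked object's inode is gone */
static void model_delete(struct tracked *t) { t->deleted = 1; }

/* stand-in for do_handle_create() on the renamed name: re-point the
   policy entry at the inode that now carries the name */
static void model_create(struct tracked *t, unsigned long ino,
			 unsigned long dev)
{ t->ino = ino; t->dev = dev; t->deleted = 0; }

int main(void)
{
	/* policy names /tmp/b, currently inode 200; /tmp/a is inode 100 */
	struct tracked b = { 200, 8, 0 };

	model_delete(&b);         /* replaced target's inode is deleted */
	model_create(&b, 100, 8); /* "b" now refers to a's inode */

	printf("b: ino=%lu deleted=%d\n", b.ino, b.deleted);
	return 0;
}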
63742+
63743+static int
63744+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
63745+ unsigned char **sum)
63746+{
63747+ struct acl_role_label *r;
63748+ struct role_allowed_ip *ipp;
63749+ struct role_transition *trans;
63750+ unsigned int i;
63751+ int found = 0;
63752+ u32 curr_ip = current->signal->curr_ip;
63753+
63754+ current->signal->saved_ip = curr_ip;
63755+
63756+ /* check transition table */
63757+
63758+ for (trans = current->role->transitions; trans; trans = trans->next) {
63759+ if (!strcmp(rolename, trans->rolename)) {
63760+ found = 1;
63761+ break;
63762+ }
63763+ }
63764+
63765+ if (!found)
63766+ return 0;
63767+
63768+ /* handle special roles that do not require authentication
63769+ and check ip */
63770+
63771+ FOR_EACH_ROLE_START(r)
63772+ if (!strcmp(rolename, r->rolename) &&
63773+ (r->roletype & GR_ROLE_SPECIAL)) {
63774+ found = 0;
63775+ if (r->allowed_ips != NULL) {
63776+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
63777+ if ((ntohl(curr_ip) & ipp->netmask) ==
63778+ (ntohl(ipp->addr) & ipp->netmask))
63779+ found = 1;
63780+ }
63781+ } else
63782+ found = 2;
63783+ if (!found)
63784+ return 0;
63785+
63786+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
63787+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
63788+ *salt = NULL;
63789+ *sum = NULL;
63790+ return 1;
63791+ }
63792+ }
63793+ FOR_EACH_ROLE_END(r)
63794+
63795+ for (i = 0; i < num_sprole_pws; i++) {
63796+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
63797+ *salt = acl_special_roles[i]->salt;
63798+ *sum = acl_special_roles[i]->sum;
63799+ return 1;
63800+ }
63801+ }
63802+
63803+ return 0;
63804+}
63805+
63806+static void
63807+assign_special_role(char *rolename)
63808+{
63809+ struct acl_object_label *obj;
63810+ struct acl_role_label *r;
63811+ struct acl_role_label *assigned = NULL;
63812+ struct task_struct *tsk;
63813+ struct file *filp;
63814+
63815+ FOR_EACH_ROLE_START(r)
63816+ if (!strcmp(rolename, r->rolename) &&
63817+ (r->roletype & GR_ROLE_SPECIAL)) {
63818+ assigned = r;
63819+ break;
63820+ }
63821+ FOR_EACH_ROLE_END(r)
63822+
63823+ if (!assigned)
63824+ return;
63825+
63826+ read_lock(&tasklist_lock);
63827+ read_lock(&grsec_exec_file_lock);
63828+
63829+ tsk = current->real_parent;
63830+ if (tsk == NULL)
63831+ goto out_unlock;
63832+
63833+ filp = tsk->exec_file;
63834+ if (filp == NULL)
63835+ goto out_unlock;
63836+
63837+ tsk->is_writable = 0;
63838+
63839+ tsk->acl_sp_role = 1;
63840+ tsk->acl_role_id = ++acl_sp_role_value;
63841+ tsk->role = assigned;
63842+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
63843+
63844+ /* ignore additional mmap checks for processes that are writable
63845+ by the default ACL */
63846+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
63847+ if (unlikely(obj->mode & GR_WRITE))
63848+ tsk->is_writable = 1;
63849+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
63850+ if (unlikely(obj->mode & GR_WRITE))
63851+ tsk->is_writable = 1;
63852+
63853+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
63854+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, task_pid_nr(tsk));
63855+#endif
63856+
63857+out_unlock:
63858+ read_unlock(&grsec_exec_file_lock);
63859+ read_unlock(&tasklist_lock);
63860+ return;
63861+}
63862+
63863+int gr_check_secure_terminal(struct task_struct *task)
63864+{
63865+ struct task_struct *p, *p2, *p3;
63866+ struct files_struct *files;
63867+ struct fdtable *fdt;
63868+ struct file *our_file = NULL, *file;
63869+ int i;
63870+
63871+ if (task->signal->tty == NULL)
63872+ return 1;
63873+
63874+ files = get_files_struct(task);
63875+ if (files != NULL) {
63876+ rcu_read_lock();
63877+ fdt = files_fdtable(files);
63878+ for (i=0; i < fdt->max_fds; i++) {
63879+ file = fcheck_files(files, i);
63880+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
63881+ get_file(file);
63882+ our_file = file;
63883+ }
63884+ }
63885+ rcu_read_unlock();
63886+ put_files_struct(files);
63887+ }
63888+
63889+ if (our_file == NULL)
63890+ return 1;
63891+
63892+ read_lock(&tasklist_lock);
63893+ do_each_thread(p2, p) {
63894+ files = get_files_struct(p);
63895+ if (files == NULL ||
63896+ (p->signal && p->signal->tty == task->signal->tty)) {
63897+ if (files != NULL)
63898+ put_files_struct(files);
63899+ continue;
63900+ }
63901+ rcu_read_lock();
63902+ fdt = files_fdtable(files);
63903+ for (i=0; i < fdt->max_fds; i++) {
63904+ file = fcheck_files(files, i);
63905+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
63906+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
63907+ p3 = task;
63908+ while (task_pid_nr(p3) > 0) {
63909+ if (p3 == p)
63910+ break;
63911+ p3 = p3->real_parent;
63912+ }
63913+ if (p3 == p)
63914+ break;
63915+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
63916+ gr_handle_alertkill(p);
63917+ rcu_read_unlock();
63918+ put_files_struct(files);
63919+ read_unlock(&tasklist_lock);
63920+ fput(our_file);
63921+ return 0;
63922+ }
63923+ }
63924+ rcu_read_unlock();
63925+ put_files_struct(files);
63926+ } while_each_thread(p2, p);
63927+ read_unlock(&tasklist_lock);
63928+
63929+ fput(our_file);
63930+ return 1;
63931+}
63932+
63933+static int gr_rbac_disable(void *unused)
63934+{
63935+ pax_open_kernel();
63936+ gr_status &= ~GR_READY;
63937+ pax_close_kernel();
63938+
63939+ return 0;
63940+}
63941+
63942+ssize_t
63943+write_grsec_handler(struct file *file, const char __user * buf, size_t count, loff_t *ppos)
63944+{
63945+ struct gr_arg_wrapper uwrap;
63946+ unsigned char *sprole_salt = NULL;
63947+ unsigned char *sprole_sum = NULL;
63948+ int error = 0;
63949+ int error2 = 0;
63950+ size_t req_count = 0;
63951+
63952+ mutex_lock(&gr_dev_mutex);
63953+
63954+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
63955+ error = -EPERM;
63956+ goto out;
63957+ }
63958+
63959+#ifdef CONFIG_COMPAT
63960+ pax_open_kernel();
63961+ if (is_compat_task()) {
63962+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_compat;
63963+ copy_gr_arg = &copy_gr_arg_compat;
63964+ copy_acl_object_label = &copy_acl_object_label_compat;
63965+ copy_acl_subject_label = &copy_acl_subject_label_compat;
63966+ copy_acl_role_label = &copy_acl_role_label_compat;
63967+ copy_acl_ip_label = &copy_acl_ip_label_compat;
63968+ copy_role_allowed_ip = &copy_role_allowed_ip_compat;
63969+ copy_role_transition = &copy_role_transition_compat;
63970+ copy_sprole_pw = &copy_sprole_pw_compat;
63971+ copy_gr_hash_struct = &copy_gr_hash_struct_compat;
63972+ copy_pointer_from_array = &copy_pointer_from_array_compat;
63973+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_compat;
63974+ } else {
63975+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_normal;
63976+ copy_gr_arg = &copy_gr_arg_normal;
63977+ copy_acl_object_label = &copy_acl_object_label_normal;
63978+ copy_acl_subject_label = &copy_acl_subject_label_normal;
63979+ copy_acl_role_label = &copy_acl_role_label_normal;
63980+ copy_acl_ip_label = &copy_acl_ip_label_normal;
63981+ copy_role_allowed_ip = &copy_role_allowed_ip_normal;
63982+ copy_role_transition = &copy_role_transition_normal;
63983+ copy_sprole_pw = &copy_sprole_pw_normal;
63984+ copy_gr_hash_struct = &copy_gr_hash_struct_normal;
63985+ copy_pointer_from_array = &copy_pointer_from_array_normal;
63986+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_normal;
63987+ }
63988+ pax_close_kernel();
63989+#endif
63990+
63991+ req_count = get_gr_arg_wrapper_size();
63992+
63993+ if (count != req_count) {
63994+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)req_count);
63995+ error = -EINVAL;
63996+ goto out;
63997+ }
63998+
64000+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
64001+ gr_auth_expires = 0;
64002+ gr_auth_attempts = 0;
64003+ }
64004+
64005+ error = copy_gr_arg_wrapper(buf, &uwrap);
64006+ if (error)
64007+ goto out;
64008+
64009+ error = copy_gr_arg(uwrap.arg, gr_usermode);
64010+ if (error)
64011+ goto out;
64012+
64013+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
64014+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
64015+ time_after(gr_auth_expires, get_seconds())) {
64016+ error = -EBUSY;
64017+ goto out;
64018+ }
64019+
64020+	/* if a non-root user is trying to do anything other than use a
64021+	   special role, do not attempt authentication and do not count
64022+	   the attempt towards the authentication lockout
64023+	*/
64024+
64025+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
64026+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
64027+ gr_is_global_nonroot(current_uid())) {
64028+ error = -EPERM;
64029+ goto out;
64030+ }
64031+
64032+ /* ensure pw and special role name are null terminated */
64033+
64034+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
64035+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
64036+
64037+	/* Okay.
64038+	 * We now have enough of the argument structure (we have yet
64039+	 * to copy_from_user the tables themselves).  Copy the tables
64040+	 * only if we need them, i.e. for loading operations. */
64041+
64042+ switch (gr_usermode->mode) {
64043+ case GR_STATUS:
64044+ if (gr_status & GR_READY) {
64045+ error = 1;
64046+ if (!gr_check_secure_terminal(current))
64047+ error = 3;
64048+ } else
64049+ error = 2;
64050+ goto out;
64051+ case GR_SHUTDOWN:
64052+ if ((gr_status & GR_READY)
64053+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
64054+ stop_machine(gr_rbac_disable, NULL, NULL);
64055+ free_variables();
64056+ memset(gr_usermode, 0, sizeof (struct gr_arg));
64057+ memset(gr_system_salt, 0, GR_SALT_LEN);
64058+ memset(gr_system_sum, 0, GR_SHA_LEN);
64059+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
64060+ } else if (gr_status & GR_READY) {
64061+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
64062+ error = -EPERM;
64063+ } else {
64064+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
64065+ error = -EAGAIN;
64066+ }
64067+ break;
64068+ case GR_ENABLE:
64069+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
64070+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
64071+ else {
64072+ if (gr_status & GR_READY)
64073+ error = -EAGAIN;
64074+ else
64075+ error = error2;
64076+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
64077+ }
64078+ break;
64079+ case GR_RELOAD:
64080+ if (!(gr_status & GR_READY)) {
64081+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
64082+ error = -EAGAIN;
64083+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
64084+ stop_machine(gr_rbac_disable, NULL, NULL);
64085+ free_variables();
64086+ error2 = gracl_init(gr_usermode);
64087+ if (!error2)
64088+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
64089+ else {
64090+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
64091+ error = error2;
64092+ }
64093+ } else {
64094+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
64095+ error = -EPERM;
64096+ }
64097+ break;
64098+ case GR_SEGVMOD:
64099+ if (unlikely(!(gr_status & GR_READY))) {
64100+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
64101+ error = -EAGAIN;
64102+ break;
64103+ }
64104+
64105+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
64106+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
64107+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
64108+ struct acl_subject_label *segvacl;
64109+ segvacl =
64110+ lookup_acl_subj_label(gr_usermode->segv_inode,
64111+ gr_usermode->segv_device,
64112+ current->role);
64113+ if (segvacl) {
64114+ segvacl->crashes = 0;
64115+ segvacl->expires = 0;
64116+ }
64117+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
64118+ gr_remove_uid(gr_usermode->segv_uid);
64119+ }
64120+ } else {
64121+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
64122+ error = -EPERM;
64123+ }
64124+ break;
64125+ case GR_SPROLE:
64126+ case GR_SPROLEPAM:
64127+ if (unlikely(!(gr_status & GR_READY))) {
64128+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
64129+ error = -EAGAIN;
64130+ break;
64131+ }
64132+
64133+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
64134+ current->role->expires = 0;
64135+ current->role->auth_attempts = 0;
64136+ }
64137+
64138+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
64139+ time_after(current->role->expires, get_seconds())) {
64140+ error = -EBUSY;
64141+ goto out;
64142+ }
64143+
64144+ if (lookup_special_role_auth
64145+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
64146+ && ((!sprole_salt && !sprole_sum)
64147+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
64148+ char *p = "";
64149+ assign_special_role(gr_usermode->sp_role);
64150+ read_lock(&tasklist_lock);
64151+ if (current->real_parent)
64152+ p = current->real_parent->role->rolename;
64153+ read_unlock(&tasklist_lock);
64154+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
64155+ p, acl_sp_role_value);
64156+ } else {
64157+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
64158+ error = -EPERM;
64159+ if(!(current->role->auth_attempts++))
64160+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
64161+
64162+ goto out;
64163+ }
64164+ break;
64165+ case GR_UNSPROLE:
64166+ if (unlikely(!(gr_status & GR_READY))) {
64167+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
64168+ error = -EAGAIN;
64169+ break;
64170+ }
64171+
64172+ if (current->role->roletype & GR_ROLE_SPECIAL) {
64173+ char *p = "";
64174+ int i = 0;
64175+
64176+ read_lock(&tasklist_lock);
64177+ if (current->real_parent) {
64178+ p = current->real_parent->role->rolename;
64179+ i = current->real_parent->acl_role_id;
64180+ }
64181+ read_unlock(&tasklist_lock);
64182+
64183+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
64184+ gr_set_acls(1);
64185+ } else {
64186+ error = -EPERM;
64187+ goto out;
64188+ }
64189+ break;
64190+ default:
64191+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
64192+ error = -EINVAL;
64193+ break;
64194+ }
64195+
64196+ if (error != -EPERM)
64197+ goto out;
64198+
64199+ if(!(gr_auth_attempts++))
64200+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
64201+
64202+ out:
64203+ mutex_unlock(&gr_dev_mutex);
64204+
64205+ if (!error)
64206+ error = req_count;
64207+
64208+ return error;
64209+}
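
From userspace, a policy tool drives this handler with a single write()
of a gr_arg_wrapper. A hedged sketch of a GR_STATUS query: the structure
mirrors, the GR_STATUS_MODE value, and the /dev/grsec path below are
assumptions of this illustration (real deployments use gradm and the
definitions from the grsecurity headers):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* assumed mirrors of the kernel structures; field order and sizes must
   really come from linux/gracl.h -- most gr_arg fields are elided here */
struct gr_arg { unsigned short mode; };
struct gr_arg_wrapper {
	struct gr_arg *arg;
	unsigned int version;
	unsigned int size;
};
#define GR_STATUS_MODE 0	/* placeholder for the real GR_STATUS value */

int main(void)
{
	struct gr_arg arg;
	struct gr_arg_wrapper wrap;
	long ret;
	int fd = open("/dev/grsec", O_WRONLY);

	if (fd < 0) { perror("open"); return 1; }

	memset(&arg, 0, sizeof(arg));
	arg.mode = GR_STATUS_MODE;
	wrap.arg = &arg;
	wrap.version = 0;        /* must equal GRSECURITY_VERSION */
	wrap.size = sizeof(arg); /* must equal sizeof(struct gr_arg) */

	/* per the handler above, GR_STATUS yields 1 (enabled),
	   2 (disabled), or 3 (enabled, insecure terminal) */
	ret = write(fd, &wrap, sizeof(wrap));
	printf("status = %ld\n", ret);
	close(fd);
	return 0;
}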
64210+
64211+/* must be called with
64212+ rcu_read_lock();
64213+ read_lock(&tasklist_lock);
64214+ read_lock(&grsec_exec_file_lock);
64215+*/
64216+int gr_apply_subject_to_task(struct task_struct *task)
64217+{
64218+ struct acl_object_label *obj;
64219+ char *tmpname;
64220+ struct acl_subject_label *tmpsubj;
64221+ struct file *filp;
64222+ struct name_entry *nmatch;
64223+
64224+ filp = task->exec_file;
64225+ if (filp == NULL)
64226+ return 0;
64227+
64228+	/* the following applies the correct subject to
64229+	   binaries that were already running when the
64230+	   RBAC system was enabled and that have been
64231+	   replaced or deleted since their execution
64232+	   -----
64233+	   when the RBAC system starts, the inode/dev
64234+	   taken from exec_file will be one the RBAC
64235+	   system is unaware of.  It only knows the
64236+	   inode/dev of the file currently on disk, or
64237+	   the absence of it.
64238+	*/
64239+ preempt_disable();
64240+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
64241+
64242+ nmatch = lookup_name_entry(tmpname);
64243+ preempt_enable();
64244+ tmpsubj = NULL;
64245+ if (nmatch) {
64246+ if (nmatch->deleted)
64247+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
64248+ else
64249+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
64250+ if (tmpsubj != NULL)
64251+ task->acl = tmpsubj;
64252+ }
64253+ if (tmpsubj == NULL)
64254+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
64255+ task->role);
64256+ if (task->acl) {
64257+ task->is_writable = 0;
64258+ /* ignore additional mmap checks for processes that are writable
64259+ by the default ACL */
64260+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
64261+ if (unlikely(obj->mode & GR_WRITE))
64262+ task->is_writable = 1;
64263+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
64264+ if (unlikely(obj->mode & GR_WRITE))
64265+ task->is_writable = 1;
64266+
64267+ gr_set_proc_res(task);
64268+
64269+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
64270+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
64271+#endif
64272+ } else {
64273+ return 1;
64274+ }
64275+
64276+ return 0;
64277+}
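
The lookup order described in the comment above is a three-step fallback:
deleted-name lookup, live-name lookup, then a plain pathname-based subject
match. A compact model with hypothetical helpers standing in for the
kernel lookups:

#include <stdio.h>

struct name_entry { unsigned long ino, dev; int deleted; };

/* hypothetical stand-ins for the three kernel lookups used above */
static const char *lookup_deleted(void) { return "subject for deleted binary"; }
static const char *lookup_live(void)    { return "subject for on-disk binary"; }
static const char *lookup_by_path(void) { return "fallback: match by pathname"; }

/* mirrors the fallback order in gr_apply_subject_to_task() */
static const char *apply_subject(const struct name_entry *nmatch)
{
	const char *subj = NULL;

	if (nmatch)
		subj = nmatch->deleted ? lookup_deleted() : lookup_live();
	if (subj == NULL)
		subj = lookup_by_path();
	return subj;
}

int main(void)
{
	struct name_entry replaced = { 100, 8, 1 };

	printf("%s\n", apply_subject(&replaced));
	printf("%s\n", apply_subject(NULL));
	return 0;
}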
64278+
64279+int
64280+gr_set_acls(const int type)
64281+{
64282+ struct task_struct *task, *task2;
64283+ struct acl_role_label *role = current->role;
64284+ __u16 acl_role_id = current->acl_role_id;
64285+ const struct cred *cred;
64286+ int ret;
64287+
64288+ rcu_read_lock();
64289+ read_lock(&tasklist_lock);
64290+ read_lock(&grsec_exec_file_lock);
64291+ do_each_thread(task2, task) {
64292+ /* check to see if we're called from the exit handler,
64293+ if so, only replace ACLs that have inherited the admin
64294+ ACL */
64295+
64296+ if (type && (task->role != role ||
64297+ task->acl_role_id != acl_role_id))
64298+ continue;
64299+
64300+ task->acl_role_id = 0;
64301+ task->acl_sp_role = 0;
64302+
64303+ if (task->exec_file) {
64304+ cred = __task_cred(task);
64305+ task->role = lookup_acl_role_label(task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
64306+ ret = gr_apply_subject_to_task(task);
64307+ if (ret) {
64308+ read_unlock(&grsec_exec_file_lock);
64309+ read_unlock(&tasklist_lock);
64310+ rcu_read_unlock();
64311+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task_pid_nr(task));
64312+ return ret;
64313+ }
64314+ } else {
64315+ // it's a kernel process
64316+ task->role = kernel_role;
64317+ task->acl = kernel_role->root_label;
64318+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
64319+ task->acl->mode &= ~GR_PROCFIND;
64320+#endif
64321+ }
64322+ } while_each_thread(task2, task);
64323+ read_unlock(&grsec_exec_file_lock);
64324+ read_unlock(&tasklist_lock);
64325+ rcu_read_unlock();
64326+
64327+ return 0;
64328+}
64329+
64330+#if defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC)
64331+static const unsigned long res_learn_bumps[GR_NLIMITS] = {
64332+ [RLIMIT_CPU] = GR_RLIM_CPU_BUMP,
64333+ [RLIMIT_FSIZE] = GR_RLIM_FSIZE_BUMP,
64334+ [RLIMIT_DATA] = GR_RLIM_DATA_BUMP,
64335+ [RLIMIT_STACK] = GR_RLIM_STACK_BUMP,
64336+ [RLIMIT_CORE] = GR_RLIM_CORE_BUMP,
64337+ [RLIMIT_RSS] = GR_RLIM_RSS_BUMP,
64338+ [RLIMIT_NPROC] = GR_RLIM_NPROC_BUMP,
64339+ [RLIMIT_NOFILE] = GR_RLIM_NOFILE_BUMP,
64340+ [RLIMIT_MEMLOCK] = GR_RLIM_MEMLOCK_BUMP,
64341+ [RLIMIT_AS] = GR_RLIM_AS_BUMP,
64342+ [RLIMIT_LOCKS] = GR_RLIM_LOCKS_BUMP,
64343+ [RLIMIT_SIGPENDING] = GR_RLIM_SIGPENDING_BUMP,
64344+ [RLIMIT_MSGQUEUE] = GR_RLIM_MSGQUEUE_BUMP,
64345+ [RLIMIT_NICE] = GR_RLIM_NICE_BUMP,
64346+ [RLIMIT_RTPRIO] = GR_RLIM_RTPRIO_BUMP,
64347+ [RLIMIT_RTTIME] = GR_RLIM_RTTIME_BUMP
64348+};
64349+
64350+void
64351+gr_learn_resource(const struct task_struct *task,
64352+ const int res, const unsigned long wanted, const int gt)
64353+{
64354+ struct acl_subject_label *acl;
64355+ const struct cred *cred;
64356+
64357+ if (unlikely((gr_status & GR_READY) &&
64358+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
64359+ goto skip_reslog;
64360+
64361+ gr_log_resource(task, res, wanted, gt);
64362+skip_reslog:
64363+
64364+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
64365+ return;
64366+
64367+ acl = task->acl;
64368+
64369+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
64370+ !(acl->resmask & (1U << (unsigned short) res))))
64371+ return;
64372+
64373+ if (wanted >= acl->res[res].rlim_cur) {
64374+ unsigned long res_add;
64375+
64376+ res_add = wanted + res_learn_bumps[res];
64377+
64378+ acl->res[res].rlim_cur = res_add;
64379+
64380+ if (wanted > acl->res[res].rlim_max)
64381+ acl->res[res].rlim_max = res_add;
64382+
64383+ /* only log the subject filename, since resource logging is supported for
64384+ single-subject learning only */
64385+ rcu_read_lock();
64386+ cred = __task_cred(task);
64387+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
64388+ task->role->roletype, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), acl->filename,
64389+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
64390+ "", (unsigned long) res, &task->signal->saved_ip);
64391+ rcu_read_unlock();
64392+ }
64393+
64394+ return;
64395+}
64396+EXPORT_SYMBOL(gr_learn_resource);
64397+#endif
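
In learning mode the function above never records the raw request: it
stores the requested value plus a per-resource headroom bump, and raises
rlim_max as well whenever the request exceeded it. A worked sketch with a
hypothetical bump value (the real GR_RLIM_*_BUMP constants come from the
grsecurity headers):

#include <stdio.h>

#define NOFILE_BUMP 32	/* hypothetical stand-in for GR_RLIM_NOFILE_BUMP */

int main(void)
{
	unsigned long rlim_cur = 1024, rlim_max = 1024;
	unsigned long wanted = 1500;	/* the process asked for 1500 fds */

	if (wanted >= rlim_cur) {
		unsigned long res_add = wanted + NOFILE_BUMP;	/* 1532 */

		rlim_cur = res_add;
		if (wanted > rlim_max)
			rlim_max = res_add;
	}

	printf("learned: cur=%lu max=%lu\n", rlim_cur, rlim_max);
	return 0;
}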
64398+
64399+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
64400+void
64401+pax_set_initial_flags(struct linux_binprm *bprm)
64402+{
64403+ struct task_struct *task = current;
64404+ struct acl_subject_label *proc;
64405+ unsigned long flags;
64406+
64407+ if (unlikely(!(gr_status & GR_READY)))
64408+ return;
64409+
64410+ flags = pax_get_flags(task);
64411+
64412+ proc = task->acl;
64413+
64414+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
64415+ flags &= ~MF_PAX_PAGEEXEC;
64416+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
64417+ flags &= ~MF_PAX_SEGMEXEC;
64418+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
64419+ flags &= ~MF_PAX_RANDMMAP;
64420+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
64421+ flags &= ~MF_PAX_EMUTRAMP;
64422+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
64423+ flags &= ~MF_PAX_MPROTECT;
64424+
64425+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
64426+ flags |= MF_PAX_PAGEEXEC;
64427+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
64428+ flags |= MF_PAX_SEGMEXEC;
64429+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
64430+ flags |= MF_PAX_RANDMMAP;
64431+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
64432+ flags |= MF_PAX_EMUTRAMP;
64433+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
64434+ flags |= MF_PAX_MPROTECT;
64435+
64436+ pax_set_flags(task, flags);
64437+
64438+ return;
64439+}
64440+#endif
64441+
64442+int
64443+gr_handle_proc_ptrace(struct task_struct *task)
64444+{
64445+ struct file *filp;
64446+ struct task_struct *tmp = task;
64447+ struct task_struct *curtemp = current;
64448+ __u32 retmode;
64449+
64450+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
64451+ if (unlikely(!(gr_status & GR_READY)))
64452+ return 0;
64453+#endif
64454+
64455+ read_lock(&tasklist_lock);
64456+ read_lock(&grsec_exec_file_lock);
64457+ filp = task->exec_file;
64458+
64459+ while (task_pid_nr(tmp) > 0) {
64460+ if (tmp == curtemp)
64461+ break;
64462+ tmp = tmp->real_parent;
64463+ }
64464+
64465+ if (!filp || (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
64466+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
64467+ read_unlock(&grsec_exec_file_lock);
64468+ read_unlock(&tasklist_lock);
64469+ return 1;
64470+ }
64471+
64472+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
64473+ if (!(gr_status & GR_READY)) {
64474+ read_unlock(&grsec_exec_file_lock);
64475+ read_unlock(&tasklist_lock);
64476+ return 0;
64477+ }
64478+#endif
64479+
64480+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
64481+ read_unlock(&grsec_exec_file_lock);
64482+ read_unlock(&tasklist_lock);
64483+
64484+ if (retmode & GR_NOPTRACE)
64485+ return 1;
64486+
64487+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
64488+ && (current->acl != task->acl || (current->acl != current->role->root_label
64489+ && task_pid_nr(current) != task_pid_nr(task))))
64490+ return 1;
64491+
64492+ return 0;
64493+}
64494+
64495+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
64496+{
64497+ if (unlikely(!(gr_status & GR_READY)))
64498+ return;
64499+
64500+ if (!(current->role->roletype & GR_ROLE_GOD))
64501+ return;
64502+
64503+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
64504+ p->role->rolename, gr_task_roletype_to_char(p),
64505+ p->acl->filename);
64506+}
64507+
64508+int
64509+gr_handle_ptrace(struct task_struct *task, const long request)
64510+{
64511+ struct task_struct *tmp = task;
64512+ struct task_struct *curtemp = current;
64513+ __u32 retmode;
64514+
64515+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
64516+ if (unlikely(!(gr_status & GR_READY)))
64517+ return 0;
64518+#endif
64519+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
64520+ read_lock(&tasklist_lock);
64521+ while (task_pid_nr(tmp) > 0) {
64522+ if (tmp == curtemp)
64523+ break;
64524+ tmp = tmp->real_parent;
64525+ }
64526+
64527+ if (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
64528+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
64529+ read_unlock(&tasklist_lock);
64530+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
64531+ return 1;
64532+ }
64533+ read_unlock(&tasklist_lock);
64534+ }
64535+
64536+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
64537+ if (!(gr_status & GR_READY))
64538+ return 0;
64539+#endif
64540+
64541+ read_lock(&grsec_exec_file_lock);
64542+ if (unlikely(!task->exec_file)) {
64543+ read_unlock(&grsec_exec_file_lock);
64544+ return 0;
64545+ }
64546+
64547+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
64548+ read_unlock(&grsec_exec_file_lock);
64549+
64550+ if (retmode & GR_NOPTRACE) {
64551+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
64552+ return 1;
64553+ }
64554+
64555+ if (retmode & GR_PTRACERD) {
64556+ switch (request) {
64557+ case PTRACE_SEIZE:
64558+ case PTRACE_POKETEXT:
64559+ case PTRACE_POKEDATA:
64560+ case PTRACE_POKEUSR:
64561+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
64562+ case PTRACE_SETREGS:
64563+ case PTRACE_SETFPREGS:
64564+#endif
64565+#ifdef CONFIG_X86
64566+ case PTRACE_SETFPXREGS:
64567+#endif
64568+#ifdef CONFIG_ALTIVEC
64569+ case PTRACE_SETVRREGS:
64570+#endif
64571+ return 1;
64572+ default:
64573+ return 0;
64574+ }
64575+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
64576+ !(current->role->roletype & GR_ROLE_GOD) &&
64577+ (current->acl != task->acl)) {
64578+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
64579+ return 1;
64580+ }
64581+
64582+ return 0;
64583+}
64584+
64585+static int is_writable_mmap(const struct file *filp)
64586+{
64587+ struct task_struct *task = current;
64588+ struct acl_object_label *obj, *obj2;
64589+
64590+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
64591+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
64592+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
64593+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
64594+ task->role->root_label);
64595+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
64596+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
64597+ return 1;
64598+ }
64599+ }
64600+ return 0;
64601+}
64602+
64603+int
64604+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
64605+{
64606+ __u32 mode;
64607+
64608+ if (unlikely(!file || !(prot & PROT_EXEC)))
64609+ return 1;
64610+
64611+ if (is_writable_mmap(file))
64612+ return 0;
64613+
64614+ mode =
64615+ gr_search_file(file->f_path.dentry,
64616+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
64617+ file->f_path.mnt);
64618+
64619+ if (!gr_tpe_allow(file))
64620+ return 0;
64621+
64622+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
64623+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
64624+ return 0;
64625+ } else if (unlikely(!(mode & GR_EXEC))) {
64626+ return 0;
64627+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
64628+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
64629+ return 1;
64630+ }
64631+
64632+ return 1;
64633+}
64634+
64635+int
64636+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
64637+{
64638+ __u32 mode;
64639+
64640+ if (unlikely(!file || !(prot & PROT_EXEC)))
64641+ return 1;
64642+
64643+ if (is_writable_mmap(file))
64644+ return 0;
64645+
64646+ mode =
64647+ gr_search_file(file->f_path.dentry,
64648+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
64649+ file->f_path.mnt);
64650+
64651+ if (!gr_tpe_allow(file))
64652+ return 0;
64653+
64654+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
64655+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
64656+ return 0;
64657+ } else if (unlikely(!(mode & GR_EXEC))) {
64658+ return 0;
64659+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
64660+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
64661+ return 1;
64662+ }
64663+
64664+ return 1;
64665+}
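
gr_acl_handle_mmap() and gr_acl_handle_mprotect() apply the same four-way
decision to the mode bits returned by gr_search_file() (after the early
allows for non-exec mappings and the writable-library denial). The table
reduces to one small pure function; the X_* bit values are simplified
stand-ins:

#include <stdio.h>

#define X_EXEC     0x1	/* stands in for GR_EXEC */
#define X_AUDIT    0x2	/* stands in for GR_AUDIT_EXEC */
#define X_SUPPRESS 0x4	/* stands in for GR_SUPPRESS */

/* returns 1 = allow, 0 = deny; *logged reports whether a log line
   would be emitted, matching the cascade above */
static int exec_mapping_allowed(unsigned int mode, int *logged)
{
	*logged = 0;
	if (!(mode & X_EXEC) && !(mode & X_SUPPRESS)) {
		*logged = 1;	/* audited denial */
		return 0;
	}
	if (!(mode & X_EXEC))
		return 0;	/* suppressed denial */
	if (mode & X_AUDIT)
		*logged = 1;	/* audited allow */
	return 1;
}

int main(void)
{
	unsigned int cases[] = { 0, X_SUPPRESS, X_EXEC, X_EXEC | X_AUDIT };
	int i;

	for (i = 0; i < 4; i++) {
		int logged, ok = exec_mapping_allowed(cases[i], &logged);
		printf("mode=%#x allow=%d logged=%d\n", cases[i], ok, logged);
	}
	return 0;
}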
64666+
64667+void
64668+gr_acl_handle_psacct(struct task_struct *task, const long code)
64669+{
64670+ unsigned long runtime;
64671+ unsigned long cputime;
64672+ unsigned int wday, cday;
64673+ __u8 whr, chr;
64674+ __u8 wmin, cmin;
64675+ __u8 wsec, csec;
64676+ struct timespec timeval;
64677+
64678+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
64679+ !(task->acl->mode & GR_PROCACCT)))
64680+ return;
64681+
64682+ do_posix_clock_monotonic_gettime(&timeval);
64683+ runtime = timeval.tv_sec - task->start_time.tv_sec;
64684+ wday = runtime / (3600 * 24);
64685+ runtime -= wday * (3600 * 24);
64686+ whr = runtime / 3600;
64687+ runtime -= whr * 3600;
64688+ wmin = runtime / 60;
64689+ runtime -= wmin * 60;
64690+ wsec = runtime;
64691+
64692+ cputime = (task->utime + task->stime) / HZ;
64693+ cday = cputime / (3600 * 24);
64694+ cputime -= cday * (3600 * 24);
64695+ chr = cputime / 3600;
64696+ cputime -= chr * 3600;
64697+ cmin = cputime / 60;
64698+ cputime -= cmin * 60;
64699+ csec = cputime;
64700+
64701+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
64702+
64703+ return;
64704+}
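
The wall-clock and CPU-time fields logged above come from the same
divide-and-subtract decomposition. For example, a runtime of 93784
seconds splits into 1 day, 2 hours, 3 minutes, 4 seconds:

#include <stdio.h>

int main(void)
{
	unsigned long t = 93784;	/* seconds of runtime */
	unsigned int days, hours, mins;

	days = t / (3600 * 24);		/* 1 */
	t -= days * (3600 * 24);	/* 93784 - 86400 = 7384 */
	hours = t / 3600;		/* 2 */
	t -= hours * 3600;		/* 184 */
	mins = t / 60;			/* 3 */
	t -= mins * 60;			/* 4 */

	printf("%ud %uh %um %lus\n", days, hours, mins, t);
	return 0;
}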
64705+
64706+void gr_set_kernel_label(struct task_struct *task)
64707+{
64708+ if (gr_status & GR_READY) {
64709+ task->role = kernel_role;
64710+ task->acl = kernel_role->root_label;
64711+ }
64712+ return;
64713+}
64714+
64715+#ifdef CONFIG_TASKSTATS
64716+int gr_is_taskstats_denied(int pid)
64717+{
64718+ struct task_struct *task;
64719+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
64720+ const struct cred *cred;
64721+#endif
64722+ int ret = 0;
64723+
64724+	/* restrict taskstats viewing to un-chrooted root users; when
64725+	   the RBAC system is enabled, also require the 'view' subject flag
64726+	*/
64727+
64728+ rcu_read_lock();
64729+ read_lock(&tasklist_lock);
64730+ task = find_task_by_vpid(pid);
64731+ if (task) {
64732+#ifdef CONFIG_GRKERNSEC_CHROOT
64733+ if (proc_is_chrooted(task))
64734+ ret = -EACCES;
64735+#endif
64736+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
64737+ cred = __task_cred(task);
64738+#ifdef CONFIG_GRKERNSEC_PROC_USER
64739+ if (gr_is_global_nonroot(cred->uid))
64740+ ret = -EACCES;
64741+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
64742+ if (gr_is_global_nonroot(cred->uid) && !groups_search(cred->group_info, grsec_proc_gid))
64743+ ret = -EACCES;
64744+#endif
64745+#endif
64746+ if (gr_status & GR_READY) {
64747+ if (!(task->acl->mode & GR_VIEW))
64748+ ret = -EACCES;
64749+ }
64750+ } else
64751+ ret = -ENOENT;
64752+
64753+ read_unlock(&tasklist_lock);
64754+ rcu_read_unlock();
64755+
64756+ return ret;
64757+}
64758+#endif
64759+
64760+/* AUXV entries are filled via a descendant of search_binary_handler
64761+ after we've already applied the subject for the target
64762+*/
64763+int gr_acl_enable_at_secure(void)
64764+{
64765+ if (unlikely(!(gr_status & GR_READY)))
64766+ return 0;
64767+
64768+ if (current->acl->mode & GR_ATSECURE)
64769+ return 1;
64770+
64771+ return 0;
64772+}
64773+
64774+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
64775+{
64776+ struct task_struct *task = current;
64777+ struct dentry *dentry = file->f_path.dentry;
64778+ struct vfsmount *mnt = file->f_path.mnt;
64779+ struct acl_object_label *obj, *tmp;
64780+ struct acl_subject_label *subj;
64781+ unsigned int bufsize;
64782+ int is_not_root;
64783+ char *path;
64784+ dev_t dev = __get_dev(dentry);
64785+
64786+ if (unlikely(!(gr_status & GR_READY)))
64787+ return 1;
64788+
64789+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
64790+ return 1;
64791+
64792+	/* ignore private (IS_PRIVATE) inodes */
64793+ if (IS_PRIVATE(dentry->d_inode))
64794+ return 1;
64795+
64796+ subj = task->acl;
64797+ read_lock(&gr_inode_lock);
64798+ do {
64799+ obj = lookup_acl_obj_label(ino, dev, subj);
64800+ if (obj != NULL) {
64801+ read_unlock(&gr_inode_lock);
64802+ return (obj->mode & GR_FIND) ? 1 : 0;
64803+ }
64804+ } while ((subj = subj->parent_subject));
64805+ read_unlock(&gr_inode_lock);
64806+
64807+	/* this is purely an optimization, since we're looking up an object
64808+	   for the directory we're doing a readdir on.  If it's possible
64809+	   for any globbed object to match the entry we're filling into
64810+	   the directory, then the object we find here will be an anchor
64811+	   point with attached globbed objects
64812+	*/
64813+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
64814+ if (obj->globbed == NULL)
64815+ return (obj->mode & GR_FIND) ? 1 : 0;
64816+
64817+ is_not_root = ((obj->filename[0] == '/') &&
64818+ (obj->filename[1] == '\0')) ? 0 : 1;
64819+ bufsize = PAGE_SIZE - namelen - is_not_root;
64820+
64821+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
64822+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
64823+ return 1;
64824+
64825+ preempt_disable();
64826+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
64827+ bufsize);
64828+
64829+ bufsize = strlen(path);
64830+
64831+ /* if base is "/", don't append an additional slash */
64832+ if (is_not_root)
64833+ *(path + bufsize) = '/';
64834+ memcpy(path + bufsize + is_not_root, name, namelen);
64835+ *(path + bufsize + namelen + is_not_root) = '\0';
64836+
64837+ tmp = obj->globbed;
64838+ while (tmp) {
64839+ if (!glob_match(tmp->filename, path)) {
64840+ preempt_enable();
64841+ return (tmp->mode & GR_FIND) ? 1 : 0;
64842+ }
64843+ tmp = tmp->next;
64844+ }
64845+ preempt_enable();
64846+ return (obj->mode & GR_FIND) ? 1 : 0;
64847+}
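
The path assembly above appends the directory entry to the resolved base,
special-casing a base of "/" so no doubled slash is handed to the glob
matcher. A standalone sketch of just that step (helper name is
hypothetical):

#include <stdio.h>
#include <string.h>

/* builds "<base>/<name>" into buf without doubling the slash when the
   base is the filesystem root -- the same is_not_root trick as above */
static void build_entry_path(char *buf, const char *base, const char *name)
{
	int is_not_root = !(base[0] == '/' && base[1] == '\0');
	size_t len = strlen(base);

	memcpy(buf, base, len);
	if (is_not_root)
		buf[len] = '/';
	memcpy(buf + len + is_not_root, name, strlen(name) + 1);
}

int main(void)
{
	char buf[64];

	build_entry_path(buf, "/", "passwd");
	printf("%s\n", buf);	/* /passwd */
	build_entry_path(buf, "/etc", "passwd");
	printf("%s\n", buf);	/* /etc/passwd */
	return 0;
}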
64848+
64849+void gr_put_exec_file(struct task_struct *task)
64850+{
64851+ struct file *filp;
64852+
64853+ write_lock(&grsec_exec_file_lock);
64854+ filp = task->exec_file;
64855+ task->exec_file = NULL;
64856+ write_unlock(&grsec_exec_file_lock);
64857+
64858+ if (filp)
64859+ fput(filp);
64860+
64861+ return;
64862+}
64863+
64865+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
64866+EXPORT_SYMBOL(gr_acl_is_enabled);
64867+#endif
64868+EXPORT_SYMBOL(gr_set_kernel_label);
64869+#ifdef CONFIG_SECURITY
64870+EXPORT_SYMBOL(gr_check_user_change);
64871+EXPORT_SYMBOL(gr_check_group_change);
64872+#endif
64873+
64874diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
64875new file mode 100644
64876index 0000000..34fefda
64877--- /dev/null
64878+++ b/grsecurity/gracl_alloc.c
64879@@ -0,0 +1,105 @@
64880+#include <linux/kernel.h>
64881+#include <linux/mm.h>
64882+#include <linux/slab.h>
64883+#include <linux/vmalloc.h>
64884+#include <linux/gracl.h>
64885+#include <linux/grsecurity.h>
64886+
64887+static unsigned long alloc_stack_next = 1;
64888+static unsigned long alloc_stack_size = 1;
64889+static void **alloc_stack;
64890+
64891+static __inline__ int
64892+alloc_pop(void)
64893+{
64894+ if (alloc_stack_next == 1)
64895+ return 0;
64896+
64897+ kfree(alloc_stack[alloc_stack_next - 2]);
64898+
64899+ alloc_stack_next--;
64900+
64901+ return 1;
64902+}
64903+
64904+static __inline__ int
64905+alloc_push(void *buf)
64906+{
64907+ if (alloc_stack_next >= alloc_stack_size)
64908+ return 1;
64909+
64910+ alloc_stack[alloc_stack_next - 1] = buf;
64911+
64912+ alloc_stack_next++;
64913+
64914+ return 0;
64915+}
64916+
64917+void *
64918+acl_alloc(unsigned long len)
64919+{
64920+ void *ret = NULL;
64921+
64922+ if (!len || len > PAGE_SIZE)
64923+ goto out;
64924+
64925+ ret = kmalloc(len, GFP_KERNEL);
64926+
64927+ if (ret) {
64928+ if (alloc_push(ret)) {
64929+ kfree(ret);
64930+ ret = NULL;
64931+ }
64932+ }
64933+
64934+out:
64935+ return ret;
64936+}
64937+
64938+void *
64939+acl_alloc_num(unsigned long num, unsigned long len)
64940+{
64941+ if (!len || (num > (PAGE_SIZE / len)))
64942+ return NULL;
64943+
64944+ return acl_alloc(num * len);
64945+}
64946+
64947+void
64948+acl_free_all(void)
64949+{
64950+ if (gr_acl_is_enabled() || !alloc_stack)
64951+ return;
64952+
64953+ while (alloc_pop()) ;
64954+
64955+ if (alloc_stack) {
64956+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
64957+ kfree(alloc_stack);
64958+ else
64959+ vfree(alloc_stack);
64960+ }
64961+
64962+ alloc_stack = NULL;
64963+ alloc_stack_size = 1;
64964+ alloc_stack_next = 1;
64965+
64966+ return;
64967+}
64968+
64969+int
64970+acl_alloc_stack_init(unsigned long size)
64971+{
64972+ if ((size * sizeof (void *)) <= PAGE_SIZE)
64973+ alloc_stack =
64974+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
64975+ else
64976+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
64977+
64978+ alloc_stack_size = size;
64979+
64980+ if (!alloc_stack)
64981+ return 0;
64982+ else
64983+ return 1;
64984+}
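
gracl_alloc.c implements a grow-only pointer stack: every allocation made
while a policy is loaded is pushed, and acl_free_all() pops and frees the
lot once the policy is torn down (it deliberately does nothing while the
RBAC system is still enabled). A hedged sketch of the intended call
pattern -- kernel-side pseudocode against the functions above, not a
standalone program:

static int load_policy_example(unsigned long nr_allocs)
{
	void *buf;

	if (!acl_alloc_stack_init(nr_allocs))	/* returns 0 on failure */
		return -1;

	/* acl_alloc() refuses len == 0 or len > PAGE_SIZE and pushes
	   every successful allocation onto the tracking stack */
	buf = acl_alloc(256);
	if (buf == NULL) {
		acl_free_all();	/* frees every tracked allocation */
		return -1;
	}

	/* ... build the policy out of acl_alloc()/acl_alloc_num() chunks;
	   nothing is freed individually -- teardown is acl_free_all() ... */
	return 0;
}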
64985diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
64986new file mode 100644
64987index 0000000..bdd51ea
64988--- /dev/null
64989+++ b/grsecurity/gracl_cap.c
64990@@ -0,0 +1,110 @@
64991+#include <linux/kernel.h>
64992+#include <linux/module.h>
64993+#include <linux/sched.h>
64994+#include <linux/gracl.h>
64995+#include <linux/grsecurity.h>
64996+#include <linux/grinternal.h>
64997+
64998+extern const char *captab_log[];
64999+extern int captab_log_entries;
65000+
65001+int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
65002+{
65003+ struct acl_subject_label *curracl;
65004+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
65005+ kernel_cap_t cap_audit = __cap_empty_set;
65006+
65007+ if (!gr_acl_is_enabled())
65008+ return 1;
65009+
65010+ curracl = task->acl;
65011+
65012+ cap_drop = curracl->cap_lower;
65013+ cap_mask = curracl->cap_mask;
65014+ cap_audit = curracl->cap_invert_audit;
65015+
65016+ while ((curracl = curracl->parent_subject)) {
65017+		/* if the cap hasn't yet been decided by a more specific subject
65018+		   but this ancestor subject's mask covers it, take this
65019+		   ancestor's setting: mark the cap as decided in the computed
65020+		   mask, and record it as dropped (or audited) if this ancestor
65021+		   lowers (or audits) it */
65022+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
65023+ cap_raise(cap_mask, cap);
65024+ if (cap_raised(curracl->cap_lower, cap))
65025+ cap_raise(cap_drop, cap);
65026+ if (cap_raised(curracl->cap_invert_audit, cap))
65027+ cap_raise(cap_audit, cap);
65028+ }
65029+ }
65030+
65031+ if (!cap_raised(cap_drop, cap)) {
65032+ if (cap_raised(cap_audit, cap))
65033+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
65034+ return 1;
65035+ }
65036+
65037+ curracl = task->acl;
65038+
65039+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
65040+ && cap_raised(cred->cap_effective, cap)) {
65041+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
65042+ task->role->roletype, GR_GLOBAL_UID(cred->uid),
65043+ GR_GLOBAL_GID(cred->gid), task->exec_file ?
65044+ gr_to_filename(task->exec_file->f_path.dentry,
65045+ task->exec_file->f_path.mnt) : curracl->filename,
65046+ curracl->filename, 0UL,
65047+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
65048+ return 1;
65049+ }
65050+
65051+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
65052+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
65053+
65054+ return 0;
65055+}
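
The parent_subject walk above gives the nearest subject that expresses an
opinion on a capability the final say: once a cap is raised in the
computed mask, ancestors can no longer affect it. A self-contained model
using plain bitmasks in place of kernel_cap_t (bit values are
illustrative):

#include <stdio.h>

#define CAP_BIT 0x1	/* illustrative single-capability bit */

struct subj {
	unsigned int cap_mask;	/* caps this subject has an opinion on */
	unsigned int cap_lower;	/* caps this subject drops */
	const struct subj *parent_subject;
};

/* returns 1 = capability permitted, 0 = dropped; mirrors the walk above */
static int cap_permitted(const struct subj *s, unsigned int cap)
{
	unsigned int mask = s->cap_mask, drop = s->cap_lower;

	while ((s = s->parent_subject)) {
		if (!(mask & cap) && (s->cap_mask & cap)) {
			mask |= cap;
			if (s->cap_lower & cap)
				drop |= cap;
		}
	}
	return !(drop & cap);
}

int main(void)
{
	/* parent drops the cap; a silent child inherits the drop */
	struct subj parent = { CAP_BIT, CAP_BIT, NULL };
	struct subj child  = { 0, 0, &parent };
	/* a child that raises the cap itself overrides the parent */
	struct subj child2 = { CAP_BIT, 0, &parent };

	printf("silent child: %d\n", cap_permitted(&child, CAP_BIT));
	printf("raising child: %d\n", cap_permitted(&child2, CAP_BIT));
	return 0;
}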
65056+
65057+int
65058+gr_acl_is_capable(const int cap)
65059+{
65060+ return gr_task_acl_is_capable(current, current_cred(), cap);
65061+}
65062+
65063+int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
65064+{
65065+ struct acl_subject_label *curracl;
65066+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
65067+
65068+ if (!gr_acl_is_enabled())
65069+ return 1;
65070+
65071+ curracl = task->acl;
65072+
65073+ cap_drop = curracl->cap_lower;
65074+ cap_mask = curracl->cap_mask;
65075+
65076+ while ((curracl = curracl->parent_subject)) {
65077+		/* if the cap hasn't yet been decided by a more specific subject
65078+		   but this ancestor subject's mask covers it, take this
65079+		   ancestor's setting: mark the cap as decided in the computed
65080+		   mask, and record it as dropped if this ancestor lowers it
65081+		   */
65082+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
65083+ cap_raise(cap_mask, cap);
65084+ if (cap_raised(curracl->cap_lower, cap))
65085+ cap_raise(cap_drop, cap);
65086+ }
65087+ }
65088+
65089+ if (!cap_raised(cap_drop, cap))
65090+ return 1;
65091+
65092+ return 0;
65093+}
65094+
65095+int
65096+gr_acl_is_capable_nolog(const int cap)
65097+{
65098+ return gr_task_acl_is_capable_nolog(current, cap);
65099+}
65100+
65101diff --git a/grsecurity/gracl_compat.c b/grsecurity/gracl_compat.c
65102new file mode 100644
65103index 0000000..a43dd06
65104--- /dev/null
65105+++ b/grsecurity/gracl_compat.c
65106@@ -0,0 +1,269 @@
65107+#include <linux/kernel.h>
65108+#include <linux/gracl.h>
65109+#include <linux/compat.h>
65110+#include <linux/gracl_compat.h>
65111+
65112+#include <asm/uaccess.h>
65113+
65114+int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap)
65115+{
65116+ struct gr_arg_wrapper_compat uwrapcompat;
65117+
65118+ if (copy_from_user(&uwrapcompat, buf, sizeof(uwrapcompat)))
65119+ return -EFAULT;
65120+
65121+ if ((uwrapcompat.version != GRSECURITY_VERSION) ||
65122+ (uwrapcompat.size != sizeof(struct gr_arg_compat)))
65123+ return -EINVAL;
65124+
65125+ uwrap->arg = compat_ptr(uwrapcompat.arg);
65126+ uwrap->version = uwrapcompat.version;
65127+ uwrap->size = sizeof(struct gr_arg);
65128+
65129+ return 0;
65130+}
65131+
65132+int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg)
65133+{
65134+ struct gr_arg_compat argcompat;
65135+
65136+ if (copy_from_user(&argcompat, buf, sizeof(argcompat)))
65137+ return -EFAULT;
65138+
65139+ arg->role_db.r_table = compat_ptr(argcompat.role_db.r_table);
65140+ arg->role_db.num_pointers = argcompat.role_db.num_pointers;
65141+ arg->role_db.num_roles = argcompat.role_db.num_roles;
65142+ arg->role_db.num_domain_children = argcompat.role_db.num_domain_children;
65143+ arg->role_db.num_subjects = argcompat.role_db.num_subjects;
65144+ arg->role_db.num_objects = argcompat.role_db.num_objects;
65145+
65146+ memcpy(&arg->pw, &argcompat.pw, sizeof(arg->pw));
65147+ memcpy(&arg->salt, &argcompat.salt, sizeof(arg->salt));
65148+ memcpy(&arg->sum, &argcompat.sum, sizeof(arg->sum));
65149+ memcpy(&arg->sp_role, &argcompat.sp_role, sizeof(arg->sp_role));
65150+ arg->sprole_pws = compat_ptr(argcompat.sprole_pws);
65151+ arg->segv_device = argcompat.segv_device;
65152+ arg->segv_inode = argcompat.segv_inode;
65153+ arg->segv_uid = argcompat.segv_uid;
65154+ arg->num_sprole_pws = argcompat.num_sprole_pws;
65155+ arg->mode = argcompat.mode;
65156+
65157+ return 0;
65158+}
65159+
65160+int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp)
65161+{
65162+ struct acl_object_label_compat objcompat;
65163+
65164+ if (copy_from_user(&objcompat, userp, sizeof(objcompat)))
65165+ return -EFAULT;
65166+
65167+ obj->filename = compat_ptr(objcompat.filename);
65168+ obj->inode = objcompat.inode;
65169+ obj->device = objcompat.device;
65170+ obj->mode = objcompat.mode;
65171+
65172+ obj->nested = compat_ptr(objcompat.nested);
65173+ obj->globbed = compat_ptr(objcompat.globbed);
65174+
65175+ obj->prev = compat_ptr(objcompat.prev);
65176+ obj->next = compat_ptr(objcompat.next);
65177+
65178+ return 0;
65179+}
65180+
65181+int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp)
65182+{
65183+ unsigned int i;
65184+ struct acl_subject_label_compat subjcompat;
65185+
65186+ if (copy_from_user(&subjcompat, userp, sizeof(subjcompat)))
65187+ return -EFAULT;
65188+
65189+ subj->filename = compat_ptr(subjcompat.filename);
65190+ subj->inode = subjcompat.inode;
65191+ subj->device = subjcompat.device;
65192+ subj->mode = subjcompat.mode;
65193+ subj->cap_mask = subjcompat.cap_mask;
65194+ subj->cap_lower = subjcompat.cap_lower;
65195+ subj->cap_invert_audit = subjcompat.cap_invert_audit;
65196+
65197+ for (i = 0; i < GR_NLIMITS; i++) {
65198+ if (subjcompat.res[i].rlim_cur == COMPAT_RLIM_INFINITY)
65199+ subj->res[i].rlim_cur = RLIM_INFINITY;
65200+ else
65201+ subj->res[i].rlim_cur = subjcompat.res[i].rlim_cur;
65202+ if (subjcompat.res[i].rlim_max == COMPAT_RLIM_INFINITY)
65203+ subj->res[i].rlim_max = RLIM_INFINITY;
65204+ else
65205+ subj->res[i].rlim_max = subjcompat.res[i].rlim_max;
65206+ }
65207+ subj->resmask = subjcompat.resmask;
65208+
65209+ subj->user_trans_type = subjcompat.user_trans_type;
65210+ subj->group_trans_type = subjcompat.group_trans_type;
65211+ subj->user_transitions = compat_ptr(subjcompat.user_transitions);
65212+ subj->group_transitions = compat_ptr(subjcompat.group_transitions);
65213+ subj->user_trans_num = subjcompat.user_trans_num;
65214+ subj->group_trans_num = subjcompat.group_trans_num;
65215+
65216+ memcpy(&subj->sock_families, &subjcompat.sock_families, sizeof(subj->sock_families));
65217+ memcpy(&subj->ip_proto, &subjcompat.ip_proto, sizeof(subj->ip_proto));
65218+ subj->ip_type = subjcompat.ip_type;
65219+ subj->ips = compat_ptr(subjcompat.ips);
65220+ subj->ip_num = subjcompat.ip_num;
65221+ subj->inaddr_any_override = subjcompat.inaddr_any_override;
65222+
65223+ subj->crashes = subjcompat.crashes;
65224+ subj->expires = subjcompat.expires;
65225+
65226+ subj->parent_subject = compat_ptr(subjcompat.parent_subject);
65227+ subj->hash = compat_ptr(subjcompat.hash);
65228+ subj->prev = compat_ptr(subjcompat.prev);
65229+ subj->next = compat_ptr(subjcompat.next);
65230+
65231+ subj->obj_hash = compat_ptr(subjcompat.obj_hash);
65232+ subj->obj_hash_size = subjcompat.obj_hash_size;
65233+ subj->pax_flags = subjcompat.pax_flags;
65234+
65235+ return 0;
65236+}
65237+
65238+int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp)
65239+{
65240+ struct acl_role_label_compat rolecompat;
65241+
65242+ if (copy_from_user(&rolecompat, userp, sizeof(rolecompat)))
65243+ return -EFAULT;
65244+
65245+ role->rolename = compat_ptr(rolecompat.rolename);
65246+ role->uidgid = rolecompat.uidgid;
65247+ role->roletype = rolecompat.roletype;
65248+
65249+ role->auth_attempts = rolecompat.auth_attempts;
65250+ role->expires = rolecompat.expires;
65251+
65252+ role->root_label = compat_ptr(rolecompat.root_label);
65253+ role->hash = compat_ptr(rolecompat.hash);
65254+
65255+ role->prev = compat_ptr(rolecompat.prev);
65256+ role->next = compat_ptr(rolecompat.next);
65257+
65258+ role->transitions = compat_ptr(rolecompat.transitions);
65259+ role->allowed_ips = compat_ptr(rolecompat.allowed_ips);
65260+ role->domain_children = compat_ptr(rolecompat.domain_children);
65261+ role->domain_child_num = rolecompat.domain_child_num;
65262+
65263+ role->umask = rolecompat.umask;
65264+
65265+ role->subj_hash = compat_ptr(rolecompat.subj_hash);
65266+ role->subj_hash_size = rolecompat.subj_hash_size;
65267+
65268+ return 0;
65269+}
65270+
65271+int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
65272+{
65273+ struct role_allowed_ip_compat roleip_compat;
65274+
65275+ if (copy_from_user(&roleip_compat, userp, sizeof(roleip_compat)))
65276+ return -EFAULT;
65277+
65278+ roleip->addr = roleip_compat.addr;
65279+ roleip->netmask = roleip_compat.netmask;
65280+
65281+ roleip->prev = compat_ptr(roleip_compat.prev);
65282+ roleip->next = compat_ptr(roleip_compat.next);
65283+
65284+ return 0;
65285+}
65286+
65287+int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp)
65288+{
65289+ struct role_transition_compat trans_compat;
65290+
65291+ if (copy_from_user(&trans_compat, userp, sizeof(trans_compat)))
65292+ return -EFAULT;
65293+
65294+ trans->rolename = compat_ptr(trans_compat.rolename);
65295+
65296+ trans->prev = compat_ptr(trans_compat.prev);
65297+ trans->next = compat_ptr(trans_compat.next);
65298+
65299+ return 0;
65301+}
65302+
65303+int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
65304+{
65305+ struct gr_hash_struct_compat hash_compat;
65306+
65307+ if (copy_from_user(&hash_compat, userp, sizeof(hash_compat)))
65308+ return -EFAULT;
65309+
65310+ hash->table = compat_ptr(hash_compat.table);
65311+ hash->nametable = compat_ptr(hash_compat.nametable);
65312+ hash->first = compat_ptr(hash_compat.first);
65313+
65314+ hash->table_size = hash_compat.table_size;
65315+ hash->used_size = hash_compat.used_size;
65316+
65317+ hash->type = hash_compat.type;
65318+
65319+ return 0;
65320+}
65321+
65322+int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp)
65323+{
65324+ compat_uptr_t ptrcompat;
65325+
65326+ if (copy_from_user(&ptrcompat, userp + (idx * sizeof(ptrcompat)), sizeof(ptrcompat)))
65327+ return -EFAULT;
65328+
65329+ *(void **)ptr = compat_ptr(ptrcompat);
65330+
65331+ return 0;
65332+}
65333+
65334+int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp)
65335+{
65336+ struct acl_ip_label_compat ip_compat;
65337+
65338+ if (copy_from_user(&ip_compat, userp, sizeof(ip_compat)))
65339+ return -EFAULT;
65340+
65341+ ip->iface = compat_ptr(ip_compat.iface);
65342+ ip->addr = ip_compat.addr;
65343+ ip->netmask = ip_compat.netmask;
65344+ ip->low = ip_compat.low;
65345+ ip->high = ip_compat.high;
65346+ ip->mode = ip_compat.mode;
65347+ ip->type = ip_compat.type;
65348+
65349+ memcpy(&ip->proto, &ip_compat.proto, sizeof(ip->proto));
65350+
65351+ ip->prev = compat_ptr(ip_compat.prev);
65352+ ip->next = compat_ptr(ip_compat.next);
65353+
65354+ return 0;
65355+}
65356+
65357+int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
65358+{
65359+ struct sprole_pw_compat pw_compat;
65360+
65361+ if (copy_from_user(&pw_compat, (const void *)userp + (sizeof(pw_compat) * idx), sizeof(pw_compat)))
65362+ return -EFAULT;
65363+
65364+ pw->rolename = compat_ptr(pw_compat.rolename);
65365+ memcpy(&pw->salt, pw_compat.salt, sizeof(pw->salt));
65366+ memcpy(&pw->sum, pw_compat.sum, sizeof(pw->sum));
65367+
65368+ return 0;
65369+}
65370+
65371+size_t get_gr_arg_wrapper_size_compat(void)
65372+{
65373+ return sizeof(struct gr_arg_wrapper_compat);
65374+}
65375+
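The compat helpers above share a single pattern: copy in the 32-bit userspace image of the structure (the *_compat type), widen each compat_uptr_t pointer field with compat_ptr(), and translate 32-bit sentinels such as COMPAT_RLIM_INFINITY to their native equivalents. A minimal stand-alone sketch of that pattern; the types and macros below are illustrative stand-ins for the kernel's <linux/compat.h> machinery, not the real definitions:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t compat_uptr_t;               /* pointer as a 32-bit task stored it */
#define COMPAT_RLIM_INFINITY 0xffffffffUL     /* 32-bit "no limit" sentinel */
#define RLIM_INFINITY        (~0UL)           /* native "no limit" sentinel */

static void *compat_ptr(compat_uptr_t uptr)   /* widen 32-bit -> native pointer */
{
        return (void *)(uintptr_t)uptr;
}

struct limit_compat { uint32_t rlim_cur; };      /* layout the 32-bit task used */
struct limit        { unsigned long rlim_cur; }; /* native layout */

int main(void)
{
        struct limit_compat lc = { .rlim_cur = COMPAT_RLIM_INFINITY };
        struct limit l;

        /* the same translation copy_acl_subject_label_compat() applies per rlimit */
        l.rlim_cur = (lc.rlim_cur == COMPAT_RLIM_INFINITY) ? RLIM_INFINITY
                                                           : lc.rlim_cur;

        printf("native rlim_cur = %#lx\n", l.rlim_cur);
        printf("widened pointer = %p\n", compat_ptr(0x1000u));
        return 0;
}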
65376diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
65377new file mode 100644
65378index 0000000..a340c17
65379--- /dev/null
65380+++ b/grsecurity/gracl_fs.c
65381@@ -0,0 +1,431 @@
65382+#include <linux/kernel.h>
65383+#include <linux/sched.h>
65384+#include <linux/types.h>
65385+#include <linux/fs.h>
65386+#include <linux/file.h>
65387+#include <linux/stat.h>
65388+#include <linux/grsecurity.h>
65389+#include <linux/grinternal.h>
65390+#include <linux/gracl.h>
65391+
65392+umode_t
65393+gr_acl_umask(void)
65394+{
65395+ if (unlikely(!gr_acl_is_enabled()))
65396+ return 0;
65397+
65398+ return current->role->umask;
65399+}
65400+
65401+__u32
65402+gr_acl_handle_hidden_file(const struct dentry * dentry,
65403+ const struct vfsmount * mnt)
65404+{
65405+ __u32 mode;
65406+
65407+ if (unlikely(!dentry->d_inode))
65408+ return GR_FIND;
65409+
65410+ mode =
65411+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
65412+
65413+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
65414+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
65415+ return mode;
65416+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
65417+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
65418+ return 0;
65419+ } else if (unlikely(!(mode & GR_FIND)))
65420+ return 0;
65421+
65422+ return GR_FIND;
65423+}
65424+
65425+__u32
65426+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
65427+ int acc_mode)
65428+{
65429+ __u32 reqmode = GR_FIND;
65430+ __u32 mode;
65431+
65432+ if (unlikely(!dentry->d_inode))
65433+ return reqmode;
65434+
65435+ if (acc_mode & MAY_APPEND)
65436+ reqmode |= GR_APPEND;
65437+ else if (acc_mode & MAY_WRITE)
65438+ reqmode |= GR_WRITE;
65439+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
65440+ reqmode |= GR_READ;
65441+
65442+ mode =
65443+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
65444+ mnt);
65445+
65446+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
65447+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
65448+ reqmode & GR_READ ? " reading" : "",
65449+ reqmode & GR_WRITE ? " writing" : reqmode &
65450+ GR_APPEND ? " appending" : "");
65451+ return reqmode;
65452+ } else
65453+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
65454+ {
65455+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
65456+ reqmode & GR_READ ? " reading" : "",
65457+ reqmode & GR_WRITE ? " writing" : reqmode &
65458+ GR_APPEND ? " appending" : "");
65459+ return 0;
65460+ } else if (unlikely((mode & reqmode) != reqmode))
65461+ return 0;
65462+
65463+ return reqmode;
65464+}
65465+
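The reqmode derivation at the top of gr_acl_handle_open() deserves a close read: MAY_APPEND takes precedence over MAY_WRITE, so an append-only subject is never silently granted plain write, and MAY_READ only counts when the target is not a directory. A stand-alone sketch of just that derivation; the flag values are illustrative stand-ins, not the kernel's definitions:

#include <stdio.h>

#define MAY_WRITE  0x2
#define MAY_READ   0x4
#define MAY_APPEND 0x8

#define GR_FIND    0x01
#define GR_READ    0x02
#define GR_WRITE   0x04
#define GR_APPEND  0x08

static unsigned int open_reqmode(int acc_mode, int is_dir)
{
        unsigned int reqmode = GR_FIND;

        if (acc_mode & MAY_APPEND)            /* append wins over plain write */
                reqmode |= GR_APPEND;
        else if (acc_mode & MAY_WRITE)
                reqmode |= GR_WRITE;
        if ((acc_mode & MAY_READ) && !is_dir) /* opening a dir is not a "read" */
                reqmode |= GR_READ;
        return reqmode;
}

int main(void)
{
        printf("O_WRONLY|O_APPEND -> %#x\n", open_reqmode(MAY_WRITE | MAY_APPEND, 0));
        printf("O_RDONLY on a dir -> %#x\n", open_reqmode(MAY_READ, 1));
        return 0;
}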
65466+__u32
65467+gr_acl_handle_creat(const struct dentry * dentry,
65468+ const struct dentry * p_dentry,
65469+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
65470+ const int imode)
65471+{
65472+ __u32 reqmode = GR_WRITE | GR_CREATE;
65473+ __u32 mode;
65474+
65475+ if (acc_mode & MAY_APPEND)
65476+ reqmode |= GR_APPEND;
65477+ // if a directory was required or the directory already exists, then
65478+ // don't count this open as a read
65479+ if ((acc_mode & MAY_READ) &&
65480+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
65481+ reqmode |= GR_READ;
65482+ if ((open_flags & O_CREAT) &&
65483+ ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
65484+ reqmode |= GR_SETID;
65485+
65486+ mode =
65487+ gr_check_create(dentry, p_dentry, p_mnt,
65488+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
65489+
65490+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
65491+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
65492+ reqmode & GR_READ ? " reading" : "",
65493+ reqmode & GR_WRITE ? " writing" : reqmode &
65494+ GR_APPEND ? " appending" : "");
65495+ return reqmode;
65496+ } else
65497+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
65498+ {
65499+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
65500+ reqmode & GR_READ ? " reading" : "",
65501+ reqmode & GR_WRITE ? " writing" : reqmode &
65502+ GR_APPEND ? " appending" : "");
65503+ return 0;
65504+ } else if (unlikely((mode & reqmode) != reqmode))
65505+ return 0;
65506+
65507+ return reqmode;
65508+}
65509+
65510+__u32
65511+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
65512+ const int fmode)
65513+{
65514+ __u32 mode, reqmode = GR_FIND;
65515+
65516+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
65517+ reqmode |= GR_EXEC;
65518+ if (fmode & S_IWOTH)
65519+ reqmode |= GR_WRITE;
65520+ if (fmode & S_IROTH)
65521+ reqmode |= GR_READ;
65522+
65523+ mode =
65524+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
65525+ mnt);
65526+
65527+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
65528+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
65529+ reqmode & GR_READ ? " reading" : "",
65530+ reqmode & GR_WRITE ? " writing" : "",
65531+ reqmode & GR_EXEC ? " executing" : "");
65532+ return reqmode;
65533+ } else
65534+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
65535+ {
65536+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
65537+ reqmode & GR_READ ? " reading" : "",
65538+ reqmode & GR_WRITE ? " writing" : "",
65539+ reqmode & GR_EXEC ? " executing" : "");
65540+ return 0;
65541+ } else if (unlikely((mode & reqmode) != reqmode))
65542+ return 0;
65543+
65544+ return reqmode;
65545+}
65546+
65547+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
65548+{
65549+ __u32 mode;
65550+
65551+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
65552+
65553+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
65554+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
65555+ return mode;
65556+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
65557+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
65558+ return 0;
65559+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
65560+ return 0;
65561+
65562+ return (reqmode);
65563+}
65564+
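generic_fs_handler() condenses the three-way decision every handler in this file makes: a granted request carrying an audit bit is logged as a success, a denied request is logged unless the matching rule sets GR_SUPPRESS, and a suppressed denial fails silently. A reduced runnable model of that decision, with stand-in flag values:

#include <stdio.h>

#define GR_SUPPRESS 0x100
#define GR_AUDITS   0x200

static const char *decide(unsigned int mode, unsigned int reqmode)
{
        if (((mode & reqmode) == reqmode) && (mode & GR_AUDITS))
                return "allow, log success";
        if (((mode & reqmode) != reqmode) && !(mode & GR_SUPPRESS))
                return "deny, log denial";
        if ((mode & reqmode) != reqmode)
                return "deny silently";
        return "allow quietly";
}

int main(void)
{
        unsigned int req = 0x04;                      /* e.g. a write request */

        printf("%s\n", decide(req | GR_AUDITS, req)); /* allow, log success */
        printf("%s\n", decide(0, req));               /* deny, log denial   */
        printf("%s\n", decide(GR_SUPPRESS, req));     /* deny silently      */
        printf("%s\n", decide(req, req));             /* allow quietly      */
        return 0;
}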
65565+__u32
65566+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
65567+{
65568+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE, GR_RMDIR_ACL_MSG);
65569+}
65570+
65571+__u32
65572+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
65573+{
65574+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE, GR_UNLINK_ACL_MSG);
65575+}
65576+
65577+__u32
65578+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
65579+{
65580+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
65581+}
65582+
65583+__u32
65584+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
65585+{
65586+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
65587+}
65588+
65589+__u32
65590+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
65591+ umode_t *modeptr)
65592+{
65593+ umode_t mode;
65594+
65595+ *modeptr &= ~gr_acl_umask();
65596+ mode = *modeptr;
65597+
65598+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
65599+ return 1;
65600+
65601+ if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) &&
65602+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
65603+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
65604+ GR_CHMOD_ACL_MSG);
65605+ } else {
65606+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
65607+ }
65608+}
65609+
65610+__u32
65611+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
65612+{
65613+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
65614+}
65615+
65616+__u32
65617+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
65618+{
65619+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
65620+}
65621+
65622+__u32
65623+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
65624+{
65625+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
65626+}
65627+
65628+__u32
65629+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
65630+{
65631+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
65632+ GR_UNIXCONNECT_ACL_MSG);
65633+}
65634+
65635+/* hardlinks require at minimum create and link permission;
65636+ any additional privilege required is based on the
65637+ privilege of the file being linked to
65638+*/
65639+__u32
65640+gr_acl_handle_link(const struct dentry * new_dentry,
65641+ const struct dentry * parent_dentry,
65642+ const struct vfsmount * parent_mnt,
65643+ const struct dentry * old_dentry,
65644+ const struct vfsmount * old_mnt, const struct filename *to)
65645+{
65646+ __u32 mode;
65647+ __u32 needmode = GR_CREATE | GR_LINK;
65648+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
65649+
65650+ mode =
65651+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
65652+ old_mnt);
65653+
65654+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
65655+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
65656+ return mode;
65657+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
65658+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
65659+ return 0;
65660+ } else if (unlikely((mode & needmode) != needmode))
65661+ return 0;
65662+
65663+ return 1;
65664+}
65665+
65666+__u32
65667+gr_acl_handle_symlink(const struct dentry * new_dentry,
65668+ const struct dentry * parent_dentry,
65669+ const struct vfsmount * parent_mnt, const struct filename *from)
65670+{
65671+ __u32 needmode = GR_WRITE | GR_CREATE;
65672+ __u32 mode;
65673+
65674+ mode =
65675+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
65676+ GR_CREATE | GR_AUDIT_CREATE |
65677+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
65678+
65679+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
65680+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
65681+ return mode;
65682+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
65683+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
65684+ return 0;
65685+ } else if (unlikely((mode & needmode) != needmode))
65686+ return 0;
65687+
65688+ return (GR_WRITE | GR_CREATE);
65689+}
65690+
65691+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
65692+{
65693+ __u32 mode;
65694+
65695+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
65696+
65697+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
65698+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
65699+ return mode;
65700+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
65701+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
65702+ return 0;
65703+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
65704+ return 0;
65705+
65706+ return (reqmode);
65707+}
65708+
65709+__u32
65710+gr_acl_handle_mknod(const struct dentry * new_dentry,
65711+ const struct dentry * parent_dentry,
65712+ const struct vfsmount * parent_mnt,
65713+ const int mode)
65714+{
65715+ __u32 reqmode = GR_WRITE | GR_CREATE;
65716+ if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
65717+ reqmode |= GR_SETID;
65718+
65719+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
65720+ reqmode, GR_MKNOD_ACL_MSG);
65721+}
65722+
65723+__u32
65724+gr_acl_handle_mkdir(const struct dentry *new_dentry,
65725+ const struct dentry *parent_dentry,
65726+ const struct vfsmount *parent_mnt)
65727+{
65728+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
65729+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
65730+}
65731+
65732+#define RENAME_CHECK_SUCCESS(old, new) \
65733+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
65734+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
65735+
65736+int
65737+gr_acl_handle_rename(struct dentry *new_dentry,
65738+ struct dentry *parent_dentry,
65739+ const struct vfsmount *parent_mnt,
65740+ struct dentry *old_dentry,
65741+ struct inode *old_parent_inode,
65742+ struct vfsmount *old_mnt, const struct filename *newname)
65743+{
65744+ __u32 comp1, comp2;
65745+ int error = 0;
65746+
65747+ if (unlikely(!gr_acl_is_enabled()))
65748+ return 0;
65749+
65750+ if (!new_dentry->d_inode) {
65751+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
65752+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
65753+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
65754+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
65755+ GR_DELETE | GR_AUDIT_DELETE |
65756+ GR_AUDIT_READ | GR_AUDIT_WRITE |
65757+ GR_SUPPRESS, old_mnt);
65758+ } else {
65759+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
65760+ GR_CREATE | GR_DELETE |
65761+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
65762+ GR_AUDIT_READ | GR_AUDIT_WRITE |
65763+ GR_SUPPRESS, parent_mnt);
65764+ comp2 =
65765+ gr_search_file(old_dentry,
65766+ GR_READ | GR_WRITE | GR_AUDIT_READ |
65767+ GR_DELETE | GR_AUDIT_DELETE |
65768+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
65769+ }
65770+
65771+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
65772+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
65773+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
65774+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
65775+ && !(comp2 & GR_SUPPRESS)) {
65776+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
65777+ error = -EACCES;
65778+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
65779+ error = -EACCES;
65780+
65781+ return error;
65782+}
65783+
65784+void
65785+gr_acl_handle_exit(void)
65786+{
65787+ u16 id;
65788+ char *rolename;
65789+
65790+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
65791+ !(current->role->roletype & GR_ROLE_PERSIST))) {
65792+ id = current->acl_role_id;
65793+ rolename = current->role->rolename;
65794+ gr_set_acls(1);
65795+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
65796+ }
65797+
65798+ gr_put_exec_file(current);
65799+ return;
65800+}
65801+
65802+int
65803+gr_acl_handle_procpidmem(const struct task_struct *task)
65804+{
65805+ if (unlikely(!gr_acl_is_enabled()))
65806+ return 0;
65807+
65808+ if (task != current && task->acl->mode & GR_PROTPROCFD)
65809+ return -EACCES;
65810+
65811+ return 0;
65812+}
65813diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
65814new file mode 100644
65815index 0000000..8132048
65816--- /dev/null
65817+++ b/grsecurity/gracl_ip.c
65818@@ -0,0 +1,387 @@
65819+#include <linux/kernel.h>
65820+#include <asm/uaccess.h>
65821+#include <asm/errno.h>
65822+#include <net/sock.h>
65823+#include <linux/file.h>
65824+#include <linux/fs.h>
65825+#include <linux/net.h>
65826+#include <linux/in.h>
65827+#include <linux/skbuff.h>
65828+#include <linux/ip.h>
65829+#include <linux/udp.h>
65830+#include <linux/types.h>
65831+#include <linux/sched.h>
65832+#include <linux/netdevice.h>
65833+#include <linux/inetdevice.h>
65834+#include <linux/gracl.h>
65835+#include <linux/grsecurity.h>
65836+#include <linux/grinternal.h>
65837+
65838+#define GR_BIND 0x01
65839+#define GR_CONNECT 0x02
65840+#define GR_INVERT 0x04
65841+#define GR_BINDOVERRIDE 0x08
65842+#define GR_CONNECTOVERRIDE 0x10
65843+#define GR_SOCK_FAMILY 0x20
65844+
65845+static const char * gr_protocols[IPPROTO_MAX] = {
65846+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
65847+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
65848+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
65849+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
65850+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
65851+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
65852+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
65853+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
65854+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
65855+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
65856+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
65857+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
65858+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
65859+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
65860+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
65861+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
65862+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
65863+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
65864+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
65865+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
65866+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
65867+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
65868+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
65869+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
65870+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
65871+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
65872+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
65873+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
65874+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
65875+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
65876+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
65877+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
65878+ };
65879+
65880+static const char * gr_socktypes[SOCK_MAX] = {
65881+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
65882+ "unknown:7", "unknown:8", "unknown:9", "packet"
65883+ };
65884+
65885+static const char * gr_sockfamilies[AF_MAX+1] = {
65886+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
65887+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
65888+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
65889+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
65890+ };
65891+
65892+const char *
65893+gr_proto_to_name(unsigned char proto)
65894+{
65895+ return gr_protocols[proto];
65896+}
65897+
65898+const char *
65899+gr_socktype_to_name(unsigned char type)
65900+{
65901+ return gr_socktypes[type];
65902+}
65903+
65904+const char *
65905+gr_sockfamily_to_name(unsigned char family)
65906+{
65907+ return gr_sockfamilies[family];
65908+}
65909+
65910+int
65911+gr_search_socket(const int domain, const int type, const int protocol)
65912+{
65913+ struct acl_subject_label *curr;
65914+ const struct cred *cred = current_cred();
65915+
65916+ if (unlikely(!gr_acl_is_enabled()))
65917+ goto exit;
65918+
65919+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
65920+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
65921+ goto exit; // let the kernel handle it
65922+
65923+ curr = current->acl;
65924+
65925+ if (curr->sock_families[domain / 32] & (1U << (domain % 32))) {
65926+ /* the family is allowed; if this is PF_INET, allow it only if
65927+ the extra sock type/protocol checks pass */
65928+ if (domain == PF_INET)
65929+ goto inet_check;
65930+ goto exit;
65931+ } else {
65932+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
65933+ __u32 fakeip = 0;
65934+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
65935+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
65936+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
65937+ gr_to_filename(current->exec_file->f_path.dentry,
65938+ current->exec_file->f_path.mnt) :
65939+ curr->filename, curr->filename,
65940+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
65941+ &current->signal->saved_ip);
65942+ goto exit;
65943+ }
65944+ goto exit_fail;
65945+ }
65946+
65947+inet_check:
65948+ /* the rest of this checking is for IPv4 only */
65949+ if (!curr->ips)
65950+ goto exit;
65951+
65952+ if ((curr->ip_type & (1U << type)) &&
65953+ (curr->ip_proto[protocol / 32] & (1U << (protocol % 32))))
65954+ goto exit;
65955+
65956+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
65957+ /* we don't place acls on raw sockets, and sometimes
65958+ dgram/ip sockets are opened for ioctl and not
65959+ bind/connect, so we'll fake a bind learn log */
65960+ if (type == SOCK_RAW || type == SOCK_PACKET) {
65961+ __u32 fakeip = 0;
65962+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
65963+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
65964+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
65965+ gr_to_filename(current->exec_file->f_path.dentry,
65966+ current->exec_file->f_path.mnt) :
65967+ curr->filename, curr->filename,
65968+ &fakeip, 0, type,
65969+ protocol, GR_CONNECT, &current->signal->saved_ip);
65970+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
65971+ __u32 fakeip = 0;
65972+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
65973+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
65974+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
65975+ gr_to_filename(current->exec_file->f_path.dentry,
65976+ current->exec_file->f_path.mnt) :
65977+ curr->filename, curr->filename,
65978+ &fakeip, 0, type,
65979+ protocol, GR_BIND, &current->signal->saved_ip);
65980+ }
65981+ /* we'll log when they use connect or bind */
65982+ goto exit;
65983+ }
65984+
65985+exit_fail:
65986+ if (domain == PF_INET)
65987+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
65988+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
65989+ else
65990+#ifndef CONFIG_IPV6
65991+ if (domain != PF_INET6)
65992+#endif
65993+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
65994+ gr_socktype_to_name(type), protocol);
65995+
65996+ return 0;
65997+exit:
65998+ return 1;
65999+}
66000+
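The family and per-protocol checks in gr_search_socket() are plain bitmap membership tests: bit N of a __u32 array marks family or protocol N as allowed, so the full 256-entry protocol set costs only eight words per subject. A stand-alone sketch of the test, with illustrative values:

#include <stdint.h>
#include <stdio.h>

static int bit_set(const uint32_t *map, unsigned int n)
{
        return !!(map[n / 32] & (1U << (n % 32)));
}

int main(void)
{
        uint32_t families[8] = { 0 };

        families[2 / 32] |= 1U << (2 % 32);            /* allow PF_INET (2) */

        printf("PF_INET  allowed: %d\n", bit_set(families, 2));
        printf("PF_INET6 allowed: %d\n", bit_set(families, 10));
        return 0;
}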
66001+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
66002+{
66003+ if ((ip->mode & mode) &&
66004+ (ip_port >= ip->low) &&
66005+ (ip_port <= ip->high) &&
66006+ ((ntohl(ip_addr) & our_netmask) ==
66007+ (ntohl(our_addr) & our_netmask))
66008+ && (ip->proto[protocol / 32] & (1U << (protocol % 32)))
66009+ && (ip->type & (1U << type))) {
66010+ if (ip->mode & GR_INVERT)
66011+ return 2; // specifically denied
66012+ else
66013+ return 1; // allowed
66014+ }
66015+
66016+ return 0; // not specifically allowed, may continue parsing
66017+}
66018+
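The address comparison in check_ip_policy() masks both the candidate address and the rule's address before comparing, so a rule for 192.168.1.0/24 matches every host in that subnet while the port range and protocol/type bits narrow it further. A worked stand-alone example of the subnet test, using host-order constants for brevity (the kernel code converts with ntohl() first):

#include <stdint.h>
#include <stdio.h>

static int net_match(uint32_t addr, uint32_t rule_addr, uint32_t mask)
{
        return (addr & mask) == (rule_addr & mask);
}

int main(void)
{
        uint32_t rule = 0xC0A80100;   /* 192.168.1.0 */
        uint32_t mask = 0xFFFFFF00;   /* /24 */

        printf("192.168.1.42 matches: %d\n", net_match(0xC0A8012A, rule, mask));
        printf("192.168.2.42 matches: %d\n", net_match(0xC0A8022A, rule, mask));
        return 0;
}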
66019+static int
66020+gr_search_connectbind(const int full_mode, struct sock *sk,
66021+ struct sockaddr_in *addr, const int type)
66022+{
66023+ char iface[IFNAMSIZ] = {0};
66024+ struct acl_subject_label *curr;
66025+ struct acl_ip_label *ip;
66026+ struct inet_sock *isk;
66027+ struct net_device *dev;
66028+ struct in_device *idev;
66029+ unsigned long i;
66030+ int ret;
66031+ int mode = full_mode & (GR_BIND | GR_CONNECT);
66032+ __u32 ip_addr = 0;
66033+ __u32 our_addr;
66034+ __u32 our_netmask;
66035+ char *p;
66036+ __u16 ip_port = 0;
66037+ const struct cred *cred = current_cred();
66038+
66039+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
66040+ return 0;
66041+
66042+ curr = current->acl;
66043+ isk = inet_sk(sk);
66044+
66045+ /* INADDR_ANY overriding for binds; inaddr_any_override is already in network order */
66046+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
66047+ addr->sin_addr.s_addr = curr->inaddr_any_override;
66048+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
66049+ struct sockaddr_in saddr;
66050+ int err;
66051+
66052+ saddr.sin_family = AF_INET;
66053+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
66054+ saddr.sin_port = isk->inet_sport;
66055+
66056+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
66057+ if (err)
66058+ return err;
66059+
66060+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
66061+ if (err)
66062+ return err;
66063+ }
66064+
66065+ if (!curr->ips)
66066+ return 0;
66067+
66068+ ip_addr = addr->sin_addr.s_addr;
66069+ ip_port = ntohs(addr->sin_port);
66070+
66071+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
66072+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
66073+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
66074+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
66075+ gr_to_filename(current->exec_file->f_path.dentry,
66076+ current->exec_file->f_path.mnt) :
66077+ curr->filename, curr->filename,
66078+ &ip_addr, ip_port, type,
66079+ sk->sk_protocol, mode, &current->signal->saved_ip);
66080+ return 0;
66081+ }
66082+
66083+ for (i = 0; i < curr->ip_num; i++) {
66084+ ip = *(curr->ips + i);
66085+ if (ip->iface != NULL) {
66086+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
66087+ p = strchr(iface, ':');
66088+ if (p != NULL)
66089+ *p = '\0';
66090+ dev = dev_get_by_name(sock_net(sk), iface);
66091+ if (dev == NULL)
66092+ continue;
66093+ idev = in_dev_get(dev);
66094+ if (idev == NULL) {
66095+ dev_put(dev);
66096+ continue;
66097+ }
66098+ rcu_read_lock();
66099+ for_ifa(idev) {
66100+ if (!strcmp(ip->iface, ifa->ifa_label)) {
66101+ our_addr = ifa->ifa_address;
66102+ our_netmask = 0xffffffff;
66103+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
66104+ if (ret == 1) {
66105+ rcu_read_unlock();
66106+ in_dev_put(idev);
66107+ dev_put(dev);
66108+ return 0;
66109+ } else if (ret == 2) {
66110+ rcu_read_unlock();
66111+ in_dev_put(idev);
66112+ dev_put(dev);
66113+ goto denied;
66114+ }
66115+ }
66116+ } endfor_ifa(idev);
66117+ rcu_read_unlock();
66118+ in_dev_put(idev);
66119+ dev_put(dev);
66120+ } else {
66121+ our_addr = ip->addr;
66122+ our_netmask = ip->netmask;
66123+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
66124+ if (ret == 1)
66125+ return 0;
66126+ else if (ret == 2)
66127+ goto denied;
66128+ }
66129+ }
66130+
66131+denied:
66132+ if (mode == GR_BIND)
66133+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
66134+ else if (mode == GR_CONNECT)
66135+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
66136+
66137+ return -EACCES;
66138+}
66139+
66140+int
66141+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
66142+{
66143+ /* always allow disconnection of dgram sockets with connect */
66144+ if (addr->sin_family == AF_UNSPEC)
66145+ return 0;
66146+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
66147+}
66148+
66149+int
66150+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
66151+{
66152+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
66153+}
66154+
66155+int gr_search_listen(struct socket *sock)
66156+{
66157+ struct sock *sk = sock->sk;
66158+ struct sockaddr_in addr;
66159+
66160+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
66161+ addr.sin_port = inet_sk(sk)->inet_sport;
66162+
66163+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
66164+}
66165+
66166+int gr_search_accept(struct socket *sock)
66167+{
66168+ struct sock *sk = sock->sk;
66169+ struct sockaddr_in addr;
66170+
66171+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
66172+ addr.sin_port = inet_sk(sk)->inet_sport;
66173+
66174+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
66175+}
66176+
66177+int
66178+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
66179+{
66180+ if (addr)
66181+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
66182+ else {
66183+ struct sockaddr_in sin;
66184+ const struct inet_sock *inet = inet_sk(sk);
66185+
66186+ sin.sin_addr.s_addr = inet->inet_daddr;
66187+ sin.sin_port = inet->inet_dport;
66188+
66189+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
66190+ }
66191+}
66192+
66193+int
66194+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
66195+{
66196+ struct sockaddr_in sin;
66197+
66198+ if (unlikely(skb->len < sizeof (struct udphdr)))
66199+ return 0; // skip this packet
66200+
66201+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
66202+ sin.sin_port = udp_hdr(skb)->source;
66203+
66204+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
66205+}
66206diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
66207new file mode 100644
66208index 0000000..25f54ef
66209--- /dev/null
66210+++ b/grsecurity/gracl_learn.c
66211@@ -0,0 +1,207 @@
66212+#include <linux/kernel.h>
66213+#include <linux/mm.h>
66214+#include <linux/sched.h>
66215+#include <linux/poll.h>
66216+#include <linux/string.h>
66217+#include <linux/file.h>
66218+#include <linux/types.h>
66219+#include <linux/vmalloc.h>
66220+#include <linux/grinternal.h>
66221+
66222+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
66223+ size_t count, loff_t *ppos);
66224+extern int gr_acl_is_enabled(void);
66225+
66226+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
66227+static int gr_learn_attached;
66228+
66229+/* use a 512k buffer */
66230+#define LEARN_BUFFER_SIZE (512 * 1024)
66231+
66232+static DEFINE_SPINLOCK(gr_learn_lock);
66233+static DEFINE_MUTEX(gr_learn_user_mutex);
66234+
66235+/* we need to maintain two buffers, so that the reader context of grlearn
66236+ uses a mutex around the userspace copying, while the other kernel contexts
66237+ use a spinlock when copying into the buffer, since they cannot sleep
66238+*/
66239+static char *learn_buffer;
66240+static char *learn_buffer_user;
66241+static int learn_buffer_len;
66242+static int learn_buffer_user_len;
66243+
66244+static ssize_t
66245+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
66246+{
66247+ DECLARE_WAITQUEUE(wait, current);
66248+ ssize_t retval = 0;
66249+
66250+ add_wait_queue(&learn_wait, &wait);
66251+ set_current_state(TASK_INTERRUPTIBLE);
66252+ do {
66253+ mutex_lock(&gr_learn_user_mutex);
66254+ spin_lock(&gr_learn_lock);
66255+ if (learn_buffer_len)
66256+ break;
66257+ spin_unlock(&gr_learn_lock);
66258+ mutex_unlock(&gr_learn_user_mutex);
66259+ if (file->f_flags & O_NONBLOCK) {
66260+ retval = -EAGAIN;
66261+ goto out;
66262+ }
66263+ if (signal_pending(current)) {
66264+ retval = -ERESTARTSYS;
66265+ goto out;
66266+ }
66267+
66268+ schedule();
66269+ } while (1);
66270+
66271+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
66272+ learn_buffer_user_len = learn_buffer_len;
66273+ retval = learn_buffer_len;
66274+ learn_buffer_len = 0;
66275+
66276+ spin_unlock(&gr_learn_lock);
66277+
66278+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
66279+ retval = -EFAULT;
66280+
66281+ mutex_unlock(&gr_learn_user_mutex);
66282+out:
66283+ set_current_state(TASK_RUNNING);
66284+ remove_wait_queue(&learn_wait, &wait);
66285+ return retval;
66286+}
66287+
66288+static unsigned int
66289+poll_learn(struct file * file, poll_table * wait)
66290+{
66291+ poll_wait(file, &learn_wait, wait);
66292+
66293+ if (learn_buffer_len)
66294+ return (POLLIN | POLLRDNORM);
66295+
66296+ return 0;
66297+}
66298+
66299+void
66300+gr_clear_learn_entries(void)
66301+{
66302+ char *tmp;
66303+
66304+ mutex_lock(&gr_learn_user_mutex);
66305+ spin_lock(&gr_learn_lock);
66306+ tmp = learn_buffer;
66307+ learn_buffer = NULL;
66308+ spin_unlock(&gr_learn_lock);
66309+ if (tmp)
66310+ vfree(tmp);
66311+ if (learn_buffer_user != NULL) {
66312+ vfree(learn_buffer_user);
66313+ learn_buffer_user = NULL;
66314+ }
66315+ learn_buffer_len = 0;
66316+ mutex_unlock(&gr_learn_user_mutex);
66317+
66318+ return;
66319+}
66320+
66321+void
66322+gr_add_learn_entry(const char *fmt, ...)
66323+{
66324+ va_list args;
66325+ unsigned int len;
66326+
66327+ if (!gr_learn_attached)
66328+ return;
66329+
66330+ spin_lock(&gr_learn_lock);
66331+
66332+ /* leave a gap at the end so we know when it's "full" but don't have to
66333+ compute the exact length of the string we're trying to append
66334+ */
66335+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
66336+ spin_unlock(&gr_learn_lock);
66337+ wake_up_interruptible(&learn_wait);
66338+ return;
66339+ }
66340+ if (learn_buffer == NULL) {
66341+ spin_unlock(&gr_learn_lock);
66342+ return;
66343+ }
66344+
66345+ va_start(args, fmt);
66346+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
66347+ va_end(args);
66348+
66349+ learn_buffer_len += len + 1;
66350+
66351+ spin_unlock(&gr_learn_lock);
66352+ wake_up_interruptible(&learn_wait);
66353+
66354+ return;
66355+}
66356+
66357+static int
66358+open_learn(struct inode *inode, struct file *file)
66359+{
66360+ if (file->f_mode & FMODE_READ && gr_learn_attached)
66361+ return -EBUSY;
66362+ if (file->f_mode & FMODE_READ) {
66363+ int retval = 0;
66364+ mutex_lock(&gr_learn_user_mutex);
66365+ if (learn_buffer == NULL)
66366+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
66367+ if (learn_buffer_user == NULL)
66368+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
66369+ if (learn_buffer == NULL) {
66370+ retval = -ENOMEM;
66371+ goto out_error;
66372+ }
66373+ if (learn_buffer_user == NULL) {
66374+ retval = -ENOMEM;
66375+ goto out_error;
66376+ }
66377+ learn_buffer_len = 0;
66378+ learn_buffer_user_len = 0;
66379+ gr_learn_attached = 1;
66380+out_error:
66381+ mutex_unlock(&gr_learn_user_mutex);
66382+ return retval;
66383+ }
66384+ return 0;
66385+}
66386+
66387+static int
66388+close_learn(struct inode *inode, struct file *file)
66389+{
66390+ if (file->f_mode & FMODE_READ) {
66391+ char *tmp = NULL;
66392+ mutex_lock(&gr_learn_user_mutex);
66393+ spin_lock(&gr_learn_lock);
66394+ tmp = learn_buffer;
66395+ learn_buffer = NULL;
66396+ spin_unlock(&gr_learn_lock);
66397+ if (tmp)
66398+ vfree(tmp);
66399+ if (learn_buffer_user != NULL) {
66400+ vfree(learn_buffer_user);
66401+ learn_buffer_user = NULL;
66402+ }
66403+ learn_buffer_len = 0;
66404+ learn_buffer_user_len = 0;
66405+ gr_learn_attached = 0;
66406+ mutex_unlock(&gr_learn_user_mutex);
66407+ }
66408+
66409+ return 0;
66410+}
66411+
66412+const struct file_operations grsec_fops = {
66413+ .read = read_learn,
66414+ .write = write_grsec_handler,
66415+ .open = open_learn,
66416+ .release = close_learn,
66417+ .poll = poll_learn,
66418+};
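The learn interface above is a two-buffer producer/consumer handoff: gr_add_learn_entry() appends under the spinlock because its callers cannot sleep, while read_learn() copies the accumulated entries into learn_buffer_user under the mutex so the sleepable copy_to_user() runs with the spinlock already dropped. A reduced single-threaded model of the handoff; the locks are elided and the names are illustrative:

#include <stdio.h>
#include <string.h>

#define BUF_SZ 64

static char learn_buf[BUF_SZ];
static int  learn_len;

static void add_entry(const char *s)       /* producer side (spinlock there) */
{
        int n = snprintf(learn_buf + learn_len,
                         sizeof(learn_buf) - learn_len, "%s", s);
        learn_len += n + 1;                /* keep the NUL as a separator; the
                                              real code leaves a 16K gap rather
                                              than computing lengths exactly */
}

static int drain(char *user_side, int cap) /* reader side (mutex there) */
{
        int n = learn_len < cap ? learn_len : cap;

        memcpy(user_side, learn_buf, n);   /* stand-in for copy_to_user() */
        learn_len = 0;
        return n;
}

int main(void)
{
        char out[BUF_SZ];

        add_entry("subject-1");
        add_entry("subject-2");
        printf("drained %d bytes\n", drain(out, sizeof(out)));
        return 0;
}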
66419diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
66420new file mode 100644
66421index 0000000..39645c9
66422--- /dev/null
66423+++ b/grsecurity/gracl_res.c
66424@@ -0,0 +1,68 @@
66425+#include <linux/kernel.h>
66426+#include <linux/sched.h>
66427+#include <linux/gracl.h>
66428+#include <linux/grinternal.h>
66429+
66430+static const char *restab_log[] = {
66431+ [RLIMIT_CPU] = "RLIMIT_CPU",
66432+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
66433+ [RLIMIT_DATA] = "RLIMIT_DATA",
66434+ [RLIMIT_STACK] = "RLIMIT_STACK",
66435+ [RLIMIT_CORE] = "RLIMIT_CORE",
66436+ [RLIMIT_RSS] = "RLIMIT_RSS",
66437+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
66438+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
66439+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
66440+ [RLIMIT_AS] = "RLIMIT_AS",
66441+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
66442+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
66443+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
66444+ [RLIMIT_NICE] = "RLIMIT_NICE",
66445+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
66446+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
66447+ [GR_CRASH_RES] = "RLIMIT_CRASH"
66448+};
66449+
66450+void
66451+gr_log_resource(const struct task_struct *task,
66452+ const int res, const unsigned long wanted, const int gt)
66453+{
66454+ const struct cred *cred;
66455+ unsigned long rlim;
66456+
66457+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
66458+ return;
66459+
66460+ // not yet supported resource
66461+ if (unlikely(!restab_log[res]))
66462+ return;
66463+
66464+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
66465+ rlim = task_rlimit_max(task, res);
66466+ else
66467+ rlim = task_rlimit(task, res);
66468+
66469+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
66470+ return;
66471+
66472+ rcu_read_lock();
66473+ cred = __task_cred(task);
66474+
66475+ if (res == RLIMIT_NPROC &&
66476+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
66477+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
66478+ goto out_rcu_unlock;
66479+ else if (res == RLIMIT_MEMLOCK &&
66480+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
66481+ goto out_rcu_unlock;
66482+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
66483+ goto out_rcu_unlock;
66484+ rcu_read_unlock();
66485+
66486+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
66487+
66488+ return;
66489+out_rcu_unlock:
66490+ rcu_read_unlock();
66491+ return;
66492+}
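The early-return condition in gr_log_resource() inverts cleanly: nothing is logged against an infinite limit, and otherwise the gt flag selects whether the request must strictly exceed the limit or merely reach it. A stand-alone check of that inversion:

#include <stdio.h>

#define RLIM_INFINITY (~0UL)

static int should_log(unsigned long wanted, unsigned long rlim, int gt)
{
        if (rlim == RLIM_INFINITY)
                return 0;
        return gt ? wanted > rlim : wanted >= rlim;
}

int main(void)
{
        printf("%d\n", should_log(100, 100, 1));         /* 0: not strictly over */
        printf("%d\n", should_log(100, 100, 0));         /* 1: reached the limit */
        printf("%d\n", should_log(5, RLIM_INFINITY, 0)); /* 0: unlimited */
        return 0;
}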
66493diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
66494new file mode 100644
66495index 0000000..3c38bfe
66496--- /dev/null
66497+++ b/grsecurity/gracl_segv.c
66498@@ -0,0 +1,305 @@
66499+#include <linux/kernel.h>
66500+#include <linux/mm.h>
66501+#include <asm/uaccess.h>
66502+#include <asm/errno.h>
66503+#include <asm/mman.h>
66504+#include <net/sock.h>
66505+#include <linux/file.h>
66506+#include <linux/fs.h>
66507+#include <linux/net.h>
66508+#include <linux/in.h>
66509+#include <linux/slab.h>
66510+#include <linux/types.h>
66511+#include <linux/sched.h>
66512+#include <linux/timer.h>
66513+#include <linux/gracl.h>
66514+#include <linux/grsecurity.h>
66515+#include <linux/grinternal.h>
66516+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
66517+#include <linux/magic.h>
66518+#include <linux/pagemap.h>
66519+#include "../fs/btrfs/async-thread.h"
66520+#include "../fs/btrfs/ctree.h"
66521+#include "../fs/btrfs/btrfs_inode.h"
66522+#endif
66523+
66524+static struct crash_uid *uid_set;
66525+static unsigned short uid_used;
66526+static DEFINE_SPINLOCK(gr_uid_lock);
66527+extern rwlock_t gr_inode_lock;
66528+extern struct acl_subject_label *
66529+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
66530+ struct acl_role_label *role);
66531+
66532+static inline dev_t __get_dev(const struct dentry *dentry)
66533+{
66534+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
66535+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
66536+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
66537+ else
66538+#endif
66539+ return dentry->d_sb->s_dev;
66540+}
66541+
66542+int
66543+gr_init_uidset(void)
66544+{
66545+ uid_set =
66546+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
66547+ uid_used = 0;
66548+
66549+ return uid_set ? 1 : 0;
66550+}
66551+
66552+void
66553+gr_free_uidset(void)
66554+{
66555+ if (uid_set)
66556+ kfree(uid_set);
66557+
66558+ return;
66559+}
66560+
66561+int
66562+gr_find_uid(const uid_t uid)
66563+{
66564+ struct crash_uid *tmp = uid_set;
66565+ uid_t buid;
66566+ int low = 0, high = uid_used - 1, mid;
66567+
66568+ while (high >= low) {
66569+ mid = (low + high) >> 1;
66570+ buid = tmp[mid].uid;
66571+ if (buid == uid)
66572+ return mid;
66573+ if (buid > uid)
66574+ high = mid - 1;
66575+ if (buid < uid)
66576+ low = mid + 1;
66577+ }
66578+
66579+ return -1;
66580+}
66581+
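gr_find_uid() is a textbook binary search over uid_set, which gr_insertsort() below keeps ordered by uid so lookups on the crash-handling path stay O(log n). A stand-alone version of the same loop for illustration:

#include <stdio.h>

static int find_uid(const unsigned int *set, int used, unsigned int uid)
{
        int low = 0, high = used - 1;

        while (high >= low) {
                int mid = (low + high) >> 1;

                if (set[mid] == uid)
                        return mid;
                if (set[mid] > uid)
                        high = mid - 1;
                else
                        low = mid + 1;
        }
        return -1;
}

int main(void)
{
        unsigned int uids[] = { 33, 100, 1000, 65534 }; /* kept sorted */

        printf("%d\n", find_uid(uids, 4, 1000));        /*  2 */
        printf("%d\n", find_uid(uids, 4, 42));          /* -1 */
        return 0;
}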
66582+static __inline__ void
66583+gr_insertsort(void)
66584+{
66585+ unsigned short i, j;
66586+ struct crash_uid index;
66587+
66588+ for (i = 1; i < uid_used; i++) {
66589+ index = uid_set[i];
66590+ j = i;
66591+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
66592+ uid_set[j] = uid_set[j - 1];
66593+ j--;
66594+ }
66595+ uid_set[j] = index;
66596+ }
66597+
66598+ return;
66599+}
66600+
66601+static __inline__ void
66602+gr_insert_uid(const kuid_t kuid, const unsigned long expires)
66603+{
66604+ int loc;
66605+ uid_t uid = GR_GLOBAL_UID(kuid);
66606+
66607+ if (uid_used == GR_UIDTABLE_MAX)
66608+ return;
66609+
66610+ loc = gr_find_uid(uid);
66611+
66612+ if (loc >= 0) {
66613+ uid_set[loc].expires = expires;
66614+ return;
66615+ }
66616+
66617+ uid_set[uid_used].uid = uid;
66618+ uid_set[uid_used].expires = expires;
66619+ uid_used++;
66620+
66621+ gr_insertsort();
66622+
66623+ return;
66624+}
66625+
66626+void
66627+gr_remove_uid(const unsigned short loc)
66628+{
66629+ unsigned short i;
66630+
66631+ for (i = loc + 1; i < uid_used; i++)
66632+ uid_set[i - 1] = uid_set[i];
66633+
66634+ uid_used--;
66635+
66636+ return;
66637+}
66638+
66639+int
66640+gr_check_crash_uid(const kuid_t kuid)
66641+{
66642+ int loc;
66643+ int ret = 0;
66644+ uid_t uid;
66645+
66646+ if (unlikely(!gr_acl_is_enabled()))
66647+ return 0;
66648+
66649+ uid = GR_GLOBAL_UID(kuid);
66650+
66651+ spin_lock(&gr_uid_lock);
66652+ loc = gr_find_uid(uid);
66653+
66654+ if (loc < 0)
66655+ goto out_unlock;
66656+
66657+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
66658+ gr_remove_uid(loc);
66659+ else
66660+ ret = 1;
66661+
66662+out_unlock:
66663+ spin_unlock(&gr_uid_lock);
66664+ return ret;
66665+}
66666+
66667+static __inline__ int
66668+proc_is_setxid(const struct cred *cred)
66669+{
66670+ if (!uid_eq(cred->uid, cred->euid) || !uid_eq(cred->uid, cred->suid) ||
66671+ !uid_eq(cred->uid, cred->fsuid))
66672+ return 1;
66673+ if (!gid_eq(cred->gid, cred->egid) || !gid_eq(cred->gid, cred->sgid) ||
66674+ !gid_eq(cred->gid, cred->fsgid))
66675+ return 1;
66676+
66677+ return 0;
66678+}
66679+
66680+extern int gr_fake_force_sig(int sig, struct task_struct *t);
66681+
66682+void
66683+gr_handle_crash(struct task_struct *task, const int sig)
66684+{
66685+ struct acl_subject_label *curr;
66686+ struct task_struct *tsk, *tsk2;
66687+ const struct cred *cred;
66688+ const struct cred *cred2;
66689+
66690+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
66691+ return;
66692+
66693+ if (unlikely(!gr_acl_is_enabled()))
66694+ return;
66695+
66696+ curr = task->acl;
66697+
66698+ if (!(curr->resmask & (1U << GR_CRASH_RES)))
66699+ return;
66700+
66701+ if (time_before_eq(curr->expires, get_seconds())) {
66702+ curr->expires = 0;
66703+ curr->crashes = 0;
66704+ }
66705+
66706+ curr->crashes++;
66707+
66708+ if (!curr->expires)
66709+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
66710+
66711+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
66712+ time_after(curr->expires, get_seconds())) {
66713+ rcu_read_lock();
66714+ cred = __task_cred(task);
66715+ if (gr_is_global_nonroot(cred->uid) && proc_is_setxid(cred)) {
66716+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
66717+ spin_lock(&gr_uid_lock);
66718+ gr_insert_uid(cred->uid, curr->expires);
66719+ spin_unlock(&gr_uid_lock);
66720+ curr->expires = 0;
66721+ curr->crashes = 0;
66722+ read_lock(&tasklist_lock);
66723+ do_each_thread(tsk2, tsk) {
66724+ cred2 = __task_cred(tsk);
66725+ if (tsk != task && uid_eq(cred2->uid, cred->uid))
66726+ gr_fake_force_sig(SIGKILL, tsk);
66727+ } while_each_thread(tsk2, tsk);
66728+ read_unlock(&tasklist_lock);
66729+ } else {
66730+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
66731+ read_lock(&tasklist_lock);
66732+ read_lock(&grsec_exec_file_lock);
66733+ do_each_thread(tsk2, tsk) {
66734+ if (likely(tsk != task)) {
66735+ // if this thread has the same subject as the one that triggered
66736+ // RES_CRASH and it's the same binary, kill it
66737+ if (tsk->acl == task->acl && gr_is_same_file(tsk->exec_file, task->exec_file))
66738+ gr_fake_force_sig(SIGKILL, tsk);
66739+ }
66740+ } while_each_thread(tsk2, tsk);
66741+ read_unlock(&grsec_exec_file_lock);
66742+ read_unlock(&tasklist_lock);
66743+ }
66744+ rcu_read_unlock();
66745+ }
66746+
66747+ return;
66748+}
66749+
66750+int
66751+gr_check_crash_exec(const struct file *filp)
66752+{
66753+ struct acl_subject_label *curr;
66754+
66755+ if (unlikely(!gr_acl_is_enabled()))
66756+ return 0;
66757+
66758+ read_lock(&gr_inode_lock);
66759+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
66760+ __get_dev(filp->f_path.dentry),
66761+ current->role);
66762+ read_unlock(&gr_inode_lock);
66763+
66764+ if (!curr || !(curr->resmask & (1U << GR_CRASH_RES)) ||
66765+ (!curr->crashes && !curr->expires))
66766+ return 0;
66767+
66768+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
66769+ time_after(curr->expires, get_seconds()))
66770+ return 1;
66771+ else if (time_before_eq(curr->expires, get_seconds())) {
66772+ curr->crashes = 0;
66773+ curr->expires = 0;
66774+ }
66775+
66776+ return 0;
66777+}
66778+
66779+void
66780+gr_handle_alertkill(struct task_struct *task)
66781+{
66782+ struct acl_subject_label *curracl;
66783+ __u32 curr_ip;
66784+ struct task_struct *p, *p2;
66785+
66786+ if (unlikely(!gr_acl_is_enabled()))
66787+ return;
66788+
66789+ curracl = task->acl;
66790+ curr_ip = task->signal->curr_ip;
66791+
66792+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
66793+ read_lock(&tasklist_lock);
66794+ do_each_thread(p2, p) {
66795+ if (p->signal->curr_ip == curr_ip)
66796+ gr_fake_force_sig(SIGKILL, p);
66797+ } while_each_thread(p2, p);
66798+ read_unlock(&tasklist_lock);
66799+ } else if (curracl->mode & GR_KILLPROC)
66800+ gr_fake_force_sig(SIGKILL, task);
66801+
66802+ return;
66803+}
66804diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
66805new file mode 100644
66806index 0000000..98011b0
66807--- /dev/null
66808+++ b/grsecurity/gracl_shm.c
66809@@ -0,0 +1,40 @@
66810+#include <linux/kernel.h>
66811+#include <linux/mm.h>
66812+#include <linux/sched.h>
66813+#include <linux/file.h>
66814+#include <linux/ipc.h>
66815+#include <linux/gracl.h>
66816+#include <linux/grsecurity.h>
66817+#include <linux/grinternal.h>
66818+
66819+int
66820+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
66821+ const time_t shm_createtime, const kuid_t cuid, const int shmid)
66822+{
66823+ struct task_struct *task;
66824+
66825+ if (!gr_acl_is_enabled())
66826+ return 1;
66827+
66828+ rcu_read_lock();
66829+ read_lock(&tasklist_lock);
66830+
66831+ task = find_task_by_vpid(shm_cprid);
66832+
66833+ if (unlikely(!task))
66834+ task = find_task_by_vpid(shm_lapid);
66835+
66836+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
66837+ (task_pid_nr(task) == shm_lapid)) &&
66838+ (task->acl->mode & GR_PROTSHM) &&
66839+ (task->acl != current->acl))) {
66840+ read_unlock(&tasklist_lock);
66841+ rcu_read_unlock();
66842+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, GR_GLOBAL_UID(cuid), shm_cprid, shmid);
66843+ return 0;
66844+ }
66845+ read_unlock(&tasklist_lock);
66846+ rcu_read_unlock();
66847+
66848+ return 1;
66849+}
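The time_before_eq() test in gr_handle_shmat() is a pid-reuse guard: if the task found under the creator pid started after the segment was created, that pid has been recycled and the task cannot be the segment's real creator. A stand-alone sketch of the heuristic:

#include <stdio.h>
#include <time.h>

static int plausible_creator(time_t task_start, time_t shm_create)
{
        return task_start <= shm_create;  /* task predates the segment */
}

int main(void)
{
        printf("%d\n", plausible_creator(1000, 2000)); /* 1: could be creator */
        printf("%d\n", plausible_creator(3000, 2000)); /* 0: pid was reused   */
        return 0;
}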
66850diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
66851new file mode 100644
66852index 0000000..bc0be01
66853--- /dev/null
66854+++ b/grsecurity/grsec_chdir.c
66855@@ -0,0 +1,19 @@
66856+#include <linux/kernel.h>
66857+#include <linux/sched.h>
66858+#include <linux/fs.h>
66859+#include <linux/file.h>
66860+#include <linux/grsecurity.h>
66861+#include <linux/grinternal.h>
66862+
66863+void
66864+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
66865+{
66866+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
66867+ if ((grsec_enable_chdir && grsec_enable_group &&
66868+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
66869+ !grsec_enable_group)) {
66870+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
66871+ }
66872+#endif
66873+ return;
66874+}
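The gate in gr_log_chdir() reduces to: log if and only if chdir auditing is enabled and either group filtering is off or the task belongs to the audit group. A quick truth-table check of that simplification:

#include <stdio.h>

static int should_log(int enable, int group_filter, int in_group)
{
        return enable && (!group_filter || in_group);
}

int main(void)
{
        printf("%d\n", should_log(1, 1, 1)); /* 1: enabled, in audit group */
        printf("%d\n", should_log(1, 1, 0)); /* 0: filtered out by group   */
        printf("%d\n", should_log(1, 0, 0)); /* 1: no group filtering      */
        printf("%d\n", should_log(0, 1, 1)); /* 0: auditing disabled       */
        return 0;
}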
66875diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
66876new file mode 100644
66877index 0000000..bd6e105
66878--- /dev/null
66879+++ b/grsecurity/grsec_chroot.c
66880@@ -0,0 +1,370 @@
66881+#include <linux/kernel.h>
66882+#include <linux/module.h>
66883+#include <linux/sched.h>
66884+#include <linux/file.h>
66885+#include <linux/fs.h>
66886+#include <linux/mount.h>
66887+#include <linux/types.h>
66888+#include "../fs/mount.h"
66889+#include <linux/grsecurity.h>
66890+#include <linux/grinternal.h>
66891+
66892+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
66893+static int gr_init_ran;
66894+#endif
66895+
66896+void gr_set_chroot_entries(struct task_struct *task, const struct path *path)
66897+{
66898+#ifdef CONFIG_GRKERNSEC
66899+ if (task_pid_nr(task) > 1 && path->dentry != init_task.fs->root.dentry &&
66900+ path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root
66901+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
66902+ && gr_init_ran
66903+#endif
66904+ )
66905+ task->gr_is_chrooted = 1;
66906+ else {
66907+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
66908+ if (task_pid_nr(task) == 1 && !gr_init_ran)
66909+ gr_init_ran = 1;
66910+#endif
66911+ task->gr_is_chrooted = 0;
66912+ }
66913+
66914+ task->gr_chroot_dentry = path->dentry;
66915+#endif
66916+ return;
66917+}
66918+
66919+void gr_clear_chroot_entries(struct task_struct *task)
66920+{
66921+#ifdef CONFIG_GRKERNSEC
66922+ task->gr_is_chrooted = 0;
66923+ task->gr_chroot_dentry = NULL;
66924+#endif
66925+ return;
66926+}
66927+
66928+int
66929+gr_handle_chroot_unix(const pid_t pid)
66930+{
66931+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
66932+ struct task_struct *p;
66933+
66934+ if (unlikely(!grsec_enable_chroot_unix))
66935+ return 1;
66936+
66937+ if (likely(!proc_is_chrooted(current)))
66938+ return 1;
66939+
66940+ rcu_read_lock();
66941+ read_lock(&tasklist_lock);
66942+ p = find_task_by_vpid_unrestricted(pid);
66943+ if (unlikely(p && !have_same_root(current, p))) {
66944+ read_unlock(&tasklist_lock);
66945+ rcu_read_unlock();
66946+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
66947+ return 0;
66948+ }
66949+ read_unlock(&tasklist_lock);
66950+ rcu_read_unlock();
66951+#endif
66952+ return 1;
66953+}
66954+
66955+int
66956+gr_handle_chroot_nice(void)
66957+{
66958+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
66959+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
66960+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
66961+ return -EPERM;
66962+ }
66963+#endif
66964+ return 0;
66965+}
66966+
66967+int
66968+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
66969+{
66970+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
66971+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
66972+ && proc_is_chrooted(current)) {
66973+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, task_pid_nr(p));
66974+ return -EACCES;
66975+ }
66976+#endif
66977+ return 0;
66978+}
66979+
66980+int
66981+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
66982+{
66983+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
66984+ struct task_struct *p;
66985+ int ret = 0;
66986+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
66987+ return ret;
66988+
66989+ read_lock(&tasklist_lock);
66990+ do_each_pid_task(pid, type, p) {
66991+ if (!have_same_root(current, p)) {
66992+ ret = 1;
66993+ goto out;
66994+ }
66995+ } while_each_pid_task(pid, type, p);
66996+out:
66997+ read_unlock(&tasklist_lock);
66998+ return ret;
66999+#endif
67000+ return 0;
67001+}
67002+
67003+int
67004+gr_pid_is_chrooted(struct task_struct *p)
67005+{
67006+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
67007+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
67008+ return 0;
67009+
67010+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
67011+ !have_same_root(current, p)) {
67012+ return 1;
67013+ }
67014+#endif
67015+ return 0;
67016+}
67017+
67018+EXPORT_SYMBOL(gr_pid_is_chrooted);
67019+
67020+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
67021+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
67022+{
67023+ struct path path, currentroot;
67024+ int ret = 0;
67025+
67026+ path.dentry = (struct dentry *)u_dentry;
67027+ path.mnt = (struct vfsmount *)u_mnt;
67028+ get_fs_root(current->fs, &currentroot);
67029+ if (path_is_under(&path, &currentroot))
67030+ ret = 1;
67031+ path_put(&currentroot);
67032+
67033+ return ret;
67034+}
67035+#endif
67036+
67037+int
67038+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
67039+{
67040+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
67041+ if (!grsec_enable_chroot_fchdir)
67042+ return 1;
67043+
67044+ if (!proc_is_chrooted(current))
67045+ return 1;
67046+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
67047+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
67048+ return 0;
67049+ }
67050+#endif
67051+ return 1;
67052+}
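
gr_is_outside_chroot() and this fchdir hook close the textbook chroot escape: keep a directory fd from outside a new, deeper root, fchdir() back through it, walk up with "..", then chroot(".") onto the real root. A minimal sketch of that break-out pattern (requires root inside an existing chroot; on a kernel with CHROOT_FCHDIR and CHROOT_DOUBLE enabled the fchdir() and second chroot() fail instead):

/* sketch: the classic fd-based chroot break-out these hooks defeat */
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	int i, fd;

	fd = open(".", O_RDONLY | O_DIRECTORY);	/* fd kept across chroot */
	if (fd < 0 || mkdir("jail", 0700) < 0 || chroot("jail") < 0 ||
	    chdir("/") < 0) {
		perror("setup");
		return 1;
	}
	if (fchdir(fd) < 0) {		/* refused by CHROOT_FCHDIR */
		perror("fchdir");
		return 1;
	}
	for (i = 0; i < 64; i++)
		if (chdir(".."))
			break;
	if (chroot(".") == 0)		/* refused by CHROOT_DOUBLE */
		execl("/bin/sh", "sh", (char *)NULL);
	perror("escape failed");
	return 1;
}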
67053+
67054+int
67055+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
67056+ const time_t shm_createtime)
67057+{
67058+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
67059+ struct task_struct *p;
67060+ time_t starttime;
67061+
67062+ if (unlikely(!grsec_enable_chroot_shmat))
67063+ return 1;
67064+
67065+ if (likely(!proc_is_chrooted(current)))
67066+ return 1;
67067+
67068+ rcu_read_lock();
67069+ read_lock(&tasklist_lock);
67070+
67071+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
67072+ starttime = p->start_time.tv_sec;
67073+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
67074+ if (have_same_root(current, p)) {
67075+ goto allow;
67076+ } else {
67077+ read_unlock(&tasklist_lock);
67078+ rcu_read_unlock();
67079+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
67080+ return 0;
67081+ }
67082+ }
67083+ /* creator exited, pid reuse, fall through to next check */
67084+ }
67085+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
67086+ if (unlikely(!have_same_root(current, p))) {
67087+ read_unlock(&tasklist_lock);
67088+ rcu_read_unlock();
67089+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
67090+ return 0;
67091+ }
67092+ }
67093+
67094+allow:
67095+ read_unlock(&tasklist_lock);
67096+ rcu_read_unlock();
67097+#endif
67098+ return 1;
67099+}
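
The shmat check trusts the recorded creator PID only if the task it currently resolves to was already running when the segment was created; otherwise the PID has been recycled and the last-attach PID is consulted instead. The window comparison, restated as a stand-alone function using the wrap-safe arithmetic behind the kernel's time_before_eq():

/* sketch: was the task running before the segment was created? */
#include <stdio.h>

static int creator_is_plausible(unsigned long task_start,
				unsigned long shm_createtime)
{
	/* mirrors time_before_eq(starttime, shm_createtime) */
	return (long)(task_start - shm_createtime) <= 0;
}

int main(void)
{
	printf("%d\n", creator_is_plausible(100, 150)); /* 1: started before */
	printf("%d\n", creator_is_plausible(200, 150)); /* 0: PID was reused */
	return 0;
}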
67100+
67101+void
67102+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
67103+{
67104+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
67105+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
67106+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
67107+#endif
67108+ return;
67109+}
67110+
67111+int
67112+gr_handle_chroot_mknod(const struct dentry *dentry,
67113+ const struct vfsmount *mnt, const int mode)
67114+{
67115+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
67116+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
67117+ proc_is_chrooted(current)) {
67118+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
67119+ return -EPERM;
67120+ }
67121+#endif
67122+ return 0;
67123+}
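
CHROOT_MKNOD permits FIFOs and regular files but refuses device nodes inside a chroot, so a jailed root cannot mint a fresh /dev/sda or /dev/mem. A small demonstration (run as root inside a chroot; the device numbers of /dev/null serve only as an example):

/* sketch: device nodes are refused in a chroot, FIFOs still work */
#include <stdio.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>

int main(void)
{
	if (mknod("null", S_IFCHR | 0600, makedev(1, 3)) < 0)
		perror("mknod char dev (expected EPERM in a chroot)");
	if (mknod("fifo", S_IFIFO | 0600, 0) < 0)	/* FIFOs stay allowed */
		perror("mkfifo");
	return 0;
}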
67124+
67125+int
67126+gr_handle_chroot_mount(const struct dentry *dentry,
67127+ const struct vfsmount *mnt, const char *dev_name)
67128+{
67129+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
67130+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
67131+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
67132+ return -EPERM;
67133+ }
67134+#endif
67135+ return 0;
67136+}
67137+
67138+int
67139+gr_handle_chroot_pivot(void)
67140+{
67141+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
67142+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
67143+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
67144+ return -EPERM;
67145+ }
67146+#endif
67147+ return 0;
67148+}
67149+
67150+int
67151+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
67152+{
67153+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
67154+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
67155+ !gr_is_outside_chroot(dentry, mnt)) {
67156+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
67157+ return -EPERM;
67158+ }
67159+#endif
67160+ return 0;
67161+}
67162+
67163+extern const char *captab_log[];
67164+extern int captab_log_entries;
67165+
67166+int
67167+gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
67168+{
67169+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
67170+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
67171+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
67172+ if (cap_raised(chroot_caps, cap)) {
67173+ if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
67174+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
67175+ }
67176+ return 0;
67177+ }
67178+ }
67179+#endif
67180+ return 1;
67181+}
67182+
67183+int
67184+gr_chroot_is_capable(const int cap)
67185+{
67186+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
67187+ return gr_task_chroot_is_capable(current, current_cred(), cap);
67188+#endif
67189+ return 1;
67190+}
67191+
67192+int
67193+gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
67194+{
67195+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
67196+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
67197+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
67198+ if (cap_raised(chroot_caps, cap)) {
67199+ return 0;
67200+ }
67201+ }
67202+#endif
67203+ return 1;
67204+}
67205+
67206+int
67207+gr_chroot_is_capable_nolog(const int cap)
67208+{
67209+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
67210+ return gr_task_chroot_is_capable_nolog(current, cap);
67211+#endif
67212+ return 1;
67213+}
67214+
67215+int
67216+gr_handle_chroot_sysctl(const int op)
67217+{
67218+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
67219+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
67220+ proc_is_chrooted(current))
67221+ return -EACCES;
67222+#endif
67223+ return 0;
67224+}
67225+
67226+void
67227+gr_handle_chroot_chdir(const struct path *path)
67228+{
67229+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
67230+ if (grsec_enable_chroot_chdir)
67231+ set_fs_pwd(current->fs, path);
67232+#endif
67233+ return;
67234+}
67235+
67236+int
67237+gr_handle_chroot_chmod(const struct dentry *dentry,
67238+ const struct vfsmount *mnt, const int mode)
67239+{
67240+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
67241+ /* allow chmod +s on directories, but not files */
67242+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
67243+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
67244+ proc_is_chrooted(current)) {
67245+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
67246+ return -EPERM;
67247+ }
67248+#endif
67249+ return 0;
67250+}
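
The mode test above is worth restating: setuid always trips it, while setgid counts only together with group-execute, because setgid without group-execute denotes mandatory locking rather than a privilege grant. As a stand-alone predicate:

/* sketch: which chmod modes count as privileged on a non-directory */
#include <stdio.h>
#include <sys/stat.h>

static int is_priv_mode(int mode)
{
	return (mode & S_ISUID) ||
	       ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP));
}

int main(void)
{
	printf("%d\n", is_priv_mode(04755));	/* 1: setuid */
	printf("%d\n", is_priv_mode(02755));	/* 1: setgid + group exec */
	printf("%d\n", is_priv_mode(02644));	/* 0: mandatory-locking bit */
	return 0;
}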
67251diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
67252new file mode 100644
67253index 0000000..ce65ceb
67254--- /dev/null
67255+++ b/grsecurity/grsec_disabled.c
67256@@ -0,0 +1,434 @@
67257+#include <linux/kernel.h>
67258+#include <linux/module.h>
67259+#include <linux/sched.h>
67260+#include <linux/file.h>
67261+#include <linux/fs.h>
67262+#include <linux/kdev_t.h>
67263+#include <linux/net.h>
67264+#include <linux/in.h>
67265+#include <linux/ip.h>
67266+#include <linux/skbuff.h>
67267+#include <linux/sysctl.h>
67268+
67269+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
67270+void
67271+pax_set_initial_flags(struct linux_binprm *bprm)
67272+{
67273+ return;
67274+}
67275+#endif
67276+
67277+#ifdef CONFIG_SYSCTL
67278+__u32
67279+gr_handle_sysctl(const struct ctl_table * table, const int op)
67280+{
67281+ return 0;
67282+}
67283+#endif
67284+
67285+#ifdef CONFIG_TASKSTATS
67286+int gr_is_taskstats_denied(int pid)
67287+{
67288+ return 0;
67289+}
67290+#endif
67291+
67292+int
67293+gr_acl_is_enabled(void)
67294+{
67295+ return 0;
67296+}
67297+
67298+void
67299+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
67300+{
67301+ return;
67302+}
67303+
67304+int
67305+gr_handle_rawio(const struct inode *inode)
67306+{
67307+ return 0;
67308+}
67309+
67310+void
67311+gr_acl_handle_psacct(struct task_struct *task, const long code)
67312+{
67313+ return;
67314+}
67315+
67316+int
67317+gr_handle_ptrace(struct task_struct *task, const long request)
67318+{
67319+ return 0;
67320+}
67321+
67322+int
67323+gr_handle_proc_ptrace(struct task_struct *task)
67324+{
67325+ return 0;
67326+}
67327+
67328+int
67329+gr_set_acls(const int type)
67330+{
67331+ return 0;
67332+}
67333+
67334+int
67335+gr_check_hidden_task(const struct task_struct *tsk)
67336+{
67337+ return 0;
67338+}
67339+
67340+int
67341+gr_check_protected_task(const struct task_struct *task)
67342+{
67343+ return 0;
67344+}
67345+
67346+int
67347+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
67348+{
67349+ return 0;
67350+}
67351+
67352+void
67353+gr_copy_label(struct task_struct *tsk)
67354+{
67355+ return;
67356+}
67357+
67358+void
67359+gr_set_pax_flags(struct task_struct *task)
67360+{
67361+ return;
67362+}
67363+
67364+int
67365+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
67366+ const int unsafe_share)
67367+{
67368+ return 0;
67369+}
67370+
67371+void
67372+gr_handle_delete(const ino_t ino, const dev_t dev)
67373+{
67374+ return;
67375+}
67376+
67377+void
67378+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
67379+{
67380+ return;
67381+}
67382+
67383+void
67384+gr_handle_crash(struct task_struct *task, const int sig)
67385+{
67386+ return;
67387+}
67388+
67389+int
67390+gr_check_crash_exec(const struct file *filp)
67391+{
67392+ return 0;
67393+}
67394+
67395+int
67396+gr_check_crash_uid(const kuid_t uid)
67397+{
67398+ return 0;
67399+}
67400+
67401+void
67402+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
67403+ struct dentry *old_dentry,
67404+ struct dentry *new_dentry,
67405+ struct vfsmount *mnt, const __u8 replace)
67406+{
67407+ return;
67408+}
67409+
67410+int
67411+gr_search_socket(const int family, const int type, const int protocol)
67412+{
67413+ return 1;
67414+}
67415+
67416+int
67417+gr_search_connectbind(const int mode, const struct socket *sock,
67418+ const struct sockaddr_in *addr)
67419+{
67420+ return 0;
67421+}
67422+
67423+void
67424+gr_handle_alertkill(struct task_struct *task)
67425+{
67426+ return;
67427+}
67428+
67429+__u32
67430+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
67431+{
67432+ return 1;
67433+}
67434+
67435+__u32
67436+gr_acl_handle_hidden_file(const struct dentry * dentry,
67437+ const struct vfsmount * mnt)
67438+{
67439+ return 1;
67440+}
67441+
67442+__u32
67443+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
67444+ int acc_mode)
67445+{
67446+ return 1;
67447+}
67448+
67449+__u32
67450+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
67451+{
67452+ return 1;
67453+}
67454+
67455+__u32
67456+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
67457+{
67458+ return 1;
67459+}
67460+
67461+int
67462+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
67463+ unsigned int *vm_flags)
67464+{
67465+ return 1;
67466+}
67467+
67468+__u32
67469+gr_acl_handle_truncate(const struct dentry * dentry,
67470+ const struct vfsmount * mnt)
67471+{
67472+ return 1;
67473+}
67474+
67475+__u32
67476+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
67477+{
67478+ return 1;
67479+}
67480+
67481+__u32
67482+gr_acl_handle_access(const struct dentry * dentry,
67483+ const struct vfsmount * mnt, const int fmode)
67484+{
67485+ return 1;
67486+}
67487+
67488+__u32
67489+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
67490+ umode_t *mode)
67491+{
67492+ return 1;
67493+}
67494+
67495+__u32
67496+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
67497+{
67498+ return 1;
67499+}
67500+
67501+__u32
67502+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
67503+{
67504+ return 1;
67505+}
67506+
67507+void
67508+grsecurity_init(void)
67509+{
67510+ return;
67511+}
67512+
67513+umode_t gr_acl_umask(void)
67514+{
67515+ return 0;
67516+}
67517+
67518+__u32
67519+gr_acl_handle_mknod(const struct dentry * new_dentry,
67520+ const struct dentry * parent_dentry,
67521+ const struct vfsmount * parent_mnt,
67522+ const int mode)
67523+{
67524+ return 1;
67525+}
67526+
67527+__u32
67528+gr_acl_handle_mkdir(const struct dentry * new_dentry,
67529+ const struct dentry * parent_dentry,
67530+ const struct vfsmount * parent_mnt)
67531+{
67532+ return 1;
67533+}
67534+
67535+__u32
67536+gr_acl_handle_symlink(const struct dentry * new_dentry,
67537+ const struct dentry * parent_dentry,
67538+ const struct vfsmount * parent_mnt, const struct filename *from)
67539+{
67540+ return 1;
67541+}
67542+
67543+__u32
67544+gr_acl_handle_link(const struct dentry * new_dentry,
67545+ const struct dentry * parent_dentry,
67546+ const struct vfsmount * parent_mnt,
67547+ const struct dentry * old_dentry,
67548+ const struct vfsmount * old_mnt, const struct filename *to)
67549+{
67550+ return 1;
67551+}
67552+
67553+int
67554+gr_acl_handle_rename(const struct dentry *new_dentry,
67555+ const struct dentry *parent_dentry,
67556+ const struct vfsmount *parent_mnt,
67557+ const struct dentry *old_dentry,
67558+ const struct inode *old_parent_inode,
67559+ const struct vfsmount *old_mnt, const struct filename *newname)
67560+{
67561+ return 0;
67562+}
67563+
67564+int
67565+gr_acl_handle_filldir(const struct file *file, const char *name,
67566+ const int namelen, const ino_t ino)
67567+{
67568+ return 1;
67569+}
67570+
67571+int
67572+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
67573+ const time_t shm_createtime, const kuid_t cuid, const int shmid)
67574+{
67575+ return 1;
67576+}
67577+
67578+int
67579+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
67580+{
67581+ return 0;
67582+}
67583+
67584+int
67585+gr_search_accept(const struct socket *sock)
67586+{
67587+ return 0;
67588+}
67589+
67590+int
67591+gr_search_listen(const struct socket *sock)
67592+{
67593+ return 0;
67594+}
67595+
67596+int
67597+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
67598+{
67599+ return 0;
67600+}
67601+
67602+__u32
67603+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
67604+{
67605+ return 1;
67606+}
67607+
67608+__u32
67609+gr_acl_handle_creat(const struct dentry * dentry,
67610+ const struct dentry * p_dentry,
67611+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
67612+ const int imode)
67613+{
67614+ return 1;
67615+}
67616+
67617+void
67618+gr_acl_handle_exit(void)
67619+{
67620+ return;
67621+}
67622+
67623+int
67624+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
67625+{
67626+ return 1;
67627+}
67628+
67629+void
67630+gr_set_role_label(const kuid_t uid, const kgid_t gid)
67631+{
67632+ return;
67633+}
67634+
67635+int
67636+gr_acl_handle_procpidmem(const struct task_struct *task)
67637+{
67638+ return 0;
67639+}
67640+
67641+int
67642+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
67643+{
67644+ return 0;
67645+}
67646+
67647+int
67648+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
67649+{
67650+ return 0;
67651+}
67652+
67653+void
67654+gr_set_kernel_label(struct task_struct *task)
67655+{
67656+ return;
67657+}
67658+
67659+int
67660+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
67661+{
67662+ return 0;
67663+}
67664+
67665+int
67666+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
67667+{
67668+ return 0;
67669+}
67670+
67671+int gr_acl_enable_at_secure(void)
67672+{
67673+ return 0;
67674+}
67675+
67676+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
67677+{
67678+ return dentry->d_sb->s_dev;
67679+}
67680+
67681+void gr_put_exec_file(struct task_struct *task)
67682+{
67683+ return;
67684+}
67685+
67686+EXPORT_SYMBOL(gr_set_kernel_label);
67687+#ifdef CONFIG_SECURITY
67688+EXPORT_SYMBOL(gr_check_user_change);
67689+EXPORT_SYMBOL(gr_check_group_change);
67690+#endif
67691diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
67692new file mode 100644
67693index 0000000..387032b
67694--- /dev/null
67695+++ b/grsecurity/grsec_exec.c
67696@@ -0,0 +1,187 @@
67697+#include <linux/kernel.h>
67698+#include <linux/sched.h>
67699+#include <linux/file.h>
67700+#include <linux/binfmts.h>
67701+#include <linux/fs.h>
67702+#include <linux/types.h>
67703+#include <linux/grdefs.h>
67704+#include <linux/grsecurity.h>
67705+#include <linux/grinternal.h>
67706+#include <linux/capability.h>
67707+#include <linux/module.h>
67708+#include <linux/compat.h>
67709+
67710+#include <asm/uaccess.h>
67711+
67712+#ifdef CONFIG_GRKERNSEC_EXECLOG
67713+static char gr_exec_arg_buf[132];
67714+static DEFINE_MUTEX(gr_exec_arg_mutex);
67715+#endif
67716+
67717+struct user_arg_ptr {
67718+#ifdef CONFIG_COMPAT
67719+ bool is_compat;
67720+#endif
67721+ union {
67722+ const char __user *const __user *native;
67723+#ifdef CONFIG_COMPAT
67724+ const compat_uptr_t __user *compat;
67725+#endif
67726+ } ptr;
67727+};
67728+
67729+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
67730+
67731+void
67732+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
67733+{
67734+#ifdef CONFIG_GRKERNSEC_EXECLOG
67735+ char *grarg = gr_exec_arg_buf;
67736+ unsigned int i, x, execlen = 0;
67737+ char c;
67738+
67739+ if (!((grsec_enable_execlog && grsec_enable_group &&
67740+ in_group_p(grsec_audit_gid))
67741+ || (grsec_enable_execlog && !grsec_enable_group)))
67742+ return;
67743+
67744+ mutex_lock(&gr_exec_arg_mutex);
67745+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
67746+
67747+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
67748+ const char __user *p;
67749+ unsigned int len;
67750+
67751+ p = get_user_arg_ptr(argv, i);
67752+ if (IS_ERR(p))
67753+ goto log;
67754+
67755+ len = strnlen_user(p, 128 - execlen);
67756+ if (len > 128 - execlen)
67757+ len = 128 - execlen;
67758+ else if (len > 0)
67759+ len--;
67760+ if (copy_from_user(grarg + execlen, p, len))
67761+ goto log;
67762+
67763+ /* rewrite unprintable characters */
67764+ for (x = 0; x < len; x++) {
67765+ c = *(grarg + execlen + x);
67766+ if (c < 32 || c > 126)
67767+ *(grarg + execlen + x) = ' ';
67768+ }
67769+
67770+ execlen += len;
67771+ *(grarg + execlen) = ' ';
67772+ *(grarg + execlen + 1) = '\0';
67773+ execlen++;
67774+ }
67775+
67776+ log:
67777+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
67778+ bprm->file->f_path.mnt, grarg);
67779+ mutex_unlock(&gr_exec_arg_mutex);
67780+#endif
67781+ return;
67782+}
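
The logger above copies argv into a fixed 132-byte buffer, caps the logged payload at 128 bytes, and rewrites non-printable bytes to spaces so terminal escape sequences cannot reach the logs. The same bookkeeping in user space, as a sketch:

/* sketch: scrub and join argv into a bounded, printable log line */
#include <stdio.h>
#include <string.h>

int main(int argc, char **argv)
{
	char buf[132] = { 0 };
	unsigned int i, x, len, execlen = 0;

	for (i = 0; i < (unsigned int)argc && execlen < 128; i++) {
		len = strlen(argv[i]);
		if (len > 128 - execlen)
			len = 128 - execlen;
		memcpy(buf + execlen, argv[i], len);
		/* rewrite unprintable characters, as the kernel side does */
		for (x = 0; x < len; x++)
			if (buf[execlen + x] < 32 || buf[execlen + x] > 126)
				buf[execlen + x] = ' ';
		execlen += len;
		buf[execlen++] = ' ';
	}
	printf("exec log line: %s\n", buf);
	return 0;
}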
67783+
67784+#ifdef CONFIG_GRKERNSEC
67785+extern int gr_acl_is_capable(const int cap);
67786+extern int gr_acl_is_capable_nolog(const int cap);
67787+extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
67788+extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
67789+extern int gr_chroot_is_capable(const int cap);
67790+extern int gr_chroot_is_capable_nolog(const int cap);
67791+extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
67792+extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
67793+#endif
67794+
67795+const char *captab_log[] = {
67796+ "CAP_CHOWN",
67797+ "CAP_DAC_OVERRIDE",
67798+ "CAP_DAC_READ_SEARCH",
67799+ "CAP_FOWNER",
67800+ "CAP_FSETID",
67801+ "CAP_KILL",
67802+ "CAP_SETGID",
67803+ "CAP_SETUID",
67804+ "CAP_SETPCAP",
67805+ "CAP_LINUX_IMMUTABLE",
67806+ "CAP_NET_BIND_SERVICE",
67807+ "CAP_NET_BROADCAST",
67808+ "CAP_NET_ADMIN",
67809+ "CAP_NET_RAW",
67810+ "CAP_IPC_LOCK",
67811+ "CAP_IPC_OWNER",
67812+ "CAP_SYS_MODULE",
67813+ "CAP_SYS_RAWIO",
67814+ "CAP_SYS_CHROOT",
67815+ "CAP_SYS_PTRACE",
67816+ "CAP_SYS_PACCT",
67817+ "CAP_SYS_ADMIN",
67818+ "CAP_SYS_BOOT",
67819+ "CAP_SYS_NICE",
67820+ "CAP_SYS_RESOURCE",
67821+ "CAP_SYS_TIME",
67822+ "CAP_SYS_TTY_CONFIG",
67823+ "CAP_MKNOD",
67824+ "CAP_LEASE",
67825+ "CAP_AUDIT_WRITE",
67826+ "CAP_AUDIT_CONTROL",
67827+ "CAP_SETFCAP",
67828+ "CAP_MAC_OVERRIDE",
67829+ "CAP_MAC_ADMIN",
67830+ "CAP_SYSLOG",
67831+ "CAP_WAKE_ALARM"
67832+};
67833+
67834+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
67835+
67836+int gr_is_capable(const int cap)
67837+{
67838+#ifdef CONFIG_GRKERNSEC
67839+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
67840+ return 1;
67841+ return 0;
67842+#else
67843+ return 1;
67844+#endif
67845+}
67846+
67847+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
67848+{
67849+#ifdef CONFIG_GRKERNSEC
67850+ if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
67851+ return 1;
67852+ return 0;
67853+#else
67854+ return 1;
67855+#endif
67856+}
67857+
67858+int gr_is_capable_nolog(const int cap)
67859+{
67860+#ifdef CONFIG_GRKERNSEC
67861+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
67862+ return 1;
67863+ return 0;
67864+#else
67865+ return 1;
67866+#endif
67867+}
67868+
67869+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
67870+{
67871+#ifdef CONFIG_GRKERNSEC
67872+ if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
67873+ return 1;
67874+ return 0;
67875+#else
67876+ return 1;
67877+#endif
67878+}
67879+
67880+EXPORT_SYMBOL(gr_is_capable);
67881+EXPORT_SYMBOL(gr_is_capable_nolog);
67882+EXPORT_SYMBOL(gr_task_is_capable);
67883+EXPORT_SYMBOL(gr_task_is_capable_nolog);
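
The four wrappers above compose the RBAC and chroot policies with a logical AND, so each layer can only subtract capabilities, never add them. The pattern in miniature, with hypothetical stand-in layers (rbac_allows/chroot_allows are illustrations, not kernel symbols):

/* sketch: AND-composed policy layers; a grant needs every layer's yes */
#include <stdio.h>

static int rbac_allows(int cap)   { (void)cap; return 1; }	/* stand-in */
static int chroot_allows(int cap) { return cap != 21; }		/* drops CAP_SYS_ADMIN */

static int is_capable(int cap)
{
	return rbac_allows(cap) && chroot_allows(cap);
}

int main(void)
{
	printf("CAP_SYS_ADMIN (21): %d\n", is_capable(21));
	printf("CAP_NET_RAW   (13): %d\n", is_capable(13));
	return 0;
}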
67884diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
67885new file mode 100644
67886index 0000000..06cc6ea
67887--- /dev/null
67888+++ b/grsecurity/grsec_fifo.c
67889@@ -0,0 +1,24 @@
67890+#include <linux/kernel.h>
67891+#include <linux/sched.h>
67892+#include <linux/fs.h>
67893+#include <linux/file.h>
67894+#include <linux/grinternal.h>
67895+
67896+int
67897+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
67898+ const struct dentry *dir, const int flag, const int acc_mode)
67899+{
67900+#ifdef CONFIG_GRKERNSEC_FIFO
67901+ const struct cred *cred = current_cred();
67902+
67903+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
67904+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
67905+ !uid_eq(dentry->d_inode->i_uid, dir->d_inode->i_uid) &&
67906+ !uid_eq(cred->fsuid, dentry->d_inode->i_uid)) {
67907+ if (!inode_permission(dentry->d_inode, acc_mode))
67908+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, GR_GLOBAL_UID(dentry->d_inode->i_uid), GR_GLOBAL_GID(dentry->d_inode->i_gid));
67909+ return -EACCES;
67910+ }
67911+#endif
67912+ return 0;
67913+}
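
The FIFO rule refuses an open when the FIFO sits in a sticky world-writable directory and is owned by neither the directory owner nor the opener, defeating planted-FIFO attacks in /tmp. Reduced to a predicate over plain uids (a sketch; the kernel operates on kuid_t values and inode modes):

/* sketch: when is opening a FIFO in a sticky directory refused? */
#include <stdio.h>

static int fifo_restricted(int dir_sticky, unsigned fifo_uid,
			   unsigned dir_uid, unsigned opener_fsuid,
			   int o_excl)
{
	return dir_sticky && !o_excl &&
	       fifo_uid != dir_uid && opener_fsuid != fifo_uid;
}

int main(void)
{
	/* attacker (uid 1001) planted a FIFO in root-owned sticky /tmp,
	 * victim (uid 1000) opens it: refused */
	printf("%d\n", fifo_restricted(1, 1001, 0, 1000, 0));
	/* opener owns the FIFO: allowed */
	printf("%d\n", fifo_restricted(1, 1000, 0, 1000, 0));
	return 0;
}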
67914diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
67915new file mode 100644
67916index 0000000..8ca18bf
67917--- /dev/null
67918+++ b/grsecurity/grsec_fork.c
67919@@ -0,0 +1,23 @@
67920+#include <linux/kernel.h>
67921+#include <linux/sched.h>
67922+#include <linux/grsecurity.h>
67923+#include <linux/grinternal.h>
67924+#include <linux/errno.h>
67925+
67926+void
67927+gr_log_forkfail(const int retval)
67928+{
67929+#ifdef CONFIG_GRKERNSEC_FORKFAIL
67930+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
67931+ switch (retval) {
67932+ case -EAGAIN:
67933+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
67934+ break;
67935+ case -ENOMEM:
67936+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
67937+ break;
67938+ }
67939+ }
67940+#endif
67941+ return;
67942+}
67943diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
67944new file mode 100644
67945index 0000000..ab2d875
67946--- /dev/null
67947+++ b/grsecurity/grsec_init.c
67948@@ -0,0 +1,279 @@
67949+#include <linux/kernel.h>
67950+#include <linux/sched.h>
67951+#include <linux/mm.h>
67952+#include <linux/gracl.h>
67953+#include <linux/slab.h>
67954+#include <linux/vmalloc.h>
67955+#include <linux/percpu.h>
67956+#include <linux/module.h>
67957+
67958+int grsec_enable_ptrace_readexec;
67959+int grsec_enable_setxid;
67960+int grsec_enable_symlinkown;
67961+kgid_t grsec_symlinkown_gid;
67962+int grsec_enable_brute;
67963+int grsec_enable_link;
67964+int grsec_enable_dmesg;
67965+int grsec_enable_harden_ptrace;
67966+int grsec_enable_fifo;
67967+int grsec_enable_execlog;
67968+int grsec_enable_signal;
67969+int grsec_enable_forkfail;
67970+int grsec_enable_audit_ptrace;
67971+int grsec_enable_time;
67972+int grsec_enable_group;
67973+kgid_t grsec_audit_gid;
67974+int grsec_enable_chdir;
67975+int grsec_enable_mount;
67976+int grsec_enable_rofs;
67977+int grsec_enable_chroot_findtask;
67978+int grsec_enable_chroot_mount;
67979+int grsec_enable_chroot_shmat;
67980+int grsec_enable_chroot_fchdir;
67981+int grsec_enable_chroot_double;
67982+int grsec_enable_chroot_pivot;
67983+int grsec_enable_chroot_chdir;
67984+int grsec_enable_chroot_chmod;
67985+int grsec_enable_chroot_mknod;
67986+int grsec_enable_chroot_nice;
67987+int grsec_enable_chroot_execlog;
67988+int grsec_enable_chroot_caps;
67989+int grsec_enable_chroot_sysctl;
67990+int grsec_enable_chroot_unix;
67991+int grsec_enable_tpe;
67992+kgid_t grsec_tpe_gid;
67993+int grsec_enable_blackhole;
67994+#ifdef CONFIG_IPV6_MODULE
67995+EXPORT_SYMBOL(grsec_enable_blackhole);
67996+#endif
67997+int grsec_lastack_retries;
67998+int grsec_enable_tpe_all;
67999+int grsec_enable_tpe_invert;
68000+int grsec_enable_socket_all;
68001+kgid_t grsec_socket_all_gid;
68002+int grsec_enable_socket_client;
68003+kgid_t grsec_socket_client_gid;
68004+int grsec_enable_socket_server;
68005+kgid_t grsec_socket_server_gid;
68006+int grsec_resource_logging;
68007+int grsec_disable_privio;
68008+int grsec_enable_log_rwxmaps;
68009+int grsec_lock;
68010+
68011+DEFINE_SPINLOCK(grsec_alert_lock);
68012+unsigned long grsec_alert_wtime = 0;
68013+unsigned long grsec_alert_fyet = 0;
68014+
68015+DEFINE_SPINLOCK(grsec_audit_lock);
68016+
68017+DEFINE_RWLOCK(grsec_exec_file_lock);
68018+
68019+char *gr_shared_page[4];
68020+
68021+char *gr_alert_log_fmt;
68022+char *gr_audit_log_fmt;
68023+char *gr_alert_log_buf;
68024+char *gr_audit_log_buf;
68025+
68026+extern struct gr_arg *gr_usermode;
68027+extern unsigned char *gr_system_salt;
68028+extern unsigned char *gr_system_sum;
68029+
68030+void __init
68031+grsecurity_init(void)
68032+{
68033+ int j;
68034+ /* create the per-cpu shared pages */
68035+
68036+#ifdef CONFIG_X86
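	/* annotation (hedged): 0x41a is the keyboard-buffer head pointer in
	 * the real-mode BIOS data area, so this memset appears to wipe the
	 * BIOS keyboard buffer, e.g. a boot-loader passphrase typed early */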
68037+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
68038+#endif
68039+
68040+ for (j = 0; j < 4; j++) {
68041+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
68042+ if (gr_shared_page[j] == NULL) {
68043+ panic("Unable to allocate grsecurity shared page");
68044+ return;
68045+ }
68046+ }
68047+
68048+ /* allocate log buffers */
68049+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
68050+ if (!gr_alert_log_fmt) {
68051+ panic("Unable to allocate grsecurity alert log format buffer");
68052+ return;
68053+ }
68054+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
68055+ if (!gr_audit_log_fmt) {
68056+ panic("Unable to allocate grsecurity audit log format buffer");
68057+ return;
68058+ }
68059+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
68060+ if (!gr_alert_log_buf) {
68061+ panic("Unable to allocate grsecurity alert log buffer");
68062+ return;
68063+ }
68064+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
68065+ if (!gr_audit_log_buf) {
68066+ panic("Unable to allocate grsecurity audit log buffer");
68067+ return;
68068+ }
68069+
68070+ /* allocate memory for authentication structure */
68071+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
68072+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
68073+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
68074+
68075+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
68076+ panic("Unable to allocate grsecurity authentication structure");
68077+ return;
68078+ }
68079+
68080+
68081+#ifdef CONFIG_GRKERNSEC_IO
68082+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
68083+ grsec_disable_privio = 1;
68084+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
68085+ grsec_disable_privio = 1;
68086+#else
68087+ grsec_disable_privio = 0;
68088+#endif
68089+#endif
68090+
68091+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
68092+ /* for backward compatibility, tpe_invert always defaults to on if
68093+ enabled in the kernel
68094+ */
68095+ grsec_enable_tpe_invert = 1;
68096+#endif
68097+
68098+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
68099+#ifndef CONFIG_GRKERNSEC_SYSCTL
68100+ grsec_lock = 1;
68101+#endif
68102+
68103+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
68104+ grsec_enable_log_rwxmaps = 1;
68105+#endif
68106+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
68107+ grsec_enable_group = 1;
68108+ grsec_audit_gid = KGIDT_INIT(CONFIG_GRKERNSEC_AUDIT_GID);
68109+#endif
68110+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
68111+ grsec_enable_ptrace_readexec = 1;
68112+#endif
68113+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
68114+ grsec_enable_chdir = 1;
68115+#endif
68116+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
68117+ grsec_enable_harden_ptrace = 1;
68118+#endif
68119+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
68120+ grsec_enable_mount = 1;
68121+#endif
68122+#ifdef CONFIG_GRKERNSEC_LINK
68123+ grsec_enable_link = 1;
68124+#endif
68125+#ifdef CONFIG_GRKERNSEC_BRUTE
68126+ grsec_enable_brute = 1;
68127+#endif
68128+#ifdef CONFIG_GRKERNSEC_DMESG
68129+ grsec_enable_dmesg = 1;
68130+#endif
68131+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68132+ grsec_enable_blackhole = 1;
68133+ grsec_lastack_retries = 4;
68134+#endif
68135+#ifdef CONFIG_GRKERNSEC_FIFO
68136+ grsec_enable_fifo = 1;
68137+#endif
68138+#ifdef CONFIG_GRKERNSEC_EXECLOG
68139+ grsec_enable_execlog = 1;
68140+#endif
68141+#ifdef CONFIG_GRKERNSEC_SETXID
68142+ grsec_enable_setxid = 1;
68143+#endif
68144+#ifdef CONFIG_GRKERNSEC_SIGNAL
68145+ grsec_enable_signal = 1;
68146+#endif
68147+#ifdef CONFIG_GRKERNSEC_FORKFAIL
68148+ grsec_enable_forkfail = 1;
68149+#endif
68150+#ifdef CONFIG_GRKERNSEC_TIME
68151+ grsec_enable_time = 1;
68152+#endif
68153+#ifdef CONFIG_GRKERNSEC_RESLOG
68154+ grsec_resource_logging = 1;
68155+#endif
68156+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
68157+ grsec_enable_chroot_findtask = 1;
68158+#endif
68159+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
68160+ grsec_enable_chroot_unix = 1;
68161+#endif
68162+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
68163+ grsec_enable_chroot_mount = 1;
68164+#endif
68165+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
68166+ grsec_enable_chroot_fchdir = 1;
68167+#endif
68168+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
68169+ grsec_enable_chroot_shmat = 1;
68170+#endif
68171+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
68172+ grsec_enable_audit_ptrace = 1;
68173+#endif
68174+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
68175+ grsec_enable_chroot_double = 1;
68176+#endif
68177+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
68178+ grsec_enable_chroot_pivot = 1;
68179+#endif
68180+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
68181+ grsec_enable_chroot_chdir = 1;
68182+#endif
68183+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
68184+ grsec_enable_chroot_chmod = 1;
68185+#endif
68186+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
68187+ grsec_enable_chroot_mknod = 1;
68188+#endif
68189+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
68190+ grsec_enable_chroot_nice = 1;
68191+#endif
68192+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
68193+ grsec_enable_chroot_execlog = 1;
68194+#endif
68195+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
68196+ grsec_enable_chroot_caps = 1;
68197+#endif
68198+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
68199+ grsec_enable_chroot_sysctl = 1;
68200+#endif
68201+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
68202+ grsec_enable_symlinkown = 1;
68203+ grsec_symlinkown_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SYMLINKOWN_GID);
68204+#endif
68205+#ifdef CONFIG_GRKERNSEC_TPE
68206+ grsec_enable_tpe = 1;
68207+ grsec_tpe_gid = KGIDT_INIT(CONFIG_GRKERNSEC_TPE_GID);
68208+#ifdef CONFIG_GRKERNSEC_TPE_ALL
68209+ grsec_enable_tpe_all = 1;
68210+#endif
68211+#endif
68212+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
68213+ grsec_enable_socket_all = 1;
68214+ grsec_socket_all_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_ALL_GID);
68215+#endif
68216+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
68217+ grsec_enable_socket_client = 1;
68218+ grsec_socket_client_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_CLIENT_GID);
68219+#endif
68220+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
68221+ grsec_enable_socket_server = 1;
68222+ grsec_socket_server_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_SERVER_GID);
68223+#endif
68224+#endif
68225+
68226+ return;
68227+}
68228diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
68229new file mode 100644
68230index 0000000..5e05e20
68231--- /dev/null
68232+++ b/grsecurity/grsec_link.c
68233@@ -0,0 +1,58 @@
68234+#include <linux/kernel.h>
68235+#include <linux/sched.h>
68236+#include <linux/fs.h>
68237+#include <linux/file.h>
68238+#include <linux/grinternal.h>
68239+
68240+int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
68241+{
68242+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
68243+ const struct inode *link_inode = link->dentry->d_inode;
68244+
68245+ if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
68246+ /* ignore root-owned links, e.g. /proc/self */
68247+ gr_is_global_nonroot(link_inode->i_uid) && target &&
68248+ !uid_eq(link_inode->i_uid, target->i_uid)) {
68249+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
68250+ return 1;
68251+ }
68252+#endif
68253+ return 0;
68254+}
68255+
68256+int
68257+gr_handle_follow_link(const struct inode *parent,
68258+ const struct inode *inode,
68259+ const struct dentry *dentry, const struct vfsmount *mnt)
68260+{
68261+#ifdef CONFIG_GRKERNSEC_LINK
68262+ const struct cred *cred = current_cred();
68263+
68264+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
68265+ (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) &&
68266+ (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) {
68267+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
68268+ return -EACCES;
68269+ }
68270+#endif
68271+ return 0;
68272+}
68273+
68274+int
68275+gr_handle_hardlink(const struct dentry *dentry,
68276+ const struct vfsmount *mnt,
68277+ struct inode *inode, const int mode, const struct filename *to)
68278+{
68279+#ifdef CONFIG_GRKERNSEC_LINK
68280+ const struct cred *cred = current_cred();
68281+
68282+ if (grsec_enable_link && !uid_eq(cred->fsuid, inode->i_uid) &&
68283+ (!S_ISREG(mode) || is_privileged_binary(dentry) ||
68284+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
68285+ !capable(CAP_FOWNER) && gr_is_global_nonroot(cred->uid)) {
68286+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to->name);
68287+ return -EPERM;
68288+ }
68289+#endif
68290+ return 0;
68291+}
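
gr_handle_follow_link() above refuses to follow a symlink that sits in a world-writable sticky directory unless the link is owned by the directory owner or by the follower itself, which kills the classic /tmp symlink race. The rule as a predicate over plain uids (illustration only; the sticky and world-write bits are folded into one flag here):

/* sketch: when is following a symlink in a sticky directory refused? */
#include <stdio.h>

static int follow_refused(int dir_sticky_worldwrite, unsigned link_uid,
			  unsigned dir_uid, unsigned fsuid)
{
	return dir_sticky_worldwrite &&
	       link_uid != dir_uid && fsuid != link_uid;
}

int main(void)
{
	/* attacker (1001) plants /tmp/passwd -> /etc/passwd and a root
	 * process (fsuid 0) tries to follow it: refused */
	printf("%d\n", follow_refused(1, 1001, 0, 0));
	/* the attacker follows its own link: allowed */
	printf("%d\n", follow_refused(1, 1001, 0, 1001));
	return 0;
}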
68292diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
68293new file mode 100644
68294index 0000000..dbe0a6b
68295--- /dev/null
68296+++ b/grsecurity/grsec_log.c
68297@@ -0,0 +1,341 @@
68298+#include <linux/kernel.h>
68299+#include <linux/sched.h>
68300+#include <linux/file.h>
68301+#include <linux/tty.h>
68302+#include <linux/fs.h>
68303+#include <linux/mm.h>
68304+#include <linux/grinternal.h>
68305+
68306+#ifdef CONFIG_TREE_PREEMPT_RCU
68307+#define DISABLE_PREEMPT() preempt_disable()
68308+#define ENABLE_PREEMPT() preempt_enable()
68309+#else
68310+#define DISABLE_PREEMPT()
68311+#define ENABLE_PREEMPT()
68312+#endif
68313+
68314+#define BEGIN_LOCKS(x) \
68315+ DISABLE_PREEMPT(); \
68316+ rcu_read_lock(); \
68317+ read_lock(&tasklist_lock); \
68318+ read_lock(&grsec_exec_file_lock); \
68319+ if (x != GR_DO_AUDIT) \
68320+ spin_lock(&grsec_alert_lock); \
68321+ else \
68322+ spin_lock(&grsec_audit_lock)
68323+
68324+#define END_LOCKS(x) \
68325+ if (x != GR_DO_AUDIT) \
68326+ spin_unlock(&grsec_alert_lock); \
68327+ else \
68328+ spin_unlock(&grsec_audit_lock); \
68329+ read_unlock(&grsec_exec_file_lock); \
68330+ read_unlock(&tasklist_lock); \
68331+ rcu_read_unlock(); \
68332+ ENABLE_PREEMPT(); \
68333+ if (x == GR_DONT_AUDIT) \
68334+ gr_handle_alertkill(current)
68335+
68336+enum {
68337+ FLOODING,
68338+ NO_FLOODING
68339+};
68340+
68341+extern char *gr_alert_log_fmt;
68342+extern char *gr_audit_log_fmt;
68343+extern char *gr_alert_log_buf;
68344+extern char *gr_audit_log_buf;
68345+
68346+static int gr_log_start(int audit)
68347+{
68348+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
68349+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
68350+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
68351+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
68352+ unsigned long curr_secs = get_seconds();
68353+
68354+ if (audit == GR_DO_AUDIT)
68355+ goto set_fmt;
68356+
68357+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
68358+ grsec_alert_wtime = curr_secs;
68359+ grsec_alert_fyet = 0;
68360+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
68361+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
68362+ grsec_alert_fyet++;
68363+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
68364+ grsec_alert_wtime = curr_secs;
68365+ grsec_alert_fyet++;
68366+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
68367+ return FLOODING;
68368+ }
68369+ else return FLOODING;
68370+
68371+set_fmt:
68372+#endif
68373+ memset(buf, 0, PAGE_SIZE);
68374+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
68375+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
68376+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
68377+ } else if (current->signal->curr_ip) {
68378+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
68379+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
68380+ } else if (gr_acl_is_enabled()) {
68381+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
68382+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
68383+ } else {
68384+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
68385+ strcpy(buf, fmt);
68386+ }
68387+
68388+ return NO_FLOODING;
68389+}
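
gr_log_start() rate-limits alerts: a burst of messages is allowed per flood window, after which logging goes quiet until the window rolls over, and the cutoff itself is announced once. The same state machine as a stand-alone function (WINDOW and BURST are sketch values, not necessarily the Kconfig defaults):

/* sketch: the alert flood limiter, with the kernel's branch order kept */
#include <stdio.h>

#define WINDOW 10
#define BURST  6

static unsigned long wtime;
static unsigned long fyet;

static int may_log(unsigned long now)
{
	if (!wtime || now > wtime + WINDOW) {
		wtime = now;			/* fresh window */
		fyet = 0;
	} else if (now <= wtime + WINDOW && fyet < BURST) {
		fyet++;
	} else if (fyet == BURST) {
		wtime = now;
		fyet++;
		printf("logging disabled for %d seconds\n", WINDOW);
		return 0;
	} else {
		return 0;
	}
	return 1;
}

int main(void)
{
	unsigned long t;

	for (t = 1; t <= 12; t++)
		printf("t=%2lu -> %d\n", t, may_log(t));
	return 0;
}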
68390+
68391+static void gr_log_middle(int audit, const char *msg, va_list ap)
68392+ __attribute__ ((format (printf, 2, 0)));
68393+
68394+static void gr_log_middle(int audit, const char *msg, va_list ap)
68395+{
68396+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
68397+ unsigned int len = strlen(buf);
68398+
68399+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
68400+
68401+ return;
68402+}
68403+
68404+static void gr_log_middle_varargs(int audit, const char *msg, ...)
68405+ __attribute__ ((format (printf, 2, 3)));
68406+
68407+static void gr_log_middle_varargs(int audit, const char *msg, ...)
68408+{
68409+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
68410+ unsigned int len = strlen(buf);
68411+ va_list ap;
68412+
68413+ va_start(ap, msg);
68414+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
68415+ va_end(ap);
68416+
68417+ return;
68418+}
68419+
68420+static void gr_log_end(int audit, int append_default)
68421+{
68422+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
68423+ if (append_default) {
68424+ struct task_struct *task = current;
68425+ struct task_struct *parent = task->real_parent;
68426+ const struct cred *cred = __task_cred(task);
68427+ const struct cred *pcred = __task_cred(parent);
68428+ unsigned int len = strlen(buf);
68429+
68430+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
68431+ }
68432+
68433+ printk("%s\n", buf);
68434+
68435+ return;
68436+}
68437+
68438+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
68439+{
68440+ int logtype;
68441+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
68442+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
68443+ void *voidptr = NULL;
68444+ int num1 = 0, num2 = 0;
68445+ unsigned long ulong1 = 0, ulong2 = 0;
68446+ struct dentry *dentry = NULL;
68447+ struct vfsmount *mnt = NULL;
68448+ struct file *file = NULL;
68449+ struct task_struct *task = NULL;
68450+ struct vm_area_struct *vma = NULL;
68451+ const struct cred *cred, *pcred;
68452+ va_list ap;
68453+
68454+ BEGIN_LOCKS(audit);
68455+ logtype = gr_log_start(audit);
68456+ if (logtype == FLOODING) {
68457+ END_LOCKS(audit);
68458+ return;
68459+ }
68460+ va_start(ap, argtypes);
68461+ switch (argtypes) {
68462+ case GR_TTYSNIFF:
68463+ task = va_arg(ap, struct task_struct *);
68464+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task_pid_nr(task), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent));
68465+ break;
68466+ case GR_SYSCTL_HIDDEN:
68467+ str1 = va_arg(ap, char *);
68468+ gr_log_middle_varargs(audit, msg, result, str1);
68469+ break;
68470+ case GR_RBAC:
68471+ dentry = va_arg(ap, struct dentry *);
68472+ mnt = va_arg(ap, struct vfsmount *);
68473+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
68474+ break;
68475+ case GR_RBAC_STR:
68476+ dentry = va_arg(ap, struct dentry *);
68477+ mnt = va_arg(ap, struct vfsmount *);
68478+ str1 = va_arg(ap, char *);
68479+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
68480+ break;
68481+ case GR_STR_RBAC:
68482+ str1 = va_arg(ap, char *);
68483+ dentry = va_arg(ap, struct dentry *);
68484+ mnt = va_arg(ap, struct vfsmount *);
68485+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
68486+ break;
68487+ case GR_RBAC_MODE2:
68488+ dentry = va_arg(ap, struct dentry *);
68489+ mnt = va_arg(ap, struct vfsmount *);
68490+ str1 = va_arg(ap, char *);
68491+ str2 = va_arg(ap, char *);
68492+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
68493+ break;
68494+ case GR_RBAC_MODE3:
68495+ dentry = va_arg(ap, struct dentry *);
68496+ mnt = va_arg(ap, struct vfsmount *);
68497+ str1 = va_arg(ap, char *);
68498+ str2 = va_arg(ap, char *);
68499+ str3 = va_arg(ap, char *);
68500+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
68501+ break;
68502+ case GR_FILENAME:
68503+ dentry = va_arg(ap, struct dentry *);
68504+ mnt = va_arg(ap, struct vfsmount *);
68505+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
68506+ break;
68507+ case GR_STR_FILENAME:
68508+ str1 = va_arg(ap, char *);
68509+ dentry = va_arg(ap, struct dentry *);
68510+ mnt = va_arg(ap, struct vfsmount *);
68511+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
68512+ break;
68513+ case GR_FILENAME_STR:
68514+ dentry = va_arg(ap, struct dentry *);
68515+ mnt = va_arg(ap, struct vfsmount *);
68516+ str1 = va_arg(ap, char *);
68517+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
68518+ break;
68519+ case GR_FILENAME_TWO_INT:
68520+ dentry = va_arg(ap, struct dentry *);
68521+ mnt = va_arg(ap, struct vfsmount *);
68522+ num1 = va_arg(ap, int);
68523+ num2 = va_arg(ap, int);
68524+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
68525+ break;
68526+ case GR_FILENAME_TWO_INT_STR:
68527+ dentry = va_arg(ap, struct dentry *);
68528+ mnt = va_arg(ap, struct vfsmount *);
68529+ num1 = va_arg(ap, int);
68530+ num2 = va_arg(ap, int);
68531+ str1 = va_arg(ap, char *);
68532+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
68533+ break;
68534+ case GR_TEXTREL:
68535+ file = va_arg(ap, struct file *);
68536+ ulong1 = va_arg(ap, unsigned long);
68537+ ulong2 = va_arg(ap, unsigned long);
68538+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
68539+ break;
68540+ case GR_PTRACE:
68541+ task = va_arg(ap, struct task_struct *);
68542+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task_pid_nr(task));
68543+ break;
68544+ case GR_RESOURCE:
68545+ task = va_arg(ap, struct task_struct *);
68546+ cred = __task_cred(task);
68547+ pcred = __task_cred(task->real_parent);
68548+ ulong1 = va_arg(ap, unsigned long);
68549+ str1 = va_arg(ap, char *);
68550+ ulong2 = va_arg(ap, unsigned long);
68551+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
68552+ break;
68553+ case GR_CAP:
68554+ task = va_arg(ap, struct task_struct *);
68555+ cred = __task_cred(task);
68556+ pcred = __task_cred(task->real_parent);
68557+ str1 = va_arg(ap, char *);
68558+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
68559+ break;
68560+ case GR_SIG:
68561+ str1 = va_arg(ap, char *);
68562+ voidptr = va_arg(ap, void *);
68563+ gr_log_middle_varargs(audit, msg, str1, voidptr);
68564+ break;
68565+ case GR_SIG2:
68566+ task = va_arg(ap, struct task_struct *);
68567+ cred = __task_cred(task);
68568+ pcred = __task_cred(task->real_parent);
68569+ num1 = va_arg(ap, int);
68570+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
68571+ break;
68572+ case GR_CRASH1:
68573+ task = va_arg(ap, struct task_struct *);
68574+ cred = __task_cred(task);
68575+ pcred = __task_cred(task->real_parent);
68576+ ulong1 = va_arg(ap, unsigned long);
68577+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), GR_GLOBAL_UID(cred->uid), ulong1);
68578+ break;
68579+ case GR_CRASH2:
68580+ task = va_arg(ap, struct task_struct *);
68581+ cred = __task_cred(task);
68582+ pcred = __task_cred(task->real_parent);
68583+ ulong1 = va_arg(ap, unsigned long);
68584+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), ulong1);
68585+ break;
68586+ case GR_RWXMAP:
68587+ file = va_arg(ap, struct file *);
68588+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
68589+ break;
68590+ case GR_RWXMAPVMA:
68591+ vma = va_arg(ap, struct vm_area_struct *);
68592+ if (vma->vm_file)
68593+ str1 = gr_to_filename(vma->vm_file->f_path.dentry, vma->vm_file->f_path.mnt);
68594+ else if (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
68595+ str1 = "<stack>";
68596+ else if (vma->vm_start <= current->mm->brk &&
68597+ vma->vm_end >= current->mm->start_brk)
68598+ str1 = "<heap>";
68599+ else
68600+ str1 = "<anonymous mapping>";
68601+ gr_log_middle_varargs(audit, msg, str1);
68602+ break;
68603+ case GR_PSACCT:
68604+ {
68605+ unsigned int wday, cday;
68606+ __u8 whr, chr;
68607+ __u8 wmin, cmin;
68608+ __u8 wsec, csec;
68609+ char cur_tty[64] = { 0 };
68610+ char parent_tty[64] = { 0 };
68611+
68612+ task = va_arg(ap, struct task_struct *);
68613+ wday = va_arg(ap, unsigned int);
68614+ cday = va_arg(ap, unsigned int);
68615+ whr = va_arg(ap, int);
68616+ chr = va_arg(ap, int);
68617+ wmin = va_arg(ap, int);
68618+ cmin = va_arg(ap, int);
68619+ wsec = va_arg(ap, int);
68620+ csec = va_arg(ap, int);
68621+ ulong1 = va_arg(ap, unsigned long);
68622+ cred = __task_cred(task);
68623+ pcred = __task_cred(task->real_parent);
68624+
68625+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
68626+ }
68627+ break;
68628+ default:
68629+ gr_log_middle(audit, msg, ap);
68630+ }
68631+ va_end(ap);
68632+ // these don't need DEFAULTSECARGS printed on the end
68633+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
68634+ gr_log_end(audit, 0);
68635+ else
68636+ gr_log_end(audit, 1);
68637+ END_LOCKS(audit);
68638+}
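
One subtlety in the dispatcher above: the __u8 time fields in the GR_PSACCT case are fetched with va_arg(ap, int), not va_arg(ap, __u8), because default argument promotions widen char and short varargs to int; fetching the promoted type and narrowing afterwards is the portable pattern. A minimal demonstration:

/* sketch: narrow varargs must be read back at their promoted type */
#include <stdarg.h>
#include <stdio.h>

static unsigned char first_u8(int n, ...)
{
	va_list ap;
	unsigned char v;

	va_start(ap, n);
	v = (unsigned char)va_arg(ap, int);	/* not va_arg(ap, unsigned char) */
	va_end(ap);
	return v;
}

int main(void)
{
	unsigned char hour = 23;

	printf("%u\n", first_u8(1, hour));
	return 0;
}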
68639diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
68640new file mode 100644
68641index 0000000..f536303
68642--- /dev/null
68643+++ b/grsecurity/grsec_mem.c
68644@@ -0,0 +1,40 @@
68645+#include <linux/kernel.h>
68646+#include <linux/sched.h>
68647+#include <linux/mm.h>
68648+#include <linux/mman.h>
68649+#include <linux/grinternal.h>
68650+
68651+void
68652+gr_handle_ioperm(void)
68653+{
68654+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
68655+ return;
68656+}
68657+
68658+void
68659+gr_handle_iopl(void)
68660+{
68661+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
68662+ return;
68663+}
68664+
68665+void
68666+gr_handle_mem_readwrite(u64 from, u64 to)
68667+{
68668+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
68669+ return;
68670+}
68671+
68672+void
68673+gr_handle_vm86(void)
68674+{
68675+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
68676+ return;
68677+}
68678+
68679+void
68680+gr_log_badprocpid(const char *entry)
68681+{
68682+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
68683+ return;
68684+}
68685diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
68686new file mode 100644
68687index 0000000..2131422
68688--- /dev/null
68689+++ b/grsecurity/grsec_mount.c
68690@@ -0,0 +1,62 @@
68691+#include <linux/kernel.h>
68692+#include <linux/sched.h>
68693+#include <linux/mount.h>
68694+#include <linux/grsecurity.h>
68695+#include <linux/grinternal.h>
68696+
68697+void
68698+gr_log_remount(const char *devname, const int retval)
68699+{
68700+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
68701+ if (grsec_enable_mount && (retval >= 0))
68702+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
68703+#endif
68704+ return;
68705+}
68706+
68707+void
68708+gr_log_unmount(const char *devname, const int retval)
68709+{
68710+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
68711+ if (grsec_enable_mount && (retval >= 0))
68712+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
68713+#endif
68714+ return;
68715+}
68716+
68717+void
68718+gr_log_mount(const char *from, const char *to, const int retval)
68719+{
68720+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
68721+ if (grsec_enable_mount && (retval >= 0))
68722+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
68723+#endif
68724+ return;
68725+}
68726+
68727+int
68728+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
68729+{
68730+#ifdef CONFIG_GRKERNSEC_ROFS
68731+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
68732+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
68733+ return -EPERM;
68734+ } else
68735+ return 0;
68736+#endif
68737+ return 0;
68738+}
68739+
68740+int
68741+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
68742+{
68743+#ifdef CONFIG_GRKERNSEC_ROFS
68744+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
68745+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
68746+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
68747+ return -EPERM;
68748+ } else
68749+ return 0;
68750+#endif
68751+ return 0;
68752+}
68753diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
68754new file mode 100644
68755index 0000000..6ee9d50
68756--- /dev/null
68757+++ b/grsecurity/grsec_pax.c
68758@@ -0,0 +1,45 @@
68759+#include <linux/kernel.h>
68760+#include <linux/sched.h>
68761+#include <linux/mm.h>
68762+#include <linux/file.h>
68763+#include <linux/grinternal.h>
68764+#include <linux/grsecurity.h>
68765+
68766+void
68767+gr_log_textrel(struct vm_area_struct * vma)
68768+{
68769+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
68770+ if (grsec_enable_log_rwxmaps)
68771+ gr_log_textrel_ulong_ulong(GR_DONT_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
68772+#endif
68773+ return;
68774+}
68775+
68776+void gr_log_ptgnustack(struct file *file)
68777+{
68778+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
68779+ if (grsec_enable_log_rwxmaps)
68780+ gr_log_rwxmap(GR_DONT_AUDIT, GR_PTGNUSTACK_MSG, file);
68781+#endif
68782+ return;
68783+}
68784+
68785+void
68786+gr_log_rwxmmap(struct file *file)
68787+{
68788+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
68789+ if (grsec_enable_log_rwxmaps)
68790+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
68791+#endif
68792+ return;
68793+}
68794+
68795+void
68796+gr_log_rwxmprotect(struct vm_area_struct *vma)
68797+{
68798+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
68799+ if (grsec_enable_log_rwxmaps)
68800+ gr_log_rwxmap_vma(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, vma);
68801+#endif
68802+ return;
68803+}
68804diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
68805new file mode 100644
68806index 0000000..f7f29aa
68807--- /dev/null
68808+++ b/grsecurity/grsec_ptrace.c
68809@@ -0,0 +1,30 @@
68810+#include <linux/kernel.h>
68811+#include <linux/sched.h>
68812+#include <linux/grinternal.h>
68813+#include <linux/security.h>
68814+
68815+void
68816+gr_audit_ptrace(struct task_struct *task)
68817+{
68818+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
68819+ if (grsec_enable_audit_ptrace)
68820+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
68821+#endif
68822+ return;
68823+}
68824+
68825+int
68826+gr_ptrace_readexec(struct file *file, int unsafe_flags)
68827+{
68828+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
68829+ const struct dentry *dentry = file->f_path.dentry;
68830+ const struct vfsmount *mnt = file->f_path.mnt;
68831+
68832+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
68833+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
68834+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
68835+ return -EACCES;
68836+ }
68837+#endif
68838+ return 0;
68839+}
68840diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
68841new file mode 100644
68842index 0000000..4e29cc7
68843--- /dev/null
68844+++ b/grsecurity/grsec_sig.c
68845@@ -0,0 +1,246 @@
68846+#include <linux/kernel.h>
68847+#include <linux/sched.h>
68848+#include <linux/fs.h>
68849+#include <linux/delay.h>
68850+#include <linux/grsecurity.h>
68851+#include <linux/grinternal.h>
68852+#include <linux/hardirq.h>
68853+
68854+char *signames[] = {
68855+ [SIGSEGV] = "Segmentation fault",
68856+ [SIGILL] = "Illegal instruction",
68857+ [SIGABRT] = "Abort",
68858+ [SIGBUS] = "Invalid alignment/Bus error"
68859+};
68860+
68861+void
68862+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
68863+{
68864+#ifdef CONFIG_GRKERNSEC_SIGNAL
68865+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
68866+ (sig == SIGABRT) || (sig == SIGBUS))) {
68867+ if (task_pid_nr(t) == task_pid_nr(current)) {
68868+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
68869+ } else {
68870+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
68871+ }
68872+ }
68873+#endif
68874+ return;
68875+}
68876+
68877+int
68878+gr_handle_signal(const struct task_struct *p, const int sig)
68879+{
68880+#ifdef CONFIG_GRKERNSEC
68881+ /* ignore the 0 signal for protected task checks */
68882+ if (task_pid_nr(current) > 1 && sig && gr_check_protected_task(p)) {
68883+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
68884+ return -EPERM;
68885+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
68886+ return -EPERM;
68887+ }
68888+#endif
68889+ return 0;
68890+}
68891+
68892+#ifdef CONFIG_GRKERNSEC
68893+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
68894+
68895+int gr_fake_force_sig(int sig, struct task_struct *t)
68896+{
68897+ unsigned long int flags;
68898+ int ret, blocked, ignored;
68899+ struct k_sigaction *action;
68900+
68901+ spin_lock_irqsave(&t->sighand->siglock, flags);
68902+ action = &t->sighand->action[sig-1];
68903+ ignored = action->sa.sa_handler == SIG_IGN;
68904+ blocked = sigismember(&t->blocked, sig);
68905+ if (blocked || ignored) {
68906+ action->sa.sa_handler = SIG_DFL;
68907+ if (blocked) {
68908+ sigdelset(&t->blocked, sig);
68909+ recalc_sigpending_and_wake(t);
68910+ }
68911+ }
68912+ if (action->sa.sa_handler == SIG_DFL)
68913+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
68914+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
68915+
68916+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
68917+
68918+ return ret;
68919+}
68920+#endif
68921+
68922+#ifdef CONFIG_GRKERNSEC_BRUTE
68923+#define GR_USER_BAN_TIME (15 * 60)
68924+#define GR_DAEMON_BRUTE_TIME (30 * 60)
68925+
68926+static int __get_dumpable(unsigned long mm_flags)
68927+{
68928+ int ret;
68929+
68930+ ret = mm_flags & MMF_DUMPABLE_MASK;
68931+ return (ret >= 2) ? 2 : ret;
68932+}
68933+#endif
68934+
68935+void gr_handle_brute_attach(unsigned long mm_flags)
68936+{
68937+#ifdef CONFIG_GRKERNSEC_BRUTE
68938+ struct task_struct *p = current;
68939+ kuid_t uid = GLOBAL_ROOT_UID;
68940+ int daemon = 0;
68941+
68942+ if (!grsec_enable_brute)
68943+ return;
68944+
68945+ rcu_read_lock();
68946+ read_lock(&tasklist_lock);
68947+ read_lock(&grsec_exec_file_lock);
68948+ if (p->real_parent && gr_is_same_file(p->real_parent->exec_file, p->exec_file)) {
68949+ p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME;
68950+ p->real_parent->brute = 1;
68951+ daemon = 1;
68952+ } else {
68953+ const struct cred *cred = __task_cred(p), *cred2;
68954+ struct task_struct *tsk, *tsk2;
68955+
68956+ if (!__get_dumpable(mm_flags) && gr_is_global_nonroot(cred->uid)) {
68957+ struct user_struct *user;
68958+
68959+ uid = cred->uid;
68960+
68961+ /* this find_user() reference is dropped on a later exec once the ban expires */
68962+ user = find_user(uid);
68963+ if (user == NULL)
68964+ goto unlock;
68965+ user->suid_banned = 1;
68966+ user->suid_ban_expires = get_seconds() + GR_USER_BAN_TIME;
68967+ if (user->suid_ban_expires == ~0UL)
68968+ user->suid_ban_expires--;
68969+
68970+ /* only kill other threads of the same binary, from the same user */
68971+ do_each_thread(tsk2, tsk) {
68972+ cred2 = __task_cred(tsk);
68973+ if (tsk != p && uid_eq(cred2->uid, uid) && gr_is_same_file(tsk->exec_file, p->exec_file))
68974+ gr_fake_force_sig(SIGKILL, tsk);
68975+ } while_each_thread(tsk2, tsk);
68976+ }
68977+ }
68978+unlock:
68979+ read_unlock(&grsec_exec_file_lock);
68980+ read_unlock(&tasklist_lock);
68981+ rcu_read_unlock();
68982+
68983+ if (gr_is_global_nonroot(uid))
68984+ gr_log_fs_int2(GR_DONT_AUDIT, GR_BRUTE_SUID_MSG, p->exec_file->f_path.dentry, p->exec_file->f_path.mnt, GR_GLOBAL_UID(uid), GR_USER_BAN_TIME / 60);
68985+ else if (daemon)
68986+ gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG);
68987+
68988+#endif
68989+ return;
68990+}
68991+
68992+void gr_handle_brute_check(void)
68993+{
68994+#ifdef CONFIG_GRKERNSEC_BRUTE
68995+ struct task_struct *p = current;
68996+
68997+ if (unlikely(p->brute)) {
68998+ if (!grsec_enable_brute)
68999+ p->brute = 0;
69000+ else if (time_before(get_seconds(), p->brute_expires))
69001+ msleep(30 * 1000);
69002+ }
69003+#endif
69004+ return;
69005+}
69006+
69007+void gr_handle_kernel_exploit(void)
69008+{
69009+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
69010+ const struct cred *cred;
69011+ struct task_struct *tsk, *tsk2;
69012+ struct user_struct *user;
69013+ kuid_t uid;
69014+
69015+ if (in_irq() || in_serving_softirq() || in_nmi())
69016+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
69017+
69018+ uid = current_uid();
69019+
69020+ if (gr_is_global_root(uid))
69021+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
69022+ else {
69023+ /* kill all the processes of this user, hold a reference
69024+ to their creds struct, and prevent them from creating
69025+ another process until system reset
69026+ */
69027+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n",
69028+ GR_GLOBAL_UID(uid));
69029+ /* we intentionally leak this ref */
69030+ user = get_uid(current->cred->user);
69031+ if (user)
69032+ user->kernel_banned = 1;
69033+
69034+ /* kill all processes of this user */
69035+ read_lock(&tasklist_lock);
69036+ do_each_thread(tsk2, tsk) {
69037+ cred = __task_cred(tsk);
69038+ if (uid_eq(cred->uid, uid))
69039+ gr_fake_force_sig(SIGKILL, tsk);
69040+ } while_each_thread(tsk2, tsk);
69041+ read_unlock(&tasklist_lock);
69042+ }
69043+#endif
69044+}
69045+
69046+#ifdef CONFIG_GRKERNSEC_BRUTE
69047+static bool suid_ban_expired(struct user_struct *user)
69048+{
69049+ if (user->suid_ban_expires != ~0UL && time_after_eq(get_seconds(), user->suid_ban_expires)) {
69050+ user->suid_banned = 0;
69051+ user->suid_ban_expires = 0;
69052+ free_uid(user);
69053+ return true;
69054+ }
69055+
69056+ return false;
69057+}
69058+#endif
69059+
69060+int gr_process_kernel_exec_ban(void)
69061+{
69062+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
69063+ if (unlikely(current->cred->user->kernel_banned))
69064+ return -EPERM;
69065+#endif
69066+ return 0;
69067+}
69068+
69069+int gr_process_kernel_setuid_ban(struct user_struct *user)
69070+{
69071+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
69072+ if (unlikely(user->kernel_banned))
69073+ gr_fake_force_sig(SIGKILL, current);
69074+#endif
69075+ return 0;
69076+}
69077+
69078+int gr_process_suid_exec_ban(const struct linux_binprm *bprm)
69079+{
69080+#ifdef CONFIG_GRKERNSEC_BRUTE
69081+ struct user_struct *user = current->cred->user;
69082+ if (unlikely(user->suid_banned)) {
69083+ if (suid_ban_expired(user))
69084+ return 0;
69085+ /* disallow execution of suid binaries only */
69086+ else if (!uid_eq(bprm->cred->euid, current->cred->uid))
69087+ return -EPERM;
69088+ }
69089+#endif
69090+ return 0;
69091+}
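
The brute-force handling above bans a nonroot user from suid execution for GR_USER_BAN_TIME seconds, reserves ~0UL as a "never expires" sentinel (the value used by the kernel-exploit lockout), and drops the ban lazily on the next exec. A self-contained sketch of that bookkeeping follows; time(2) stands in for get_seconds(), names are invented for the demo, and a plain comparison replaces the kernel's wraparound-safe time_after_eq().

#include <stdio.h>
#include <time.h>

#define BAN_TIME (15 * 60) /* mirrors GR_USER_BAN_TIME */

struct fake_user {
    int banned;
    unsigned long ban_expires; /* 0 = unset; ~0UL reserved as "never" */
};

/* Impose a ban; back off one second if the sum happens to land on the
 * ~0UL sentinel, exactly as the patch does for suid_ban_expires. */
static void impose_ban(struct fake_user *u)
{
    u->banned = 1;
    u->ban_expires = (unsigned long)time(NULL) + BAN_TIME;
    if (u->ban_expires == ~0UL)
        u->ban_expires--;
}

/* Expiry test corresponding to suid_ban_expired(): a ban holding the
 * sentinel never expires on its own. (The kernel uses time_after_eq()
 * to stay correct across counter wraparound; this sketch does not.) */
static int ban_expired(struct fake_user *u)
{
    if (u->ban_expires != ~0UL &&
        (unsigned long)time(NULL) >= u->ban_expires) {
        u->banned = 0;
        u->ban_expires = 0;
        return 1;
    }
    return 0;
}

int main(void)
{
    struct fake_user u = { 0, 0 };

    impose_ban(&u);
    printf("banned=%d, expires in %lu s, expired now? %d\n",
           u.banned, u.ban_expires - (unsigned long)time(NULL),
           ban_expired(&u));
    return 0;
}
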
69092diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
69093new file mode 100644
69094index 0000000..4030d57
69095--- /dev/null
69096+++ b/grsecurity/grsec_sock.c
69097@@ -0,0 +1,244 @@
69098+#include <linux/kernel.h>
69099+#include <linux/module.h>
69100+#include <linux/sched.h>
69101+#include <linux/file.h>
69102+#include <linux/net.h>
69103+#include <linux/in.h>
69104+#include <linux/ip.h>
69105+#include <net/sock.h>
69106+#include <net/inet_sock.h>
69107+#include <linux/grsecurity.h>
69108+#include <linux/grinternal.h>
69109+#include <linux/gracl.h>
69110+
69111+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
69112+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
69113+
69114+EXPORT_SYMBOL(gr_search_udp_recvmsg);
69115+EXPORT_SYMBOL(gr_search_udp_sendmsg);
69116+
69117+#ifdef CONFIG_UNIX_MODULE
69118+EXPORT_SYMBOL(gr_acl_handle_unix);
69119+EXPORT_SYMBOL(gr_acl_handle_mknod);
69120+EXPORT_SYMBOL(gr_handle_chroot_unix);
69121+EXPORT_SYMBOL(gr_handle_create);
69122+#endif
69123+
69124+#ifdef CONFIG_GRKERNSEC
69125+#define gr_conn_table_size 32749
69126+struct conn_table_entry {
69127+ struct conn_table_entry *next;
69128+ struct signal_struct *sig;
69129+};
69130+
69131+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
69132+DEFINE_SPINLOCK(gr_conn_table_lock);
69133+
69134+extern const char * gr_socktype_to_name(unsigned char type);
69135+extern const char * gr_proto_to_name(unsigned char proto);
69136+extern const char * gr_sockfamily_to_name(unsigned char family);
69137+
69138+static __inline__ int
69139+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
69140+{
69141+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
69142+}
69143+
69144+static __inline__ int
69145+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
69146+ __u16 sport, __u16 dport)
69147+{
69148+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
69149+ sig->gr_sport == sport && sig->gr_dport == dport))
69150+ return 1;
69151+ else
69152+ return 0;
69153+}
69154+
69155+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
69156+{
69157+ struct conn_table_entry **match;
69158+ unsigned int index;
69159+
69160+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
69161+ sig->gr_sport, sig->gr_dport,
69162+ gr_conn_table_size);
69163+
69164+ newent->sig = sig;
69165+
69166+ match = &gr_conn_table[index];
69167+ newent->next = *match;
69168+ *match = newent;
69169+
69170+ return;
69171+}
69172+
69173+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
69174+{
69175+ struct conn_table_entry *match, *last = NULL;
69176+ unsigned int index;
69177+
69178+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
69179+ sig->gr_sport, sig->gr_dport,
69180+ gr_conn_table_size);
69181+
69182+ match = gr_conn_table[index];
69183+ while (match && !conn_match(match->sig,
69184+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
69185+ sig->gr_dport)) {
69186+ last = match;
69187+ match = match->next;
69188+ }
69189+
69190+ if (match) {
69191+ if (last)
69192+ last->next = match->next;
69193+ else
69194+ gr_conn_table[index] = NULL;
69195+ kfree(match);
69196+ }
69197+
69198+ return;
69199+}
69200+
69201+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
69202+ __u16 sport, __u16 dport)
69203+{
69204+ struct conn_table_entry *match;
69205+ unsigned int index;
69206+
69207+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
69208+
69209+ match = gr_conn_table[index];
69210+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
69211+ match = match->next;
69212+
69213+ if (match)
69214+ return match->sig;
69215+ else
69216+ return NULL;
69217+}
69218+
69219+#endif
69220+
69221+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
69222+{
69223+#ifdef CONFIG_GRKERNSEC
69224+ struct signal_struct *sig = task->signal;
69225+ struct conn_table_entry *newent;
69226+
69227+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
69228+ if (newent == NULL)
69229+ return;
69230+ /* no bh lock needed since we are called with bh disabled */
69231+ spin_lock(&gr_conn_table_lock);
69232+ gr_del_task_from_ip_table_nolock(sig);
69233+ sig->gr_saddr = inet->inet_rcv_saddr;
69234+ sig->gr_daddr = inet->inet_daddr;
69235+ sig->gr_sport = inet->inet_sport;
69236+ sig->gr_dport = inet->inet_dport;
69237+ gr_add_to_task_ip_table_nolock(sig, newent);
69238+ spin_unlock(&gr_conn_table_lock);
69239+#endif
69240+ return;
69241+}
69242+
69243+void gr_del_task_from_ip_table(struct task_struct *task)
69244+{
69245+#ifdef CONFIG_GRKERNSEC
69246+ spin_lock_bh(&gr_conn_table_lock);
69247+ gr_del_task_from_ip_table_nolock(task->signal);
69248+ spin_unlock_bh(&gr_conn_table_lock);
69249+#endif
69250+ return;
69251+}
69252+
69253+void
69254+gr_attach_curr_ip(const struct sock *sk)
69255+{
69256+#ifdef CONFIG_GRKERNSEC
69257+ struct signal_struct *p, *set;
69258+ const struct inet_sock *inet = inet_sk(sk);
69259+
69260+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
69261+ return;
69262+
69263+ set = current->signal;
69264+
69265+ spin_lock_bh(&gr_conn_table_lock);
69266+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
69267+ inet->inet_dport, inet->inet_sport);
69268+ if (unlikely(p != NULL)) {
69269+ set->curr_ip = p->curr_ip;
69270+ set->used_accept = 1;
69271+ gr_del_task_from_ip_table_nolock(p);
69272+ spin_unlock_bh(&gr_conn_table_lock);
69273+ return;
69274+ }
69275+ spin_unlock_bh(&gr_conn_table_lock);
69276+
69277+ set->curr_ip = inet->inet_daddr;
69278+ set->used_accept = 1;
69279+#endif
69280+ return;
69281+}
69282+
69283+int
69284+gr_handle_sock_all(const int family, const int type, const int protocol)
69285+{
69286+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
69287+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
69288+ (family != AF_UNIX)) {
69289+ if (family == AF_INET)
69290+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
69291+ else
69292+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
69293+ return -EACCES;
69294+ }
69295+#endif
69296+ return 0;
69297+}
69298+
69299+int
69300+gr_handle_sock_server(const struct sockaddr *sck)
69301+{
69302+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
69303+ if (grsec_enable_socket_server &&
69304+ in_group_p(grsec_socket_server_gid) &&
69305+ sck && (sck->sa_family != AF_UNIX) &&
69306+ (sck->sa_family != AF_LOCAL)) {
69307+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
69308+ return -EACCES;
69309+ }
69310+#endif
69311+ return 0;
69312+}
69313+
69314+int
69315+gr_handle_sock_server_other(const struct sock *sck)
69316+{
69317+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
69318+ if (grsec_enable_socket_server &&
69319+ in_group_p(grsec_socket_server_gid) &&
69320+ sck && (sck->sk_family != AF_UNIX) &&
69321+ (sck->sk_family != AF_LOCAL)) {
69322+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
69323+ return -EACCES;
69324+ }
69325+#endif
69326+ return 0;
69327+}
69328+
69329+int
69330+gr_handle_sock_client(const struct sockaddr *sck)
69331+{
69332+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
69333+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
69334+ sck && (sck->sa_family != AF_UNIX) &&
69335+ (sck->sa_family != AF_LOCAL)) {
69336+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
69337+ return -EACCES;
69338+ }
69339+#endif
69340+ return 0;
69341+}
69342diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
69343new file mode 100644
69344index 0000000..7624d1c
69345--- /dev/null
69346+++ b/grsecurity/grsec_sysctl.c
69347@@ -0,0 +1,460 @@
69348+#include <linux/kernel.h>
69349+#include <linux/sched.h>
69350+#include <linux/sysctl.h>
69351+#include <linux/grsecurity.h>
69352+#include <linux/grinternal.h>
69353+
69354+int
69355+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
69356+{
69357+#ifdef CONFIG_GRKERNSEC_SYSCTL
69358+ if (dirname == NULL || name == NULL)
69359+ return 0;
69360+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
69361+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
69362+ return -EACCES;
69363+ }
69364+#endif
69365+ return 0;
69366+}
69367+
69368+#ifdef CONFIG_GRKERNSEC_ROFS
69369+static int __maybe_unused one = 1;
69370+#endif
69371+
69372+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
69373+struct ctl_table grsecurity_table[] = {
69374+#ifdef CONFIG_GRKERNSEC_SYSCTL
69375+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
69376+#ifdef CONFIG_GRKERNSEC_IO
69377+ {
69378+ .procname = "disable_priv_io",
69379+ .data = &grsec_disable_privio,
69380+ .maxlen = sizeof(int),
69381+ .mode = 0600,
69382+ .proc_handler = &proc_dointvec,
69383+ },
69384+#endif
69385+#endif
69386+#ifdef CONFIG_GRKERNSEC_LINK
69387+ {
69388+ .procname = "linking_restrictions",
69389+ .data = &grsec_enable_link,
69390+ .maxlen = sizeof(int),
69391+ .mode = 0600,
69392+ .proc_handler = &proc_dointvec,
69393+ },
69394+#endif
69395+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
69396+ {
69397+ .procname = "enforce_symlinksifowner",
69398+ .data = &grsec_enable_symlinkown,
69399+ .maxlen = sizeof(int),
69400+ .mode = 0600,
69401+ .proc_handler = &proc_dointvec,
69402+ },
69403+ {
69404+ .procname = "symlinkown_gid",
69405+ .data = &grsec_symlinkown_gid,
69406+ .maxlen = sizeof(int),
69407+ .mode = 0600,
69408+ .proc_handler = &proc_dointvec,
69409+ },
69410+#endif
69411+#ifdef CONFIG_GRKERNSEC_BRUTE
69412+ {
69413+ .procname = "deter_bruteforce",
69414+ .data = &grsec_enable_brute,
69415+ .maxlen = sizeof(int),
69416+ .mode = 0600,
69417+ .proc_handler = &proc_dointvec,
69418+ },
69419+#endif
69420+#ifdef CONFIG_GRKERNSEC_FIFO
69421+ {
69422+ .procname = "fifo_restrictions",
69423+ .data = &grsec_enable_fifo,
69424+ .maxlen = sizeof(int),
69425+ .mode = 0600,
69426+ .proc_handler = &proc_dointvec,
69427+ },
69428+#endif
69429+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
69430+ {
69431+ .procname = "ptrace_readexec",
69432+ .data = &grsec_enable_ptrace_readexec,
69433+ .maxlen = sizeof(int),
69434+ .mode = 0600,
69435+ .proc_handler = &proc_dointvec,
69436+ },
69437+#endif
69438+#ifdef CONFIG_GRKERNSEC_SETXID
69439+ {
69440+ .procname = "consistent_setxid",
69441+ .data = &grsec_enable_setxid,
69442+ .maxlen = sizeof(int),
69443+ .mode = 0600,
69444+ .proc_handler = &proc_dointvec,
69445+ },
69446+#endif
69447+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69448+ {
69449+ .procname = "ip_blackhole",
69450+ .data = &grsec_enable_blackhole,
69451+ .maxlen = sizeof(int),
69452+ .mode = 0600,
69453+ .proc_handler = &proc_dointvec,
69454+ },
69455+ {
69456+ .procname = "lastack_retries",
69457+ .data = &grsec_lastack_retries,
69458+ .maxlen = sizeof(int),
69459+ .mode = 0600,
69460+ .proc_handler = &proc_dointvec,
69461+ },
69462+#endif
69463+#ifdef CONFIG_GRKERNSEC_EXECLOG
69464+ {
69465+ .procname = "exec_logging",
69466+ .data = &grsec_enable_execlog,
69467+ .maxlen = sizeof(int),
69468+ .mode = 0600,
69469+ .proc_handler = &proc_dointvec,
69470+ },
69471+#endif
69472+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
69473+ {
69474+ .procname = "rwxmap_logging",
69475+ .data = &grsec_enable_log_rwxmaps,
69476+ .maxlen = sizeof(int),
69477+ .mode = 0600,
69478+ .proc_handler = &proc_dointvec,
69479+ },
69480+#endif
69481+#ifdef CONFIG_GRKERNSEC_SIGNAL
69482+ {
69483+ .procname = "signal_logging",
69484+ .data = &grsec_enable_signal,
69485+ .maxlen = sizeof(int),
69486+ .mode = 0600,
69487+ .proc_handler = &proc_dointvec,
69488+ },
69489+#endif
69490+#ifdef CONFIG_GRKERNSEC_FORKFAIL
69491+ {
69492+ .procname = "forkfail_logging",
69493+ .data = &grsec_enable_forkfail,
69494+ .maxlen = sizeof(int),
69495+ .mode = 0600,
69496+ .proc_handler = &proc_dointvec,
69497+ },
69498+#endif
69499+#ifdef CONFIG_GRKERNSEC_TIME
69500+ {
69501+ .procname = "timechange_logging",
69502+ .data = &grsec_enable_time,
69503+ .maxlen = sizeof(int),
69504+ .mode = 0600,
69505+ .proc_handler = &proc_dointvec,
69506+ },
69507+#endif
69508+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
69509+ {
69510+ .procname = "chroot_deny_shmat",
69511+ .data = &grsec_enable_chroot_shmat,
69512+ .maxlen = sizeof(int),
69513+ .mode = 0600,
69514+ .proc_handler = &proc_dointvec,
69515+ },
69516+#endif
69517+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
69518+ {
69519+ .procname = "chroot_deny_unix",
69520+ .data = &grsec_enable_chroot_unix,
69521+ .maxlen = sizeof(int),
69522+ .mode = 0600,
69523+ .proc_handler = &proc_dointvec,
69524+ },
69525+#endif
69526+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
69527+ {
69528+ .procname = "chroot_deny_mount",
69529+ .data = &grsec_enable_chroot_mount,
69530+ .maxlen = sizeof(int),
69531+ .mode = 0600,
69532+ .proc_handler = &proc_dointvec,
69533+ },
69534+#endif
69535+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
69536+ {
69537+ .procname = "chroot_deny_fchdir",
69538+ .data = &grsec_enable_chroot_fchdir,
69539+ .maxlen = sizeof(int),
69540+ .mode = 0600,
69541+ .proc_handler = &proc_dointvec,
69542+ },
69543+#endif
69544+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
69545+ {
69546+ .procname = "chroot_deny_chroot",
69547+ .data = &grsec_enable_chroot_double,
69548+ .maxlen = sizeof(int),
69549+ .mode = 0600,
69550+ .proc_handler = &proc_dointvec,
69551+ },
69552+#endif
69553+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
69554+ {
69555+ .procname = "chroot_deny_pivot",
69556+ .data = &grsec_enable_chroot_pivot,
69557+ .maxlen = sizeof(int),
69558+ .mode = 0600,
69559+ .proc_handler = &proc_dointvec,
69560+ },
69561+#endif
69562+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
69563+ {
69564+ .procname = "chroot_enforce_chdir",
69565+ .data = &grsec_enable_chroot_chdir,
69566+ .maxlen = sizeof(int),
69567+ .mode = 0600,
69568+ .proc_handler = &proc_dointvec,
69569+ },
69570+#endif
69571+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
69572+ {
69573+ .procname = "chroot_deny_chmod",
69574+ .data = &grsec_enable_chroot_chmod,
69575+ .maxlen = sizeof(int),
69576+ .mode = 0600,
69577+ .proc_handler = &proc_dointvec,
69578+ },
69579+#endif
69580+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
69581+ {
69582+ .procname = "chroot_deny_mknod",
69583+ .data = &grsec_enable_chroot_mknod,
69584+ .maxlen = sizeof(int),
69585+ .mode = 0600,
69586+ .proc_handler = &proc_dointvec,
69587+ },
69588+#endif
69589+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
69590+ {
69591+ .procname = "chroot_restrict_nice",
69592+ .data = &grsec_enable_chroot_nice,
69593+ .maxlen = sizeof(int),
69594+ .mode = 0600,
69595+ .proc_handler = &proc_dointvec,
69596+ },
69597+#endif
69598+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
69599+ {
69600+ .procname = "chroot_execlog",
69601+ .data = &grsec_enable_chroot_execlog,
69602+ .maxlen = sizeof(int),
69603+ .mode = 0600,
69604+ .proc_handler = &proc_dointvec,
69605+ },
69606+#endif
69607+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
69608+ {
69609+ .procname = "chroot_caps",
69610+ .data = &grsec_enable_chroot_caps,
69611+ .maxlen = sizeof(int),
69612+ .mode = 0600,
69613+ .proc_handler = &proc_dointvec,
69614+ },
69615+#endif
69616+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
69617+ {
69618+ .procname = "chroot_deny_sysctl",
69619+ .data = &grsec_enable_chroot_sysctl,
69620+ .maxlen = sizeof(int),
69621+ .mode = 0600,
69622+ .proc_handler = &proc_dointvec,
69623+ },
69624+#endif
69625+#ifdef CONFIG_GRKERNSEC_TPE
69626+ {
69627+ .procname = "tpe",
69628+ .data = &grsec_enable_tpe,
69629+ .maxlen = sizeof(int),
69630+ .mode = 0600,
69631+ .proc_handler = &proc_dointvec,
69632+ },
69633+ {
69634+ .procname = "tpe_gid",
69635+ .data = &grsec_tpe_gid,
69636+ .maxlen = sizeof(int),
69637+ .mode = 0600,
69638+ .proc_handler = &proc_dointvec,
69639+ },
69640+#endif
69641+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
69642+ {
69643+ .procname = "tpe_invert",
69644+ .data = &grsec_enable_tpe_invert,
69645+ .maxlen = sizeof(int),
69646+ .mode = 0600,
69647+ .proc_handler = &proc_dointvec,
69648+ },
69649+#endif
69650+#ifdef CONFIG_GRKERNSEC_TPE_ALL
69651+ {
69652+ .procname = "tpe_restrict_all",
69653+ .data = &grsec_enable_tpe_all,
69654+ .maxlen = sizeof(int),
69655+ .mode = 0600,
69656+ .proc_handler = &proc_dointvec,
69657+ },
69658+#endif
69659+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
69660+ {
69661+ .procname = "socket_all",
69662+ .data = &grsec_enable_socket_all,
69663+ .maxlen = sizeof(int),
69664+ .mode = 0600,
69665+ .proc_handler = &proc_dointvec,
69666+ },
69667+ {
69668+ .procname = "socket_all_gid",
69669+ .data = &grsec_socket_all_gid,
69670+ .maxlen = sizeof(int),
69671+ .mode = 0600,
69672+ .proc_handler = &proc_dointvec,
69673+ },
69674+#endif
69675+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
69676+ {
69677+ .procname = "socket_client",
69678+ .data = &grsec_enable_socket_client,
69679+ .maxlen = sizeof(int),
69680+ .mode = 0600,
69681+ .proc_handler = &proc_dointvec,
69682+ },
69683+ {
69684+ .procname = "socket_client_gid",
69685+ .data = &grsec_socket_client_gid,
69686+ .maxlen = sizeof(int),
69687+ .mode = 0600,
69688+ .proc_handler = &proc_dointvec,
69689+ },
69690+#endif
69691+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
69692+ {
69693+ .procname = "socket_server",
69694+ .data = &grsec_enable_socket_server,
69695+ .maxlen = sizeof(int),
69696+ .mode = 0600,
69697+ .proc_handler = &proc_dointvec,
69698+ },
69699+ {
69700+ .procname = "socket_server_gid",
69701+ .data = &grsec_socket_server_gid,
69702+ .maxlen = sizeof(int),
69703+ .mode = 0600,
69704+ .proc_handler = &proc_dointvec,
69705+ },
69706+#endif
69707+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
69708+ {
69709+ .procname = "audit_group",
69710+ .data = &grsec_enable_group,
69711+ .maxlen = sizeof(int),
69712+ .mode = 0600,
69713+ .proc_handler = &proc_dointvec,
69714+ },
69715+ {
69716+ .procname = "audit_gid",
69717+ .data = &grsec_audit_gid,
69718+ .maxlen = sizeof(int),
69719+ .mode = 0600,
69720+ .proc_handler = &proc_dointvec,
69721+ },
69722+#endif
69723+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
69724+ {
69725+ .procname = "audit_chdir",
69726+ .data = &grsec_enable_chdir,
69727+ .maxlen = sizeof(int),
69728+ .mode = 0600,
69729+ .proc_handler = &proc_dointvec,
69730+ },
69731+#endif
69732+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
69733+ {
69734+ .procname = "audit_mount",
69735+ .data = &grsec_enable_mount,
69736+ .maxlen = sizeof(int),
69737+ .mode = 0600,
69738+ .proc_handler = &proc_dointvec,
69739+ },
69740+#endif
69741+#ifdef CONFIG_GRKERNSEC_DMESG
69742+ {
69743+ .procname = "dmesg",
69744+ .data = &grsec_enable_dmesg,
69745+ .maxlen = sizeof(int),
69746+ .mode = 0600,
69747+ .proc_handler = &proc_dointvec,
69748+ },
69749+#endif
69750+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
69751+ {
69752+ .procname = "chroot_findtask",
69753+ .data = &grsec_enable_chroot_findtask,
69754+ .maxlen = sizeof(int),
69755+ .mode = 0600,
69756+ .proc_handler = &proc_dointvec,
69757+ },
69758+#endif
69759+#ifdef CONFIG_GRKERNSEC_RESLOG
69760+ {
69761+ .procname = "resource_logging",
69762+ .data = &grsec_resource_logging,
69763+ .maxlen = sizeof(int),
69764+ .mode = 0600,
69765+ .proc_handler = &proc_dointvec,
69766+ },
69767+#endif
69768+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
69769+ {
69770+ .procname = "audit_ptrace",
69771+ .data = &grsec_enable_audit_ptrace,
69772+ .maxlen = sizeof(int),
69773+ .mode = 0600,
69774+ .proc_handler = &proc_dointvec,
69775+ },
69776+#endif
69777+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
69778+ {
69779+ .procname = "harden_ptrace",
69780+ .data = &grsec_enable_harden_ptrace,
69781+ .maxlen = sizeof(int),
69782+ .mode = 0600,
69783+ .proc_handler = &proc_dointvec,
69784+ },
69785+#endif
69786+ {
69787+ .procname = "grsec_lock",
69788+ .data = &grsec_lock,
69789+ .maxlen = sizeof(int),
69790+ .mode = 0600,
69791+ .proc_handler = &proc_dointvec,
69792+ },
69793+#endif
69794+#ifdef CONFIG_GRKERNSEC_ROFS
69795+ {
69796+ .procname = "romount_protect",
69797+ .data = &grsec_enable_rofs,
69798+ .maxlen = sizeof(int),
69799+ .mode = 0600,
69800+ .proc_handler = &proc_dointvec_minmax,
69801+ .extra1 = &one,
69802+ .extra2 = &one,
69803+ },
69804+#endif
69805+ { }
69806+};
69807+#endif
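
Every entry in grsecurity_table above is an int knob exposed under /proc/sys/kernel/grsecurity with mode 0600; once grsec_lock is set, gr_handle_sysctl_mod() returns -EACCES for further writes into that directory. A userspace sketch of poking one knob — the path assumes a kernel built with this patch and CONFIG_GRKERNSEC_SYSCTL enabled:

#include <stdio.h>
#include <string.h>
#include <errno.h>

/* Sketch: read a grsecurity sysctl toggle, then attempt to flip it.
 * The path below exists only on a kernel carrying this patch. */
static int read_toggle(const char *path)
{
    FILE *f = fopen(path, "r");
    int val = -1;

    if (!f)
        return -1;
    if (fscanf(f, "%d", &val) != 1)
        val = -1;
    fclose(f);
    return val;
}

int main(void)
{
    const char *knob = "/proc/sys/kernel/grsecurity/deter_bruteforce";
    FILE *f;

    printf("current value: %d\n", read_toggle(knob));

    f = fopen(knob, "w");
    if (!f || fputs("1\n", f) == EOF)
        /* expect EACCES here once grsec_lock has been set to 1 */
        fprintf(stderr, "write failed: %s\n", strerror(errno));
    if (f)
        fclose(f);
    return 0;
}
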
69808diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
69809new file mode 100644
69810index 0000000..0dc13c3
69811--- /dev/null
69812+++ b/grsecurity/grsec_time.c
69813@@ -0,0 +1,16 @@
69814+#include <linux/kernel.h>
69815+#include <linux/sched.h>
69816+#include <linux/grinternal.h>
69817+#include <linux/module.h>
69818+
69819+void
69820+gr_log_timechange(void)
69821+{
69822+#ifdef CONFIG_GRKERNSEC_TIME
69823+ if (grsec_enable_time)
69824+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
69825+#endif
69826+ return;
69827+}
69828+
69829+EXPORT_SYMBOL(gr_log_timechange);
69830diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
69831new file mode 100644
69832index 0000000..ee57dcf
69833--- /dev/null
69834+++ b/grsecurity/grsec_tpe.c
69835@@ -0,0 +1,73 @@
69836+#include <linux/kernel.h>
69837+#include <linux/sched.h>
69838+#include <linux/file.h>
69839+#include <linux/fs.h>
69840+#include <linux/grinternal.h>
69841+
69842+extern int gr_acl_tpe_check(void);
69843+
69844+int
69845+gr_tpe_allow(const struct file *file)
69846+{
69847+#ifdef CONFIG_GRKERNSEC
69848+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
69849+ const struct cred *cred = current_cred();
69850+ char *msg = NULL;
69851+ char *msg2 = NULL;
69852+
69853+ // never restrict root
69854+ if (gr_is_global_root(cred->uid))
69855+ return 1;
69856+
69857+ if (grsec_enable_tpe) {
69858+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
69859+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
69860+ msg = "not being in trusted group";
69861+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
69862+ msg = "being in untrusted group";
69863+#else
69864+ if (in_group_p(grsec_tpe_gid))
69865+ msg = "being in untrusted group";
69866+#endif
69867+ }
69868+ if (!msg && gr_acl_tpe_check())
69869+ msg = "being in untrusted role";
69870+
69871+ // not in any affected group/role
69872+ if (!msg)
69873+ goto next_check;
69874+
69875+ if (gr_is_global_nonroot(inode->i_uid))
69876+ msg2 = "file in non-root-owned directory";
69877+ else if (inode->i_mode & S_IWOTH)
69878+ msg2 = "file in world-writable directory";
69879+ else if (inode->i_mode & S_IWGRP)
69880+ msg2 = "file in group-writable directory";
69881+
69882+ if (msg && msg2) {
69883+ char fullmsg[70] = {0};
69884+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
69885+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
69886+ return 0;
69887+ }
69888+ msg = NULL;
69889+next_check:
69890+#ifdef CONFIG_GRKERNSEC_TPE_ALL
69891+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
69892+ return 1;
69893+
69894+ if (gr_is_global_nonroot(inode->i_uid) && !uid_eq(inode->i_uid, cred->uid))
69895+ msg = "directory not owned by user";
69896+ else if (inode->i_mode & S_IWOTH)
69897+ msg = "file in world-writable directory";
69898+ else if (inode->i_mode & S_IWGRP)
69899+ msg = "file in group-writable directory";
69900+
69901+ if (msg) {
69902+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
69903+ return 0;
69904+ }
69905+#endif
69906+#endif
69907+ return 1;
69908+}
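
The TPE logic above only trusts a binary whose parent directory is root-owned and neither group- nor world-writable. A userspace approximation of that directory test using stat(2) — uid 0 stands in for gr_is_global_nonroot(), and the helper name is illustrative:

#include <stdio.h>
#include <string.h>
#include <libgen.h>
#include <sys/stat.h>

/* Sketch of the TPE directory test: an untrusted user's binary is
 * refused unless its parent directory is root-owned and neither
 * group- nor world-writable (mirrors the inode checks above). */
static const char *tpe_dir_reject_reason(const char *path)
{
    char buf[4096];
    struct stat st;

    strncpy(buf, path, sizeof(buf) - 1);
    buf[sizeof(buf) - 1] = '\0';

    if (stat(dirname(buf), &st) != 0)
        return "cannot stat parent directory";
    if (st.st_uid != 0)
        return "file in non-root-owned directory";
    if (st.st_mode & S_IWOTH)
        return "file in world-writable directory";
    if (st.st_mode & S_IWGRP)
        return "file in group-writable directory";
    return NULL; /* directory is trusted */
}

int main(int argc, char **argv)
{
    const char *path = argc > 1 ? argv[1] : "/tmp/evil";
    const char *why = tpe_dir_reject_reason(path);

    printf("%s: %s\n", path, why ? why : "trusted location");
    return 0;
}
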
69909diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
69910new file mode 100644
69911index 0000000..9f7b1ac
69912--- /dev/null
69913+++ b/grsecurity/grsum.c
69914@@ -0,0 +1,61 @@
69915+#include <linux/err.h>
69916+#include <linux/kernel.h>
69917+#include <linux/sched.h>
69918+#include <linux/mm.h>
69919+#include <linux/scatterlist.h>
69920+#include <linux/crypto.h>
69921+#include <linux/gracl.h>
69922+
69923+
69924+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
69925+#error "crypto and sha256 must be built into the kernel"
69926+#endif
69927+
69928+int
69929+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
69930+{
69931+ char *p;
69932+ struct crypto_hash *tfm;
69933+ struct hash_desc desc;
69934+ struct scatterlist sg;
69935+ unsigned char temp_sum[GR_SHA_LEN];
69936+ volatile int retval = 0;
69937+ volatile int dummy = 0;
69938+ unsigned int i;
69939+
69940+ sg_init_table(&sg, 1);
69941+
69942+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
69943+ if (IS_ERR(tfm)) {
69944+ /* should never happen, since sha256 should be built in */
69945+ return 1;
69946+ }
69947+
69948+ desc.tfm = tfm;
69949+ desc.flags = 0;
69950+
69951+ crypto_hash_init(&desc);
69952+
69953+ p = salt;
69954+ sg_set_buf(&sg, p, GR_SALT_LEN);
69955+ crypto_hash_update(&desc, &sg, sg.length);
69956+
69957+ p = entry->pw;
69958+ sg_set_buf(&sg, p, strlen(p));
69959+
69960+ crypto_hash_update(&desc, &sg, sg.length);
69961+
69962+ crypto_hash_final(&desc, temp_sum);
69963+
69964+ memset(entry->pw, 0, GR_PW_LEN);
69965+
69966+ for (i = 0; i < GR_SHA_LEN; i++)
69967+ if (sum[i] != temp_sum[i])
69968+ retval = 1;
69969+ else
69970+ dummy = 1; // waste a cycle
69971+
69972+ crypto_free_hash(tfm);
69973+
69974+ return retval;
69975+}
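
chkpw() above deliberately scans all GR_SHA_LEN bytes, flagging mismatches into retval and burning a cycle on matches, so the comparison takes the same time whether the password fails on the first byte or the last. The same idea in the more common accumulate-then-test form, as a standalone sketch:

#include <stdio.h>
#include <stddef.h>

/* Sketch of the comparison loop in chkpw(): accumulate mismatches
 * instead of returning early, so the running time does not depend on
 * the position of the first differing byte. */
static int ct_compare(const unsigned char *a, const unsigned char *b, size_t len)
{
    volatile unsigned char diff = 0;
    size_t i;

    for (i = 0; i < len; i++)
        diff |= a[i] ^ b[i]; /* nonzero iff any byte differs */

    return diff != 0; /* 1 = mismatch, matching chkpw()'s convention */
}

int main(void)
{
    unsigned char x[4] = { 1, 2, 3, 4 };
    unsigned char y[4] = { 1, 2, 3, 5 };

    printf("x vs x: %d\n", ct_compare(x, x, sizeof(x)));
    printf("x vs y: %d\n", ct_compare(x, y, sizeof(x)));
    return 0;
}
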
69976diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
69977index 77ff547..181834f 100644
69978--- a/include/asm-generic/4level-fixup.h
69979+++ b/include/asm-generic/4level-fixup.h
69980@@ -13,8 +13,10 @@
69981 #define pmd_alloc(mm, pud, address) \
69982 ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
69983 NULL: pmd_offset(pud, address))
69984+#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
69985
69986 #define pud_alloc(mm, pgd, address) (pgd)
69987+#define pud_alloc_kernel(mm, pgd, address) pud_alloc((mm), (pgd), (address))
69988 #define pud_offset(pgd, start) (pgd)
69989 #define pud_none(pud) 0
69990 #define pud_bad(pud) 0
69991diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
69992index b7babf0..04ad282 100644
69993--- a/include/asm-generic/atomic-long.h
69994+++ b/include/asm-generic/atomic-long.h
69995@@ -22,6 +22,12 @@
69996
69997 typedef atomic64_t atomic_long_t;
69998
69999+#ifdef CONFIG_PAX_REFCOUNT
70000+typedef atomic64_unchecked_t atomic_long_unchecked_t;
70001+#else
70002+typedef atomic64_t atomic_long_unchecked_t;
70003+#endif
70004+
70005 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
70006
70007 static inline long atomic_long_read(atomic_long_t *l)
70008@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
70009 return (long)atomic64_read(v);
70010 }
70011
70012+#ifdef CONFIG_PAX_REFCOUNT
70013+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
70014+{
70015+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
70016+
70017+ return (long)atomic64_read_unchecked(v);
70018+}
70019+#endif
70020+
70021 static inline void atomic_long_set(atomic_long_t *l, long i)
70022 {
70023 atomic64_t *v = (atomic64_t *)l;
70024@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
70025 atomic64_set(v, i);
70026 }
70027
70028+#ifdef CONFIG_PAX_REFCOUNT
70029+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
70030+{
70031+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
70032+
70033+ atomic64_set_unchecked(v, i);
70034+}
70035+#endif
70036+
70037 static inline void atomic_long_inc(atomic_long_t *l)
70038 {
70039 atomic64_t *v = (atomic64_t *)l;
70040@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
70041 atomic64_inc(v);
70042 }
70043
70044+#ifdef CONFIG_PAX_REFCOUNT
70045+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
70046+{
70047+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
70048+
70049+ atomic64_inc_unchecked(v);
70050+}
70051+#endif
70052+
70053 static inline void atomic_long_dec(atomic_long_t *l)
70054 {
70055 atomic64_t *v = (atomic64_t *)l;
70056@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
70057 atomic64_dec(v);
70058 }
70059
70060+#ifdef CONFIG_PAX_REFCOUNT
70061+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
70062+{
70063+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
70064+
70065+ atomic64_dec_unchecked(v);
70066+}
70067+#endif
70068+
70069 static inline void atomic_long_add(long i, atomic_long_t *l)
70070 {
70071 atomic64_t *v = (atomic64_t *)l;
70072@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
70073 atomic64_add(i, v);
70074 }
70075
70076+#ifdef CONFIG_PAX_REFCOUNT
70077+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
70078+{
70079+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
70080+
70081+ atomic64_add_unchecked(i, v);
70082+}
70083+#endif
70084+
70085 static inline void atomic_long_sub(long i, atomic_long_t *l)
70086 {
70087 atomic64_t *v = (atomic64_t *)l;
70088@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
70089 atomic64_sub(i, v);
70090 }
70091
70092+#ifdef CONFIG_PAX_REFCOUNT
70093+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
70094+{
70095+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
70096+
70097+ atomic64_sub_unchecked(i, v);
70098+}
70099+#endif
70100+
70101 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
70102 {
70103 atomic64_t *v = (atomic64_t *)l;
70104@@ -101,6 +161,15 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
70105 return (long)atomic64_add_return(i, v);
70106 }
70107
70108+#ifdef CONFIG_PAX_REFCOUNT
70109+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
70110+{
70111+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
70112+
70113+ return (long)atomic64_add_return_unchecked(i, v);
70114+}
70115+#endif
70116+
70117 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
70118 {
70119 atomic64_t *v = (atomic64_t *)l;
70120@@ -115,6 +184,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
70121 return (long)atomic64_inc_return(v);
70122 }
70123
70124+#ifdef CONFIG_PAX_REFCOUNT
70125+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
70126+{
70127+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
70128+
70129+ return (long)atomic64_inc_return_unchecked(v);
70130+}
70131+#endif
70132+
70133 static inline long atomic_long_dec_return(atomic_long_t *l)
70134 {
70135 atomic64_t *v = (atomic64_t *)l;
70136@@ -140,6 +218,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
70137
70138 typedef atomic_t atomic_long_t;
70139
70140+#ifdef CONFIG_PAX_REFCOUNT
70141+typedef atomic_unchecked_t atomic_long_unchecked_t;
70142+#else
70143+typedef atomic_t atomic_long_unchecked_t;
70144+#endif
70145+
70146 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
70147 static inline long atomic_long_read(atomic_long_t *l)
70148 {
70149@@ -148,6 +232,15 @@ static inline long atomic_long_read(atomic_long_t *l)
70150 return (long)atomic_read(v);
70151 }
70152
70153+#ifdef CONFIG_PAX_REFCOUNT
70154+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
70155+{
70156+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
70157+
70158+ return (long)atomic_read_unchecked(v);
70159+}
70160+#endif
70161+
70162 static inline void atomic_long_set(atomic_long_t *l, long i)
70163 {
70164 atomic_t *v = (atomic_t *)l;
70165@@ -155,6 +248,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
70166 atomic_set(v, i);
70167 }
70168
70169+#ifdef CONFIG_PAX_REFCOUNT
70170+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
70171+{
70172+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
70173+
70174+ atomic_set_unchecked(v, i);
70175+}
70176+#endif
70177+
70178 static inline void atomic_long_inc(atomic_long_t *l)
70179 {
70180 atomic_t *v = (atomic_t *)l;
70181@@ -162,6 +264,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
70182 atomic_inc(v);
70183 }
70184
70185+#ifdef CONFIG_PAX_REFCOUNT
70186+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
70187+{
70188+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
70189+
70190+ atomic_inc_unchecked(v);
70191+}
70192+#endif
70193+
70194 static inline void atomic_long_dec(atomic_long_t *l)
70195 {
70196 atomic_t *v = (atomic_t *)l;
70197@@ -169,6 +280,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
70198 atomic_dec(v);
70199 }
70200
70201+#ifdef CONFIG_PAX_REFCOUNT
70202+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
70203+{
70204+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
70205+
70206+ atomic_dec_unchecked(v);
70207+}
70208+#endif
70209+
70210 static inline void atomic_long_add(long i, atomic_long_t *l)
70211 {
70212 atomic_t *v = (atomic_t *)l;
70213@@ -176,6 +296,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
70214 atomic_add(i, v);
70215 }
70216
70217+#ifdef CONFIG_PAX_REFCOUNT
70218+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
70219+{
70220+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
70221+
70222+ atomic_add_unchecked(i, v);
70223+}
70224+#endif
70225+
70226 static inline void atomic_long_sub(long i, atomic_long_t *l)
70227 {
70228 atomic_t *v = (atomic_t *)l;
70229@@ -183,6 +312,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
70230 atomic_sub(i, v);
70231 }
70232
70233+#ifdef CONFIG_PAX_REFCOUNT
70234+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
70235+{
70236+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
70237+
70238+ atomic_sub_unchecked(i, v);
70239+}
70240+#endif
70241+
70242 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
70243 {
70244 atomic_t *v = (atomic_t *)l;
70245@@ -218,6 +356,16 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
70246 return (long)atomic_add_return(i, v);
70247 }
70248
70249+#ifdef CONFIG_PAX_REFCOUNT
70250+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
70251+{
70252+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
70253+
70254+ return (long)atomic_add_return_unchecked(i, v);
70255+}
70256+
70257+#endif
70258+
70259 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
70260 {
70261 atomic_t *v = (atomic_t *)l;
70262@@ -232,6 +380,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
70263 return (long)atomic_inc_return(v);
70264 }
70265
70266+#ifdef CONFIG_PAX_REFCOUNT
70267+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
70268+{
70269+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
70270+
70271+ return (long)atomic_inc_return_unchecked(v);
70272+}
70273+#endif
70274+
70275 static inline long atomic_long_dec_return(atomic_long_t *l)
70276 {
70277 atomic_t *v = (atomic_t *)l;
70278@@ -255,4 +412,57 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
70279
70280 #endif /* BITS_PER_LONG == 64 */
70281
70282+#ifdef CONFIG_PAX_REFCOUNT
70283+static inline void pax_refcount_needs_these_functions(void)
70284+{
70285+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
70286+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
70287+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
70288+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
70289+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
70290+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
70291+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
70292+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
70293+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
70294+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
70295+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
70296+#ifdef CONFIG_X86
70297+ atomic_clear_mask_unchecked(0, NULL);
70298+ atomic_set_mask_unchecked(0, NULL);
70299+#endif
70300+
70301+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
70302+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
70303+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
70304+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
70305+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
70306+ atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
70307+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
70308+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
70309+}
70310+#else
70311+#define atomic_read_unchecked(v) atomic_read(v)
70312+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
70313+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
70314+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
70315+#define atomic_inc_unchecked(v) atomic_inc(v)
70316+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
70317+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
70318+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
70319+#define atomic_dec_unchecked(v) atomic_dec(v)
70320+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
70321+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
70322+#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
70323+#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
70324+
70325+#define atomic_long_read_unchecked(v) atomic_long_read(v)
70326+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
70327+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
70328+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
70329+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
70330+#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v))
70331+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
70332+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
70333+#endif
70334+
70335 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
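
The atomic-long changes above follow one pattern throughout: with CONFIG_PAX_REFCOUNT, the *_unchecked types are distinct and deliberately skip the overflow detection applied to the checked operations; without it, they alias the plain types and operations, so callers compile unchanged either way. A toy sketch of that dual-build pattern (all names invented for the demo; toggle -DPAX_REFCOUNT_DEMO to exercise both configurations):

#include <stdio.h>

#ifdef PAX_REFCOUNT_DEMO
/* hardened build: two distinct types; the checked flavor is where a
 * real implementation would add overflow detection */
typedef struct { volatile long counter; } demo_long_t;
typedef struct { volatile long counter; } demo_long_unchecked_t;
#define demo_inc(v)           ((v)->counter++)
#define demo_inc_unchecked(v) ((v)->counter++)
#else
/* plain build: the _unchecked flavor simply aliases the normal one */
typedef struct { volatile long counter; } demo_long_t;
typedef demo_long_t demo_long_unchecked_t;
#define demo_inc(v)           ((v)->counter++)
#define demo_inc_unchecked(v) demo_inc(v)
#endif

int main(void)
{
    demo_long_t ref = { 0 };
    demo_long_unchecked_t stat = { 0 };

    demo_inc(&ref);            /* reference count: checked flavor */
    demo_inc_unchecked(&stat); /* statistics counter: wraparound is harmless */
    printf("ref=%ld stat=%ld\n", ref.counter, stat.counter);
    return 0;
}
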
70336diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
70337index 33bd2de..f31bff97 100644
70338--- a/include/asm-generic/atomic.h
70339+++ b/include/asm-generic/atomic.h
70340@@ -153,7 +153,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
70341 * Atomically clears the bits set in @mask from @v
70342 */
70343 #ifndef atomic_clear_mask
70344-static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
70345+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
70346 {
70347 unsigned long flags;
70348
70349diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
70350index b18ce4f..2ee2843 100644
70351--- a/include/asm-generic/atomic64.h
70352+++ b/include/asm-generic/atomic64.h
70353@@ -16,6 +16,8 @@ typedef struct {
70354 long long counter;
70355 } atomic64_t;
70356
70357+typedef atomic64_t atomic64_unchecked_t;
70358+
70359 #define ATOMIC64_INIT(i) { (i) }
70360
70361 extern long long atomic64_read(const atomic64_t *v);
70362@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
70363 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
70364 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
70365
70366+#define atomic64_read_unchecked(v) atomic64_read(v)
70367+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
70368+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
70369+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
70370+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
70371+#define atomic64_inc_unchecked(v) atomic64_inc(v)
70372+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
70373+#define atomic64_dec_unchecked(v) atomic64_dec(v)
70374+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
70375+
70376 #endif /* _ASM_GENERIC_ATOMIC64_H */
70377diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
70378index 1bfcfe5..e04c5c9 100644
70379--- a/include/asm-generic/cache.h
70380+++ b/include/asm-generic/cache.h
70381@@ -6,7 +6,7 @@
70382 * cache lines need to provide their own cache.h.
70383 */
70384
70385-#define L1_CACHE_SHIFT 5
70386-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
70387+#define L1_CACHE_SHIFT 5UL
70388+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
70389
70390 #endif /* __ASM_GENERIC_CACHE_H */
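
The UL suffixes above change L1_CACHE_BYTES from int to unsigned long, so arithmetic built on it is done at word width and stays unsigned. A small sketch of the difference under C's usual arithmetic conversions:

#include <stdio.h>

#define BYTES_INT   (1 << 5)   /* int-typed constant, as before the patch */
#define BYTES_ULONG (1UL << 5) /* unsigned-long-typed, as after */

int main(void)
{
    /* the constant's type decides the width of expressions built on it */
    printf("sizeof(1 << 5)   = %zu\n", sizeof(BYTES_INT));
    printf("sizeof(1UL << 5) = %zu\n", sizeof(BYTES_ULONG));

    /* and its signedness: subtraction goes negative in one case and
     * wraps to a huge unsigned value in the other */
    long si = BYTES_INT - 64;            /* int arithmetic: -32 */
    unsigned long su = BYTES_ULONG - 64; /* unsigned long arithmetic: wraps */
    printf("signed result:   %ld\n", si);
    printf("unsigned result: %lu\n", su);
    return 0;
}
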
70391diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
70392index 0d68a1e..b74a761 100644
70393--- a/include/asm-generic/emergency-restart.h
70394+++ b/include/asm-generic/emergency-restart.h
70395@@ -1,7 +1,7 @@
70396 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
70397 #define _ASM_GENERIC_EMERGENCY_RESTART_H
70398
70399-static inline void machine_emergency_restart(void)
70400+static inline __noreturn void machine_emergency_restart(void)
70401 {
70402 machine_restart(NULL);
70403 }
70404diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
70405index 90f99c7..00ce236 100644
70406--- a/include/asm-generic/kmap_types.h
70407+++ b/include/asm-generic/kmap_types.h
70408@@ -2,9 +2,9 @@
70409 #define _ASM_GENERIC_KMAP_TYPES_H
70410
70411 #ifdef __WITH_KM_FENCE
70412-# define KM_TYPE_NR 41
70413+# define KM_TYPE_NR 42
70414 #else
70415-# define KM_TYPE_NR 20
70416+# define KM_TYPE_NR 21
70417 #endif
70418
70419 #endif
70420diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
70421index 9ceb03b..62b0b8f 100644
70422--- a/include/asm-generic/local.h
70423+++ b/include/asm-generic/local.h
70424@@ -23,24 +23,37 @@ typedef struct
70425 atomic_long_t a;
70426 } local_t;
70427
70428+typedef struct {
70429+ atomic_long_unchecked_t a;
70430+} local_unchecked_t;
70431+
70432 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
70433
70434 #define local_read(l) atomic_long_read(&(l)->a)
70435+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
70436 #define local_set(l,i) atomic_long_set((&(l)->a),(i))
70437+#define local_set_unchecked(l,i) atomic_long_set_unchecked((&(l)->a),(i))
70438 #define local_inc(l) atomic_long_inc(&(l)->a)
70439+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
70440 #define local_dec(l) atomic_long_dec(&(l)->a)
70441+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
70442 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
70443+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
70444 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
70445+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
70446
70447 #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
70448 #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
70449 #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
70450 #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
70451 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
70452+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
70453 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
70454 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
70455+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
70456
70457 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
70458+#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
70459 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
70460 #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
70461 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
70462diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
70463index 725612b..9cc513a 100644
70464--- a/include/asm-generic/pgtable-nopmd.h
70465+++ b/include/asm-generic/pgtable-nopmd.h
70466@@ -1,14 +1,19 @@
70467 #ifndef _PGTABLE_NOPMD_H
70468 #define _PGTABLE_NOPMD_H
70469
70470-#ifndef __ASSEMBLY__
70471-
70472 #include <asm-generic/pgtable-nopud.h>
70473
70474-struct mm_struct;
70475-
70476 #define __PAGETABLE_PMD_FOLDED
70477
70478+#define PMD_SHIFT PUD_SHIFT
70479+#define PTRS_PER_PMD 1
70480+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
70481+#define PMD_MASK (~(PMD_SIZE-1))
70482+
70483+#ifndef __ASSEMBLY__
70484+
70485+struct mm_struct;
70486+
70487 /*
70488 * Having the pmd type consist of a pud gets the size right, and allows
70489 * us to conceptually access the pud entry that this pmd is folded into
70490@@ -16,11 +21,6 @@ struct mm_struct;
70491 */
70492 typedef struct { pud_t pud; } pmd_t;
70493
70494-#define PMD_SHIFT PUD_SHIFT
70495-#define PTRS_PER_PMD 1
70496-#define PMD_SIZE (1UL << PMD_SHIFT)
70497-#define PMD_MASK (~(PMD_SIZE-1))
70498-
70499 /*
70500 * The "pud_xxx()" functions here are trivial for a folded two-level
70501 * setup: the pmd is never bad, and a pmd always exists (as it's folded
70502diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
70503index 810431d..0ec4804f 100644
70504--- a/include/asm-generic/pgtable-nopud.h
70505+++ b/include/asm-generic/pgtable-nopud.h
70506@@ -1,10 +1,15 @@
70507 #ifndef _PGTABLE_NOPUD_H
70508 #define _PGTABLE_NOPUD_H
70509
70510-#ifndef __ASSEMBLY__
70511-
70512 #define __PAGETABLE_PUD_FOLDED
70513
70514+#define PUD_SHIFT PGDIR_SHIFT
70515+#define PTRS_PER_PUD 1
70516+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
70517+#define PUD_MASK (~(PUD_SIZE-1))
70518+
70519+#ifndef __ASSEMBLY__
70520+
70521 /*
70522 * Having the pud type consist of a pgd gets the size right, and allows
70523 * us to conceptually access the pgd entry that this pud is folded into
70524@@ -12,11 +17,6 @@
70525 */
70526 typedef struct { pgd_t pgd; } pud_t;
70527
70528-#define PUD_SHIFT PGDIR_SHIFT
70529-#define PTRS_PER_PUD 1
70530-#define PUD_SIZE (1UL << PUD_SHIFT)
70531-#define PUD_MASK (~(PUD_SIZE-1))
70532-
70533 /*
70534 * The "pgd_xxx()" functions here are trivial for a folded two-level
70535 * setup: the pud is never bad, and a pud always exists (as it's folded
70536@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
70537 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
70538
70539 #define pgd_populate(mm, pgd, pud) do { } while (0)
70540+#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
70541 /*
70542 * (puds are folded into pgds so this doesn't get actually called,
70543 * but the define is needed for a generic inline function.)
70544diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
70545index a59ff51..2594a70 100644
70546--- a/include/asm-generic/pgtable.h
70547+++ b/include/asm-generic/pgtable.h
70548@@ -688,6 +688,14 @@ static inline pmd_t pmd_mknuma(pmd_t pmd)
70549 }
70550 #endif /* CONFIG_NUMA_BALANCING */
70551
70552+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
70553+static inline unsigned long pax_open_kernel(void) { return 0; }
70554+#endif
70555+
70556+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
70557+static inline unsigned long pax_close_kernel(void) { return 0; }
70558+#endif
70559+
70560 #endif /* CONFIG_MMU */
70561
70562 #endif /* !__ASSEMBLY__ */
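
pax_open_kernel()/pax_close_kernel() above are the no-op generic fallbacks; an architecture that write-protects kernel pages (PaX on x86 toggles CR0.WP, for instance) overrides them, and callers bracket each sanctioned write to otherwise read-only kernel data between the two. A sketch of the calling pattern, with stub hooks so it runs as plain C:

#include <stdio.h>

/* Stub arch hooks mirroring the generic no-ops above; a real arch
 * implementation would drop and restore write protection here. */
static unsigned long pax_open_kernel(void)  { return 0; }
static unsigned long pax_close_kernel(void) { return 0; }

static int mostly_read_only_setting = 42; /* stands in for protected data */

static void update_protected_setting(int v)
{
    pax_open_kernel();            /* temporarily allow the write */
    mostly_read_only_setting = v; /* the one sanctioned write */
    pax_close_kernel();           /* restore write protection */
}

int main(void)
{
    update_protected_setting(7);
    printf("setting = %d\n", mostly_read_only_setting);
    return 0;
}
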
70563diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
70564index 13821c3..5672d7e 100644
70565--- a/include/asm-generic/tlb.h
70566+++ b/include/asm-generic/tlb.h
70567@@ -112,7 +112,7 @@ struct mmu_gather {
70568
70569 #define HAVE_GENERIC_MMU_GATHER
70570
70571-void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm);
70572+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end);
70573 void tlb_flush_mmu(struct mmu_gather *tlb);
70574 void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
70575 unsigned long end);
70576diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
70577index c184aa8..d049942 100644
70578--- a/include/asm-generic/uaccess.h
70579+++ b/include/asm-generic/uaccess.h
70580@@ -343,4 +343,12 @@ clear_user(void __user *to, unsigned long n)
70581 return __clear_user(to, n);
70582 }
70583
70584+#ifndef __HAVE_ARCH_PAX_OPEN_USERLAND
70585+//static inline unsigned long pax_open_userland(void) { return 0; }
70586+#endif
70587+
70588+#ifndef __HAVE_ARCH_PAX_CLOSE_USERLAND
70589+//static inline unsigned long pax_close_userland(void) { return 0; }
70590+#endif
70591+
70592 #endif /* __ASM_GENERIC_UACCESS_H */
70593diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
70594index eb58d2d..df131bf 100644
70595--- a/include/asm-generic/vmlinux.lds.h
70596+++ b/include/asm-generic/vmlinux.lds.h
70597@@ -239,6 +239,7 @@
70598 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
70599 VMLINUX_SYMBOL(__start_rodata) = .; \
70600 *(.rodata) *(.rodata.*) \
70601+ *(.data..read_only) \
70602 *(__vermagic) /* Kernel version magic */ \
70603 . = ALIGN(8); \
70604 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
70605@@ -749,17 +750,18 @@
70606 * section in the linker script will go there too. @phdr should have
70607 * a leading colon.
70608 *
70609- * Note that this macros defines __per_cpu_load as an absolute symbol.
70610+ * Note that this macro defines per_cpu_load as an absolute symbol.
70611 * If there is no need to put the percpu section at a predetermined
70612 * address, use PERCPU_SECTION.
70613 */
70614 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
70615- VMLINUX_SYMBOL(__per_cpu_load) = .; \
70616- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
70617+ per_cpu_load = .; \
70618+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
70619 - LOAD_OFFSET) { \
70620+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
70621 PERCPU_INPUT(cacheline) \
70622 } phdr \
70623- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
70624+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
70625
70626 /**
70627 * PERCPU_SECTION - define output section for percpu area, simple version
70628diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
70629index 418d270..bfd2794 100644
70630--- a/include/crypto/algapi.h
70631+++ b/include/crypto/algapi.h
70632@@ -34,7 +34,7 @@ struct crypto_type {
70633 unsigned int maskclear;
70634 unsigned int maskset;
70635 unsigned int tfmsize;
70636-};
70637+} __do_const;
70638
70639 struct crypto_instance {
70640 struct crypto_alg alg;
70641diff --git a/include/drm/drmP.h b/include/drm/drmP.h
70642index 63d17ee..716de2b 100644
70643--- a/include/drm/drmP.h
70644+++ b/include/drm/drmP.h
70645@@ -72,6 +72,7 @@
70646 #include <linux/workqueue.h>
70647 #include <linux/poll.h>
70648 #include <asm/pgalloc.h>
70649+#include <asm/local.h>
70650 #include <drm/drm.h>
70651 #include <drm/drm_sarea.h>
70652
70653@@ -296,10 +297,12 @@ do { \
70654 * \param cmd command.
70655 * \param arg argument.
70656 */
70657-typedef int drm_ioctl_t(struct drm_device *dev, void *data,
70658+typedef int (* const drm_ioctl_t)(struct drm_device *dev, void *data,
70659+ struct drm_file *file_priv);
70660+typedef int (* drm_ioctl_no_const_t)(struct drm_device *dev, void *data,
70661 struct drm_file *file_priv);
70662
70663-typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
70664+typedef int (* const drm_ioctl_compat_t)(struct file *filp, unsigned int cmd,
70665 unsigned long arg);
70666
70667 #define DRM_IOCTL_NR(n) _IOC_NR(n)
70668@@ -314,10 +317,10 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
70669 struct drm_ioctl_desc {
70670 unsigned int cmd;
70671 int flags;
70672- drm_ioctl_t *func;
70673+ drm_ioctl_t func;
70674 unsigned int cmd_drv;
70675 const char *name;
70676-};
70677+} __do_const;
70678
70679 /**
70680 * Creates a driver or general drm_ioctl_desc array entry for the given
70681@@ -1015,7 +1018,7 @@ struct drm_info_list {
70682 int (*show)(struct seq_file*, void*); /** show callback */
70683 u32 driver_features; /**< Required driver features for this entry */
70684 void *data;
70685-};
70686+} __do_const;
70687
70688 /**
70689 * debugfs node structure. This structure represents a debugfs file.
70690@@ -1088,7 +1091,7 @@ struct drm_device {
70691
70692 /** \name Usage Counters */
70693 /*@{ */
70694- int open_count; /**< Outstanding files open */
70695+ local_t open_count; /**< Outstanding files open */
70696 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
70697 atomic_t vma_count; /**< Outstanding vma areas open */
70698 int buf_use; /**< Buffers in use -- cannot alloc */
70699@@ -1099,7 +1102,7 @@ struct drm_device {
70700 /*@{ */
70701 unsigned long counters;
70702 enum drm_stat_type types[15];
70703- atomic_t counts[15];
70704+ atomic_unchecked_t counts[15];
70705 /*@} */
70706
70707 struct list_head filelist;
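
Converting open_count from int to local_t (the reason for the new <asm/local.h> include above) gives the DRM core atomic, IRQ-safe counter updates without taking a lock. A hedged sketch of the resulting idiom, with a hypothetical function name:

#include <asm/local.h>

static local_t open_count = LOCAL_INIT(0);

static long drm_like_open(void)
{
        local_inc(&open_count);         /* replaces: dev->open_count++ */
        return local_read(&open_count); /* replaces: dev->open_count   */
}
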
70708diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
70709index f43d556..94d9343 100644
70710--- a/include/drm/drm_crtc_helper.h
70711+++ b/include/drm/drm_crtc_helper.h
70712@@ -109,7 +109,7 @@ struct drm_encoder_helper_funcs {
70713 struct drm_connector *connector);
70714 /* disable encoder when not in use - more explicit than dpms off */
70715 void (*disable)(struct drm_encoder *encoder);
70716-};
70717+} __no_const;
70718
70719 /**
70720 * drm_connector_helper_funcs - helper operations for connectors
70721diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
70722index 72dcbe8..8db58d7 100644
70723--- a/include/drm/ttm/ttm_memory.h
70724+++ b/include/drm/ttm/ttm_memory.h
70725@@ -48,7 +48,7 @@
70726
70727 struct ttm_mem_shrink {
70728 int (*do_shrink) (struct ttm_mem_shrink *);
70729-};
70730+} __no_const;
70731
70732 /**
70733 * struct ttm_mem_global - Global memory accounting structure.
70734diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h
70735index 4b840e8..155d235 100644
70736--- a/include/keys/asymmetric-subtype.h
70737+++ b/include/keys/asymmetric-subtype.h
70738@@ -37,7 +37,7 @@ struct asymmetric_key_subtype {
70739 /* Verify the signature on a key of this subtype (optional) */
70740 int (*verify_signature)(const struct key *key,
70741 const struct public_key_signature *sig);
70742-};
70743+} __do_const;
70744
70745 /**
70746 * asymmetric_key_subtype - Get the subtype from an asymmetric key
70747diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
70748index c1da539..1dcec55 100644
70749--- a/include/linux/atmdev.h
70750+++ b/include/linux/atmdev.h
70751@@ -28,7 +28,7 @@ struct compat_atm_iobuf {
70752 #endif
70753
70754 struct k_atm_aal_stats {
70755-#define __HANDLE_ITEM(i) atomic_t i
70756+#define __HANDLE_ITEM(i) atomic_unchecked_t i
70757 __AAL_STAT_ITEMS
70758 #undef __HANDLE_ITEM
70759 };
70760@@ -200,7 +200,7 @@ struct atmdev_ops { /* only send is required */
70761 int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags);
70762 int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page);
70763 struct module *owner;
70764-};
70765+} __do_const;
70766
70767 struct atmphy_ops {
70768 int (*start)(struct atm_dev *dev);
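
atomic_unchecked_t is the escape hatch from PaX's REFCOUNT overflow detection: pure statistics counters such as these AAL stats may wrap harmlessly, so they are moved to the parallel _unchecked API that this patch adds on the architecture side. A sketch under that assumption:

#include <linux/atomic.h>

static atomic_unchecked_t rx_drops = ATOMIC_INIT(0);

static void note_drop(void)
{
        atomic_inc_unchecked(&rx_drops);        /* may wrap; deliberately unchecked */
}

static int read_drops(void)
{
        return atomic_read_unchecked(&rx_drops);
}
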
70769diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
70770index 70cf138..0418ee2 100644
70771--- a/include/linux/binfmts.h
70772+++ b/include/linux/binfmts.h
70773@@ -73,8 +73,10 @@ struct linux_binfmt {
70774 int (*load_binary)(struct linux_binprm *);
70775 int (*load_shlib)(struct file *);
70776 int (*core_dump)(struct coredump_params *cprm);
70777+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
70778+ void (*handle_mmap)(struct file *);
70779 unsigned long min_coredump; /* minimal dump size */
70780-};
70781+} __do_const;
70782
70783 extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
70784
70785diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
70786index 2fdb4a4..54aad7e 100644
70787--- a/include/linux/blkdev.h
70788+++ b/include/linux/blkdev.h
70789@@ -1526,7 +1526,7 @@ struct block_device_operations {
70790 /* this callback is with swap_lock and sometimes page table lock held */
70791 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
70792 struct module *owner;
70793-};
70794+} __do_const;
70795
70796 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
70797 unsigned long);
70798diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
70799index 7c2e030..b72475d 100644
70800--- a/include/linux/blktrace_api.h
70801+++ b/include/linux/blktrace_api.h
70802@@ -23,7 +23,7 @@ struct blk_trace {
70803 struct dentry *dir;
70804 struct dentry *dropped_file;
70805 struct dentry *msg_file;
70806- atomic_t dropped;
70807+ atomic_unchecked_t dropped;
70808 };
70809
70810 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
70811diff --git a/include/linux/cache.h b/include/linux/cache.h
70812index 4c57065..4307975 100644
70813--- a/include/linux/cache.h
70814+++ b/include/linux/cache.h
70815@@ -16,6 +16,10 @@
70816 #define __read_mostly
70817 #endif
70818
70819+#ifndef __read_only
70820+#define __read_only __read_mostly
70821+#endif
70822+
70823 #ifndef ____cacheline_aligned
70824 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
70825 #endif
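
Generically, __read_only simply degrades to __read_mostly; with KERNEXEC the arch headers define it to place the object in the .data..read_only input section that the vmlinux.lds.h hunk above folds into .rodata, so any later write must be bracketed with pax_open_kernel(). A sketch with a hypothetical boot parameter as the single writer:

#include <linux/init.h>

static int hardening_enabled __read_only = 1;

static int __init nohardening_setup(char *str)
{
        pax_open_kernel();
        hardening_enabled = 0;  /* the only write, during early boot */
        pax_close_kernel();
        return 1;
}
__setup("nohardening", nohardening_setup);
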
70826diff --git a/include/linux/capability.h b/include/linux/capability.h
70827index d9a4f7f4..19f77d6 100644
70828--- a/include/linux/capability.h
70829+++ b/include/linux/capability.h
70830@@ -213,8 +213,13 @@ extern bool ns_capable(struct user_namespace *ns, int cap);
70831 extern bool nsown_capable(int cap);
70832 extern bool inode_capable(const struct inode *inode, int cap);
70833 extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
70834+extern bool capable_nolog(int cap);
70835+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
70836+extern bool inode_capable_nolog(const struct inode *inode, int cap);
70837
70838 /* audit system wants to get cap info from files as well */
70839 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
70840
70841+extern int is_privileged_binary(const struct dentry *dentry);
70842+
70843 #endif /* !_LINUX_CAPABILITY_H */
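
The _nolog variants perform exactly the same privilege check as capable()/ns_capable()/inode_capable() but skip grsecurity's audit logging, for call sites that merely probe a capability and expect denial to be the common case. A minimal, hypothetical caller:

static bool may_raise_priority(void)
{
        /* most tasks fail this probe; keep the audit log quiet */
        return capable_nolog(CAP_SYS_NICE);
}
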
70844diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
70845index 8609d57..86e4d79 100644
70846--- a/include/linux/cdrom.h
70847+++ b/include/linux/cdrom.h
70848@@ -87,7 +87,6 @@ struct cdrom_device_ops {
70849
70850 /* driver specifications */
70851 const int capability; /* capability flags */
70852- int n_minors; /* number of active minor devices */
70853 /* handle uniform packets for scsi type devices (scsi,atapi) */
70854 int (*generic_packet) (struct cdrom_device_info *,
70855 struct packet_command *);
70856diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
70857index 4ce9056..86caac6 100644
70858--- a/include/linux/cleancache.h
70859+++ b/include/linux/cleancache.h
70860@@ -31,7 +31,7 @@ struct cleancache_ops {
70861 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
70862 void (*invalidate_inode)(int, struct cleancache_filekey);
70863 void (*invalidate_fs)(int);
70864-};
70865+} __no_const;
70866
70867 extern struct cleancache_ops *
70868 cleancache_register_ops(struct cleancache_ops *ops);
70869diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
70870index 1186098..f87e53d 100644
70871--- a/include/linux/clk-provider.h
70872+++ b/include/linux/clk-provider.h
70873@@ -132,6 +132,7 @@ struct clk_ops {
70874 unsigned long);
70875 void (*init)(struct clk_hw *hw);
70876 };
70877+typedef struct clk_ops __no_const clk_ops_no_const;
70878
70879 /**
70880 * struct clk_init_data - holds init data that's common to all clocks and is
70881diff --git a/include/linux/compat.h b/include/linux/compat.h
70882index 7f0c1dd..206ac34 100644
70883--- a/include/linux/compat.h
70884+++ b/include/linux/compat.h
70885@@ -312,7 +312,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
70886 compat_size_t __user *len_ptr);
70887
70888 asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32);
70889-asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
70890+asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg) __intentional_overflow(0);
70891 asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
70892 asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
70893 compat_ssize_t msgsz, int msgflg);
70894@@ -419,7 +419,7 @@ extern int compat_ptrace_request(struct task_struct *child,
70895 extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
70896 compat_ulong_t addr, compat_ulong_t data);
70897 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
70898- compat_long_t addr, compat_long_t data);
70899+ compat_ulong_t addr, compat_ulong_t data);
70900
70901 asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, size_t);
70902 /*
70903@@ -669,6 +669,7 @@ asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr,
70904
70905 int compat_restore_altstack(const compat_stack_t __user *uss);
70906 int __compat_save_altstack(compat_stack_t __user *, unsigned long);
70907+void __compat_save_altstack_ex(compat_stack_t __user *, unsigned long);
70908
70909 asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid,
70910 struct compat_timespec __user *interval);
70911diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
70912index 842de22..7f3a41f 100644
70913--- a/include/linux/compiler-gcc4.h
70914+++ b/include/linux/compiler-gcc4.h
70915@@ -39,9 +39,29 @@
70916 # define __compiletime_warning(message) __attribute__((warning(message)))
70917 # define __compiletime_error(message) __attribute__((error(message)))
70918 #endif /* __CHECKER__ */
70919+
70920+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
70921+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
70922+#define __bos0(ptr) __bos((ptr), 0)
70923+#define __bos1(ptr) __bos((ptr), 1)
70924 #endif /* GCC_VERSION >= 40300 */
70925
70926 #if GCC_VERSION >= 40500
70927+
70928+#ifdef CONSTIFY_PLUGIN
70929+#define __no_const __attribute__((no_const))
70930+#define __do_const __attribute__((do_const))
70931+#endif
70932+
70933+#ifdef SIZE_OVERFLOW_PLUGIN
70934+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
70935+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
70936+#endif
70937+
70938+#ifdef LATENT_ENTROPY_PLUGIN
70939+#define __latent_entropy __attribute__((latent_entropy))
70940+#endif
70941+
70942 /*
70943 * Mark a position in code as unreachable. This can be used to
70944 * suppress control flow warnings after asm blocks that transfer
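
All of these annotations are inert unless the matching grsecurity GCC plugin is loaded: __do_const/__no_const drive the constify plugin, __size_overflow/__intentional_overflow the size_overflow plugin, __latent_entropy the entropy plugin, and __bos/__bos0/__bos1 wrap __builtin_object_size for fortify-style bounds checks. A minimal sketch of the __bos pattern (the helper name is hypothetical):

#include <linux/string.h>
#include <linux/bug.h>

static inline void copy_checked(void *dst, const void *src, size_t n)
{
        size_t cap = __bos0(dst);       /* compile-time object size, or (size_t)-1 if unknown */

        if (cap != (size_t)-1 && n > cap)
                BUG();                  /* provable overflow caught before the copy */
        memcpy(dst, src, n);
}
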
70945diff --git a/include/linux/compiler.h b/include/linux/compiler.h
70946index 92669cd..1771a15 100644
70947--- a/include/linux/compiler.h
70948+++ b/include/linux/compiler.h
70949@@ -5,11 +5,14 @@
70950
70951 #ifdef __CHECKER__
70952 # define __user __attribute__((noderef, address_space(1)))
70953+# define __force_user __force __user
70954 # define __kernel __attribute__((address_space(0)))
70955+# define __force_kernel __force __kernel
70956 # define __safe __attribute__((safe))
70957 # define __force __attribute__((force))
70958 # define __nocast __attribute__((nocast))
70959 # define __iomem __attribute__((noderef, address_space(2)))
70960+# define __force_iomem __force __iomem
70961 # define __must_hold(x) __attribute__((context(x,1,1)))
70962 # define __acquires(x) __attribute__((context(x,0,1)))
70963 # define __releases(x) __attribute__((context(x,1,0)))
70964@@ -17,20 +20,37 @@
70965 # define __release(x) __context__(x,-1)
70966 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
70967 # define __percpu __attribute__((noderef, address_space(3)))
70968+# define __force_percpu __force __percpu
70969 #ifdef CONFIG_SPARSE_RCU_POINTER
70970 # define __rcu __attribute__((noderef, address_space(4)))
70971+# define __force_rcu __force __rcu
70972 #else
70973 # define __rcu
70974+# define __force_rcu
70975 #endif
70976 extern void __chk_user_ptr(const volatile void __user *);
70977 extern void __chk_io_ptr(const volatile void __iomem *);
70978 #else
70979-# define __user
70980-# define __kernel
70981+# ifdef CHECKER_PLUGIN
70982+//# define __user
70983+//# define __force_user
70984+//# define __kernel
70985+//# define __force_kernel
70986+# else
70987+# ifdef STRUCTLEAK_PLUGIN
70988+# define __user __attribute__((user))
70989+# else
70990+# define __user
70991+# endif
70992+# define __force_user
70993+# define __kernel
70994+# define __force_kernel
70995+# endif
70996 # define __safe
70997 # define __force
70998 # define __nocast
70999 # define __iomem
71000+# define __force_iomem
71001 # define __chk_user_ptr(x) (void)0
71002 # define __chk_io_ptr(x) (void)0
71003 # define __builtin_warning(x, y...) (1)
71004@@ -41,7 +61,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
71005 # define __release(x) (void)0
71006 # define __cond_lock(x,c) (c)
71007 # define __percpu
71008+# define __force_percpu
71009 # define __rcu
71010+# define __force_rcu
71011 #endif
71012
71013 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
71014@@ -275,6 +297,26 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
71015 # define __attribute_const__ /* unimplemented */
71016 #endif
71017
71018+#ifndef __no_const
71019+# define __no_const
71020+#endif
71021+
71022+#ifndef __do_const
71023+# define __do_const
71024+#endif
71025+
71026+#ifndef __size_overflow
71027+# define __size_overflow(...)
71028+#endif
71029+
71030+#ifndef __intentional_overflow
71031+# define __intentional_overflow(...)
71032+#endif
71033+
71034+#ifndef __latent_entropy
71035+# define __latent_entropy
71036+#endif
71037+
71038 /*
71039 * Tell gcc if a function is cold. The compiler will assume any path
71040 * directly leading to the call is unlikely.
71041@@ -284,6 +326,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
71042 #define __cold
71043 #endif
71044
71045+#ifndef __alloc_size
71046+#define __alloc_size(...)
71047+#endif
71048+
71049+#ifndef __bos
71050+#define __bos(ptr, arg)
71051+#endif
71052+
71053+#ifndef __bos0
71054+#define __bos0(ptr)
71055+#endif
71056+
71057+#ifndef __bos1
71058+#define __bos1(ptr)
71059+#endif
71060+
71061 /* Simple shorthand for a section definition */
71062 #ifndef __section
71063 # define __section(S) __attribute__ ((__section__(#S)))
71064@@ -349,7 +407,8 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
71065 * use is to mediate communication between process-level code and irq/NMI
71066 * handlers, all running on the same CPU.
71067 */
71068-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
71069+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
71070+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
71071
71072 /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
71073 #ifdef CONFIG_KPROBES
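
With the added const qualifier, ACCESS_ONCE() now yields something that cannot be assigned through: every read compiles as before, but a write through it becomes a compile error and has to be converted to the new ACCESS_ONCE_RW(). Sketch:

static int shared_flag;

static void bump(void)
{
        int seen = ACCESS_ONCE(shared_flag);    /* read: const volatile access */

        ACCESS_ONCE_RW(shared_flag) = seen + 1; /* write: the _RW form is required */
}
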
71074diff --git a/include/linux/completion.h b/include/linux/completion.h
71075index 33f0280..35c6568 100644
71076--- a/include/linux/completion.h
71077+++ b/include/linux/completion.h
71078@@ -79,15 +79,15 @@ static inline void init_completion(struct completion *x)
71079 extern void wait_for_completion(struct completion *);
71080 extern void wait_for_completion_io(struct completion *);
71081 extern int wait_for_completion_interruptible(struct completion *x);
71082-extern int wait_for_completion_killable(struct completion *x);
71083+extern int wait_for_completion_killable(struct completion *x) __intentional_overflow(-1);
71084 extern unsigned long wait_for_completion_timeout(struct completion *x,
71085 unsigned long timeout);
71086 extern unsigned long wait_for_completion_io_timeout(struct completion *x,
71087 unsigned long timeout);
71088 extern long wait_for_completion_interruptible_timeout(
71089- struct completion *x, unsigned long timeout);
71090+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
71091 extern long wait_for_completion_killable_timeout(
71092- struct completion *x, unsigned long timeout);
71093+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
71094 extern bool try_wait_for_completion(struct completion *x);
71095 extern bool completion_done(struct completion *x);
71096
71097diff --git a/include/linux/configfs.h b/include/linux/configfs.h
71098index 34025df..d94bbbc 100644
71099--- a/include/linux/configfs.h
71100+++ b/include/linux/configfs.h
71101@@ -125,7 +125,7 @@ struct configfs_attribute {
71102 const char *ca_name;
71103 struct module *ca_owner;
71104 umode_t ca_mode;
71105-};
71106+} __do_const;
71107
71108 /*
71109 * Users often need to create attribute structures for their configurable
71110diff --git a/include/linux/cpu.h b/include/linux/cpu.h
71111index 9f3c7e8..a18c7b6 100644
71112--- a/include/linux/cpu.h
71113+++ b/include/linux/cpu.h
71114@@ -115,7 +115,7 @@ enum {
71115 /* Need to know about CPUs going up/down? */
71116 #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE)
71117 #define cpu_notifier(fn, pri) { \
71118- static struct notifier_block fn##_nb __cpuinitdata = \
71119+ static struct notifier_block fn##_nb = \
71120 { .notifier_call = fn, .priority = pri }; \
71121 register_cpu_notifier(&fn##_nb); \
71122 }
71123diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
71124index 037d36a..ca5fe6e 100644
71125--- a/include/linux/cpufreq.h
71126+++ b/include/linux/cpufreq.h
71127@@ -262,7 +262,7 @@ struct cpufreq_driver {
71128 int (*suspend) (struct cpufreq_policy *policy);
71129 int (*resume) (struct cpufreq_policy *policy);
71130 struct freq_attr **attr;
71131-};
71132+} __do_const;
71133
71134 /* flags */
71135
71136@@ -321,6 +321,7 @@ struct global_attr {
71137 ssize_t (*store)(struct kobject *a, struct attribute *b,
71138 const char *c, size_t count);
71139 };
71140+typedef struct global_attr __no_const global_attr_no_const;
71141
71142 #define define_one_global_ro(_name) \
71143 static struct global_attr _name = \
71144diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
71145index 8f04062..900239a 100644
71146--- a/include/linux/cpuidle.h
71147+++ b/include/linux/cpuidle.h
71148@@ -52,7 +52,8 @@ struct cpuidle_state {
71149 int index);
71150
71151 int (*enter_dead) (struct cpuidle_device *dev, int index);
71152-};
71153+} __do_const;
71154+typedef struct cpuidle_state __no_const cpuidle_state_no_const;
71155
71156 /* Idle State Flags */
71157 #define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */
71158@@ -191,7 +192,7 @@ struct cpuidle_governor {
71159 void (*reflect) (struct cpuidle_device *dev, int index);
71160
71161 struct module *owner;
71162-};
71163+} __do_const;
71164
71165 #ifdef CONFIG_CPU_IDLE
71166
71167diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
71168index d08e4d2..95fad61 100644
71169--- a/include/linux/cpumask.h
71170+++ b/include/linux/cpumask.h
71171@@ -118,17 +118,17 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
71172 }
71173
71174 /* Valid inputs for n are -1 and 0. */
71175-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
71176+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
71177 {
71178 return n+1;
71179 }
71180
71181-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
71182+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
71183 {
71184 return n+1;
71185 }
71186
71187-static inline unsigned int cpumask_next_and(int n,
71188+static inline unsigned int __intentional_overflow(-1) cpumask_next_and(int n,
71189 const struct cpumask *srcp,
71190 const struct cpumask *andp)
71191 {
71192@@ -167,7 +167,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
71193 *
71194 * Returns >= nr_cpu_ids if no further cpus set.
71195 */
71196-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
71197+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
71198 {
71199 /* -1 is a legal arg here. */
71200 if (n != -1)
71201@@ -182,7 +182,7 @@ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
71202 *
71203 * Returns >= nr_cpu_ids if no further cpus unset.
71204 */
71205-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
71206+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
71207 {
71208 /* -1 is a legal arg here. */
71209 if (n != -1)
71210@@ -190,7 +190,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
71211 return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
71212 }
71213
71214-int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
71215+int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *) __intentional_overflow(-1);
71216 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
71217
71218 /**
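
The __intentional_overflow(-1) annotations whitelist, for the size_overflow plugin, the deliberate n + 1 computation from the sentinel n == -1 with which every cpumask scan starts. A sketch of the iteration pattern being whitelisted:

#include <linux/cpumask.h>
#include <linux/printk.h>

static void visit_online_cpus(void)
{
        int cpu = -1;   /* sentinel: the first call computes cpumask_next(-1, ...) */

        while ((cpu = cpumask_next(cpu, cpu_online_mask)) < nr_cpu_ids)
                pr_info("cpu %d is online\n", cpu);
}
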
71219diff --git a/include/linux/cred.h b/include/linux/cred.h
71220index 04421e8..6bce4ef 100644
71221--- a/include/linux/cred.h
71222+++ b/include/linux/cred.h
71223@@ -194,6 +194,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
71224 static inline void validate_process_creds(void)
71225 {
71226 }
71227+static inline void validate_task_creds(struct task_struct *task)
71228+{
71229+}
71230 #endif
71231
71232 /**
71233diff --git a/include/linux/crypto.h b/include/linux/crypto.h
71234index b92eadf..b4ecdc1 100644
71235--- a/include/linux/crypto.h
71236+++ b/include/linux/crypto.h
71237@@ -373,7 +373,7 @@ struct cipher_tfm {
71238 const u8 *key, unsigned int keylen);
71239 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
71240 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
71241-};
71242+} __no_const;
71243
71244 struct hash_tfm {
71245 int (*init)(struct hash_desc *desc);
71246@@ -394,13 +394,13 @@ struct compress_tfm {
71247 int (*cot_decompress)(struct crypto_tfm *tfm,
71248 const u8 *src, unsigned int slen,
71249 u8 *dst, unsigned int *dlen);
71250-};
71251+} __no_const;
71252
71253 struct rng_tfm {
71254 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
71255 unsigned int dlen);
71256 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
71257-};
71258+} __no_const;
71259
71260 #define crt_ablkcipher crt_u.ablkcipher
71261 #define crt_aead crt_u.aead
71262diff --git a/include/linux/ctype.h b/include/linux/ctype.h
71263index 653589e..4ef254a 100644
71264--- a/include/linux/ctype.h
71265+++ b/include/linux/ctype.h
71266@@ -56,7 +56,7 @@ static inline unsigned char __toupper(unsigned char c)
71267 * Fast implementation of tolower() for internal usage. Do not use in your
71268 * code.
71269 */
71270-static inline char _tolower(const char c)
71271+static inline unsigned char _tolower(const unsigned char c)
71272 {
71273 return c | 0x20;
71274 }
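
Taking and returning unsigned char keeps the OR within 0..255; with plain char (signed on most architectures), any byte >= 0x80 is negative and sign-extends when widened to int. A worked example:

static unsigned char tolower_demo(void)
{
        unsigned char c = 0xC4;         /* 'Ä' in Latin-1; -60 as a signed char */

        return _tolower(c);             /* 0xC4 | 0x20 == 0xE4 ('ä'), no sign extension */
}
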
71275diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
71276index 7925bf0..d5143d2 100644
71277--- a/include/linux/decompress/mm.h
71278+++ b/include/linux/decompress/mm.h
71279@@ -77,7 +77,7 @@ static void free(void *where)
71280 * warnings when not needed (indeed large_malloc / large_free are not
71281 * needed by inflate */
71282
71283-#define malloc(a) kmalloc(a, GFP_KERNEL)
71284+#define malloc(a) kmalloc((a), GFP_KERNEL)
71285 #define free(a) kfree(a)
71286
71287 #define large_malloc(a) vmalloc(a)
71288diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
71289index fe8c447..bdc1f33 100644
71290--- a/include/linux/devfreq.h
71291+++ b/include/linux/devfreq.h
71292@@ -114,7 +114,7 @@ struct devfreq_governor {
71293 int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
71294 int (*event_handler)(struct devfreq *devfreq,
71295 unsigned int event, void *data);
71296-};
71297+} __do_const;
71298
71299 /**
71300 * struct devfreq - Device devfreq structure
71301diff --git a/include/linux/device.h b/include/linux/device.h
71302index c0a1261..dba7569 100644
71303--- a/include/linux/device.h
71304+++ b/include/linux/device.h
71305@@ -290,7 +290,7 @@ struct subsys_interface {
71306 struct list_head node;
71307 int (*add_dev)(struct device *dev, struct subsys_interface *sif);
71308 int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
71309-};
71310+} __do_const;
71311
71312 int subsys_interface_register(struct subsys_interface *sif);
71313 void subsys_interface_unregister(struct subsys_interface *sif);
71314@@ -473,7 +473,7 @@ struct device_type {
71315 void (*release)(struct device *dev);
71316
71317 const struct dev_pm_ops *pm;
71318-};
71319+} __do_const;
71320
71321 /* interface for exporting device attributes */
71322 struct device_attribute {
71323@@ -483,11 +483,12 @@ struct device_attribute {
71324 ssize_t (*store)(struct device *dev, struct device_attribute *attr,
71325 const char *buf, size_t count);
71326 };
71327+typedef struct device_attribute __no_const device_attribute_no_const;
71328
71329 struct dev_ext_attribute {
71330 struct device_attribute attr;
71331 void *var;
71332-};
71333+} __do_const;
71334
71335 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
71336 char *buf);
71337diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
71338index 94af418..b1ca7a2 100644
71339--- a/include/linux/dma-mapping.h
71340+++ b/include/linux/dma-mapping.h
71341@@ -54,7 +54,7 @@ struct dma_map_ops {
71342 u64 (*get_required_mask)(struct device *dev);
71343 #endif
71344 int is_phys;
71345-};
71346+} __do_const;
71347
71348 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
71349
71350diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
71351index 96d3e4a..dc36433 100644
71352--- a/include/linux/dmaengine.h
71353+++ b/include/linux/dmaengine.h
71354@@ -1035,9 +1035,9 @@ struct dma_pinned_list {
71355 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
71356 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
71357
71358-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
71359+dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
71360 struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
71361-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
71362+dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
71363 struct dma_pinned_list *pinned_list, struct page *page,
71364 unsigned int offset, size_t len);
71365
71366diff --git a/include/linux/efi.h b/include/linux/efi.h
71367index 2bc0ad7..3f7b006 100644
71368--- a/include/linux/efi.h
71369+++ b/include/linux/efi.h
71370@@ -745,6 +745,7 @@ struct efivar_operations {
71371 efi_set_variable_t *set_variable;
71372 efi_query_variable_store_t *query_variable_store;
71373 };
71374+typedef struct efivar_operations __no_const efivar_operations_no_const;
71375
71376 struct efivars {
71377 /*
71378diff --git a/include/linux/elf.h b/include/linux/elf.h
71379index 40a3c0e..4c45a38 100644
71380--- a/include/linux/elf.h
71381+++ b/include/linux/elf.h
71382@@ -24,6 +24,7 @@ extern Elf32_Dyn _DYNAMIC [];
71383 #define elf_note elf32_note
71384 #define elf_addr_t Elf32_Off
71385 #define Elf_Half Elf32_Half
71386+#define elf_dyn Elf32_Dyn
71387
71388 #else
71389
71390@@ -34,6 +35,7 @@ extern Elf64_Dyn _DYNAMIC [];
71391 #define elf_note elf64_note
71392 #define elf_addr_t Elf64_Off
71393 #define Elf_Half Elf64_Half
71394+#define elf_dyn Elf64_Dyn
71395
71396 #endif
71397
71398diff --git a/include/linux/err.h b/include/linux/err.h
71399index f2edce2..cc2082c 100644
71400--- a/include/linux/err.h
71401+++ b/include/linux/err.h
71402@@ -19,12 +19,12 @@
71403
71404 #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
71405
71406-static inline void * __must_check ERR_PTR(long error)
71407+static inline void * __must_check __intentional_overflow(-1) ERR_PTR(long error)
71408 {
71409 return (void *) error;
71410 }
71411
71412-static inline long __must_check PTR_ERR(const void *ptr)
71413+static inline long __must_check __intentional_overflow(-1) PTR_ERR(const void *ptr)
71414 {
71415 return (long) ptr;
71416 }
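
ERR_PTR()/PTR_ERR() round-trip negative errno values through the top page of the address space, which is precisely the pointer/integer wraparound the (-1) annotation declares intentional. A self-contained sketch of the idiom (names hypothetical):

#include <linux/err.h>

static int table[16];

static void *lookup_slot(int id)
{
        if (id < 0 || id >= 16)
                return ERR_PTR(-EINVAL);        /* errno encoded in the pointer */
        return &table[id];
}

static long use_slot(int id)
{
        void *p = lookup_slot(id);

        if (IS_ERR(p))
                return PTR_ERR(p);              /* decodes back to -EINVAL */
        return 0;
}
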
71417diff --git a/include/linux/extcon.h b/include/linux/extcon.h
71418index fcb51c8..bdafcf6 100644
71419--- a/include/linux/extcon.h
71420+++ b/include/linux/extcon.h
71421@@ -134,7 +134,7 @@ struct extcon_dev {
71422 /* /sys/class/extcon/.../mutually_exclusive/... */
71423 struct attribute_group attr_g_muex;
71424 struct attribute **attrs_muex;
71425- struct device_attribute *d_attrs_muex;
71426+ device_attribute_no_const *d_attrs_muex;
71427 };
71428
71429 /**
71430diff --git a/include/linux/fb.h b/include/linux/fb.h
71431index d49c60f..2834fbe 100644
71432--- a/include/linux/fb.h
71433+++ b/include/linux/fb.h
71434@@ -304,7 +304,7 @@ struct fb_ops {
71435 /* called at KDB enter and leave time to prepare the console */
71436 int (*fb_debug_enter)(struct fb_info *info);
71437 int (*fb_debug_leave)(struct fb_info *info);
71438-};
71439+} __do_const;
71440
71441 #ifdef CONFIG_FB_TILEBLITTING
71442 #define FB_TILE_CURSOR_NONE 0
71443diff --git a/include/linux/filter.h b/include/linux/filter.h
71444index f65f5a6..2f4f93a 100644
71445--- a/include/linux/filter.h
71446+++ b/include/linux/filter.h
71447@@ -20,6 +20,7 @@ struct compat_sock_fprog {
71448
71449 struct sk_buff;
71450 struct sock;
71451+struct bpf_jit_work;
71452
71453 struct sk_filter
71454 {
71455@@ -27,6 +28,9 @@ struct sk_filter
71456 unsigned int len; /* Number of filter blocks */
71457 unsigned int (*bpf_func)(const struct sk_buff *skb,
71458 const struct sock_filter *filter);
71459+#ifdef CONFIG_BPF_JIT
71460+ struct bpf_jit_work *work;
71461+#endif
71462 struct rcu_head rcu;
71463 struct sock_filter insns[0];
71464 };
71465diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
71466index 8293262..2b3b8bd 100644
71467--- a/include/linux/frontswap.h
71468+++ b/include/linux/frontswap.h
71469@@ -11,7 +11,7 @@ struct frontswap_ops {
71470 int (*load)(unsigned, pgoff_t, struct page *);
71471 void (*invalidate_page)(unsigned, pgoff_t);
71472 void (*invalidate_area)(unsigned);
71473-};
71474+} __no_const;
71475
71476 extern bool frontswap_enabled;
71477 extern struct frontswap_ops *
71478diff --git a/include/linux/fs.h b/include/linux/fs.h
71479index 65c2be2..4c53f6e 100644
71480--- a/include/linux/fs.h
71481+++ b/include/linux/fs.h
71482@@ -1543,7 +1543,8 @@ struct file_operations {
71483 long (*fallocate)(struct file *file, int mode, loff_t offset,
71484 loff_t len);
71485 int (*show_fdinfo)(struct seq_file *m, struct file *f);
71486-};
71487+} __do_const;
71488+typedef struct file_operations __no_const file_operations_no_const;
71489
71490 struct inode_operations {
71491 struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
71492@@ -2688,4 +2689,14 @@ static inline void inode_has_no_xattr(struct inode *inode)
71493 inode->i_flags |= S_NOSEC;
71494 }
71495
71496+static inline bool is_sidechannel_device(const struct inode *inode)
71497+{
71498+#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
71499+ umode_t mode = inode->i_mode;
71500+ return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
71501+#else
71502+ return false;
71503+#endif
71504+}
71505+
71506 #endif /* _LINUX_FS_H */
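
is_sidechannel_device() flags world-readable or world-writable character and block devices, whose access timestamps (terminal devices are the classic case) can leak keystroke timing to an observer; the fsnotify hunks below use it to suppress access/modify events on such inodes. A worked instance of the mode test:

static bool sidechannel_demo(void)
{
        umode_t mode = S_IFCHR | 0666;  /* crw-rw-rw-, e.g. a world-accessible tty */

        return (S_ISCHR(mode) || S_ISBLK(mode)) &&
               (mode & (S_IROTH | S_IWOTH));    /* evaluates to true */
}
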
71507diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
71508index 2b93a9a..855d94a 100644
71509--- a/include/linux/fs_struct.h
71510+++ b/include/linux/fs_struct.h
71511@@ -6,7 +6,7 @@
71512 #include <linux/seqlock.h>
71513
71514 struct fs_struct {
71515- int users;
71516+ atomic_t users;
71517 spinlock_t lock;
71518 seqcount_t seq;
71519 int umask;
71520diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
71521index 5dfa0aa..6acf322 100644
71522--- a/include/linux/fscache-cache.h
71523+++ b/include/linux/fscache-cache.h
71524@@ -112,7 +112,7 @@ struct fscache_operation {
71525 fscache_operation_release_t release;
71526 };
71527
71528-extern atomic_t fscache_op_debug_id;
71529+extern atomic_unchecked_t fscache_op_debug_id;
71530 extern void fscache_op_work_func(struct work_struct *work);
71531
71532 extern void fscache_enqueue_operation(struct fscache_operation *);
71533@@ -134,7 +134,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
71534 INIT_WORK(&op->work, fscache_op_work_func);
71535 atomic_set(&op->usage, 1);
71536 op->state = FSCACHE_OP_ST_INITIALISED;
71537- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
71538+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
71539 op->processor = processor;
71540 op->release = release;
71541 INIT_LIST_HEAD(&op->pend_link);
71542diff --git a/include/linux/fscache.h b/include/linux/fscache.h
71543index 7a08623..4c07b0f 100644
71544--- a/include/linux/fscache.h
71545+++ b/include/linux/fscache.h
71546@@ -152,7 +152,7 @@ struct fscache_cookie_def {
71547 * - this is mandatory for any object that may have data
71548 */
71549 void (*now_uncached)(void *cookie_netfs_data);
71550-};
71551+} __do_const;
71552
71553 /*
71554 * fscache cached network filesystem type
71555diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
71556index a78680a..87bd73e 100644
71557--- a/include/linux/fsnotify.h
71558+++ b/include/linux/fsnotify.h
71559@@ -195,6 +195,9 @@ static inline void fsnotify_access(struct file *file)
71560 struct inode *inode = path->dentry->d_inode;
71561 __u32 mask = FS_ACCESS;
71562
71563+ if (is_sidechannel_device(inode))
71564+ return;
71565+
71566 if (S_ISDIR(inode->i_mode))
71567 mask |= FS_ISDIR;
71568
71569@@ -213,6 +216,9 @@ static inline void fsnotify_modify(struct file *file)
71570 struct inode *inode = path->dentry->d_inode;
71571 __u32 mask = FS_MODIFY;
71572
71573+ if (is_sidechannel_device(inode))
71574+ return;
71575+
71576 if (S_ISDIR(inode->i_mode))
71577 mask |= FS_ISDIR;
71578
71579@@ -315,7 +321,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
71580 */
71581 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
71582 {
71583- return kstrdup(name, GFP_KERNEL);
71584+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
71585 }
71586
71587 /*
71588diff --git a/include/linux/genhd.h b/include/linux/genhd.h
71589index 9f3c275..911b591 100644
71590--- a/include/linux/genhd.h
71591+++ b/include/linux/genhd.h
71592@@ -194,7 +194,7 @@ struct gendisk {
71593 struct kobject *slave_dir;
71594
71595 struct timer_rand_state *random;
71596- atomic_t sync_io; /* RAID */
71597+ atomic_unchecked_t sync_io; /* RAID */
71598 struct disk_events *ev;
71599 #ifdef CONFIG_BLK_DEV_INTEGRITY
71600 struct blk_integrity *integrity;
71601diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
71602index 023bc34..b02b46a 100644
71603--- a/include/linux/genl_magic_func.h
71604+++ b/include/linux/genl_magic_func.h
71605@@ -246,7 +246,7 @@ const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
71606 },
71607
71608 #define ZZZ_genl_ops CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
71609-static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
71610+static struct genl_ops ZZZ_genl_ops[] = {
71611 #include GENL_MAGIC_INCLUDE_FILE
71612 };
71613
71614diff --git a/include/linux/gfp.h b/include/linux/gfp.h
71615index 0f615eb..5c3832f 100644
71616--- a/include/linux/gfp.h
71617+++ b/include/linux/gfp.h
71618@@ -35,6 +35,13 @@ struct vm_area_struct;
71619 #define ___GFP_NO_KSWAPD 0x400000u
71620 #define ___GFP_OTHER_NODE 0x800000u
71621 #define ___GFP_WRITE 0x1000000u
71622+
71623+#ifdef CONFIG_PAX_USERCOPY_SLABS
71624+#define ___GFP_USERCOPY 0x2000000u
71625+#else
71626+#define ___GFP_USERCOPY 0
71627+#endif
71628+
71629 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
71630
71631 /*
71632@@ -92,6 +99,7 @@ struct vm_area_struct;
71633 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
71634 #define __GFP_KMEMCG ((__force gfp_t)___GFP_KMEMCG) /* Allocation comes from a memcg-accounted resource */
71635 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
71636+#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
71637
71638 /*
71639 * This may seem redundant, but it's a way of annotating false positives vs.
71640@@ -99,7 +107,7 @@ struct vm_area_struct;
71641 */
71642 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
71643
71644-#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
71645+#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
71646 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
71647
71648 /* This equals 0, but use constants in case they ever change */
71649@@ -153,6 +161,8 @@ struct vm_area_struct;
71650 /* 4GB DMA on some platforms */
71651 #define GFP_DMA32 __GFP_DMA32
71652
71653+#define GFP_USERCOPY __GFP_USERCOPY
71654+
71655 /* Convert GFP flags to their corresponding migrate type */
71656 static inline int allocflags_to_migratetype(gfp_t gfp_flags)
71657 {
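
___GFP_USERCOPY claims bit 25 (0x2000000), which is why __GFP_BITS_SHIFT grows from 25 to 26 so that __GFP_BITS_MASK, (1 << 26) - 1, still covers it. Under PAX_USERCOPY_SLABS the flag steers objects that legitimately cross the user/kernel boundary into dedicated slabs. A hedged sketch (helper name hypothetical):

#include <linux/slab.h>
#include <linux/uaccess.h>

static void *dup_from_user(const void __user *ubuf, size_t len)
{
        void *buf = kmalloc(len, GFP_KERNEL | GFP_USERCOPY);

        if (buf && copy_from_user(buf, ubuf, len)) {
                kfree(buf);
                return NULL;
        }
        return buf;     /* contents came from, and may return to, userland */
}
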
71658diff --git a/include/linux/gracl.h b/include/linux/gracl.h
71659new file mode 100644
71660index 0000000..ebe6d72
71661--- /dev/null
71662+++ b/include/linux/gracl.h
71663@@ -0,0 +1,319 @@
71664+#ifndef GR_ACL_H
71665+#define GR_ACL_H
71666+
71667+#include <linux/grdefs.h>
71668+#include <linux/resource.h>
71669+#include <linux/capability.h>
71670+#include <linux/dcache.h>
71671+#include <asm/resource.h>
71672+
71673+/* Major status information */
71674+
71675+#define GR_VERSION "grsecurity 2.9.1"
71676+#define GRSECURITY_VERSION 0x2901
71677+
71678+enum {
71679+ GR_SHUTDOWN = 0,
71680+ GR_ENABLE = 1,
71681+ GR_SPROLE = 2,
71682+ GR_RELOAD = 3,
71683+ GR_SEGVMOD = 4,
71684+ GR_STATUS = 5,
71685+ GR_UNSPROLE = 6,
71686+ GR_PASSSET = 7,
71687+ GR_SPROLEPAM = 8,
71688+};
71689+
71690+/* Password setup definitions
71691+ * kernel/grhash.c */
71692+enum {
71693+ GR_PW_LEN = 128,
71694+ GR_SALT_LEN = 16,
71695+ GR_SHA_LEN = 32,
71696+};
71697+
71698+enum {
71699+ GR_SPROLE_LEN = 64,
71700+};
71701+
71702+enum {
71703+ GR_NO_GLOB = 0,
71704+ GR_REG_GLOB,
71705+ GR_CREATE_GLOB
71706+};
71707+
71708+#define GR_NLIMITS 32
71709+
71710+/* Begin Data Structures */
71711+
71712+struct sprole_pw {
71713+ unsigned char *rolename;
71714+ unsigned char salt[GR_SALT_LEN];
71715+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
71716+};
71717+
71718+struct name_entry {
71719+ __u32 key;
71720+ ino_t inode;
71721+ dev_t device;
71722+ char *name;
71723+ __u16 len;
71724+ __u8 deleted;
71725+ struct name_entry *prev;
71726+ struct name_entry *next;
71727+};
71728+
71729+struct inodev_entry {
71730+ struct name_entry *nentry;
71731+ struct inodev_entry *prev;
71732+ struct inodev_entry *next;
71733+};
71734+
71735+struct acl_role_db {
71736+ struct acl_role_label **r_hash;
71737+ __u32 r_size;
71738+};
71739+
71740+struct inodev_db {
71741+ struct inodev_entry **i_hash;
71742+ __u32 i_size;
71743+};
71744+
71745+struct name_db {
71746+ struct name_entry **n_hash;
71747+ __u32 n_size;
71748+};
71749+
71750+struct crash_uid {
71751+ uid_t uid;
71752+ unsigned long expires;
71753+};
71754+
71755+struct gr_hash_struct {
71756+ void **table;
71757+ void **nametable;
71758+ void *first;
71759+ __u32 table_size;
71760+ __u32 used_size;
71761+ int type;
71762+};
71763+
71764+/* Userspace Grsecurity ACL data structures */
71765+
71766+struct acl_subject_label {
71767+ char *filename;
71768+ ino_t inode;
71769+ dev_t device;
71770+ __u32 mode;
71771+ kernel_cap_t cap_mask;
71772+ kernel_cap_t cap_lower;
71773+ kernel_cap_t cap_invert_audit;
71774+
71775+ struct rlimit res[GR_NLIMITS];
71776+ __u32 resmask;
71777+
71778+ __u8 user_trans_type;
71779+ __u8 group_trans_type;
71780+ uid_t *user_transitions;
71781+ gid_t *group_transitions;
71782+ __u16 user_trans_num;
71783+ __u16 group_trans_num;
71784+
71785+ __u32 sock_families[2];
71786+ __u32 ip_proto[8];
71787+ __u32 ip_type;
71788+ struct acl_ip_label **ips;
71789+ __u32 ip_num;
71790+ __u32 inaddr_any_override;
71791+
71792+ __u32 crashes;
71793+ unsigned long expires;
71794+
71795+ struct acl_subject_label *parent_subject;
71796+ struct gr_hash_struct *hash;
71797+ struct acl_subject_label *prev;
71798+ struct acl_subject_label *next;
71799+
71800+ struct acl_object_label **obj_hash;
71801+ __u32 obj_hash_size;
71802+ __u16 pax_flags;
71803+};
71804+
71805+struct role_allowed_ip {
71806+ __u32 addr;
71807+ __u32 netmask;
71808+
71809+ struct role_allowed_ip *prev;
71810+ struct role_allowed_ip *next;
71811+};
71812+
71813+struct role_transition {
71814+ char *rolename;
71815+
71816+ struct role_transition *prev;
71817+ struct role_transition *next;
71818+};
71819+
71820+struct acl_role_label {
71821+ char *rolename;
71822+ uid_t uidgid;
71823+ __u16 roletype;
71824+
71825+ __u16 auth_attempts;
71826+ unsigned long expires;
71827+
71828+ struct acl_subject_label *root_label;
71829+ struct gr_hash_struct *hash;
71830+
71831+ struct acl_role_label *prev;
71832+ struct acl_role_label *next;
71833+
71834+ struct role_transition *transitions;
71835+ struct role_allowed_ip *allowed_ips;
71836+ uid_t *domain_children;
71837+ __u16 domain_child_num;
71838+
71839+ umode_t umask;
71840+
71841+ struct acl_subject_label **subj_hash;
71842+ __u32 subj_hash_size;
71843+};
71844+
71845+struct user_acl_role_db {
71846+ struct acl_role_label **r_table;
71847+ __u32 num_pointers; /* Number of allocations to track */
71848+ __u32 num_roles; /* Number of roles */
71849+ __u32 num_domain_children; /* Number of domain children */
71850+ __u32 num_subjects; /* Number of subjects */
71851+ __u32 num_objects; /* Number of objects */
71852+};
71853+
71854+struct acl_object_label {
71855+ char *filename;
71856+ ino_t inode;
71857+ dev_t device;
71858+ __u32 mode;
71859+
71860+ struct acl_subject_label *nested;
71861+ struct acl_object_label *globbed;
71862+
71863+ /* next two structures not used */
71864+
71865+ struct acl_object_label *prev;
71866+ struct acl_object_label *next;
71867+};
71868+
71869+struct acl_ip_label {
71870+ char *iface;
71871+ __u32 addr;
71872+ __u32 netmask;
71873+ __u16 low, high;
71874+ __u8 mode;
71875+ __u32 type;
71876+ __u32 proto[8];
71877+
71878+ /* next two structures not used */
71879+
71880+ struct acl_ip_label *prev;
71881+ struct acl_ip_label *next;
71882+};
71883+
71884+struct gr_arg {
71885+ struct user_acl_role_db role_db;
71886+ unsigned char pw[GR_PW_LEN];
71887+ unsigned char salt[GR_SALT_LEN];
71888+ unsigned char sum[GR_SHA_LEN];
71889+ unsigned char sp_role[GR_SPROLE_LEN];
71890+ struct sprole_pw *sprole_pws;
71891+ dev_t segv_device;
71892+ ino_t segv_inode;
71893+ uid_t segv_uid;
71894+ __u16 num_sprole_pws;
71895+ __u16 mode;
71896+};
71897+
71898+struct gr_arg_wrapper {
71899+ struct gr_arg *arg;
71900+ __u32 version;
71901+ __u32 size;
71902+};
71903+
71904+struct subject_map {
71905+ struct acl_subject_label *user;
71906+ struct acl_subject_label *kernel;
71907+ struct subject_map *prev;
71908+ struct subject_map *next;
71909+};
71910+
71911+struct acl_subj_map_db {
71912+ struct subject_map **s_hash;
71913+ __u32 s_size;
71914+};
71915+
71916+/* End Data Structures Section */
71917+
71918+/* Hash functions arrived at through empirical testing by Brad Spengler.
71919+ They make good use of the low bits of the inode: typically 0-1 loop
71920+ iterations for a successful match and 0-3 for an unsuccessful one.
71921+ Shift/add algorithm with a modulus of the table size and an XOR. */
71922+
71923+static __inline__ unsigned int
71924+gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
71925+{
71926+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
71927+}
71928+
71929+static __inline__ unsigned int
71930+gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
71931+{
71932+ return ((const unsigned long)userp % sz);
71933+}
71934+
71935+static __inline__ unsigned int
71936+gr_fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
71937+{
71938+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
71939+}
71940+
71941+static __inline__ unsigned int
71942+gr_nhash(const char *name, const __u16 len, const unsigned int sz)
71943+{
71944+ return full_name_hash((const unsigned char *)name, len) % sz;
71945+}
71946+
71947+#define FOR_EACH_ROLE_START(role) \
71948+ role = role_list; \
71949+ while (role) {
71950+
71951+#define FOR_EACH_ROLE_END(role) \
71952+ role = role->prev; \
71953+ }
71954+
71955+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
71956+ subj = NULL; \
71957+ iter = 0; \
71958+ while (iter < role->subj_hash_size) { \
71959+ if (subj == NULL) \
71960+ subj = role->subj_hash[iter]; \
71961+ if (subj == NULL) { \
71962+ iter++; \
71963+ continue; \
71964+ }
71965+
71966+#define FOR_EACH_SUBJECT_END(subj,iter) \
71967+ subj = subj->next; \
71968+ if (subj == NULL) \
71969+ iter++; \
71970+ }
71971+
71972+
71973+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
71974+ subj = role->hash->first; \
71975+ while (subj != NULL) {
71976+
71977+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
71978+ subj = subj->next; \
71979+ }
71980+
71981+#endif
71982+
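
A worked instance of gr_fhash() above shows why the low inode bits dominate. With ino = 2, dev = 1, sz = 256:

        ino + dev                          = 3
        (2 << 13) + (2 << 23) + (1 << 9)   = 16384 + 16777216 + 512 = 16794112
        3 ^ 16794112                       = 16794115
        16794115 % 256                     = 3

Every shifted term is a multiple of 256, so with sz = 256 the bucket is determined almost entirely by the low bits of ino + dev:

static unsigned int fhash_demo(void)
{
        return gr_fhash(2, 1, 256);     /* == 3, per the arithmetic above */
}
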
71983diff --git a/include/linux/gracl_compat.h b/include/linux/gracl_compat.h
71984new file mode 100644
71985index 0000000..33ebd1f
71986--- /dev/null
71987+++ b/include/linux/gracl_compat.h
71988@@ -0,0 +1,156 @@
71989+#ifndef GR_ACL_COMPAT_H
71990+#define GR_ACL_COMPAT_H
71991+
71992+#include <linux/resource.h>
71993+#include <asm/resource.h>
71994+
71995+struct sprole_pw_compat {
71996+ compat_uptr_t rolename;
71997+ unsigned char salt[GR_SALT_LEN];
71998+ unsigned char sum[GR_SHA_LEN];
71999+};
72000+
72001+struct gr_hash_struct_compat {
72002+ compat_uptr_t table;
72003+ compat_uptr_t nametable;
72004+ compat_uptr_t first;
72005+ __u32 table_size;
72006+ __u32 used_size;
72007+ int type;
72008+};
72009+
72010+struct acl_subject_label_compat {
72011+ compat_uptr_t filename;
72012+ compat_ino_t inode;
72013+ __u32 device;
72014+ __u32 mode;
72015+ kernel_cap_t cap_mask;
72016+ kernel_cap_t cap_lower;
72017+ kernel_cap_t cap_invert_audit;
72018+
72019+ struct compat_rlimit res[GR_NLIMITS];
72020+ __u32 resmask;
72021+
72022+ __u8 user_trans_type;
72023+ __u8 group_trans_type;
72024+ compat_uptr_t user_transitions;
72025+ compat_uptr_t group_transitions;
72026+ __u16 user_trans_num;
72027+ __u16 group_trans_num;
72028+
72029+ __u32 sock_families[2];
72030+ __u32 ip_proto[8];
72031+ __u32 ip_type;
72032+ compat_uptr_t ips;
72033+ __u32 ip_num;
72034+ __u32 inaddr_any_override;
72035+
72036+ __u32 crashes;
72037+ compat_ulong_t expires;
72038+
72039+ compat_uptr_t parent_subject;
72040+ compat_uptr_t hash;
72041+ compat_uptr_t prev;
72042+ compat_uptr_t next;
72043+
72044+ compat_uptr_t obj_hash;
72045+ __u32 obj_hash_size;
72046+ __u16 pax_flags;
72047+};
72048+
72049+struct role_allowed_ip_compat {
72050+ __u32 addr;
72051+ __u32 netmask;
72052+
72053+ compat_uptr_t prev;
72054+ compat_uptr_t next;
72055+};
72056+
72057+struct role_transition_compat {
72058+ compat_uptr_t rolename;
72059+
72060+ compat_uptr_t prev;
72061+ compat_uptr_t next;
72062+};
72063+
72064+struct acl_role_label_compat {
72065+ compat_uptr_t rolename;
72066+ uid_t uidgid;
72067+ __u16 roletype;
72068+
72069+ __u16 auth_attempts;
72070+ compat_ulong_t expires;
72071+
72072+ compat_uptr_t root_label;
72073+ compat_uptr_t hash;
72074+
72075+ compat_uptr_t prev;
72076+ compat_uptr_t next;
72077+
72078+ compat_uptr_t transitions;
72079+ compat_uptr_t allowed_ips;
72080+ compat_uptr_t domain_children;
72081+ __u16 domain_child_num;
72082+
72083+ umode_t umask;
72084+
72085+ compat_uptr_t subj_hash;
72086+ __u32 subj_hash_size;
72087+};
72088+
72089+struct user_acl_role_db_compat {
72090+ compat_uptr_t r_table;
72091+ __u32 num_pointers;
72092+ __u32 num_roles;
72093+ __u32 num_domain_children;
72094+ __u32 num_subjects;
72095+ __u32 num_objects;
72096+};
72097+
72098+struct acl_object_label_compat {
72099+ compat_uptr_t filename;
72100+ compat_ino_t inode;
72101+ __u32 device;
72102+ __u32 mode;
72103+
72104+ compat_uptr_t nested;
72105+ compat_uptr_t globbed;
72106+
72107+ compat_uptr_t prev;
72108+ compat_uptr_t next;
72109+};
72110+
72111+struct acl_ip_label_compat {
72112+ compat_uptr_t iface;
72113+ __u32 addr;
72114+ __u32 netmask;
72115+ __u16 low, high;
72116+ __u8 mode;
72117+ __u32 type;
72118+ __u32 proto[8];
72119+
72120+ compat_uptr_t prev;
72121+ compat_uptr_t next;
72122+};
72123+
72124+struct gr_arg_compat {
72125+ struct user_acl_role_db_compat role_db;
72126+ unsigned char pw[GR_PW_LEN];
72127+ unsigned char salt[GR_SALT_LEN];
72128+ unsigned char sum[GR_SHA_LEN];
72129+ unsigned char sp_role[GR_SPROLE_LEN];
72130+ compat_uptr_t sprole_pws;
72131+ __u32 segv_device;
72132+ compat_ino_t segv_inode;
72133+ uid_t segv_uid;
72134+ __u16 num_sprole_pws;
72135+ __u16 mode;
72136+};
72137+
72138+struct gr_arg_wrapper_compat {
72139+ compat_uptr_t arg;
72140+ __u32 version;
72141+ __u32 size;
72142+};
72143+
72144+#endif
72145diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
72146new file mode 100644
72147index 0000000..323ecf2
72148--- /dev/null
72149+++ b/include/linux/gralloc.h
72150@@ -0,0 +1,9 @@
72151+#ifndef __GRALLOC_H
72152+#define __GRALLOC_H
72153+
72154+void acl_free_all(void);
72155+int acl_alloc_stack_init(unsigned long size);
72156+void *acl_alloc(unsigned long len);
72157+void *acl_alloc_num(unsigned long num, unsigned long len);
72158+
72159+#endif
72160diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
72161new file mode 100644
72162index 0000000..be66033
72163--- /dev/null
72164+++ b/include/linux/grdefs.h
72165@@ -0,0 +1,140 @@
72166+#ifndef GRDEFS_H
72167+#define GRDEFS_H
72168+
72169+/* Begin grsecurity status declarations */
72170+
72171+enum {
72172+ GR_READY = 0x01,
72173+ GR_STATUS_INIT = 0x00 // disabled state
72174+};
72175+
72176+/* Begin ACL declarations */
72177+
72178+/* Role flags */
72179+
72180+enum {
72181+ GR_ROLE_USER = 0x0001,
72182+ GR_ROLE_GROUP = 0x0002,
72183+ GR_ROLE_DEFAULT = 0x0004,
72184+ GR_ROLE_SPECIAL = 0x0008,
72185+ GR_ROLE_AUTH = 0x0010,
72186+ GR_ROLE_NOPW = 0x0020,
72187+ GR_ROLE_GOD = 0x0040,
72188+ GR_ROLE_LEARN = 0x0080,
72189+ GR_ROLE_TPE = 0x0100,
72190+ GR_ROLE_DOMAIN = 0x0200,
72191+ GR_ROLE_PAM = 0x0400,
72192+ GR_ROLE_PERSIST = 0x0800
72193+};
72194+
72195+/* ACL Subject and Object mode flags */
72196+enum {
72197+ GR_DELETED = 0x80000000
72198+};
72199+
72200+/* ACL Object-only mode flags */
72201+enum {
72202+ GR_READ = 0x00000001,
72203+ GR_APPEND = 0x00000002,
72204+ GR_WRITE = 0x00000004,
72205+ GR_EXEC = 0x00000008,
72206+ GR_FIND = 0x00000010,
72207+ GR_INHERIT = 0x00000020,
72208+ GR_SETID = 0x00000040,
72209+ GR_CREATE = 0x00000080,
72210+ GR_DELETE = 0x00000100,
72211+ GR_LINK = 0x00000200,
72212+ GR_AUDIT_READ = 0x00000400,
72213+ GR_AUDIT_APPEND = 0x00000800,
72214+ GR_AUDIT_WRITE = 0x00001000,
72215+ GR_AUDIT_EXEC = 0x00002000,
72216+ GR_AUDIT_FIND = 0x00004000,
72217+ GR_AUDIT_INHERIT= 0x00008000,
72218+ GR_AUDIT_SETID = 0x00010000,
72219+ GR_AUDIT_CREATE = 0x00020000,
72220+ GR_AUDIT_DELETE = 0x00040000,
72221+ GR_AUDIT_LINK = 0x00080000,
72222+ GR_PTRACERD = 0x00100000,
72223+ GR_NOPTRACE = 0x00200000,
72224+ GR_SUPPRESS = 0x00400000,
72225+ GR_NOLEARN = 0x00800000,
72226+ GR_INIT_TRANSFER= 0x01000000
72227+};
72228+
72229+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
72230+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
72231+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
72232+
72233+/* ACL subject-only mode flags */
72234+enum {
72235+ GR_KILL = 0x00000001,
72236+ GR_VIEW = 0x00000002,
72237+ GR_PROTECTED = 0x00000004,
72238+ GR_LEARN = 0x00000008,
72239+ GR_OVERRIDE = 0x00000010,
72240+ /* just a placeholder, this mode is only used in userspace */
72241+ GR_DUMMY = 0x00000020,
72242+ GR_PROTSHM = 0x00000040,
72243+ GR_KILLPROC = 0x00000080,
72244+ GR_KILLIPPROC = 0x00000100,
72245+ /* just a placeholder, this mode is only used in userspace */
72246+ GR_NOTROJAN = 0x00000200,
72247+ GR_PROTPROCFD = 0x00000400,
72248+ GR_PROCACCT = 0x00000800,
72249+ GR_RELAXPTRACE = 0x00001000,
72250+ //GR_NESTED = 0x00002000,
72251+ GR_INHERITLEARN = 0x00004000,
72252+ GR_PROCFIND = 0x00008000,
72253+ GR_POVERRIDE = 0x00010000,
72254+ GR_KERNELAUTH = 0x00020000,
72255+ GR_ATSECURE = 0x00040000,
72256+ GR_SHMEXEC = 0x00080000
72257+};
72258+
72259+enum {
72260+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
72261+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
72262+ GR_PAX_ENABLE_MPROTECT = 0x0004,
72263+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
72264+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
72265+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
72266+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
72267+ GR_PAX_DISABLE_MPROTECT = 0x0400,
72268+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
72269+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
72270+};
72271+
72272+enum {
72273+ GR_ID_USER = 0x01,
72274+ GR_ID_GROUP = 0x02,
72275+};
72276+
72277+enum {
72278+ GR_ID_ALLOW = 0x01,
72279+ GR_ID_DENY = 0x02,
72280+};
72281+
72282+#define GR_CRASH_RES 31
72283+#define GR_UIDTABLE_MAX 500
72284+
72285+/* begin resource learning section */
72286+enum {
72287+ GR_RLIM_CPU_BUMP = 60,
72288+ GR_RLIM_FSIZE_BUMP = 50000,
72289+ GR_RLIM_DATA_BUMP = 10000,
72290+ GR_RLIM_STACK_BUMP = 1000,
72291+ GR_RLIM_CORE_BUMP = 10000,
72292+ GR_RLIM_RSS_BUMP = 500000,
72293+ GR_RLIM_NPROC_BUMP = 1,
72294+ GR_RLIM_NOFILE_BUMP = 5,
72295+ GR_RLIM_MEMLOCK_BUMP = 50000,
72296+ GR_RLIM_AS_BUMP = 500000,
72297+ GR_RLIM_LOCKS_BUMP = 2,
72298+ GR_RLIM_SIGPENDING_BUMP = 5,
72299+ GR_RLIM_MSGQUEUE_BUMP = 10000,
72300+ GR_RLIM_NICE_BUMP = 1,
72301+ GR_RLIM_RTPRIO_BUMP = 1,
72302+ GR_RLIM_RTTIME_BUMP = 1000000
72303+};
72304+
72305+#endif
72306diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
72307new file mode 100644
72308index 0000000..fd8598b
72309--- /dev/null
72310+++ b/include/linux/grinternal.h
72311@@ -0,0 +1,228 @@
72312+#ifndef __GRINTERNAL_H
72313+#define __GRINTERNAL_H
72314+
72315+#ifdef CONFIG_GRKERNSEC
72316+
72317+#include <linux/fs.h>
72318+#include <linux/mnt_namespace.h>
72319+#include <linux/nsproxy.h>
72320+#include <linux/gracl.h>
72321+#include <linux/grdefs.h>
72322+#include <linux/grmsg.h>
72323+
72324+void gr_add_learn_entry(const char *fmt, ...)
72325+ __attribute__ ((format (printf, 1, 2)));
72326+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
72327+ const struct vfsmount *mnt);
72328+__u32 gr_check_create(const struct dentry *new_dentry,
72329+ const struct dentry *parent,
72330+ const struct vfsmount *mnt, const __u32 mode);
72331+int gr_check_protected_task(const struct task_struct *task);
72332+__u32 to_gr_audit(const __u32 reqmode);
72333+int gr_set_acls(const int type);
72334+int gr_apply_subject_to_task(struct task_struct *task);
72335+int gr_acl_is_enabled(void);
72336+char gr_roletype_to_char(void);
72337+
72338+void gr_handle_alertkill(struct task_struct *task);
72339+char *gr_to_filename(const struct dentry *dentry,
72340+ const struct vfsmount *mnt);
72341+char *gr_to_filename1(const struct dentry *dentry,
72342+ const struct vfsmount *mnt);
72343+char *gr_to_filename2(const struct dentry *dentry,
72344+ const struct vfsmount *mnt);
72345+char *gr_to_filename3(const struct dentry *dentry,
72346+ const struct vfsmount *mnt);
72347+
72348+extern int grsec_enable_ptrace_readexec;
72349+extern int grsec_enable_harden_ptrace;
72350+extern int grsec_enable_link;
72351+extern int grsec_enable_fifo;
72352+extern int grsec_enable_execve;
72353+extern int grsec_enable_shm;
72354+extern int grsec_enable_execlog;
72355+extern int grsec_enable_signal;
72356+extern int grsec_enable_audit_ptrace;
72357+extern int grsec_enable_forkfail;
72358+extern int grsec_enable_time;
72359+extern int grsec_enable_rofs;
72360+extern int grsec_enable_chroot_shmat;
72361+extern int grsec_enable_chroot_mount;
72362+extern int grsec_enable_chroot_double;
72363+extern int grsec_enable_chroot_pivot;
72364+extern int grsec_enable_chroot_chdir;
72365+extern int grsec_enable_chroot_chmod;
72366+extern int grsec_enable_chroot_mknod;
72367+extern int grsec_enable_chroot_fchdir;
72368+extern int grsec_enable_chroot_nice;
72369+extern int grsec_enable_chroot_execlog;
72370+extern int grsec_enable_chroot_caps;
72371+extern int grsec_enable_chroot_sysctl;
72372+extern int grsec_enable_chroot_unix;
72373+extern int grsec_enable_symlinkown;
72374+extern kgid_t grsec_symlinkown_gid;
72375+extern int grsec_enable_tpe;
72376+extern kgid_t grsec_tpe_gid;
72377+extern int grsec_enable_tpe_all;
72378+extern int grsec_enable_tpe_invert;
72379+extern int grsec_enable_socket_all;
72380+extern kgid_t grsec_socket_all_gid;
72381+extern int grsec_enable_socket_client;
72382+extern kgid_t grsec_socket_client_gid;
72383+extern int grsec_enable_socket_server;
72384+extern kgid_t grsec_socket_server_gid;
72385+extern kgid_t grsec_audit_gid;
72386+extern int grsec_enable_group;
72387+extern int grsec_enable_log_rwxmaps;
72388+extern int grsec_enable_mount;
72389+extern int grsec_enable_chdir;
72390+extern int grsec_resource_logging;
72391+extern int grsec_enable_blackhole;
72392+extern int grsec_lastack_retries;
72393+extern int grsec_enable_brute;
72394+extern int grsec_lock;
72395+
72396+extern spinlock_t grsec_alert_lock;
72397+extern unsigned long grsec_alert_wtime;
72398+extern unsigned long grsec_alert_fyet;
72399+
72400+extern spinlock_t grsec_audit_lock;
72401+
72402+extern rwlock_t grsec_exec_file_lock;
72403+
72404+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
72405+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
72406+ (tsk)->exec_file->f_path.mnt) : "/")
72407+
72408+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
72409+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
72410+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
72411+
72412+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
72413+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
72414+ (tsk)->exec_file->f_path.mnt) : "/")
72415+
72416+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
72417+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
72418+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
72419+
72420+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
72421+
72422+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
72423+
72424+static inline bool gr_is_same_file(const struct file *file1, const struct file *file2)
72425+{
72426+ if (file1 && file2) {
72427+ const struct inode *inode1 = file1->f_path.dentry->d_inode;
72428+ const struct inode *inode2 = file2->f_path.dentry->d_inode;
72429+ if (inode1->i_ino == inode2->i_ino && inode1->i_sb->s_dev == inode2->i_sb->s_dev)
72430+ return true;
72431+ }
72432+
72433+ return false;
72434+}
72435+
72436+#define GR_CHROOT_CAPS {{ \
72437+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
72438+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
72439+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
72440+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
72441+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
72442+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
72443+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
72444+
72445+#define security_learn(normal_msg,args...) \
72446+({ \
72447+ read_lock(&grsec_exec_file_lock); \
72448+ gr_add_learn_entry(normal_msg "\n", ## args); \
72449+ read_unlock(&grsec_exec_file_lock); \
72450+})
72451+
72452+enum {
72453+ GR_DO_AUDIT,
72454+ GR_DONT_AUDIT,
72455+ /* used for non-audit messages for events that shouldn't kill the task */
72456+ GR_DONT_AUDIT_GOOD
72457+};
72458+
72459+enum {
72460+ GR_TTYSNIFF,
72461+ GR_RBAC,
72462+ GR_RBAC_STR,
72463+ GR_STR_RBAC,
72464+ GR_RBAC_MODE2,
72465+ GR_RBAC_MODE3,
72466+ GR_FILENAME,
72467+ GR_SYSCTL_HIDDEN,
72468+ GR_NOARGS,
72469+ GR_ONE_INT,
72470+ GR_ONE_INT_TWO_STR,
72471+ GR_ONE_STR,
72472+ GR_STR_INT,
72473+ GR_TWO_STR_INT,
72474+ GR_TWO_INT,
72475+ GR_TWO_U64,
72476+ GR_THREE_INT,
72477+ GR_FIVE_INT_TWO_STR,
72478+ GR_TWO_STR,
72479+ GR_THREE_STR,
72480+ GR_FOUR_STR,
72481+ GR_STR_FILENAME,
72482+ GR_FILENAME_STR,
72483+ GR_FILENAME_TWO_INT,
72484+ GR_FILENAME_TWO_INT_STR,
72485+ GR_TEXTREL,
72486+ GR_PTRACE,
72487+ GR_RESOURCE,
72488+ GR_CAP,
72489+ GR_SIG,
72490+ GR_SIG2,
72491+ GR_CRASH1,
72492+ GR_CRASH2,
72493+ GR_PSACCT,
72494+ GR_RWXMAP,
72495+ GR_RWXMAPVMA
72496+};
72497+
72498+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
72499+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
72500+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
72501+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
72502+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
72503+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
72504+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
72505+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
72506+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
72507+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
72508+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
72509+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
72510+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
72511+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
72512+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
72513+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
72514+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
72515+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
72516+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
72517+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
72518+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
72519+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
72520+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
72521+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
72522+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
72523+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
72524+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
72525+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
72526+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
72527+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
72528+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
72529+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
72530+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
72531+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
72532+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
72533+#define gr_log_rwxmap_vma(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAPVMA, str)
72534+
72535+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
72536+
72537+#endif
72538+
72539+#endif
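
The gr_log_* wrapper macros above all funnel into gr_log_varargs(), where the argtypes selector fixes how many trailing arguments exist and of what types, so the va_list can be decoded safely before the logger appends the acting task's identification (the reason so many format strings end in "by "). A small userspace model of that dispatch, covering just two selectors; the kernel implementation lives elsewhere in this patch and handles every enum value:

#include <stdarg.h>
#include <stdio.h>

enum { GR_ONE_INT, GR_TWO_STR };   /* subset of the selectors above */

/* Model of the switch-on-argtypes decoding; not the kernel code. */
static void gr_log_varargs_model(const char *msg, int argtypes, ...)
{
        char buf[256];
        va_list ap;

        va_start(ap, argtypes);
        switch (argtypes) {
        case GR_ONE_INT:
                snprintf(buf, sizeof(buf), msg, va_arg(ap, int));
                break;
        case GR_TWO_STR: {
                const char *s1 = va_arg(ap, const char *);
                const char *s2 = va_arg(ap, const char *);
                snprintf(buf, sizeof(buf), msg, s1, s2);
                break;
        }
        default:
                buf[0] = '\0';
                break;
        }
        va_end(ap);
        printf("grsec: %s<task identification appended here>\n", buf);
}

int main(void)
{
        gr_log_varargs_model("mount of %.256s to %.256s by ", GR_TWO_STR,
                             "/dev/sda1", "/mnt");
        return 0;
}
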
72540diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
72541new file mode 100644
72542index 0000000..a4396b5
72543--- /dev/null
72544+++ b/include/linux/grmsg.h
72545@@ -0,0 +1,113 @@
72546+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
72547+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
72548+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
72549+#define GR_STOPMOD_MSG "denied modification of module state by "
72550+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
72551+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
72552+#define GR_IOPERM_MSG "denied use of ioperm() by "
72553+#define GR_IOPL_MSG "denied use of iopl() by "
72554+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
72555+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
72556+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
72557+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
72558+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
72559+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
72560+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
72561+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
72562+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
72563+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
72564+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
72565+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
72566+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
72567+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
72568+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
72569+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
72570+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
72571+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
72572+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
72573+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
72574+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
72575+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
72576+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
72577+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
72578+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
72579+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
72580+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
72581+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
72582+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
72583+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
72584+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
72585+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
72586+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
72587+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
72588+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
72589+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
72590+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
72591+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
72592+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
72593+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
72594+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
72595+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
72596+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
72597+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
72598+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
72599+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
72600+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
72601+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
72602+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
72603+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
72604+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
72605+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
72606+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
72607+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
72608+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
72609+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
72610+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
72611+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
72612+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
72613+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
72614+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
72615+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
72616+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
72617+#define GR_FAILFORK_MSG "failed fork with errno %s by "
72618+#define GR_NICE_CHROOT_MSG "denied priority change by "
72619+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
72620+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
72621+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
72622+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
72623+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
72624+#define GR_TIME_MSG "time set by "
72625+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
72626+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
72627+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
72628+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
72629+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
72630+#define GR_BIND_MSG "denied bind() by "
72631+#define GR_CONNECT_MSG "denied connect() by "
72632+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
72633+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
72634+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
72635+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
72636+#define GR_CAP_ACL_MSG "use of %s denied for "
72637+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
72638+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
72639+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
72640+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
72641+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
72642+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
72643+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
72644+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
72645+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
72646+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
72647+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
72648+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
72649+#define GR_TEXTREL_AUDIT_MSG "denied text relocation in %.950s, VMA:0x%08lx 0x%08lx by "
72650+#define GR_PTGNUSTACK_MSG "denied marking stack executable as requested by PT_GNU_STACK marking in %.950s by "
72651+#define GR_VM86_MSG "denied use of vm86 by "
72652+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
72653+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
72654+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
72655+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
72656+#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
72657+#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for "
72658+#define GR_BRUTE_SUID_MSG "bruteforce prevention initiated due to crash of %.950s against uid %u, banning suid/sgid execs for %u minutes. Please investigate the crash report for "
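
Nearly every string above ends in "by " (or embeds DEFAULTSECMSG) because the logging layer completes the sentence with the identification of the current task and its parent. The composition is plain printf-style concatenation of adjacent string literals; a compilable illustration with invented field values:

#include <stdio.h>

#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "

int main(void)
{
        /* all task fields below are made up for demonstration */
        printf(GR_CHDIR_AUDIT_MSG DEFAULTSECMSG "\n",
               "/tmp",
               "/bin/bash", "bash", 1234, 1000u, 1000u, 1000u, 1000u,
               "/sbin/init", "init", 1, 0u, 0u, 0u, 0u);
        return 0;
}
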
72659diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
72660new file mode 100644
72661index 0000000..3676b0b
72662--- /dev/null
72663+++ b/include/linux/grsecurity.h
72664@@ -0,0 +1,242 @@
72665+#ifndef GR_SECURITY_H
72666+#define GR_SECURITY_H
72667+#include <linux/fs.h>
72668+#include <linux/fs_struct.h>
72669+#include <linux/binfmts.h>
72670+#include <linux/gracl.h>
72671+
72672+/* notify of brain-dead configs */
72673+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72674+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
72675+#endif
72676+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
72677+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
72678+#endif
72679+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
72680+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
72681+#endif
72682+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
72683+#error "CONFIG_PAX enabled, but no PaX options are enabled."
72684+#endif
72685+
72686+void gr_handle_brute_attach(unsigned long mm_flags);
72687+void gr_handle_brute_check(void);
72688+void gr_handle_kernel_exploit(void);
72689+
72690+char gr_roletype_to_char(void);
72691+
72692+int gr_acl_enable_at_secure(void);
72693+
72694+int gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs);
72695+int gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs);
72696+
72697+void gr_del_task_from_ip_table(struct task_struct *p);
72698+
72699+int gr_pid_is_chrooted(struct task_struct *p);
72700+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
72701+int gr_handle_chroot_nice(void);
72702+int gr_handle_chroot_sysctl(const int op);
72703+int gr_handle_chroot_setpriority(struct task_struct *p,
72704+ const int niceval);
72705+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
72706+int gr_handle_chroot_chroot(const struct dentry *dentry,
72707+ const struct vfsmount *mnt);
72708+void gr_handle_chroot_chdir(const struct path *path);
72709+int gr_handle_chroot_chmod(const struct dentry *dentry,
72710+ const struct vfsmount *mnt, const int mode);
72711+int gr_handle_chroot_mknod(const struct dentry *dentry,
72712+ const struct vfsmount *mnt, const int mode);
72713+int gr_handle_chroot_mount(const struct dentry *dentry,
72714+ const struct vfsmount *mnt,
72715+ const char *dev_name);
72716+int gr_handle_chroot_pivot(void);
72717+int gr_handle_chroot_unix(const pid_t pid);
72718+
72719+int gr_handle_rawio(const struct inode *inode);
72720+
72721+void gr_handle_ioperm(void);
72722+void gr_handle_iopl(void);
72723+
72724+umode_t gr_acl_umask(void);
72725+
72726+int gr_tpe_allow(const struct file *file);
72727+
72728+void gr_set_chroot_entries(struct task_struct *task, const struct path *path);
72729+void gr_clear_chroot_entries(struct task_struct *task);
72730+
72731+void gr_log_forkfail(const int retval);
72732+void gr_log_timechange(void);
72733+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
72734+void gr_log_chdir(const struct dentry *dentry,
72735+ const struct vfsmount *mnt);
72736+void gr_log_chroot_exec(const struct dentry *dentry,
72737+ const struct vfsmount *mnt);
72738+void gr_log_remount(const char *devname, const int retval);
72739+void gr_log_unmount(const char *devname, const int retval);
72740+void gr_log_mount(const char *from, const char *to, const int retval);
72741+void gr_log_textrel(struct vm_area_struct *vma);
72742+void gr_log_ptgnustack(struct file *file);
72743+void gr_log_rwxmmap(struct file *file);
72744+void gr_log_rwxmprotect(struct vm_area_struct *vma);
72745+
72746+int gr_handle_follow_link(const struct inode *parent,
72747+ const struct inode *inode,
72748+ const struct dentry *dentry,
72749+ const struct vfsmount *mnt);
72750+int gr_handle_fifo(const struct dentry *dentry,
72751+ const struct vfsmount *mnt,
72752+ const struct dentry *dir, const int flag,
72753+ const int acc_mode);
72754+int gr_handle_hardlink(const struct dentry *dentry,
72755+ const struct vfsmount *mnt,
72756+ struct inode *inode,
72757+ const int mode, const struct filename *to);
72758+
72759+int gr_is_capable(const int cap);
72760+int gr_is_capable_nolog(const int cap);
72761+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
72762+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
72763+
72764+void gr_copy_label(struct task_struct *tsk);
72765+void gr_handle_crash(struct task_struct *task, const int sig);
72766+int gr_handle_signal(const struct task_struct *p, const int sig);
72767+int gr_check_crash_uid(const kuid_t uid);
72768+int gr_check_protected_task(const struct task_struct *task);
72769+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
72770+int gr_acl_handle_mmap(const struct file *file,
72771+ const unsigned long prot);
72772+int gr_acl_handle_mprotect(const struct file *file,
72773+ const unsigned long prot);
72774+int gr_check_hidden_task(const struct task_struct *tsk);
72775+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
72776+ const struct vfsmount *mnt);
72777+__u32 gr_acl_handle_utime(const struct dentry *dentry,
72778+ const struct vfsmount *mnt);
72779+__u32 gr_acl_handle_access(const struct dentry *dentry,
72780+ const struct vfsmount *mnt, const int fmode);
72781+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
72782+ const struct vfsmount *mnt, umode_t *mode);
72783+__u32 gr_acl_handle_chown(const struct dentry *dentry,
72784+ const struct vfsmount *mnt);
72785+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
72786+ const struct vfsmount *mnt);
72787+int gr_handle_ptrace(struct task_struct *task, const long request);
72788+int gr_handle_proc_ptrace(struct task_struct *task);
72789+__u32 gr_acl_handle_execve(const struct dentry *dentry,
72790+ const struct vfsmount *mnt);
72791+int gr_check_crash_exec(const struct file *filp);
72792+int gr_acl_is_enabled(void);
72793+void gr_set_kernel_label(struct task_struct *task);
72794+void gr_set_role_label(struct task_struct *task, const kuid_t uid,
72795+ const kgid_t gid);
72796+int gr_set_proc_label(const struct dentry *dentry,
72797+ const struct vfsmount *mnt,
72798+ const int unsafe_flags);
72799+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
72800+ const struct vfsmount *mnt);
72801+__u32 gr_acl_handle_open(const struct dentry *dentry,
72802+ const struct vfsmount *mnt, int acc_mode);
72803+__u32 gr_acl_handle_creat(const struct dentry *dentry,
72804+ const struct dentry *p_dentry,
72805+ const struct vfsmount *p_mnt,
72806+ int open_flags, int acc_mode, const int imode);
72807+void gr_handle_create(const struct dentry *dentry,
72808+ const struct vfsmount *mnt);
72809+void gr_handle_proc_create(const struct dentry *dentry,
72810+ const struct inode *inode);
72811+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
72812+ const struct dentry *parent_dentry,
72813+ const struct vfsmount *parent_mnt,
72814+ const int mode);
72815+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
72816+ const struct dentry *parent_dentry,
72817+ const struct vfsmount *parent_mnt);
72818+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
72819+ const struct vfsmount *mnt);
72820+void gr_handle_delete(const ino_t ino, const dev_t dev);
72821+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
72822+ const struct vfsmount *mnt);
72823+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
72824+ const struct dentry *parent_dentry,
72825+ const struct vfsmount *parent_mnt,
72826+ const struct filename *from);
72827+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
72828+ const struct dentry *parent_dentry,
72829+ const struct vfsmount *parent_mnt,
72830+ const struct dentry *old_dentry,
72831+ const struct vfsmount *old_mnt, const struct filename *to);
72832+int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
72833+int gr_acl_handle_rename(struct dentry *new_dentry,
72834+ struct dentry *parent_dentry,
72835+ const struct vfsmount *parent_mnt,
72836+ struct dentry *old_dentry,
72837+ struct inode *old_parent_inode,
72838+ struct vfsmount *old_mnt, const struct filename *newname);
72839+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
72840+ struct dentry *old_dentry,
72841+ struct dentry *new_dentry,
72842+ struct vfsmount *mnt, const __u8 replace);
72843+__u32 gr_check_link(const struct dentry *new_dentry,
72844+ const struct dentry *parent_dentry,
72845+ const struct vfsmount *parent_mnt,
72846+ const struct dentry *old_dentry,
72847+ const struct vfsmount *old_mnt);
72848+int gr_acl_handle_filldir(const struct file *file, const char *name,
72849+ const unsigned int namelen, const ino_t ino);
72850+
72851+__u32 gr_acl_handle_unix(const struct dentry *dentry,
72852+ const struct vfsmount *mnt);
72853+void gr_acl_handle_exit(void);
72854+void gr_acl_handle_psacct(struct task_struct *task, const long code);
72855+int gr_acl_handle_procpidmem(const struct task_struct *task);
72856+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
72857+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
72858+void gr_audit_ptrace(struct task_struct *task);
72859+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
72860+void gr_put_exec_file(struct task_struct *task);
72861+
72862+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
72863+
72864+#if defined(CONFIG_GRKERNSEC) && (defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC))
72865+extern void gr_learn_resource(const struct task_struct *task, const int res,
72866+ const unsigned long wanted, const int gt);
72867+#else
72868+static inline void gr_learn_resource(const struct task_struct *task, const int res,
72869+ const unsigned long wanted, const int gt)
72870+{
72871+}
72872+#endif
72873+
72874+#ifdef CONFIG_GRKERNSEC_RESLOG
72875+extern void gr_log_resource(const struct task_struct *task, const int res,
72876+ const unsigned long wanted, const int gt);
72877+#else
72878+static inline void gr_log_resource(const struct task_struct *task, const int res,
72879+ const unsigned long wanted, const int gt)
72880+{
72881+}
72882+#endif
72883+
72884+#ifdef CONFIG_GRKERNSEC
72885+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
72886+void gr_handle_vm86(void);
72887+void gr_handle_mem_readwrite(u64 from, u64 to);
72888+
72889+void gr_log_badprocpid(const char *entry);
72890+
72891+extern int grsec_enable_dmesg;
72892+extern int grsec_disable_privio;
72893+
72894+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
72895+extern kgid_t grsec_proc_gid;
72896+#endif
72897+
72898+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
72899+extern int grsec_enable_chroot_findtask;
72900+#endif
72901+#ifdef CONFIG_GRKERNSEC_SETXID
72902+extern int grsec_enable_setxid;
72903+#endif
72904+#endif
72905+
72906+#endif
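
Note the pattern used for gr_learn_resource() and gr_log_resource() above: when the relevant config option is off, the extern declaration is replaced by an empty static inline stub, so call sites stay free of #ifdefs and the call compiles away entirely. Reduced to its essentials (config name and function are hypothetical):

#ifdef CONFIG_FROBNICATE_AUDIT
extern void frobnicate_audit(int id);
#else
static inline void frobnicate_audit(int id) { }   /* compiles to nothing */
#endif
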
72907diff --git a/include/linux/grsock.h b/include/linux/grsock.h
72908new file mode 100644
72909index 0000000..e7ffaaf
72910--- /dev/null
72911+++ b/include/linux/grsock.h
72912@@ -0,0 +1,19 @@
72913+#ifndef __GRSOCK_H
72914+#define __GRSOCK_H
72915+
72916+extern void gr_attach_curr_ip(const struct sock *sk);
72917+extern int gr_handle_sock_all(const int family, const int type,
72918+ const int protocol);
72919+extern int gr_handle_sock_server(const struct sockaddr *sck);
72920+extern int gr_handle_sock_server_other(const struct sock *sck);
72921+extern int gr_handle_sock_client(const struct sockaddr *sck);
72922+extern int gr_search_connect(struct socket * sock,
72923+ struct sockaddr_in * addr);
72924+extern int gr_search_bind(struct socket * sock,
72925+ struct sockaddr_in * addr);
72926+extern int gr_search_listen(struct socket * sock);
72927+extern int gr_search_accept(struct socket * sock);
72928+extern int gr_search_socket(const int domain, const int type,
72929+ const int protocol);
72930+
72931+#endif
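
These declarations are the socket-mediation surface of grsecurity: the generic socket code elsewhere in this patch calls them on socket creation, bind, connect, listen and accept. A hedged sketch of how a socket(2) path might consult two of them; the wrapper function and exact deny semantics here are illustrative, not copied from the patch:

/* illustrative only: returns 0 to proceed with sock_create() */
static int socket_checks_sketch(int family, int type, int protocol)
{
        if (!gr_search_socket(family, type, protocol))
                return -EACCES;   /* denied by RBAC socket policy */
        if (gr_handle_sock_all(family, type, protocol))
                return -EACCES;   /* denied by the socket_all group */
        return 0;
}
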
72932diff --git a/include/linux/highmem.h b/include/linux/highmem.h
72933index 7fb31da..08b5114 100644
72934--- a/include/linux/highmem.h
72935+++ b/include/linux/highmem.h
72936@@ -189,6 +189,18 @@ static inline void clear_highpage(struct page *page)
72937 kunmap_atomic(kaddr);
72938 }
72939
72940+static inline void sanitize_highpage(struct page *page)
72941+{
72942+ void *kaddr;
72943+ unsigned long flags;
72944+
72945+ local_irq_save(flags);
72946+ kaddr = kmap_atomic(page);
72947+ clear_page(kaddr);
72948+ kunmap_atomic(kaddr);
72949+ local_irq_restore(flags);
72950+}
72951+
72952 static inline void zero_user_segments(struct page *page,
72953 unsigned start1, unsigned end1,
72954 unsigned start2, unsigned end2)
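
sanitize_highpage() differs from clear_highpage() just above it by bracketing the atomic kmap with local_irq_save()/local_irq_restore(); that is consistent with it being called on the page-free path (PaX-style memory sanitization), which can be reached with interrupts in any state. A hedged sketch of such a caller; the function name and surrounding logic are invented:

/* sketch: scrub page contents as the page is returned to the allocator */
static void free_page_sanitized_sketch(struct page *page)
{
        sanitize_highpage(page);   /* zero the data, IRQs off around kmap_atomic */
        /* ...normal freeing continues... */
}
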
72955diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h
72956index 1c7b89a..7f52502 100644
72957--- a/include/linux/hwmon-sysfs.h
72958+++ b/include/linux/hwmon-sysfs.h
72959@@ -25,7 +25,8 @@
72960 struct sensor_device_attribute{
72961 struct device_attribute dev_attr;
72962 int index;
72963-};
72964+} __do_const;
72965+typedef struct sensor_device_attribute __no_const sensor_device_attribute_no_const;
72966 #define to_sensor_dev_attr(_dev_attr) \
72967 container_of(_dev_attr, struct sensor_device_attribute, dev_attr)
72968
72969@@ -41,7 +42,7 @@ struct sensor_device_attribute_2 {
72970 struct device_attribute dev_attr;
72971 u8 index;
72972 u8 nr;
72973-};
72974+} __do_const;
72975 #define to_sensor_dev_attr_2(_dev_attr) \
72976 container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr)
72977
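
The __do_const marker (here and on many structs below) tells the constification gcc plugin to treat instances as read-only after initialization, since tables of function pointers are a favourite kernel-exploit target. When a driver genuinely must fill an instance at probe time, the patch pairs the struct with a __no_const typedef, as with sensor_device_attribute_no_const. A short sketch of the intended use; the driver names are invented:

/* writable instance: the plugin leaves __no_const typedefs alone */
static sensor_device_attribute_no_const my_sensor_attr;

static void my_driver_probe_sketch(void)
{
        my_sensor_attr.index = 3;   /* legal only on the _no_const type */
        /* my_sensor_attr.dev_attr would be filled in here as well */
}
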
72978diff --git a/include/linux/i2c.h b/include/linux/i2c.h
72979index e988fa9..ff9f17e 100644
72980--- a/include/linux/i2c.h
72981+++ b/include/linux/i2c.h
72982@@ -366,6 +366,7 @@ struct i2c_algorithm {
72983 /* To determine what the adapter supports */
72984 u32 (*functionality) (struct i2c_adapter *);
72985 };
72986+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
72987
72988 /**
72989 * struct i2c_bus_recovery_info - I2C bus recovery information
72990diff --git a/include/linux/i2o.h b/include/linux/i2o.h
72991index d23c3c2..eb63c81 100644
72992--- a/include/linux/i2o.h
72993+++ b/include/linux/i2o.h
72994@@ -565,7 +565,7 @@ struct i2o_controller {
72995 struct i2o_device *exec; /* Executive */
72996 #if BITS_PER_LONG == 64
72997 spinlock_t context_list_lock; /* lock for context_list */
72998- atomic_t context_list_counter; /* needed for unique contexts */
72999+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
73000 struct list_head context_list; /* list of context id's
73001 and pointers */
73002 #endif
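
The atomic_t -> atomic_unchecked_t conversion above is the flip side of PaX's REFCOUNT hardening: ordinary atomic_t operations trap on overflow to stop reference-count-overflow exploits, so counters that are allowed to wrap (a context-id generator here) must be switched to the _unchecked type and accessors. A sketch of the idiom, assuming the _unchecked API this patch introduces:

static atomic_t obj_refcount = ATOMIC_INIT(1);           /* overflow traps */
static atomic_unchecked_t ctx_counter = ATOMIC_INIT(0);  /* may wrap freely */

static u32 next_context_id_sketch(void)
{
        return (u32)atomic_inc_return_unchecked(&ctx_counter);
}
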
73003diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
73004index aff7ad8..3942bbd 100644
73005--- a/include/linux/if_pppox.h
73006+++ b/include/linux/if_pppox.h
73007@@ -76,7 +76,7 @@ struct pppox_proto {
73008 int (*ioctl)(struct socket *sock, unsigned int cmd,
73009 unsigned long arg);
73010 struct module *owner;
73011-};
73012+} __do_const;
73013
73014 extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
73015 extern void unregister_pppox_proto(int proto_num);
73016diff --git a/include/linux/init.h b/include/linux/init.h
73017index 8618147..0821126 100644
73018--- a/include/linux/init.h
73019+++ b/include/linux/init.h
73020@@ -39,9 +39,36 @@
73021 * Also note, that this data cannot be "const".
73022 */
73023
73024+#ifdef MODULE
73025+#define add_init_latent_entropy
73026+#define add_devinit_latent_entropy
73027+#define add_cpuinit_latent_entropy
73028+#define add_meminit_latent_entropy
73029+#else
73030+#define add_init_latent_entropy __latent_entropy
73031+
73032+#ifdef CONFIG_HOTPLUG
73033+#define add_devinit_latent_entropy
73034+#else
73035+#define add_devinit_latent_entropy __latent_entropy
73036+#endif
73037+
73038+#ifdef CONFIG_HOTPLUG_CPU
73039+#define add_cpuinit_latent_entropy
73040+#else
73041+#define add_cpuinit_latent_entropy __latent_entropy
73042+#endif
73043+
73044+#ifdef CONFIG_MEMORY_HOTPLUG
73045+#define add_meminit_latent_entropy
73046+#else
73047+#define add_meminit_latent_entropy __latent_entropy
73048+#endif
73049+#endif
73050+
73051 /* These are for everybody (although not all archs will actually
73052 discard it in modules) */
73053-#define __init __section(.init.text) __cold notrace
73054+#define __init __section(.init.text) __cold notrace add_init_latent_entropy
73055 #define __initdata __section(.init.data)
73056 #define __initconst __constsection(.init.rodata)
73057 #define __exitdata __section(.exit.data)
73058@@ -94,7 +121,7 @@
73059 #define __exit __section(.exit.text) __exitused __cold notrace
73060
73061 /* Used for HOTPLUG_CPU */
73062-#define __cpuinit __section(.cpuinit.text) __cold notrace
73063+#define __cpuinit __section(.cpuinit.text) __cold notrace add_cpuinit_latent_entropy
73064 #define __cpuinitdata __section(.cpuinit.data)
73065 #define __cpuinitconst __constsection(.cpuinit.rodata)
73066 #define __cpuexit __section(.cpuexit.text) __exitused __cold notrace
73067@@ -102,7 +129,7 @@
73068 #define __cpuexitconst __constsection(.cpuexit.rodata)
73069
73070 /* Used for MEMORY_HOTPLUG */
73071-#define __meminit __section(.meminit.text) __cold notrace
73072+#define __meminit __section(.meminit.text) __cold notrace add_meminit_latent_entropy
73073 #define __meminitdata __section(.meminit.data)
73074 #define __meminitconst __constsection(.meminit.rodata)
73075 #define __memexit __section(.memexit.text) __exitused __cold notrace
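
The add_*_latent_entropy macros above gate the __latent_entropy attribute, which the latent_entropy gcc plugin uses to make instrumented functions stir some build-time-derived randomness into a global pool as they run. Broadly, only built-in code that runs once at boot is instrumented: modules never are, and hotplug sections are skipped when the corresponding hotplug support is enabled, since they can run repeatedly. The effective expansion for a built-in kernel with CONFIG_HOTPLUG_CPU=y would be:

/* what the conditionals above reduce to in that configuration */
#define __init    __section(.init.text) __cold notrace __latent_entropy
#define __cpuinit __section(.cpuinit.text) __cold notrace   /* no entropy */
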
73076diff --git a/include/linux/init_task.h b/include/linux/init_task.h
73077index 5cd0f09..c9f67cc 100644
73078--- a/include/linux/init_task.h
73079+++ b/include/linux/init_task.h
73080@@ -154,6 +154,12 @@ extern struct task_group root_task_group;
73081
73082 #define INIT_TASK_COMM "swapper"
73083
73084+#ifdef CONFIG_X86
73085+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
73086+#else
73087+#define INIT_TASK_THREAD_INFO
73088+#endif
73089+
73090 /*
73091 * INIT_TASK is used to set up the first task table, touch at
73092 * your own risk! Base=0, limit=0x1fffff (=2MB)
73093@@ -193,6 +199,7 @@ extern struct task_group root_task_group;
73094 RCU_POINTER_INITIALIZER(cred, &init_cred), \
73095 .comm = INIT_TASK_COMM, \
73096 .thread = INIT_THREAD, \
73097+ INIT_TASK_THREAD_INFO \
73098 .fs = &init_fs, \
73099 .files = &init_files, \
73100 .signal = &init_signals, \
73101diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
73102index 5fa5afe..ac55b25 100644
73103--- a/include/linux/interrupt.h
73104+++ b/include/linux/interrupt.h
73105@@ -430,7 +430,7 @@ enum
73106 /* map softirq index to softirq name. update 'softirq_to_name' in
73107 * kernel/softirq.c when adding a new softirq.
73108 */
73109-extern char *softirq_to_name[NR_SOFTIRQS];
73110+extern const char * const softirq_to_name[NR_SOFTIRQS];
73111
73112 /* softirq mask and active fields moved to irq_cpustat_t in
73113 * asm/hardirq.h to get better cache usage. KAO
73114@@ -438,12 +438,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
73115
73116 struct softirq_action
73117 {
73118- void (*action)(struct softirq_action *);
73119-};
73120+ void (*action)(void);
73121+} __no_const;
73122
73123 asmlinkage void do_softirq(void);
73124 asmlinkage void __do_softirq(void);
73125-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
73126+extern void open_softirq(int nr, void (*action)(void));
73127 extern void softirq_init(void);
73128 extern void __raise_softirq_irqoff(unsigned int nr);
73129
73130diff --git a/include/linux/iommu.h b/include/linux/iommu.h
73131index 3aeb730..2177f39 100644
73132--- a/include/linux/iommu.h
73133+++ b/include/linux/iommu.h
73134@@ -113,7 +113,7 @@ struct iommu_ops {
73135 u32 (*domain_get_windows)(struct iommu_domain *domain);
73136
73137 unsigned long pgsize_bitmap;
73138-};
73139+} __do_const;
73140
73141 #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
73142 #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
73143diff --git a/include/linux/ioport.h b/include/linux/ioport.h
73144index 89b7c24..382af74 100644
73145--- a/include/linux/ioport.h
73146+++ b/include/linux/ioport.h
73147@@ -161,7 +161,7 @@ struct resource *lookup_resource(struct resource *root, resource_size_t start);
73148 int adjust_resource(struct resource *res, resource_size_t start,
73149 resource_size_t size);
73150 resource_size_t resource_alignment(struct resource *res);
73151-static inline resource_size_t resource_size(const struct resource *res)
73152+static inline resource_size_t __intentional_overflow(-1) resource_size(const struct resource *res)
73153 {
73154 return res->end - res->start + 1;
73155 }
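
__intentional_overflow() is an annotation consumed by the size_overflow gcc plugin; it exempts the marked arithmetic from overflow instrumentation, with the argument selecting what is exempt (-1 appears to denote the return value). It is applied to resource_size() because end - start + 1 legitimately wraps to 0 for a resource spanning the whole address space. On a compiler without the plugin the annotation must expand to nothing; a plausible fallback definition (the patch's real one lives in its compiler.h changes):

#ifndef __intentional_overflow
#define __intentional_overflow(...)   /* no plugin: annotation disappears */
#endif
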
73156diff --git a/include/linux/irq.h b/include/linux/irq.h
73157index bc4e066..50468a9 100644
73158--- a/include/linux/irq.h
73159+++ b/include/linux/irq.h
73160@@ -328,7 +328,8 @@ struct irq_chip {
73161 void (*irq_print_chip)(struct irq_data *data, struct seq_file *p);
73162
73163 unsigned long flags;
73164-};
73165+} __do_const;
73166+typedef struct irq_chip __no_const irq_chip_no_const;
73167
73168 /*
73169 * irq_chip specific flags
73170diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
73171index 3e203eb..3fe68d0 100644
73172--- a/include/linux/irqchip/arm-gic.h
73173+++ b/include/linux/irqchip/arm-gic.h
73174@@ -59,9 +59,11 @@
73175
73176 #ifndef __ASSEMBLY__
73177
73178+#include <linux/irq.h>
73179+
73180 struct device_node;
73181
73182-extern struct irq_chip gic_arch_extn;
73183+extern irq_chip_no_const gic_arch_extn;
73184
73185 void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
73186 u32 offset, struct device_node *);
73187diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
73188index 6883e19..e854fcb 100644
73189--- a/include/linux/kallsyms.h
73190+++ b/include/linux/kallsyms.h
73191@@ -15,7 +15,8 @@
73192
73193 struct module;
73194
73195-#ifdef CONFIG_KALLSYMS
73196+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
73197+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
73198 /* Lookup the address for a symbol. Returns 0 if not found. */
73199 unsigned long kallsyms_lookup_name(const char *name);
73200
73201@@ -106,6 +107,21 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
73202 /* Stupid that this does nothing, but I didn't create this mess. */
73203 #define __print_symbol(fmt, addr)
73204 #endif /*CONFIG_KALLSYMS*/
73205+#else /* when included by kallsyms.c, vsnprintf.c, kprobes.c, or
73206+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
73207+extern unsigned long kallsyms_lookup_name(const char *name);
73208+extern void __print_symbol(const char *fmt, unsigned long address);
73209+extern int sprint_backtrace(char *buffer, unsigned long address);
73210+extern int sprint_symbol(char *buffer, unsigned long address);
73211+extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
73212+const char *kallsyms_lookup(unsigned long addr,
73213+ unsigned long *symbolsize,
73214+ unsigned long *offset,
73215+ char **modname, char *namebuf);
73216+extern int kallsyms_lookup_size_offset(unsigned long addr,
73217+ unsigned long *symbolsize,
73218+ unsigned long *offset);
73219+#endif
73220
73221 /* This macro allows us to keep printk typechecking */
73222 static __printf(1, 2)
73223diff --git a/include/linux/key-type.h b/include/linux/key-type.h
73224index 518a53a..5e28358 100644
73225--- a/include/linux/key-type.h
73226+++ b/include/linux/key-type.h
73227@@ -125,7 +125,7 @@ struct key_type {
73228 /* internal fields */
73229 struct list_head link; /* link in types list */
73230 struct lock_class_key lock_class; /* key->sem lock class */
73231-};
73232+} __do_const;
73233
73234 extern struct key_type key_type_keyring;
73235
73236diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
73237index c6e091b..a940adf 100644
73238--- a/include/linux/kgdb.h
73239+++ b/include/linux/kgdb.h
73240@@ -52,7 +52,7 @@ extern int kgdb_connected;
73241 extern int kgdb_io_module_registered;
73242
73243 extern atomic_t kgdb_setting_breakpoint;
73244-extern atomic_t kgdb_cpu_doing_single_step;
73245+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
73246
73247 extern struct task_struct *kgdb_usethread;
73248 extern struct task_struct *kgdb_contthread;
73249@@ -254,7 +254,7 @@ struct kgdb_arch {
73250 void (*correct_hw_break)(void);
73251
73252 void (*enable_nmi)(bool on);
73253-};
73254+} __do_const;
73255
73256 /**
73257 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
73258@@ -279,7 +279,7 @@ struct kgdb_io {
73259 void (*pre_exception) (void);
73260 void (*post_exception) (void);
73261 int is_console;
73262-};
73263+} __do_const;
73264
73265 extern struct kgdb_arch arch_kgdb_ops;
73266
73267diff --git a/include/linux/kmod.h b/include/linux/kmod.h
73268index 0555cc6..b16a7a4 100644
73269--- a/include/linux/kmod.h
73270+++ b/include/linux/kmod.h
73271@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
73272 * usually useless though. */
73273 extern __printf(2, 3)
73274 int __request_module(bool wait, const char *name, ...);
73275+extern __printf(3, 4)
73276+int ___request_module(bool wait, char *param_name, const char *name, ...);
73277 #define request_module(mod...) __request_module(true, mod)
73278 #define request_module_nowait(mod...) __request_module(false, mod)
73279 #define try_then_request_module(x, mod...) \
73280diff --git a/include/linux/kobject.h b/include/linux/kobject.h
73281index 939b112..ed6ed51 100644
73282--- a/include/linux/kobject.h
73283+++ b/include/linux/kobject.h
73284@@ -111,7 +111,7 @@ struct kobj_type {
73285 struct attribute **default_attrs;
73286 const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
73287 const void *(*namespace)(struct kobject *kobj);
73288-};
73289+} __do_const;
73290
73291 struct kobj_uevent_env {
73292 char *envp[UEVENT_NUM_ENVP];
73293@@ -134,6 +134,7 @@ struct kobj_attribute {
73294 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
73295 const char *buf, size_t count);
73296 };
73297+typedef struct kobj_attribute __no_const kobj_attribute_no_const;
73298
73299 extern const struct sysfs_ops kobj_sysfs_ops;
73300
73301diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
73302index f66b065..c2c29b4 100644
73303--- a/include/linux/kobject_ns.h
73304+++ b/include/linux/kobject_ns.h
73305@@ -43,7 +43,7 @@ struct kobj_ns_type_operations {
73306 const void *(*netlink_ns)(struct sock *sk);
73307 const void *(*initial_ns)(void);
73308 void (*drop_ns)(void *);
73309-};
73310+} __do_const;
73311
73312 int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
73313 int kobj_ns_type_registered(enum kobj_ns_type type);
73314diff --git a/include/linux/kref.h b/include/linux/kref.h
73315index 484604d..0f6c5b6 100644
73316--- a/include/linux/kref.h
73317+++ b/include/linux/kref.h
73318@@ -68,7 +68,7 @@ static inline void kref_get(struct kref *kref)
73319 static inline int kref_sub(struct kref *kref, unsigned int count,
73320 void (*release)(struct kref *kref))
73321 {
73322- WARN_ON(release == NULL);
73323+ BUG_ON(release == NULL);
73324
73325 if (atomic_sub_and_test((int) count, &kref->refcount)) {
73326 release(kref);
73327diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
73328index 8db53cf..c21121d 100644
73329--- a/include/linux/kvm_host.h
73330+++ b/include/linux/kvm_host.h
73331@@ -444,7 +444,7 @@ static inline void kvm_irqfd_exit(void)
73332 {
73333 }
73334 #endif
73335-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
73336+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
73337 struct module *module);
73338 void kvm_exit(void);
73339
73340@@ -616,7 +616,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
73341 struct kvm_guest_debug *dbg);
73342 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
73343
73344-int kvm_arch_init(void *opaque);
73345+int kvm_arch_init(const void *opaque);
73346 void kvm_arch_exit(void);
73347
73348 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
73349diff --git a/include/linux/libata.h b/include/linux/libata.h
73350index eae7a05..2cdd875 100644
73351--- a/include/linux/libata.h
73352+++ b/include/linux/libata.h
73353@@ -919,7 +919,7 @@ struct ata_port_operations {
73354 * fields must be pointers.
73355 */
73356 const struct ata_port_operations *inherits;
73357-};
73358+} __do_const;
73359
73360 struct ata_port_info {
73361 unsigned long flags;
73362diff --git a/include/linux/list.h b/include/linux/list.h
73363index b83e565..baa6c1d 100644
73364--- a/include/linux/list.h
73365+++ b/include/linux/list.h
73366@@ -112,6 +112,19 @@ extern void __list_del_entry(struct list_head *entry);
73367 extern void list_del(struct list_head *entry);
73368 #endif
73369
73370+extern void __pax_list_add(struct list_head *new,
73371+ struct list_head *prev,
73372+ struct list_head *next);
73373+static inline void pax_list_add(struct list_head *new, struct list_head *head)
73374+{
73375+ __pax_list_add(new, head, head->next);
73376+}
73377+static inline void pax_list_add_tail(struct list_head *new, struct list_head *head)
73378+{
73379+ __pax_list_add(new, head->prev, head);
73380+}
73381+extern void pax_list_del(struct list_head *entry);
73382+
73383 /**
73384 * list_replace - replace old entry by new one
73385 * @old : the element to be replaced
73386@@ -145,6 +158,8 @@ static inline void list_del_init(struct list_head *entry)
73387 INIT_LIST_HEAD(entry);
73388 }
73389
73390+extern void pax_list_del_init(struct list_head *entry);
73391+
73392 /**
73393 * list_move - delete from one list and add as another's head
73394 * @list: the entry to move
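
pax_list_add()/pax_list_del() mirror the ordinary list primitives but are implemented out of line so they can briefly make read-only kernel data writable around the pointer updates; that is what lets this patch constify structures that still sit on linked lists. A hedged sketch of the out-of-line helper, assuming the pax_open_kernel()/pax_close_kernel() pair this patch provides (the real version, elsewhere in the patch, also keeps the list-debug sanity checks):

void __pax_list_add(struct list_head *new, struct list_head *prev,
                    struct list_head *next)
{
        pax_open_kernel();
        next->prev = new;
        new->next = next;
        new->prev = prev;
        prev->next = new;
        pax_close_kernel();
}
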
73395diff --git a/include/linux/math64.h b/include/linux/math64.h
73396index 2913b86..8dcbb1e 100644
73397--- a/include/linux/math64.h
73398+++ b/include/linux/math64.h
73399@@ -15,7 +15,7 @@
73400 * This is commonly provided by 32bit archs to provide an optimized 64bit
73401 * divide.
73402 */
73403-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
73404+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
73405 {
73406 *remainder = dividend % divisor;
73407 return dividend / divisor;
73408@@ -33,7 +33,7 @@ static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
73409 /**
73410 * div64_u64 - unsigned 64bit divide with 64bit divisor
73411 */
73412-static inline u64 div64_u64(u64 dividend, u64 divisor)
73413+static inline u64 __intentional_overflow(0) div64_u64(u64 dividend, u64 divisor)
73414 {
73415 return dividend / divisor;
73416 }
73417@@ -52,7 +52,7 @@ static inline s64 div64_s64(s64 dividend, s64 divisor)
73418 #define div64_ul(x, y) div_u64((x), (y))
73419
73420 #ifndef div_u64_rem
73421-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
73422+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
73423 {
73424 *remainder = do_div(dividend, divisor);
73425 return dividend;
73426@@ -81,7 +81,7 @@ extern s64 div64_s64(s64 dividend, s64 divisor);
73427 * divide.
73428 */
73429 #ifndef div_u64
73430-static inline u64 div_u64(u64 dividend, u32 divisor)
73431+static inline u64 __intentional_overflow(-1) div_u64(u64 dividend, u32 divisor)
73432 {
73433 u32 remainder;
73434 return div_u64_rem(dividend, divisor, &remainder);
73435diff --git a/include/linux/mm.h b/include/linux/mm.h
73436index e0c8528..bcf0c29 100644
73437--- a/include/linux/mm.h
73438+++ b/include/linux/mm.h
73439@@ -104,6 +104,11 @@ extern unsigned int kobjsize(const void *objp);
73440 #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
73441 #define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
73442 #define VM_ARCH_1 0x01000000 /* Architecture-specific flag */
73443+
73444+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
73445+#define VM_PAGEEXEC 0x02000000 /* vma->vm_page_prot needs special handling */
73446+#endif
73447+
73448 #define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */
73449
73450 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
73451@@ -205,8 +210,8 @@ struct vm_operations_struct {
73452 /* called by access_process_vm when get_user_pages() fails, typically
73453 * for use by special VMAs that can switch between memory and hardware
73454 */
73455- int (*access)(struct vm_area_struct *vma, unsigned long addr,
73456- void *buf, int len, int write);
73457+ ssize_t (*access)(struct vm_area_struct *vma, unsigned long addr,
73458+ void *buf, size_t len, int write);
73459 #ifdef CONFIG_NUMA
73460 /*
73461 * set_policy() op must add a reference to any non-NULL @new mempolicy
73462@@ -236,6 +241,7 @@ struct vm_operations_struct {
73463 int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
73464 unsigned long size, pgoff_t pgoff);
73465 };
73466+typedef struct vm_operations_struct __no_const vm_operations_struct_no_const;
73467
73468 struct mmu_gather;
73469 struct inode;
73470@@ -980,8 +986,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
73471 unsigned long *pfn);
73472 int follow_phys(struct vm_area_struct *vma, unsigned long address,
73473 unsigned int flags, unsigned long *prot, resource_size_t *phys);
73474-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
73475- void *buf, int len, int write);
73476+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
73477+ void *buf, size_t len, int write);
73478
73479 static inline void unmap_shared_mapping_range(struct address_space *mapping,
73480 loff_t const holebegin, loff_t const holelen)
73481@@ -1020,9 +1026,9 @@ static inline int fixup_user_fault(struct task_struct *tsk,
73482 }
73483 #endif
73484
73485-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
73486-extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
73487- void *buf, int len, int write);
73488+extern ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write);
73489+extern ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
73490+ void *buf, size_t len, int write);
73491
73492 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
73493 unsigned long start, unsigned long nr_pages,
73494@@ -1053,34 +1059,6 @@ int set_page_dirty(struct page *page);
73495 int set_page_dirty_lock(struct page *page);
73496 int clear_page_dirty_for_io(struct page *page);
73497
73498-/* Is the vma a continuation of the stack vma above it? */
73499-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
73500-{
73501- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
73502-}
73503-
73504-static inline int stack_guard_page_start(struct vm_area_struct *vma,
73505- unsigned long addr)
73506-{
73507- return (vma->vm_flags & VM_GROWSDOWN) &&
73508- (vma->vm_start == addr) &&
73509- !vma_growsdown(vma->vm_prev, addr);
73510-}
73511-
73512-/* Is the vma a continuation of the stack vma below it? */
73513-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
73514-{
73515- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
73516-}
73517-
73518-static inline int stack_guard_page_end(struct vm_area_struct *vma,
73519- unsigned long addr)
73520-{
73521- return (vma->vm_flags & VM_GROWSUP) &&
73522- (vma->vm_end == addr) &&
73523- !vma_growsup(vma->vm_next, addr);
73524-}
73525-
73526 extern pid_t
73527 vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
73528
73529@@ -1180,6 +1158,15 @@ static inline void sync_mm_rss(struct mm_struct *mm)
73530 }
73531 #endif
73532
73533+#ifdef CONFIG_MMU
73534+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
73535+#else
73536+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
73537+{
73538+ return __pgprot(0);
73539+}
73540+#endif
73541+
73542 int vma_wants_writenotify(struct vm_area_struct *vma);
73543
73544 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
73545@@ -1198,8 +1185,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
73546 {
73547 return 0;
73548 }
73549+
73550+static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
73551+ unsigned long address)
73552+{
73553+ return 0;
73554+}
73555 #else
73556 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
73557+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
73558 #endif
73559
73560 #ifdef __PAGETABLE_PMD_FOLDED
73561@@ -1208,8 +1202,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
73562 {
73563 return 0;
73564 }
73565+
73566+static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
73567+ unsigned long address)
73568+{
73569+ return 0;
73570+}
73571 #else
73572 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
73573+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
73574 #endif
73575
73576 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
73577@@ -1227,11 +1228,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
73578 NULL: pud_offset(pgd, address);
73579 }
73580
73581+static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
73582+{
73583+ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
73584+ NULL: pud_offset(pgd, address);
73585+}
73586+
73587 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
73588 {
73589 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
73590 NULL: pmd_offset(pud, address);
73591 }
73592+
73593+static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
73594+{
73595+ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
73596+ NULL: pmd_offset(pud, address);
73597+}
73598 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
73599
73600 #if USE_SPLIT_PTLOCKS
73601@@ -1517,6 +1530,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
73602 unsigned long len, unsigned long prot, unsigned long flags,
73603 unsigned long pgoff, unsigned long *populate);
73604 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
73605+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
73606
73607 #ifdef CONFIG_MMU
73608 extern int __mm_populate(unsigned long addr, unsigned long len,
73609@@ -1545,10 +1559,11 @@ struct vm_unmapped_area_info {
73610 unsigned long high_limit;
73611 unsigned long align_mask;
73612 unsigned long align_offset;
73613+ unsigned long threadstack_offset;
73614 };
73615
73616-extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
73617-extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
73618+extern unsigned long unmapped_area(const struct vm_unmapped_area_info *info);
73619+extern unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info);
73620
73621 /*
73622 * Search for an unmapped address range.
73623@@ -1560,7 +1575,7 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
73624 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
73625 */
73626 static inline unsigned long
73627-vm_unmapped_area(struct vm_unmapped_area_info *info)
73628+vm_unmapped_area(const struct vm_unmapped_area_info *info)
73629 {
73630 if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
73631 return unmapped_area(info);
73632@@ -1623,6 +1638,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
73633 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
73634 struct vm_area_struct **pprev);
73635
73636+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
73637+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
73638+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
73639+
73640 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
73641 NULL if none. Assume start_addr < end_addr. */
73642 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
73643@@ -1651,15 +1670,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
73644 return vma;
73645 }
73646
73647-#ifdef CONFIG_MMU
73648-pgprot_t vm_get_page_prot(unsigned long vm_flags);
73649-#else
73650-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
73651-{
73652- return __pgprot(0);
73653-}
73654-#endif
73655-
73656 #ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
73657 unsigned long change_prot_numa(struct vm_area_struct *vma,
73658 unsigned long start, unsigned long end);
73659@@ -1711,6 +1721,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
73660 static inline void vm_stat_account(struct mm_struct *mm,
73661 unsigned long flags, struct file *file, long pages)
73662 {
73663+
73664+#ifdef CONFIG_PAX_RANDMMAP
73665+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
73666+#endif
73667+
73668 mm->total_vm += pages;
73669 }
73670 #endif /* CONFIG_PROC_FS */
73671@@ -1791,7 +1806,7 @@ extern int unpoison_memory(unsigned long pfn);
73672 extern int sysctl_memory_failure_early_kill;
73673 extern int sysctl_memory_failure_recovery;
73674 extern void shake_page(struct page *p, int access);
73675-extern atomic_long_t num_poisoned_pages;
73676+extern atomic_long_unchecked_t num_poisoned_pages;
73677 extern int soft_offline_page(struct page *page, int flags);
73678
73679 extern void dump_page(struct page *page);
73680@@ -1828,5 +1843,11 @@ void __init setup_nr_node_ids(void);
73681 static inline void setup_nr_node_ids(void) {}
73682 #endif
73683
73684+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
73685+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
73686+#else
73687+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
73688+#endif
73689+
73690 #endif /* __KERNEL__ */
73691 #endif /* _LINUX_MM_H */
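[Note on the mm.h hunks above] The length parameters of access_process_vm(), access_remote_vm() and generic_access_phys() are widened from int to size_t, with ssize_t returns. A minimal userspace sketch of the signedness hazard this closes — the buffer and the -1 length are invented for illustration, not taken from the patch:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[16];
	int len = -1;			/* e.g. an unchecked error value */

	/* Handed to a size_t parameter, -1 wraps to SIZE_MAX: */
	printf("(size_t)-1 = %zu\n", (size_t)len);

	/* And signed/unsigned comparison flips the sense of bounds checks: */
	if (len < sizeof(buf))		/* false: len converts to SIZE_MAX */
		memcpy(buf, "x", 1);
	else
		printf("len=%d compared as %zu: the check misfires\n",
		       len, (size_t)len);
	return 0;
}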
73692diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
73693index ace9a5f..81bdb59 100644
73694--- a/include/linux/mm_types.h
73695+++ b/include/linux/mm_types.h
73696@@ -289,6 +289,8 @@ struct vm_area_struct {
73697 #ifdef CONFIG_NUMA
73698 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
73699 #endif
73700+
73701+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
73702 };
73703
73704 struct core_thread {
73705@@ -437,6 +439,24 @@ struct mm_struct {
73706 int first_nid;
73707 #endif
73708 struct uprobes_state uprobes_state;
73709+
73710+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
73711+ unsigned long pax_flags;
73712+#endif
73713+
73714+#ifdef CONFIG_PAX_DLRESOLVE
73715+ unsigned long call_dl_resolve;
73716+#endif
73717+
73718+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
73719+ unsigned long call_syscall;
73720+#endif
73721+
73722+#ifdef CONFIG_PAX_ASLR
73723+ unsigned long delta_mmap; /* randomized offset */
73724+ unsigned long delta_stack; /* randomized offset */
73725+#endif
73726+
73727 };
73728
73729 /* first nid will either be a valid NID or one of these values */
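[Note] struct mm_struct grows per-process PaX state: the pax_flags word (consumed by the MF_PAX_* bits defined later in the sched.h hunk) and, under CONFIG_PAX_ASLR, two per-exec random deltas biasing the mmap and stack bases. A rough userspace illustration of folding a page-aligned delta into a base — the bit width and base address are invented, and rand() merely stands in for the kernel PRNG:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define PAGE_SHIFT 12

/* Invented parameters: 28 random bits of page-granular offset. */
static uintptr_t pick_delta(unsigned int bits)
{
	return ((uintptr_t)rand() & ((1UL << bits) - 1)) << PAGE_SHIFT;
}

int main(void)
{
	srand(time(NULL));
	uintptr_t delta_mmap = pick_delta(28);	/* chosen once per exec */
	uintptr_t base = 0x100000000UL;		/* invented mmap base, LP64 */

	printf("mmap base %#lx + delta %#lx = %#lx\n",
	       (unsigned long)base, (unsigned long)delta_mmap,
	       (unsigned long)(base + delta_mmap));
	return 0;
}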
73730diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
73731index c5d5278..f0b68c8 100644
73732--- a/include/linux/mmiotrace.h
73733+++ b/include/linux/mmiotrace.h
73734@@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
73735 /* Called from ioremap.c */
73736 extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
73737 void __iomem *addr);
73738-extern void mmiotrace_iounmap(volatile void __iomem *addr);
73739+extern void mmiotrace_iounmap(const volatile void __iomem *addr);
73740
73741 /* For anyone to insert markers. Remember trailing newline. */
73742 extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...);
73743@@ -66,7 +66,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
73744 {
73745 }
73746
73747-static inline void mmiotrace_iounmap(volatile void __iomem *addr)
73748+static inline void mmiotrace_iounmap(const volatile void __iomem *addr)
73749 {
73750 }
73751
73752diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
73753index 5c76737..61f518e 100644
73754--- a/include/linux/mmzone.h
73755+++ b/include/linux/mmzone.h
73756@@ -396,7 +396,7 @@ struct zone {
73757 unsigned long flags; /* zone flags, see below */
73758
73759 /* Zone statistics */
73760- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
73761+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
73762
73763 /*
73764 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
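[Note] vm_stat is pure statistics, so it moves to atomic_long_unchecked_t: under PaX's REFCOUNT hardening the ordinary atomic types trap on overflow, and counters whose wraparound is harmless must opt out explicitly. A conceptual userspace model — the types and trap behaviour are illustrative, not the patch's implementation (uses the GCC/Clang __builtin_add_overflow builtin):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { long v; } checked_long_t;		/* overflow is a bug */
typedef struct { unsigned long v; } unchecked_long_t;	/* wrap tolerated  */

static void checked_inc(checked_long_t *c)
{
	long r;
	if (__builtin_add_overflow(c->v, 1, &r)) {
		fprintf(stderr, "refcount overflow: trapping\n");
		abort();	/* the hardened kernel kills the offender */
	}
	c->v = r;
}

static void unchecked_inc(unchecked_long_t *c)
{
	c->v++;			/* wrap is defined and harmless here */
}

int main(void)
{
	unchecked_long_t stat = { (unsigned long)-1 };
	unchecked_inc(&stat);
	printf("stat wrapped to %lu\n", stat.v);	/* prints 0 */

	checked_long_t ref = { LONG_MAX };
	checked_inc(&ref);	/* aborts */
	return 0;
}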
73765diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
73766index b508016..237cfe5 100644
73767--- a/include/linux/mod_devicetable.h
73768+++ b/include/linux/mod_devicetable.h
73769@@ -13,7 +13,7 @@
73770 typedef unsigned long kernel_ulong_t;
73771 #endif
73772
73773-#define PCI_ANY_ID (~0)
73774+#define PCI_ANY_ID ((__u16)~0)
73775
73776 struct pci_device_id {
73777 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
73778@@ -139,7 +139,7 @@ struct usb_device_id {
73779 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
73780 #define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400
73781
73782-#define HID_ANY_ID (~0)
73783+#define HID_ANY_ID (~0U)
73784 #define HID_BUS_ANY 0xffff
73785 #define HID_GROUP_ANY 0x0000
73786
73787@@ -465,7 +465,7 @@ struct dmi_system_id {
73788 const char *ident;
73789 struct dmi_strmatch matches[4];
73790 void *driver_data;
73791-};
73792+} __do_const;
73793 /*
73794 * struct dmi_device_id appears during expansion of
73795 * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it
73796diff --git a/include/linux/module.h b/include/linux/module.h
73797index 46f1ea0..a34ca37 100644
73798--- a/include/linux/module.h
73799+++ b/include/linux/module.h
73800@@ -17,9 +17,11 @@
73801 #include <linux/moduleparam.h>
73802 #include <linux/tracepoint.h>
73803 #include <linux/export.h>
73804+#include <linux/fs.h>
73805
73806 #include <linux/percpu.h>
73807 #include <asm/module.h>
73808+#include <asm/pgtable.h>
73809
73810 /* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
73811 #define MODULE_SIG_STRING "~Module signature appended~\n"
73812@@ -54,12 +56,13 @@ struct module_attribute {
73813 int (*test)(struct module *);
73814 void (*free)(struct module *);
73815 };
73816+typedef struct module_attribute __no_const module_attribute_no_const;
73817
73818 struct module_version_attribute {
73819 struct module_attribute mattr;
73820 const char *module_name;
73821 const char *version;
73822-} __attribute__ ((__aligned__(sizeof(void *))));
73823+} __do_const __attribute__ ((__aligned__(sizeof(void *))));
73824
73825 extern ssize_t __modver_version_show(struct module_attribute *,
73826 struct module_kobject *, char *);
73827@@ -232,7 +235,7 @@ struct module
73828
73829 /* Sysfs stuff. */
73830 struct module_kobject mkobj;
73831- struct module_attribute *modinfo_attrs;
73832+ module_attribute_no_const *modinfo_attrs;
73833 const char *version;
73834 const char *srcversion;
73835 struct kobject *holders_dir;
73836@@ -281,19 +284,16 @@ struct module
73837 int (*init)(void);
73838
73839 /* If this is non-NULL, vfree after init() returns */
73840- void *module_init;
73841+ void *module_init_rx, *module_init_rw;
73842
73843 /* Here is the actual code + data, vfree'd on unload. */
73844- void *module_core;
73845+ void *module_core_rx, *module_core_rw;
73846
73847 /* Here are the sizes of the init and core sections */
73848- unsigned int init_size, core_size;
73849+ unsigned int init_size_rw, core_size_rw;
73850
73851 /* The size of the executable code in each section. */
73852- unsigned int init_text_size, core_text_size;
73853-
73854- /* Size of RO sections of the module (text+rodata) */
73855- unsigned int init_ro_size, core_ro_size;
73856+ unsigned int init_size_rx, core_size_rx;
73857
73858 /* Arch-specific module values */
73859 struct mod_arch_specific arch;
73860@@ -349,6 +349,10 @@ struct module
73861 #ifdef CONFIG_EVENT_TRACING
73862 struct ftrace_event_call **trace_events;
73863 unsigned int num_trace_events;
73864+ struct file_operations trace_id;
73865+ struct file_operations trace_enable;
73866+ struct file_operations trace_format;
73867+ struct file_operations trace_filter;
73868 #endif
73869 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
73870 unsigned int num_ftrace_callsites;
73871@@ -396,16 +400,46 @@ bool is_module_address(unsigned long addr);
73872 bool is_module_percpu_address(unsigned long addr);
73873 bool is_module_text_address(unsigned long addr);
73874
73875+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
73876+{
73877+
73878+#ifdef CONFIG_PAX_KERNEXEC
73879+ if (ktla_ktva(addr) >= (unsigned long)start &&
73880+ ktla_ktva(addr) < (unsigned long)start + size)
73881+ return 1;
73882+#endif
73883+
73884+ return ((void *)addr >= start && (void *)addr < start + size);
73885+}
73886+
73887+static inline int within_module_core_rx(unsigned long addr, const struct module *mod)
73888+{
73889+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
73890+}
73891+
73892+static inline int within_module_core_rw(unsigned long addr, const struct module *mod)
73893+{
73894+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
73895+}
73896+
73897+static inline int within_module_init_rx(unsigned long addr, const struct module *mod)
73898+{
73899+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
73900+}
73901+
73902+static inline int within_module_init_rw(unsigned long addr, const struct module *mod)
73903+{
73904+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
73905+}
73906+
73907 static inline int within_module_core(unsigned long addr, const struct module *mod)
73908 {
73909- return (unsigned long)mod->module_core <= addr &&
73910- addr < (unsigned long)mod->module_core + mod->core_size;
73911+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
73912 }
73913
73914 static inline int within_module_init(unsigned long addr, const struct module *mod)
73915 {
73916- return (unsigned long)mod->module_init <= addr &&
73917- addr < (unsigned long)mod->module_init + mod->init_size;
73918+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
73919 }
73920
73921 /* Search for module by name: must hold module_mutex. */
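[Note] The module layout changes are the heart of KERNEXEC's module support: the single module_core/module_init regions (with RO sizes tracked on the side) become disjoint RX and RW allocations, so no module page is ever writable and executable at once, and within_module_core()/within_module_init() must test both halves — plus, via ktla_ktva(), the alternate view of the text mapping. A self-contained model of the containment logic, with invented 64-bit addresses:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified split layout: one module, two disjoint regions. */
struct module_layout {
	uintptr_t core_rx, core_rx_size;	/* text + rodata, mapped R-X */
	uintptr_t core_rw, core_rw_size;	/* data + bss,    mapped RW- */
};

static bool within(uintptr_t addr, uintptr_t start, uintptr_t size)
{
	return addr >= start && addr < start + size;
}

/* Mirrors the patched within_module_core(): a hit in either half counts. */
static bool within_module_core(uintptr_t addr, const struct module_layout *m)
{
	return within(addr, m->core_rx, m->core_rx_size) ||
	       within(addr, m->core_rw, m->core_rw_size);
}

int main(void)
{
	struct module_layout m = {
		.core_rx = 0xffffffffa0000000UL, .core_rx_size = 0x2000,
		.core_rw = 0xffffc90000400000UL, .core_rw_size = 0x1000,
	};
	printf("text hit: %d\n", within_module_core(0xffffffffa0000100UL, &m));
	printf("data hit: %d\n", within_module_core(0xffffc90000400080UL, &m));
	printf("miss    : %d\n", within_module_core(0xffffffffa0002000UL, &m));
	return 0;
}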
73922diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
73923index 560ca53..ef621ef 100644
73924--- a/include/linux/moduleloader.h
73925+++ b/include/linux/moduleloader.h
73926@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
73927 sections. Returns NULL on failure. */
73928 void *module_alloc(unsigned long size);
73929
73930+#ifdef CONFIG_PAX_KERNEXEC
73931+void *module_alloc_exec(unsigned long size);
73932+#else
73933+#define module_alloc_exec(x) module_alloc(x)
73934+#endif
73935+
73936 /* Free memory returned from module_alloc. */
73937 void module_free(struct module *mod, void *module_region);
73938
73939+#ifdef CONFIG_PAX_KERNEXEC
73940+void module_free_exec(struct module *mod, void *module_region);
73941+#else
73942+#define module_free_exec(x, y) module_free((x), (y))
73943+#endif
73944+
73945 /*
73946 * Apply the given relocation to the (simplified) ELF. Return -error
73947 * or 0.
73948@@ -45,7 +57,9 @@ static inline int apply_relocate(Elf_Shdr *sechdrs,
73949 unsigned int relsec,
73950 struct module *me)
73951 {
73952+#ifdef CONFIG_MODULES
73953 printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
73954+#endif
73955 return -ENOEXEC;
73956 }
73957 #endif
73958@@ -67,7 +81,9 @@ static inline int apply_relocate_add(Elf_Shdr *sechdrs,
73959 unsigned int relsec,
73960 struct module *me)
73961 {
73962+#ifdef CONFIG_MODULES
73963 printk(KERN_ERR "module %s: RELA relocation unsupported\n", me->name);
73964+#endif
73965 return -ENOEXEC;
73966 }
73967 #endif
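[Note] moduleloader.h adds the allocator pairing that feeds the split above: executable sections come from module_alloc_exec() and return through module_free_exec(); everything else keeps using module_alloc()/module_free(). A userspace analogue of the W^X discipline using mmap/mprotect — the byte sequence and sizes are invented:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

static void *alloc_rw(size_t size)
{
	return mmap(NULL, size, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
}

static void *alloc_rx(size_t size, const void *code, size_t code_len)
{
	void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return p;
	memcpy(p, code, code_len);		  /* populate while still RW */
	mprotect(p, size, PROT_READ | PROT_EXEC); /* then drop write forever */
	return p;
}

int main(void)
{
	static const unsigned char ret_insn[] = { 0xc3 };  /* x86 "ret" */
	void *data = alloc_rw(4096);
	void *text = alloc_rx(4096, ret_insn, sizeof(ret_insn));

	printf("rw at %p, rx at %p: never both on one page\n", data, text);
	munmap(data, 4096);
	munmap(text, 4096);
	return 0;
}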
73968diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
73969index 137b419..fe663ec 100644
73970--- a/include/linux/moduleparam.h
73971+++ b/include/linux/moduleparam.h
73972@@ -284,7 +284,7 @@ static inline void __kernel_param_unlock(void)
73973 * @len is usually just sizeof(string).
73974 */
73975 #define module_param_string(name, string, len, perm) \
73976- static const struct kparam_string __param_string_##name \
73977+ static const struct kparam_string __param_string_##name __used \
73978 = { len, string }; \
73979 __module_param_call(MODULE_PARAM_PREFIX, name, \
73980 &param_ops_string, \
73981@@ -423,7 +423,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
73982 */
73983 #define module_param_array_named(name, array, type, nump, perm) \
73984 param_check_##type(name, &(array)[0]); \
73985- static const struct kparam_array __param_arr_##name \
73986+ static const struct kparam_array __param_arr_##name __used \
73987 = { .max = ARRAY_SIZE(array), .num = nump, \
73988 .ops = &param_ops_##type, \
73989 .elemsize = sizeof(array[0]), .elem = array }; \
73990diff --git a/include/linux/namei.h b/include/linux/namei.h
73991index 5a5ff57..5ae5070 100644
73992--- a/include/linux/namei.h
73993+++ b/include/linux/namei.h
73994@@ -19,7 +19,7 @@ struct nameidata {
73995 unsigned seq;
73996 int last_type;
73997 unsigned depth;
73998- char *saved_names[MAX_NESTED_LINKS + 1];
73999+ const char *saved_names[MAX_NESTED_LINKS + 1];
74000 };
74001
74002 /*
74003@@ -84,12 +84,12 @@ extern void unlock_rename(struct dentry *, struct dentry *);
74004
74005 extern void nd_jump_link(struct nameidata *nd, struct path *path);
74006
74007-static inline void nd_set_link(struct nameidata *nd, char *path)
74008+static inline void nd_set_link(struct nameidata *nd, const char *path)
74009 {
74010 nd->saved_names[nd->depth] = path;
74011 }
74012
74013-static inline char *nd_get_link(struct nameidata *nd)
74014+static inline const char *nd_get_link(const struct nameidata *nd)
74015 {
74016 return nd->saved_names[nd->depth];
74017 }
74018diff --git a/include/linux/net.h b/include/linux/net.h
74019index 99c9f0c..e1cf296 100644
74020--- a/include/linux/net.h
74021+++ b/include/linux/net.h
74022@@ -183,7 +183,7 @@ struct net_proto_family {
74023 int (*create)(struct net *net, struct socket *sock,
74024 int protocol, int kern);
74025 struct module *owner;
74026-};
74027+} __do_const;
74028
74029 struct iovec;
74030 struct kvec;
74031diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
74032index 96e4c21..9cc8278 100644
74033--- a/include/linux/netdevice.h
74034+++ b/include/linux/netdevice.h
74035@@ -1026,6 +1026,7 @@ struct net_device_ops {
74036 int (*ndo_change_carrier)(struct net_device *dev,
74037 bool new_carrier);
74038 };
74039+typedef struct net_device_ops __no_const net_device_ops_no_const;
74040
74041 /*
74042 * The DEVICE structure.
74043@@ -1094,7 +1095,7 @@ struct net_device {
74044 int iflink;
74045
74046 struct net_device_stats stats;
74047- atomic_long_t rx_dropped; /* dropped packets by core network
74048+ atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
74049 * Do not use this in drivers.
74050 */
74051
74052diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
74053index 0060fde..481c6ae 100644
74054--- a/include/linux/netfilter.h
74055+++ b/include/linux/netfilter.h
74056@@ -82,7 +82,7 @@ struct nf_sockopt_ops {
74057 #endif
74058 /* Use the module struct to lock set/get code in place */
74059 struct module *owner;
74060-};
74061+} __do_const;
74062
74063 /* Function to register/unregister hook points. */
74064 int nf_register_hook(struct nf_hook_ops *reg);
74065diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
74066index d80e275..c3510b8 100644
74067--- a/include/linux/netfilter/ipset/ip_set.h
74068+++ b/include/linux/netfilter/ipset/ip_set.h
74069@@ -124,7 +124,7 @@ struct ip_set_type_variant {
74070 /* Return true if "b" set is the same as "a"
74071 * according to the create set parameters */
74072 bool (*same_set)(const struct ip_set *a, const struct ip_set *b);
74073-};
74074+} __do_const;
74075
74076 /* The core set type structure */
74077 struct ip_set_type {
74078diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
74079index cadb740..d7c37c0 100644
74080--- a/include/linux/netfilter/nfnetlink.h
74081+++ b/include/linux/netfilter/nfnetlink.h
74082@@ -16,7 +16,7 @@ struct nfnl_callback {
74083 const struct nlattr * const cda[]);
74084 const struct nla_policy *policy; /* netlink attribute policy */
74085 const u_int16_t attr_count; /* number of nlattr's */
74086-};
74087+} __do_const;
74088
74089 struct nfnetlink_subsystem {
74090 const char *name;
74091diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
74092new file mode 100644
74093index 0000000..33f4af8
74094--- /dev/null
74095+++ b/include/linux/netfilter/xt_gradm.h
74096@@ -0,0 +1,9 @@
74097+#ifndef _LINUX_NETFILTER_XT_GRADM_H
74098+#define _LINUX_NETFILTER_XT_GRADM_H 1
74099+
74100+struct xt_gradm_mtinfo {
74101+ __u16 flags;
74102+ __u16 invflags;
74103+};
74104+
74105+#endif
74106diff --git a/include/linux/nls.h b/include/linux/nls.h
74107index 5dc635f..35f5e11 100644
74108--- a/include/linux/nls.h
74109+++ b/include/linux/nls.h
74110@@ -31,7 +31,7 @@ struct nls_table {
74111 const unsigned char *charset2upper;
74112 struct module *owner;
74113 struct nls_table *next;
74114-};
74115+} __do_const;
74116
74117 /* this value hold the maximum octet of charset */
74118 #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
74119diff --git a/include/linux/notifier.h b/include/linux/notifier.h
74120index d14a4c3..a078786 100644
74121--- a/include/linux/notifier.h
74122+++ b/include/linux/notifier.h
74123@@ -54,7 +54,8 @@ struct notifier_block {
74124 notifier_fn_t notifier_call;
74125 struct notifier_block __rcu *next;
74126 int priority;
74127-};
74128+} __do_const;
74129+typedef struct notifier_block __no_const notifier_block_no_const;
74130
74131 struct atomic_notifier_head {
74132 spinlock_t lock;
74133diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
74134index a4c5624..79d6d88 100644
74135--- a/include/linux/oprofile.h
74136+++ b/include/linux/oprofile.h
74137@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
74138 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
74139 char const * name, ulong * val);
74140
74141-/** Create a file for read-only access to an atomic_t. */
74142+/** Create a file for read-only access to an atomic_unchecked_t. */
74143 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
74144- char const * name, atomic_t * val);
74145+ char const * name, atomic_unchecked_t * val);
74146
74147 /** create a directory */
74148 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
74149diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
74150index 8db71dc..a76bf2c 100644
74151--- a/include/linux/pci_hotplug.h
74152+++ b/include/linux/pci_hotplug.h
74153@@ -80,7 +80,8 @@ struct hotplug_slot_ops {
74154 int (*get_attention_status) (struct hotplug_slot *slot, u8 *value);
74155 int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
74156 int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
74157-};
74158+} __do_const;
74159+typedef struct hotplug_slot_ops __no_const hotplug_slot_ops_no_const;
74160
74161 /**
74162 * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
74163diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
74164index c5b6dbf..b124155 100644
74165--- a/include/linux/perf_event.h
74166+++ b/include/linux/perf_event.h
74167@@ -318,8 +318,8 @@ struct perf_event {
74168
74169 enum perf_event_active_state state;
74170 unsigned int attach_state;
74171- local64_t count;
74172- atomic64_t child_count;
74173+ local64_t count; /* PaX: fix it one day */
74174+ atomic64_unchecked_t child_count;
74175
74176 /*
74177 * These are the total time in nanoseconds that the event
74178@@ -370,8 +370,8 @@ struct perf_event {
74179 * These accumulate total time (in nanoseconds) that children
74180 * events have been enabled and running, respectively.
74181 */
74182- atomic64_t child_total_time_enabled;
74183- atomic64_t child_total_time_running;
74184+ atomic64_unchecked_t child_total_time_enabled;
74185+ atomic64_unchecked_t child_total_time_running;
74186
74187 /*
74188 * Protect attach/detach and child_list:
74189@@ -692,7 +692,7 @@ static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64
74190 entry->ip[entry->nr++] = ip;
74191 }
74192
74193-extern int sysctl_perf_event_paranoid;
74194+extern int sysctl_perf_event_legitimately_concerned;
74195 extern int sysctl_perf_event_mlock;
74196 extern int sysctl_perf_event_sample_rate;
74197
74198@@ -700,19 +700,24 @@ extern int perf_proc_update_handler(struct ctl_table *table, int write,
74199 void __user *buffer, size_t *lenp,
74200 loff_t *ppos);
74201
74202+static inline bool perf_paranoid_any(void)
74203+{
74204+ return sysctl_perf_event_legitimately_concerned > 2;
74205+}
74206+
74207 static inline bool perf_paranoid_tracepoint_raw(void)
74208 {
74209- return sysctl_perf_event_paranoid > -1;
74210+ return sysctl_perf_event_legitimately_concerned > -1;
74211 }
74212
74213 static inline bool perf_paranoid_cpu(void)
74214 {
74215- return sysctl_perf_event_paranoid > 0;
74216+ return sysctl_perf_event_legitimately_concerned > 0;
74217 }
74218
74219 static inline bool perf_paranoid_kernel(void)
74220 {
74221- return sysctl_perf_event_paranoid > 1;
74222+ return sysctl_perf_event_legitimately_concerned > 1;
74223 }
74224
74225 extern void perf_event_init(void);
74226@@ -806,7 +811,7 @@ static inline void perf_restore_debug_store(void) { }
74227 */
74228 #define perf_cpu_notifier(fn) \
74229 do { \
74230- static struct notifier_block fn##_nb __cpuinitdata = \
74231+ static struct notifier_block fn##_nb = \
74232 { .notifier_call = fn, .priority = CPU_PRI_PERF }; \
74233 unsigned long cpu = smp_processor_id(); \
74234 unsigned long flags; \
74235@@ -826,7 +831,7 @@ struct perf_pmu_events_attr {
74236 struct device_attribute attr;
74237 u64 id;
74238 const char *event_str;
74239-};
74240+} __do_const;
74241
74242 #define PMU_EVENT_ATTR(_name, _var, _id, _show) \
74243 static struct perf_pmu_events_attr _var = { \
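[Note] The perf hunk renames sysctl_perf_event_paranoid (so out-of-tree code poking it fails to build and gets audited) and adds a fourth level: above 2, perf_paranoid_any() reports that unprivileged perf usage should be refused outright. The thresholds, mirrored in runnable form:

#include <stdbool.h>
#include <stdio.h>

static int paranoid_level;	/* stands in for the sysctl */

static bool paranoid_tracepoint_raw(void) { return paranoid_level > -1; }
static bool paranoid_cpu(void)            { return paranoid_level > 0;  }
static bool paranoid_kernel(void)         { return paranoid_level > 1;  }
static bool paranoid_any(void)            { return paranoid_level > 2;  } /* new */

int main(void)
{
	for (paranoid_level = -1; paranoid_level <= 3; paranoid_level++)
		printf("level %2d: raw=%d cpu=%d kernel=%d any=%d\n",
		       paranoid_level,
		       paranoid_tracepoint_raw(), paranoid_cpu(),
		       paranoid_kernel(), paranoid_any());
	return 0;
}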
74244diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
74245index b8809fe..ae4ccd0 100644
74246--- a/include/linux/pipe_fs_i.h
74247+++ b/include/linux/pipe_fs_i.h
74248@@ -47,10 +47,10 @@ struct pipe_inode_info {
74249 struct mutex mutex;
74250 wait_queue_head_t wait;
74251 unsigned int nrbufs, curbuf, buffers;
74252- unsigned int readers;
74253- unsigned int writers;
74254- unsigned int files;
74255- unsigned int waiting_writers;
74256+ atomic_t readers;
74257+ atomic_t writers;
74258+ atomic_t files;
74259+ atomic_t waiting_writers;
74260 unsigned int r_counter;
74261 unsigned int w_counter;
74262 struct page *tmp_page;
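[Note] pipe_inode_info's readers/writers/files/waiting_writers become atomic_t: these counts are adjusted from multiple tasks, and a plain ++ compiles to a read-modify-write that loses updates when two contexts race. The standard two-thread demonstration of the lost-update problem (build with -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define N 1000000

static unsigned int plain_count;
static atomic_uint atomic_count;

static void *worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < N; i++) {
		plain_count++;				/* racy: load, add, store */
		atomic_fetch_add(&atomic_count, 1);	/* atomic RMW */
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;
	pthread_create(&a, NULL, worker, NULL);
	pthread_create(&b, NULL, worker, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("plain : %u (expected %u)\n", plain_count, 2u * N);
	printf("atomic: %u\n", atomic_load(&atomic_count));
	return 0;
}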
74263diff --git a/include/linux/platform_data/usb-ehci-s5p.h b/include/linux/platform_data/usb-ehci-s5p.h
74264index 5f28cae..3d23723 100644
74265--- a/include/linux/platform_data/usb-ehci-s5p.h
74266+++ b/include/linux/platform_data/usb-ehci-s5p.h
74267@@ -14,7 +14,7 @@
74268 struct s5p_ehci_platdata {
74269 int (*phy_init)(struct platform_device *pdev, int type);
74270 int (*phy_exit)(struct platform_device *pdev, int type);
74271-};
74272+} __no_const;
74273
74274 extern void s5p_ehci_set_platdata(struct s5p_ehci_platdata *pd);
74275
74276diff --git a/include/linux/platform_data/usb-ohci-exynos.h b/include/linux/platform_data/usb-ohci-exynos.h
74277index c256c59..8ea94c7 100644
74278--- a/include/linux/platform_data/usb-ohci-exynos.h
74279+++ b/include/linux/platform_data/usb-ohci-exynos.h
74280@@ -14,7 +14,7 @@
74281 struct exynos4_ohci_platdata {
74282 int (*phy_init)(struct platform_device *pdev, int type);
74283 int (*phy_exit)(struct platform_device *pdev, int type);
74284-};
74285+} __no_const;
74286
74287 extern void exynos4_ohci_set_platdata(struct exynos4_ohci_platdata *pd);
74288
74289diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
74290index 7c1d252..c5c773e 100644
74291--- a/include/linux/pm_domain.h
74292+++ b/include/linux/pm_domain.h
74293@@ -48,7 +48,7 @@ struct gpd_dev_ops {
74294
74295 struct gpd_cpu_data {
74296 unsigned int saved_exit_latency;
74297- struct cpuidle_state *idle_state;
74298+ cpuidle_state_no_const *idle_state;
74299 };
74300
74301 struct generic_pm_domain {
74302diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
74303index 7d7e09e..8671ef8 100644
74304--- a/include/linux/pm_runtime.h
74305+++ b/include/linux/pm_runtime.h
74306@@ -104,7 +104,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
74307
74308 static inline void pm_runtime_mark_last_busy(struct device *dev)
74309 {
74310- ACCESS_ONCE(dev->power.last_busy) = jiffies;
74311+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
74312 }
74313
74314 #else /* !CONFIG_PM_RUNTIME */
74315diff --git a/include/linux/pnp.h b/include/linux/pnp.h
74316index 195aafc..49a7bc2 100644
74317--- a/include/linux/pnp.h
74318+++ b/include/linux/pnp.h
74319@@ -297,7 +297,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
74320 struct pnp_fixup {
74321 char id[7];
74322 void (*quirk_function) (struct pnp_dev * dev); /* fixup function */
74323-};
74324+} __do_const;
74325
74326 /* config parameters */
74327 #define PNP_CONFIG_NORMAL 0x0001
74328diff --git a/include/linux/poison.h b/include/linux/poison.h
74329index 2110a81..13a11bb 100644
74330--- a/include/linux/poison.h
74331+++ b/include/linux/poison.h
74332@@ -19,8 +19,8 @@
74333 * under normal circumstances, used to verify that nobody uses
74334 * non-initialized list entries.
74335 */
74336-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
74337-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
74338+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
74339+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
74340
74341 /********** include/linux/timer.h **********/
74342 /*
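[Note] The LIST_POISON repointing is exploit-driven: the stock values are low addresses a user process may be able to map (subject to vm.mmap_min_addr), so a kernel bug that dereferences a poisoned list pointer lands in attacker-controlled memory; the replacements sit where userland cannot normally place a mapping (on 32-bit they fall in the kernel's address range). The mapping half of that is demonstrable from userspace — MAP_FIXED_NOREPLACE needs a reasonably modern kernel and is used here only to probe safely:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

#define OLD_LIST_POISON1 0x00100100UL

int main(void)
{
	void *want = (void *)(OLD_LIST_POISON1 & ~0xfffUL);
	void *got = mmap(want, 4096, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE,
			 -1, 0);
	if (got == want)
		printf("mapped a page covering %#lx: the old poison address "
		       "is reachable from userspace\n", OLD_LIST_POISON1);
	else
		perror("mmap");
	return 0;
}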
74343diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
74344index c0f44c2..1572583 100644
74345--- a/include/linux/power/smartreflex.h
74346+++ b/include/linux/power/smartreflex.h
74347@@ -238,7 +238,7 @@ struct omap_sr_class_data {
74348 int (*notify)(struct omap_sr *sr, u32 status);
74349 u8 notify_flags;
74350 u8 class_type;
74351-};
74352+} __do_const;
74353
74354 /**
74355 * struct omap_sr_nvalue_table - Smartreflex n-target value info
74356diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h
74357index 4ea1d37..80f4b33 100644
74358--- a/include/linux/ppp-comp.h
74359+++ b/include/linux/ppp-comp.h
74360@@ -84,7 +84,7 @@ struct compressor {
74361 struct module *owner;
74362 /* Extra skb space needed by the compressor algorithm */
74363 unsigned int comp_extra;
74364-};
74365+} __do_const;
74366
74367 /*
74368 * The return value from decompress routine is the length of the
74369diff --git a/include/linux/preempt.h b/include/linux/preempt.h
74370index f5d4723..a6ea2fa 100644
74371--- a/include/linux/preempt.h
74372+++ b/include/linux/preempt.h
74373@@ -18,8 +18,13 @@
74374 # define sub_preempt_count(val) do { preempt_count() -= (val); } while (0)
74375 #endif
74376
74377+#define raw_add_preempt_count(val) do { preempt_count() += (val); } while (0)
74378+#define raw_sub_preempt_count(val) do { preempt_count() -= (val); } while (0)
74379+
74380 #define inc_preempt_count() add_preempt_count(1)
74381+#define raw_inc_preempt_count() raw_add_preempt_count(1)
74382 #define dec_preempt_count() sub_preempt_count(1)
74383+#define raw_dec_preempt_count() raw_sub_preempt_count(1)
74384
74385 #define preempt_count() (current_thread_info()->preempt_count)
74386
74387@@ -64,6 +69,12 @@ do { \
74388 barrier(); \
74389 } while (0)
74390
74391+#define raw_preempt_disable() \
74392+do { \
74393+ raw_inc_preempt_count(); \
74394+ barrier(); \
74395+} while (0)
74396+
74397 #define sched_preempt_enable_no_resched() \
74398 do { \
74399 barrier(); \
74400@@ -72,6 +83,12 @@ do { \
74401
74402 #define preempt_enable_no_resched() sched_preempt_enable_no_resched()
74403
74404+#define raw_preempt_enable_no_resched() \
74405+do { \
74406+ barrier(); \
74407+ raw_dec_preempt_count(); \
74408+} while (0)
74409+
74410 #define preempt_enable() \
74411 do { \
74412 preempt_enable_no_resched(); \
74413@@ -116,8 +133,10 @@ do { \
74414 * region.
74415 */
74416 #define preempt_disable() barrier()
74417+#define raw_preempt_disable() barrier()
74418 #define sched_preempt_enable_no_resched() barrier()
74419 #define preempt_enable_no_resched() barrier()
74420+#define raw_preempt_enable_no_resched() barrier()
74421 #define preempt_enable() barrier()
74422
74423 #define preempt_disable_notrace() barrier()
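[Note] preempt.h gains raw_ variants that bypass add_preempt_count()/sub_preempt_count(), which may themselves be instrumented (preempt tracing, DEBUG_PREEMPT); code that must not recurse into that instrumentation manipulates the counter directly. A toy model of the distinction, with an invented trace hook:

#include <stdio.h>

static int preempt_count;

static void trace_hook(const char *what) { printf("trace: %s\n", what); }

#define add_preempt_count(v)	 do { trace_hook("add"); preempt_count += (v); } while (0)
#define raw_add_preempt_count(v) do { preempt_count += (v); } while (0)

int main(void)
{
	add_preempt_count(1);		/* instrumented path fires the hook */
	raw_add_preempt_count(1);	/* counter only, safe inside the hook */
	printf("count = %d\n", preempt_count);
	return 0;
}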
74424diff --git a/include/linux/printk.h b/include/linux/printk.h
74425index 22c7052..ad3fa0a 100644
74426--- a/include/linux/printk.h
74427+++ b/include/linux/printk.h
74428@@ -106,6 +106,8 @@ static inline __printf(1, 2) __cold
74429 void early_printk(const char *s, ...) { }
74430 #endif
74431
74432+extern int kptr_restrict;
74433+
74434 #ifdef CONFIG_PRINTK
74435 asmlinkage __printf(5, 0)
74436 int vprintk_emit(int facility, int level,
74437@@ -140,7 +142,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
74438
74439 extern int printk_delay_msec;
74440 extern int dmesg_restrict;
74441-extern int kptr_restrict;
74442
74443 extern void wake_up_klogd(void);
74444
74445diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
74446index 608e60a..c26f864 100644
74447--- a/include/linux/proc_fs.h
74448+++ b/include/linux/proc_fs.h
74449@@ -34,6 +34,19 @@ static inline struct proc_dir_entry *proc_create(
74450 return proc_create_data(name, mode, parent, proc_fops, NULL);
74451 }
74452
74453+static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
74454+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
74455+{
74456+#ifdef CONFIG_GRKERNSEC_PROC_USER
74457+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
74458+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
74459+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
74460+#else
74461+ return proc_create_data(name, mode, parent, proc_fops, NULL);
74462+#endif
74463+}
74464+
74465+
74466 extern void proc_set_size(struct proc_dir_entry *, loff_t);
74467 extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t);
74468 extern void *PDE_DATA(const struct inode *);
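[Note] proc_create_grsec() is a convenience wrapper that overrides the requested mode according to the GRKERNSEC_PROC_* policy: root-only (S_IRUSR) or root plus a configured group (S_IRUSR | S_IRGRP), falling back to the caller's mode otherwise. Modeled in plain C, with octal integers standing in for umode_t:

#include <stdio.h>

enum proc_policy { PROC_DEFAULT, PROC_USER, PROC_USERGROUP };

static unsigned int grsec_proc_mode(enum proc_policy p, unsigned int requested)
{
	switch (p) {
	case PROC_USER:      return 0400;	/* S_IRUSR */
	case PROC_USERGROUP: return 0440;	/* S_IRUSR | S_IRGRP */
	default:             return requested;
	}
}

int main(void)
{
	printf("default:   %o\n", grsec_proc_mode(PROC_DEFAULT, 0644));
	printf("user:      %o\n", grsec_proc_mode(PROC_USER, 0644));
	printf("usergroup: %o\n", grsec_proc_mode(PROC_USERGROUP, 0644));
	return 0;
}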
74469diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
74470index 34a1e10..03a6d03 100644
74471--- a/include/linux/proc_ns.h
74472+++ b/include/linux/proc_ns.h
74473@@ -14,7 +14,7 @@ struct proc_ns_operations {
74474 void (*put)(void *ns);
74475 int (*install)(struct nsproxy *nsproxy, void *ns);
74476 unsigned int (*inum)(void *ns);
74477-};
74478+} __do_const;
74479
74480 struct proc_ns {
74481 void *ns;
74482diff --git a/include/linux/random.h b/include/linux/random.h
74483index 3b9377d..61b506a 100644
74484--- a/include/linux/random.h
74485+++ b/include/linux/random.h
74486@@ -32,6 +32,11 @@ void prandom_seed(u32 seed);
74487 u32 prandom_u32_state(struct rnd_state *);
74488 void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
74489
74490+static inline unsigned long pax_get_random_long(void)
74491+{
74492+ return prandom_u32() + (sizeof(long) > 4 ? (unsigned long)prandom_u32() << 32 : 0);
74493+}
74494+
74495 /*
74496 * Handle minimum values for seeds
74497 */
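[Note] pax_get_random_long() widens prandom_u32() to a full long: one draw fills the low 32 bits and, when long is 64-bit, a second draw fills the high half. The same construction in userspace — rand() is only a stand-in and supplies fewer than 32 random bits, and the double shift sidesteps the undefined 32-bit-wide shift by 32 on ILP32 targets:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static unsigned long get_random_long(void)
{
	unsigned long r = (uint32_t)rand();		/* low half */
	if (sizeof(long) > 4)				/* high half on LP64 */
		r += (unsigned long)(uint32_t)rand() << 16 << 16;
	return r;
}

int main(void)
{
	srand(1);
	printf("%#lx\n%#lx\n", get_random_long(), get_random_long());
	return 0;
}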
74498diff --git a/include/linux/rculist.h b/include/linux/rculist.h
74499index f4b1001..8ddb2b6 100644
74500--- a/include/linux/rculist.h
74501+++ b/include/linux/rculist.h
74502@@ -44,6 +44,9 @@ extern void __list_add_rcu(struct list_head *new,
74503 struct list_head *prev, struct list_head *next);
74504 #endif
74505
74506+extern void __pax_list_add_rcu(struct list_head *new,
74507+ struct list_head *prev, struct list_head *next);
74508+
74509 /**
74510 * list_add_rcu - add a new entry to rcu-protected list
74511 * @new: new entry to be added
74512@@ -65,6 +68,11 @@ static inline void list_add_rcu(struct list_head *new, struct list_head *head)
74513 __list_add_rcu(new, head, head->next);
74514 }
74515
74516+static inline void pax_list_add_rcu(struct list_head *new, struct list_head *head)
74517+{
74518+ __pax_list_add_rcu(new, head, head->next);
74519+}
74520+
74521 /**
74522 * list_add_tail_rcu - add a new entry to rcu-protected list
74523 * @new: new entry to be added
74524@@ -87,6 +95,12 @@ static inline void list_add_tail_rcu(struct list_head *new,
74525 __list_add_rcu(new, head->prev, head);
74526 }
74527
74528+static inline void pax_list_add_tail_rcu(struct list_head *new,
74529+ struct list_head *head)
74530+{
74531+ __pax_list_add_rcu(new, head->prev, head);
74532+}
74533+
74534 /**
74535 * list_del_rcu - deletes entry from list without re-initialization
74536 * @entry: the element to delete from the list.
74537@@ -117,6 +131,8 @@ static inline void list_del_rcu(struct list_head *entry)
74538 entry->prev = LIST_POISON2;
74539 }
74540
74541+extern void pax_list_del_rcu(struct list_head *entry);
74542+
74543 /**
74544 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
74545 * @n: the element to delete from the hash list.
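[Note] The pax_list_* entry points exist because, with KERNEXEC constifying structures such as notifier blocks, some list nodes now live in read-only memory; the __pax_ implementations (provided elsewhere in the patch) briefly re-enable kernel writes around the pointer stores. A userspace analogue with mprotect() playing the role of the open/close-kernel toggling:

#include <stdio.h>
#include <sys/mman.h>

struct node { struct node *next, *prev; };

static void ro(void *p) { mprotect(p, 4096, PROT_READ); }
static void rw(void *p) { mprotect(p, 4096, PROT_READ | PROT_WRITE); }

int main(void)
{
	void *page = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (page == MAP_FAILED)
		return 1;

	struct node *head = page;
	head->next = head->prev = head;	/* init while writable */
	ro(page);			/* now protected, like KERNEXEC data */

	static struct node n;
	rw(page);			/* pax_open_kernel() analogue */
	n.next = head->next;
	n.prev = head;
	head->next->prev = &n;		/* writes into the protected page */
	head->next = &n;
	ro(page);			/* pax_close_kernel() analogue */

	printf("inserted: %d (stray writes to the head now fault)\n",
	       head->next == &n);
	return 0;
}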
74546diff --git a/include/linux/reboot.h b/include/linux/reboot.h
74547index 23b3630..e1bc12b 100644
74548--- a/include/linux/reboot.h
74549+++ b/include/linux/reboot.h
74550@@ -18,9 +18,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
74551 * Architecture-specific implementations of sys_reboot commands.
74552 */
74553
74554-extern void machine_restart(char *cmd);
74555-extern void machine_halt(void);
74556-extern void machine_power_off(void);
74557+extern void machine_restart(char *cmd) __noreturn;
74558+extern void machine_halt(void) __noreturn;
74559+extern void machine_power_off(void) __noreturn;
74560
74561 extern void machine_shutdown(void);
74562 struct pt_regs;
74563@@ -31,9 +31,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
74564 */
74565
74566 extern void kernel_restart_prepare(char *cmd);
74567-extern void kernel_restart(char *cmd);
74568-extern void kernel_halt(void);
74569-extern void kernel_power_off(void);
74570+extern void kernel_restart(char *cmd) __noreturn;
74571+extern void kernel_halt(void) __noreturn;
74572+extern void kernel_power_off(void) __noreturn;
74573
74574 extern int C_A_D; /* for sysctl */
74575 void ctrl_alt_del(void);
74576@@ -47,7 +47,7 @@ extern int orderly_poweroff(bool force);
74577 * Emergency restart, callable from an interrupt handler.
74578 */
74579
74580-extern void emergency_restart(void);
74581+extern void emergency_restart(void) __noreturn;
74582 #include <asm/emergency-restart.h>
74583
74584 #endif /* _LINUX_REBOOT_H */
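[Note] Annotating the halt/restart family __noreturn is a correctness and codegen hint: callers need no dead fall-through handling, and the compiler can flag unreachable code after the call. In miniature:

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the annotated kernel functions. */
__attribute__((noreturn)) static void machine_halt_model(void)
{
	puts("halting");
	exit(0);
}

int main(void)
{
	machine_halt_model();
	/* anything placed here is provably unreachable */
}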
74585diff --git a/include/linux/regset.h b/include/linux/regset.h
74586index 8e0c9fe..ac4d221 100644
74587--- a/include/linux/regset.h
74588+++ b/include/linux/regset.h
74589@@ -161,7 +161,8 @@ struct user_regset {
74590 unsigned int align;
74591 unsigned int bias;
74592 unsigned int core_note_type;
74593-};
74594+} __do_const;
74595+typedef struct user_regset __no_const user_regset_no_const;
74596
74597 /**
74598 * struct user_regset_view - available regsets
74599diff --git a/include/linux/relay.h b/include/linux/relay.h
74600index d7c8359..818daf5 100644
74601--- a/include/linux/relay.h
74602+++ b/include/linux/relay.h
74603@@ -157,7 +157,7 @@ struct rchan_callbacks
74604 * The callback should return 0 if successful, negative if not.
74605 */
74606 int (*remove_buf_file)(struct dentry *dentry);
74607-};
74608+} __no_const;
74609
74610 /*
74611 * CONFIG_RELAY kernel API, kernel/relay.c
74612diff --git a/include/linux/rio.h b/include/linux/rio.h
74613index 18e0993..8ab5b21 100644
74614--- a/include/linux/rio.h
74615+++ b/include/linux/rio.h
74616@@ -345,7 +345,7 @@ struct rio_ops {
74617 int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
74618 u64 rstart, u32 size, u32 flags);
74619 void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
74620-};
74621+} __no_const;
74622
74623 #define RIO_RESOURCE_MEM 0x00000100
74624 #define RIO_RESOURCE_DOORBELL 0x00000200
74625diff --git a/include/linux/rmap.h b/include/linux/rmap.h
74626index 6dacb93..6174423 100644
74627--- a/include/linux/rmap.h
74628+++ b/include/linux/rmap.h
74629@@ -145,8 +145,8 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
74630 void anon_vma_init(void); /* create anon_vma_cachep */
74631 int anon_vma_prepare(struct vm_area_struct *);
74632 void unlink_anon_vmas(struct vm_area_struct *);
74633-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
74634-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
74635+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
74636+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
74637
74638 static inline void anon_vma_merge(struct vm_area_struct *vma,
74639 struct vm_area_struct *next)
74640diff --git a/include/linux/sched.h b/include/linux/sched.h
74641index 178a8d9..450bf11 100644
74642--- a/include/linux/sched.h
74643+++ b/include/linux/sched.h
74644@@ -62,6 +62,7 @@ struct bio_list;
74645 struct fs_struct;
74646 struct perf_event_context;
74647 struct blk_plug;
74648+struct linux_binprm;
74649
74650 /*
74651 * List of flags we want to share for kernel threads,
74652@@ -303,7 +304,7 @@ extern char __sched_text_start[], __sched_text_end[];
74653 extern int in_sched_functions(unsigned long addr);
74654
74655 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
74656-extern signed long schedule_timeout(signed long timeout);
74657+extern signed long schedule_timeout(signed long timeout) __intentional_overflow(-1);
74658 extern signed long schedule_timeout_interruptible(signed long timeout);
74659 extern signed long schedule_timeout_killable(signed long timeout);
74660 extern signed long schedule_timeout_uninterruptible(signed long timeout);
74661@@ -314,6 +315,19 @@ struct nsproxy;
74662 struct user_namespace;
74663
74664 #ifdef CONFIG_MMU
74665+
74666+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
74667+extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
74668+#else
74669+static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
74670+{
74671+ return 0;
74672+}
74673+#endif
74674+
74675+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
74676+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
74677+
74678 extern void arch_pick_mmap_layout(struct mm_struct *mm);
74679 extern unsigned long
74680 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
74681@@ -591,6 +605,17 @@ struct signal_struct {
74682 #ifdef CONFIG_TASKSTATS
74683 struct taskstats *stats;
74684 #endif
74685+
74686+#ifdef CONFIG_GRKERNSEC
74687+ u32 curr_ip;
74688+ u32 saved_ip;
74689+ u32 gr_saddr;
74690+ u32 gr_daddr;
74691+ u16 gr_sport;
74692+ u16 gr_dport;
74693+ u8 used_accept:1;
74694+#endif
74695+
74696 #ifdef CONFIG_AUDIT
74697 unsigned audit_tty;
74698 unsigned audit_tty_log_passwd;
74699@@ -671,6 +696,14 @@ struct user_struct {
74700 struct key *session_keyring; /* UID's default session keyring */
74701 #endif
74702
74703+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
74704+ unsigned char kernel_banned;
74705+#endif
74706+#ifdef CONFIG_GRKERNSEC_BRUTE
74707+ unsigned char suid_banned;
74708+ unsigned long suid_ban_expires;
74709+#endif
74710+
74711 /* Hash table maintenance information */
74712 struct hlist_node uidhash_node;
74713 kuid_t uid;
74714@@ -1158,8 +1191,8 @@ struct task_struct {
74715 struct list_head thread_group;
74716
74717 struct completion *vfork_done; /* for vfork() */
74718- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
74719- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
74720+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
74721+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
74722
74723 cputime_t utime, stime, utimescaled, stimescaled;
74724 cputime_t gtime;
74725@@ -1184,11 +1217,6 @@ struct task_struct {
74726 struct task_cputime cputime_expires;
74727 struct list_head cpu_timers[3];
74728
74729-/* process credentials */
74730- const struct cred __rcu *real_cred; /* objective and real subjective task
74731- * credentials (COW) */
74732- const struct cred __rcu *cred; /* effective (overridable) subjective task
74733- * credentials (COW) */
74734 char comm[TASK_COMM_LEN]; /* executable name excluding path
74735 - access with [gs]et_task_comm (which lock
74736 it with task_lock())
74737@@ -1205,6 +1233,10 @@ struct task_struct {
74738 #endif
74739 /* CPU-specific state of this task */
74740 struct thread_struct thread;
74741+/* thread_info moved to task_struct */
74742+#ifdef CONFIG_X86
74743+ struct thread_info tinfo;
74744+#endif
74745 /* filesystem information */
74746 struct fs_struct *fs;
74747 /* open file information */
74748@@ -1278,6 +1310,10 @@ struct task_struct {
74749 gfp_t lockdep_reclaim_gfp;
74750 #endif
74751
74752+/* process credentials */
74753+ const struct cred __rcu *real_cred; /* objective and real subjective task
74754+ * credentials (COW) */
74755+
74756 /* journalling filesystem info */
74757 void *journal_info;
74758
74759@@ -1316,6 +1352,10 @@ struct task_struct {
74760 /* cg_list protected by css_set_lock and tsk->alloc_lock */
74761 struct list_head cg_list;
74762 #endif
74763+
74764+ const struct cred __rcu *cred; /* effective (overridable) subjective task
74765+ * credentials (COW) */
74766+
74767 #ifdef CONFIG_FUTEX
74768 struct robust_list_head __user *robust_list;
74769 #ifdef CONFIG_COMPAT
74770@@ -1416,8 +1456,76 @@ struct task_struct {
74771 unsigned int sequential_io;
74772 unsigned int sequential_io_avg;
74773 #endif
74774+
74775+#ifdef CONFIG_GRKERNSEC
74776+ /* grsecurity */
74777+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
74778+ u64 exec_id;
74779+#endif
74780+#ifdef CONFIG_GRKERNSEC_SETXID
74781+ const struct cred *delayed_cred;
74782+#endif
74783+ struct dentry *gr_chroot_dentry;
74784+ struct acl_subject_label *acl;
74785+ struct acl_role_label *role;
74786+ struct file *exec_file;
74787+ unsigned long brute_expires;
74788+ u16 acl_role_id;
74789+ /* is this the task that authenticated to the special role */
74790+ u8 acl_sp_role;
74791+ u8 is_writable;
74792+ u8 brute;
74793+ u8 gr_is_chrooted;
74794+#endif
74795+
74796 };
74797
74798+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
74799+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
74800+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
74801+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
74802+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
74803+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
74804+
74805+#ifdef CONFIG_PAX_SOFTMODE
74806+extern int pax_softmode;
74807+#endif
74808+
74809+extern int pax_check_flags(unsigned long *);
74810+
74811+/* if tsk != current then task_lock must be held on it */
74812+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
74813+static inline unsigned long pax_get_flags(struct task_struct *tsk)
74814+{
74815+ if (likely(tsk->mm))
74816+ return tsk->mm->pax_flags;
74817+ else
74818+ return 0UL;
74819+}
74820+
74821+/* if tsk != current then task_lock must be held on it */
74822+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
74823+{
74824+ if (likely(tsk->mm)) {
74825+ tsk->mm->pax_flags = flags;
74826+ return 0;
74827+ }
74828+ return -EINVAL;
74829+}
74830+#endif
74831+
74832+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
74833+extern void pax_set_initial_flags(struct linux_binprm *bprm);
74834+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
74835+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
74836+#endif
74837+
74838+struct path;
74839+extern char *pax_get_path(const struct path *path, char *buf, int buflen);
74840+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
74841+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
74842+extern void pax_report_refcount_overflow(struct pt_regs *regs);
74843+
74844 /* Future-safe accessor for struct task_struct's cpus_allowed. */
74845 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
74846
74847@@ -1476,7 +1584,7 @@ struct pid_namespace;
74848 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
74849 struct pid_namespace *ns);
74850
74851-static inline pid_t task_pid_nr(struct task_struct *tsk)
74852+static inline pid_t task_pid_nr(const struct task_struct *tsk)
74853 {
74854 return tsk->pid;
74855 }
74856@@ -1919,7 +2027,9 @@ void yield(void);
74857 extern struct exec_domain default_exec_domain;
74858
74859 union thread_union {
74860+#ifndef CONFIG_X86
74861 struct thread_info thread_info;
74862+#endif
74863 unsigned long stack[THREAD_SIZE/sizeof(long)];
74864 };
74865
74866@@ -1952,6 +2062,7 @@ extern struct pid_namespace init_pid_ns;
74867 */
74868
74869 extern struct task_struct *find_task_by_vpid(pid_t nr);
74870+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
74871 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
74872 struct pid_namespace *ns);
74873
74874@@ -2118,7 +2229,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
74875 extern void exit_itimers(struct signal_struct *);
74876 extern void flush_itimer_signals(void);
74877
74878-extern void do_group_exit(int);
74879+extern __noreturn void do_group_exit(int);
74880
74881 extern int allow_signal(int);
74882 extern int disallow_signal(int);
74883@@ -2309,9 +2420,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
74884
74885 #endif
74886
74887-static inline int object_is_on_stack(void *obj)
74888+static inline int object_starts_on_stack(void *obj)
74889 {
74890- void *stack = task_stack_page(current);
74891+ const void *stack = task_stack_page(current);
74892
74893 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
74894 }
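[Note] Among the sched.h changes, note the rename of object_is_on_stack() to object_starts_on_stack(): the helper only proves the object's first byte lies within the current stack, which matters once callers (e.g. usercopy checks) really need full [obj, obj+len) containment. The same diff also pulls real_cred and cred apart in task_struct and moves thread_info into the task on x86 — layout hardening, presumably so a single linear overwrite cannot reach both credential pointers. The start-versus-containment distinction, runnable:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define THREAD_SIZE 8192

static bool object_starts_on_stack(const void *stack, const void *obj)
{
	const char *s = stack, *o = obj;
	return o >= s && o < s + THREAD_SIZE;
}

static bool object_within_stack(const void *stack, const void *obj, size_t len)
{
	const char *s = stack, *o = obj;
	return o >= s && len <= THREAD_SIZE && o + len <= s + THREAD_SIZE;
}

int main(void)
{
	char stack[THREAD_SIZE];
	char *obj = stack + THREAD_SIZE - 16;	/* 16 bytes before the end */

	printf("starts on stack: %d\n", object_starts_on_stack(stack, obj));
	printf("32 bytes fit   : %d\n", object_within_stack(stack, obj, 32));
	return 0;
}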
74895diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
74896index bf8086b..962b035 100644
74897--- a/include/linux/sched/sysctl.h
74898+++ b/include/linux/sched/sysctl.h
74899@@ -30,6 +30,7 @@ enum { sysctl_hung_task_timeout_secs = 0 };
74900 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
74901
74902 extern int sysctl_max_map_count;
74903+extern unsigned long sysctl_heap_stack_gap;
74904
74905 extern unsigned int sysctl_sched_latency;
74906 extern unsigned int sysctl_sched_min_granularity;
74907diff --git a/include/linux/security.h b/include/linux/security.h
74908index 4686491..2bd210e 100644
74909--- a/include/linux/security.h
74910+++ b/include/linux/security.h
74911@@ -26,6 +26,7 @@
74912 #include <linux/capability.h>
74913 #include <linux/slab.h>
74914 #include <linux/err.h>
74915+#include <linux/grsecurity.h>
74916
74917 struct linux_binprm;
74918 struct cred;
74919diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
74920index 2da29ac..aac448ec 100644
74921--- a/include/linux/seq_file.h
74922+++ b/include/linux/seq_file.h
74923@@ -26,6 +26,9 @@ struct seq_file {
74924 struct mutex lock;
74925 const struct seq_operations *op;
74926 int poll_event;
74927+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
74928+ u64 exec_id;
74929+#endif
74930 #ifdef CONFIG_USER_NS
74931 struct user_namespace *user_ns;
74932 #endif
74933@@ -38,6 +41,7 @@ struct seq_operations {
74934 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
74935 int (*show) (struct seq_file *m, void *v);
74936 };
74937+typedef struct seq_operations __no_const seq_operations_no_const;
74938
74939 #define SEQ_SKIP 1
74940
74941diff --git a/include/linux/shm.h b/include/linux/shm.h
74942index 429c199..4d42e38 100644
74943--- a/include/linux/shm.h
74944+++ b/include/linux/shm.h
74945@@ -21,6 +21,10 @@ struct shmid_kernel /* private to the kernel */
74946
74947 /* The task created the shm object. NULL if the task is dead. */
74948 struct task_struct *shm_creator;
74949+#ifdef CONFIG_GRKERNSEC
74950+ time_t shm_createtime;
74951+ pid_t shm_lapid;
74952+#endif
74953 };
74954
74955 /* shm_mode upper byte flags */
74956diff --git a/include/linux/signal.h b/include/linux/signal.h
74957index d897484..323ba98 100644
74958--- a/include/linux/signal.h
74959+++ b/include/linux/signal.h
74960@@ -433,6 +433,7 @@ void signals_init(void);
74961
74962 int restore_altstack(const stack_t __user *);
74963 int __save_altstack(stack_t __user *, unsigned long);
74964+void __save_altstack_ex(stack_t __user *, unsigned long);
74965
74966 #ifdef CONFIG_PROC_FS
74967 struct seq_file;
74968diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
74969index dec1748..112c1f9 100644
74970--- a/include/linux/skbuff.h
74971+++ b/include/linux/skbuff.h
74972@@ -640,7 +640,7 @@ extern bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
74973 extern struct sk_buff *__alloc_skb(unsigned int size,
74974 gfp_t priority, int flags, int node);
74975 extern struct sk_buff *build_skb(void *data, unsigned int frag_size);
74976-static inline struct sk_buff *alloc_skb(unsigned int size,
74977+static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
74978 gfp_t priority)
74979 {
74980 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
74981@@ -756,7 +756,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
74982 */
74983 static inline int skb_queue_empty(const struct sk_buff_head *list)
74984 {
74985- return list->next == (struct sk_buff *)list;
74986+ return list->next == (const struct sk_buff *)list;
74987 }
74988
74989 /**
74990@@ -769,7 +769,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
74991 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
74992 const struct sk_buff *skb)
74993 {
74994- return skb->next == (struct sk_buff *)list;
74995+ return skb->next == (const struct sk_buff *)list;
74996 }
74997
74998 /**
74999@@ -782,7 +782,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
75000 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
75001 const struct sk_buff *skb)
75002 {
75003- return skb->prev == (struct sk_buff *)list;
75004+ return skb->prev == (const struct sk_buff *)list;
75005 }
75006
75007 /**
75008@@ -1848,7 +1848,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
75009 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
75010 */
75011 #ifndef NET_SKB_PAD
75012-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
75013+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
75014 #endif
75015
75016 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
75017@@ -2443,7 +2443,7 @@ extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
75018 int noblock, int *err);
75019 extern unsigned int datagram_poll(struct file *file, struct socket *sock,
75020 struct poll_table_struct *wait);
75021-extern int skb_copy_datagram_iovec(const struct sk_buff *from,
75022+extern int __intentional_overflow(0) skb_copy_datagram_iovec(const struct sk_buff *from,
75023 int offset, struct iovec *to,
75024 int size);
75025 extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
75026@@ -2733,6 +2733,9 @@ static inline void nf_reset(struct sk_buff *skb)
75027 nf_bridge_put(skb->nf_bridge);
75028 skb->nf_bridge = NULL;
75029 #endif
75030+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
75031+ skb->nf_trace = 0;
75032+#endif
75033 }
75034
75035 static inline void nf_reset_trace(struct sk_buff *skb)
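[Note] The NET_SKB_PAD change to max(_AC(32,UL), L1_CACHE_BYTES) is presumably about the kernel's type-checked max(): when L1_CACHE_BYTES expands to an unsigned long, a bare 32 (signed int) no longer type-matches, so the literal is spelled with an explicit UL suffix via _AC(). The trick behind that type check, reproduced in userspace (GNU C statement expressions):

#include <stdio.h>

#define typecheck_max(a, b) ({					\
	__typeof__(a) _a = (a);					\
	__typeof__(b) _b = (b);					\
	(void)(&_a == &_b);	/* mismatched types warn here;	\
				   kernel builds make it fatal */ \
	_a > _b ? _a : _b; })

int main(void)
{
	unsigned long cacheline = 64UL;
	/* typecheck_max(32, cacheline) would warn: int vs unsigned long */
	printf("%lu\n", typecheck_max(32UL, cacheline));
	return 0;
}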
75036diff --git a/include/linux/slab.h b/include/linux/slab.h
75037index 0c62175..f016ac1 100644
75038--- a/include/linux/slab.h
75039+++ b/include/linux/slab.h
75040@@ -12,15 +12,29 @@
75041 #include <linux/gfp.h>
75042 #include <linux/types.h>
75043 #include <linux/workqueue.h>
75044-
75045+#include <linux/err.h>
75046
75047 /*
75048 * Flags to pass to kmem_cache_create().
75049 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
75050 */
75051 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
75052+
75053+#ifdef CONFIG_PAX_USERCOPY_SLABS
75054+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
75055+#else
75056+#define SLAB_USERCOPY 0x00000000UL
75057+#endif
75058+
75059 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
75060 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
75061+
75062+#ifdef CONFIG_PAX_MEMORY_SANITIZE
75063+#define SLAB_NO_SANITIZE 0x00001000UL /* PaX: Do not sanitize objs on free */
75064+#else
75065+#define SLAB_NO_SANITIZE 0x00000000UL
75066+#endif
75067+
75068 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
75069 #define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
75070 #define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
75071@@ -89,10 +103,13 @@
75072 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
75073 * Both make kfree a no-op.
75074 */
75075-#define ZERO_SIZE_PTR ((void *)16)
75076+#define ZERO_SIZE_PTR \
75077+({ \
75078+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
75079+ (void *)(-MAX_ERRNO-1L); \
75080+})
75081
75082-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
75083- (unsigned long)ZERO_SIZE_PTR)
75084+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
75085
75086
75087 struct mem_cgroup;
75088@@ -132,6 +149,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
75089 void kfree(const void *);
75090 void kzfree(const void *);
75091 size_t ksize(const void *);
75092+const char *check_heap_object(const void *ptr, unsigned long n);
75093+bool is_usercopy_object(const void *ptr);
75094
75095 /*
75096 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
75097@@ -164,7 +183,7 @@ struct kmem_cache {
75098 unsigned int align; /* Alignment as calculated */
75099 unsigned long flags; /* Active flags on the slab */
75100 const char *name; /* Slab name for sysfs */
75101- int refcount; /* Use counter */
75102+ atomic_t refcount; /* Use counter */
75103 void (*ctor)(void *); /* Called on object slot creation */
75104 struct list_head list; /* List of all slab caches on the system */
75105 };
75106@@ -226,6 +245,10 @@ extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
75107 extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
75108 #endif
75109
75110+#ifdef CONFIG_PAX_USERCOPY_SLABS
75111+extern struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
75112+#endif
75113+
75114 /*
75115 * Figure out which kmalloc slab an allocation of a certain size
75116 * belongs to.
75117@@ -234,7 +257,7 @@ extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
75118 * 2 = 120 .. 192 bytes
75119 * n = 2^(n-1) .. 2^n -1
75120 */
75121-static __always_inline int kmalloc_index(size_t size)
75122+static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
75123 {
75124 if (!size)
75125 return 0;
75126@@ -406,6 +429,7 @@ void print_slabinfo_header(struct seq_file *m);
75127 * for general use, and so are not documented here. For a full list of
75128 * potential flags, always refer to linux/gfp.h.
75129 */
75130+
75131 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
75132 {
75133 if (size != 0 && n > SIZE_MAX / size)
75134@@ -465,7 +489,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
75135 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
75136 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
75137 (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
75138-extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
75139+extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long) __size_overflow(1);
75140 #define kmalloc_track_caller(size, flags) \
75141 __kmalloc_track_caller(size, flags, _RET_IP_)
75142 #else
75143@@ -485,7 +509,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
75144 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
75145 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
75146 (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
75147-extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
75148+extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long) __size_overflow(1);
75149 #define kmalloc_node_track_caller(size, flags, node) \
75150 __kmalloc_node_track_caller(size, flags, node, \
75151 _RET_IP_)
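The ZERO_SIZE_PTR rework in the slab.h hunk above moves the poison value for zero-byte allocations from (void *)16 to one page below the top of the address space, directly under the ERR_PTR range [-MAX_ERRNO, -1], where a dereference is guaranteed to fault; the BUILD_BUG_ON only asserts that MAX_ERRNO fits inside a page offset. A minimal userspace sketch of the resulting single-compare test, assuming a 64-bit build with MAX_ERRNO == 4095 (zero_or_null() is a stand-in for the patched ZERO_OR_NULL_PTR()):

#include <stdio.h>

#define MAX_ERRNO       4095UL
/* the patched ZERO_SIZE_PTR as an integer: -4096 == 0xfffffffffffff000 */
#define ZERO_SIZE_VAL   (0UL - MAX_ERRNO - 1)

/* one unsigned compare: NULL wraps (0 - 1 == ULONG_MAX) and so lands
 * above the threshold together with ZERO_SIZE_PTR and every ERR_PTR */
static int zero_or_null(unsigned long x)
{
        return x - 1 >= ZERO_SIZE_VAL - 1;
}

int main(void)
{
        printf("NULL         -> %d\n", zero_or_null(0));                   /* 1 */
        printf("ZERO_SIZE    -> %d\n", zero_or_null(ZERO_SIZE_VAL));       /* 1 */
        printf("ERR_PTR(-12) -> %d\n", zero_or_null(0UL - 12));            /* 1 */
        printf("real pointer -> %d\n", zero_or_null(0xffff880000001000UL));/* 0 */
        return 0;
}

A side effect worth noting: unlike the old <= (void *)16 test, the new form also classifies IS_ERR() values as zero-or-null.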
75152diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
75153index cd40158..4e2f7af 100644
75154--- a/include/linux/slab_def.h
75155+++ b/include/linux/slab_def.h
75156@@ -50,7 +50,7 @@ struct kmem_cache {
75157 /* 4) cache creation/removal */
75158 const char *name;
75159 struct list_head list;
75160- int refcount;
75161+ atomic_t refcount;
75162 int object_size;
75163 int align;
75164
75165@@ -66,10 +66,14 @@ struct kmem_cache {
75166 unsigned long node_allocs;
75167 unsigned long node_frees;
75168 unsigned long node_overflow;
75169- atomic_t allochit;
75170- atomic_t allocmiss;
75171- atomic_t freehit;
75172- atomic_t freemiss;
75173+ atomic_unchecked_t allochit;
75174+ atomic_unchecked_t allocmiss;
75175+ atomic_unchecked_t freehit;
75176+ atomic_unchecked_t freemiss;
75177+#ifdef CONFIG_PAX_MEMORY_SANITIZE
75178+ atomic_unchecked_t sanitized;
75179+ atomic_unchecked_t not_sanitized;
75180+#endif
75181
75182 /*
75183 * If debugging is enabled, then the allocator can add additional
75184@@ -103,7 +107,7 @@ struct kmem_cache {
75185 };
75186
75187 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
75188-void *__kmalloc(size_t size, gfp_t flags);
75189+void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
75190
75191 #ifdef CONFIG_TRACING
75192 extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
75193@@ -136,6 +140,13 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
75194 cachep = kmalloc_dma_caches[i];
75195 else
75196 #endif
75197+
75198+#ifdef CONFIG_PAX_USERCOPY_SLABS
75199+ if (flags & GFP_USERCOPY)
75200+ cachep = kmalloc_usercopy_caches[i];
75201+ else
75202+#endif
75203+
75204 cachep = kmalloc_caches[i];
75205
75206 ret = kmem_cache_alloc_trace(cachep, flags, size);
75207@@ -146,7 +157,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
75208 }
75209
75210 #ifdef CONFIG_NUMA
75211-extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
75212+extern void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
75213 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
75214
75215 #ifdef CONFIG_TRACING
75216@@ -185,6 +196,13 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
75217 cachep = kmalloc_dma_caches[i];
75218 else
75219 #endif
75220+
75221+#ifdef CONFIG_PAX_USERCOPY_SLABS
75222+ if (flags & GFP_USERCOPY)
75223+ cachep = kmalloc_usercopy_caches[i];
75224+ else
75225+#endif
75226+
75227 cachep = kmalloc_caches[i];
75228
75229 return kmem_cache_alloc_node_trace(cachep, flags, node, size);
75230diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
75231index f28e14a..7831211 100644
75232--- a/include/linux/slob_def.h
75233+++ b/include/linux/slob_def.h
75234@@ -11,7 +11,7 @@ static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
75235 return kmem_cache_alloc_node(cachep, flags, NUMA_NO_NODE);
75236 }
75237
75238-void *__kmalloc_node(size_t size, gfp_t flags, int node);
75239+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
75240
75241 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
75242 {
75243@@ -31,7 +31,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
75244 return __kmalloc_node(size, flags, NUMA_NO_NODE);
75245 }
75246
75247-static __always_inline void *__kmalloc(size_t size, gfp_t flags)
75248+static __always_inline __size_overflow(1) void *__kmalloc(size_t size, gfp_t flags)
75249 {
75250 return kmalloc(size, flags);
75251 }
75252diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
75253index 027276f..092bfe8 100644
75254--- a/include/linux/slub_def.h
75255+++ b/include/linux/slub_def.h
75256@@ -80,7 +80,7 @@ struct kmem_cache {
75257 struct kmem_cache_order_objects max;
75258 struct kmem_cache_order_objects min;
75259 gfp_t allocflags; /* gfp flags to use on each alloc */
75260- int refcount; /* Refcount for slab cache destroy */
75261+ atomic_t refcount; /* Refcount for slab cache destroy */
75262 void (*ctor)(void *);
75263 int inuse; /* Offset to metadata */
75264 int align; /* Alignment */
75265@@ -105,7 +105,7 @@ struct kmem_cache {
75266 };
75267
75268 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
75269-void *__kmalloc(size_t size, gfp_t flags);
75270+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
75271
75272 static __always_inline void *
75273 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
75274@@ -149,7 +149,7 @@ kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
75275 }
75276 #endif
75277
75278-static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
75279+static __always_inline __size_overflow(1) void *kmalloc_large(size_t size, gfp_t flags)
75280 {
75281 unsigned int order = get_order(size);
75282 return kmalloc_order_trace(size, flags, order);
75283@@ -175,7 +175,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
75284 }
75285
75286 #ifdef CONFIG_NUMA
75287-void *__kmalloc_node(size_t size, gfp_t flags, int node);
75288+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
75289 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
75290
75291 #ifdef CONFIG_TRACING
75292diff --git a/include/linux/smp.h b/include/linux/smp.h
75293index c848876..11e8a84 100644
75294--- a/include/linux/smp.h
75295+++ b/include/linux/smp.h
75296@@ -221,7 +221,9 @@ static inline void kick_all_cpus_sync(void) { }
75297 #endif
75298
75299 #define get_cpu() ({ preempt_disable(); smp_processor_id(); })
75300+#define raw_get_cpu() ({ raw_preempt_disable(); raw_smp_processor_id(); })
75301 #define put_cpu() preempt_enable()
75302+#define raw_put_cpu_no_resched() raw_preempt_enable_no_resched()
75303
75304 /*
75305 * Callback to arch code if there's nosmp or maxcpus=0 on the
75306diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
75307index 54f91d3..be2c379 100644
75308--- a/include/linux/sock_diag.h
75309+++ b/include/linux/sock_diag.h
75310@@ -11,7 +11,7 @@ struct sock;
75311 struct sock_diag_handler {
75312 __u8 family;
75313 int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
75314-};
75315+} __do_const;
75316
75317 int sock_diag_register(const struct sock_diag_handler *h);
75318 void sock_diag_unregister(const struct sock_diag_handler *h);
75319diff --git a/include/linux/sonet.h b/include/linux/sonet.h
75320index 680f9a3..f13aeb0 100644
75321--- a/include/linux/sonet.h
75322+++ b/include/linux/sonet.h
75323@@ -7,7 +7,7 @@
75324 #include <uapi/linux/sonet.h>
75325
75326 struct k_sonet_stats {
75327-#define __HANDLE_ITEM(i) atomic_t i
75328+#define __HANDLE_ITEM(i) atomic_unchecked_t i
75329 __SONET_ITEMS
75330 #undef __HANDLE_ITEM
75331 };
75332diff --git a/include/linux/sunrpc/addr.h b/include/linux/sunrpc/addr.h
75333index 07d8e53..dc934c9 100644
75334--- a/include/linux/sunrpc/addr.h
75335+++ b/include/linux/sunrpc/addr.h
75336@@ -23,9 +23,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
75337 {
75338 switch (sap->sa_family) {
75339 case AF_INET:
75340- return ntohs(((struct sockaddr_in *)sap)->sin_port);
75341+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
75342 case AF_INET6:
75343- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
75344+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
75345 }
75346 return 0;
75347 }
75348@@ -58,7 +58,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
75349 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
75350 const struct sockaddr *src)
75351 {
75352- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
75353+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
75354 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
75355
75356 dsin->sin_family = ssin->sin_family;
75357@@ -164,7 +164,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
75358 if (sa->sa_family != AF_INET6)
75359 return 0;
75360
75361- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
75362+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
75363 }
75364
75365 #endif /* _LINUX_SUNRPC_ADDR_H */
75366diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
75367index bfe11be..12bc8c4 100644
75368--- a/include/linux/sunrpc/clnt.h
75369+++ b/include/linux/sunrpc/clnt.h
75370@@ -96,7 +96,7 @@ struct rpc_procinfo {
75371 unsigned int p_timer; /* Which RTT timer to use */
75372 u32 p_statidx; /* Which procedure to account */
75373 const char * p_name; /* name of procedure */
75374-};
75375+} __do_const;
75376
75377 #ifdef __KERNEL__
75378
75379diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
75380index 1f0216b..6a4fa50 100644
75381--- a/include/linux/sunrpc/svc.h
75382+++ b/include/linux/sunrpc/svc.h
75383@@ -411,7 +411,7 @@ struct svc_procedure {
75384 unsigned int pc_count; /* call count */
75385 unsigned int pc_cachetype; /* cache info (NFS) */
75386 unsigned int pc_xdrressize; /* maximum size of XDR reply */
75387-};
75388+} __do_const;
75389
75390 /*
75391 * Function prototypes.
75392diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
75393index 0b8e3e6..33e0a01 100644
75394--- a/include/linux/sunrpc/svc_rdma.h
75395+++ b/include/linux/sunrpc/svc_rdma.h
75396@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
75397 extern unsigned int svcrdma_max_requests;
75398 extern unsigned int svcrdma_max_req_size;
75399
75400-extern atomic_t rdma_stat_recv;
75401-extern atomic_t rdma_stat_read;
75402-extern atomic_t rdma_stat_write;
75403-extern atomic_t rdma_stat_sq_starve;
75404-extern atomic_t rdma_stat_rq_starve;
75405-extern atomic_t rdma_stat_rq_poll;
75406-extern atomic_t rdma_stat_rq_prod;
75407-extern atomic_t rdma_stat_sq_poll;
75408-extern atomic_t rdma_stat_sq_prod;
75409+extern atomic_unchecked_t rdma_stat_recv;
75410+extern atomic_unchecked_t rdma_stat_read;
75411+extern atomic_unchecked_t rdma_stat_write;
75412+extern atomic_unchecked_t rdma_stat_sq_starve;
75413+extern atomic_unchecked_t rdma_stat_rq_starve;
75414+extern atomic_unchecked_t rdma_stat_rq_poll;
75415+extern atomic_unchecked_t rdma_stat_rq_prod;
75416+extern atomic_unchecked_t rdma_stat_sq_poll;
75417+extern atomic_unchecked_t rdma_stat_sq_prod;
75418
75419 #define RPCRDMA_VERSION 1
75420
75421diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
75422index ff374ab..7fd2ecb 100644
75423--- a/include/linux/sunrpc/svcauth.h
75424+++ b/include/linux/sunrpc/svcauth.h
75425@@ -109,7 +109,7 @@ struct auth_ops {
75426 int (*release)(struct svc_rqst *rq);
75427 void (*domain_release)(struct auth_domain *);
75428 int (*set_client)(struct svc_rqst *rq);
75429-};
75430+} __do_const;
75431
75432 #define SVC_GARBAGE 1
75433 #define SVC_SYSERR 2
75434diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
75435index a5ffd32..0935dea 100644
75436--- a/include/linux/swiotlb.h
75437+++ b/include/linux/swiotlb.h
75438@@ -60,7 +60,8 @@ extern void
75439
75440 extern void
75441 swiotlb_free_coherent(struct device *hwdev, size_t size,
75442- void *vaddr, dma_addr_t dma_handle);
75443+ void *vaddr, dma_addr_t dma_handle,
75444+ struct dma_attrs *attrs);
75445
75446 extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
75447 unsigned long offset, size_t size,
75448diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
75449index 4147d70..d356a10 100644
75450--- a/include/linux/syscalls.h
75451+++ b/include/linux/syscalls.h
75452@@ -97,8 +97,12 @@ struct sigaltstack;
75453 #define __MAP(n,...) __MAP##n(__VA_ARGS__)
75454
75455 #define __SC_DECL(t, a) t a
75456-#define __TYPE_IS_LL(t) (__same_type((t)0, 0LL) || __same_type((t)0, 0ULL))
75457-#define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a
75458+#define __TYPE_IS_SL(t) (__same_type((t)0, 0L))
75459+#define __TYPE_IS_UL(t) (__same_type((t)0, 0UL))
75460+#define __TYPE_IS_SLL(t) (__same_type((t)0, 0LL))
75461+#define __TYPE_IS_ULL(t) (__same_type((t)0, 0ULL))
75462+#define __TYPE_IS_LL(t) (__TYPE_IS_SLL(t) || __TYPE_IS_ULL(t))
75463+#define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), __builtin_choose_expr(__TYPE_IS_ULL(t), 0ULL, 0LL), __builtin_choose_expr(__TYPE_IS_UL(t), 0UL, 0L))) a
75464 #define __SC_CAST(t, a) (t) a
75465 #define __SC_ARGS(t, a) a
75466 #define __SC_TEST(t, a) (void)BUILD_BUG_ON_ZERO(!__TYPE_IS_LL(t) && sizeof(t) > sizeof(long))
75467@@ -362,11 +366,11 @@ asmlinkage long sys_sync(void);
75468 asmlinkage long sys_fsync(unsigned int fd);
75469 asmlinkage long sys_fdatasync(unsigned int fd);
75470 asmlinkage long sys_bdflush(int func, long data);
75471-asmlinkage long sys_mount(char __user *dev_name, char __user *dir_name,
75472- char __user *type, unsigned long flags,
75473+asmlinkage long sys_mount(const char __user *dev_name, const char __user *dir_name,
75474+ const char __user *type, unsigned long flags,
75475 void __user *data);
75476-asmlinkage long sys_umount(char __user *name, int flags);
75477-asmlinkage long sys_oldumount(char __user *name);
75478+asmlinkage long sys_umount(const char __user *name, int flags);
75479+asmlinkage long sys_oldumount(const char __user *name);
75480 asmlinkage long sys_truncate(const char __user *path, long length);
75481 asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
75482 asmlinkage long sys_stat(const char __user *filename,
75483@@ -578,7 +582,7 @@ asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *);
75484 asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
75485 asmlinkage long sys_send(int, void __user *, size_t, unsigned);
75486 asmlinkage long sys_sendto(int, void __user *, size_t, unsigned,
75487- struct sockaddr __user *, int);
75488+ struct sockaddr __user *, int) __intentional_overflow(0);
75489 asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags);
75490 asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
75491 unsigned int vlen, unsigned flags);
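The __SC_LONG rework above matters to the size_overflow plugin used elsewhere in this patch: syscall arguments are staged in a widened 64-bit temporary, and the stock macro widened everything through a signed type. A sketch of what the reworked macro yields for a few argument types; the size_t row is the interesting one, since __TYPE_IS_LL() never matched it:

/*
 *   argument type         old __SC_LONG temp    new __SC_LONG temp
 *   int                   long                  long
 *   unsigned int          long                  long
 *   size_t (64-bit)       long                  unsigned long
 *   unsigned long long    long long             unsigned long long
 *
 * Keeping the temporary unsigned for unsigned 64-bit types means the
 * overflow instrumentation never sees a spurious signed intermediate
 * for size arguments.
 */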
75492diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h
75493index 27b3b0b..e093dd9 100644
75494--- a/include/linux/syscore_ops.h
75495+++ b/include/linux/syscore_ops.h
75496@@ -16,7 +16,7 @@ struct syscore_ops {
75497 int (*suspend)(void);
75498 void (*resume)(void);
75499 void (*shutdown)(void);
75500-};
75501+} __do_const;
75502
75503 extern void register_syscore_ops(struct syscore_ops *ops);
75504 extern void unregister_syscore_ops(struct syscore_ops *ops);
75505diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
75506index 14a8ff2..af52bad 100644
75507--- a/include/linux/sysctl.h
75508+++ b/include/linux/sysctl.h
75509@@ -34,13 +34,13 @@ struct ctl_table_root;
75510 struct ctl_table_header;
75511 struct ctl_dir;
75512
75513-typedef struct ctl_table ctl_table;
75514-
75515 typedef int proc_handler (struct ctl_table *ctl, int write,
75516 void __user *buffer, size_t *lenp, loff_t *ppos);
75517
75518 extern int proc_dostring(struct ctl_table *, int,
75519 void __user *, size_t *, loff_t *);
75520+extern int proc_dostring_modpriv(struct ctl_table *, int,
75521+ void __user *, size_t *, loff_t *);
75522 extern int proc_dointvec(struct ctl_table *, int,
75523 void __user *, size_t *, loff_t *);
75524 extern int proc_dointvec_minmax(struct ctl_table *, int,
75525@@ -115,7 +115,9 @@ struct ctl_table
75526 struct ctl_table_poll *poll;
75527 void *extra1;
75528 void *extra2;
75529-};
75530+} __do_const;
75531+typedef struct ctl_table __no_const ctl_table_no_const;
75532+typedef struct ctl_table ctl_table;
75533
75534 struct ctl_node {
75535 struct rb_node node;
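This hunk shows the pattern behind the many __do_const annotations in this section: under the CONSTIFY_PLUGIN, a __do_const structure is compiled as if every instance were const, so ops and handler tables land in read-only memory. Sysctl tables need an escape hatch because several subsystems template and patch them at runtime, hence the __no_const typedef. A sketch, with demo_* as hypothetical names:

static struct ctl_table demo_fixed[] = {   /* constified: effectively .rodata */
        { .procname = "demo" },
        { }
};

static ctl_table_no_const *demo_runtime;   /* stays writable; this is the type
                                              the per-netns tables are switched
                                              to later in this diff */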
75536diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
75537index e2cee22..3ddb921 100644
75538--- a/include/linux/sysfs.h
75539+++ b/include/linux/sysfs.h
75540@@ -31,7 +31,8 @@ struct attribute {
75541 struct lock_class_key *key;
75542 struct lock_class_key skey;
75543 #endif
75544-};
75545+} __do_const;
75546+typedef struct attribute __no_const attribute_no_const;
75547
75548 /**
75549 * sysfs_attr_init - initialize a dynamically allocated sysfs attribute
75550@@ -59,8 +60,8 @@ struct attribute_group {
75551 umode_t (*is_visible)(struct kobject *,
75552 struct attribute *, int);
75553 struct attribute **attrs;
75554-};
75555-
75556+} __do_const;
75557+typedef struct attribute_group __no_const attribute_group_no_const;
75558
75559
75560 /**
75561@@ -107,7 +108,8 @@ struct bin_attribute {
75562 char *, loff_t, size_t);
75563 int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
75564 struct vm_area_struct *vma);
75565-};
75566+} __do_const;
75567+typedef struct bin_attribute __no_const bin_attribute_no_const;
75568
75569 /**
75570 * sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute
75571diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
75572index 7faf933..9b85a0c 100644
75573--- a/include/linux/sysrq.h
75574+++ b/include/linux/sysrq.h
75575@@ -16,6 +16,7 @@
75576
75577 #include <linux/errno.h>
75578 #include <linux/types.h>
75579+#include <linux/compiler.h>
75580
75581 /* Enable/disable SYSRQ support by default (0==no, 1==yes). */
75582 #define SYSRQ_DEFAULT_ENABLE 1
75583@@ -36,7 +37,7 @@ struct sysrq_key_op {
75584 char *help_msg;
75585 char *action_msg;
75586 int enable_mask;
75587-};
75588+} __do_const;
75589
75590 #ifdef CONFIG_MAGIC_SYSRQ
75591
75592diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
75593index e7e0473..7989295 100644
75594--- a/include/linux/thread_info.h
75595+++ b/include/linux/thread_info.h
75596@@ -148,6 +148,15 @@ static inline bool test_and_clear_restore_sigmask(void)
75597 #error "no set_restore_sigmask() provided and default one won't work"
75598 #endif
75599
75600+extern void __check_object_size(const void *ptr, unsigned long n, bool to_user);
75601+static inline void check_object_size(const void *ptr, unsigned long n, bool to_user)
75602+{
75603+#ifndef CONFIG_PAX_USERCOPY_DEBUG
75604+ if (!__builtin_constant_p(n))
75605+#endif
75606+ __check_object_size(ptr, n, to_user);
75607+}
75608+
75609 #endif /* __KERNEL__ */
75610
75611 #endif /* _LINUX_THREAD_INFO_H */
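check_object_size() is the PAX_USERCOPY entry point: the slow path __check_object_size(), added elsewhere in the patch, verifies that a user copy stays within a single slab or stack object. The inline skips the runtime walk for compile-time-constant sizes unless CONFIG_PAX_USERCOPY_DEBUG is set, since constant copies can be validated statically. A schematic of how an arch copy routine is expected to call it; example_copy_from_user() is a hypothetical wrapper, the real hooks live in the per-arch uaccess changes:

#include <linux/thread_info.h>
#include <linux/uaccess.h>

static inline unsigned long
example_copy_from_user(void *to, const void __user *from, unsigned long n)
{
        check_object_size(to, n, false);        /* false: the kernel buffer is
                                                   the destination of the copy */
        return __copy_from_user(to, from, n);
}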
75612diff --git a/include/linux/tty.h b/include/linux/tty.h
75613index 8780bd2..d1ae08b 100644
75614--- a/include/linux/tty.h
75615+++ b/include/linux/tty.h
75616@@ -194,7 +194,7 @@ struct tty_port {
75617 const struct tty_port_operations *ops; /* Port operations */
75618 spinlock_t lock; /* Lock protecting tty field */
75619 int blocked_open; /* Waiting to open */
75620- int count; /* Usage count */
75621+ atomic_t count; /* Usage count */
75622 wait_queue_head_t open_wait; /* Open waiters */
75623 wait_queue_head_t close_wait; /* Close waiters */
75624 wait_queue_head_t delta_msr_wait; /* Modem status change */
75625@@ -550,7 +550,7 @@ extern int tty_port_open(struct tty_port *port,
75626 struct tty_struct *tty, struct file *filp);
75627 static inline int tty_port_users(struct tty_port *port)
75628 {
75629- return port->count + port->blocked_open;
75630+ return atomic_read(&port->count) + port->blocked_open;
75631 }
75632
75633 extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
75634diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
75635index 756a609..b302dd6 100644
75636--- a/include/linux/tty_driver.h
75637+++ b/include/linux/tty_driver.h
75638@@ -285,7 +285,7 @@ struct tty_operations {
75639 void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
75640 #endif
75641 const struct file_operations *proc_fops;
75642-};
75643+} __do_const;
75644
75645 struct tty_driver {
75646 int magic; /* magic number for this structure */
75647diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
75648index 58390c7..95e214c 100644
75649--- a/include/linux/tty_ldisc.h
75650+++ b/include/linux/tty_ldisc.h
75651@@ -146,7 +146,7 @@ struct tty_ldisc_ops {
75652
75653 struct module *owner;
75654
75655- int refcount;
75656+ atomic_t refcount;
75657 };
75658
75659 struct tty_ldisc {
75660diff --git a/include/linux/types.h b/include/linux/types.h
75661index 4d118ba..c3ee9bf 100644
75662--- a/include/linux/types.h
75663+++ b/include/linux/types.h
75664@@ -176,10 +176,26 @@ typedef struct {
75665 int counter;
75666 } atomic_t;
75667
75668+#ifdef CONFIG_PAX_REFCOUNT
75669+typedef struct {
75670+ int counter;
75671+} atomic_unchecked_t;
75672+#else
75673+typedef atomic_t atomic_unchecked_t;
75674+#endif
75675+
75676 #ifdef CONFIG_64BIT
75677 typedef struct {
75678 long counter;
75679 } atomic64_t;
75680+
75681+#ifdef CONFIG_PAX_REFCOUNT
75682+typedef struct {
75683+ long counter;
75684+} atomic64_unchecked_t;
75685+#else
75686+typedef atomic64_t atomic64_unchecked_t;
75687+#endif
75688 #endif
75689
75690 struct list_head {
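atomic_unchecked_t is the opt-out half of CONFIG_PAX_REFCOUNT: the checked atomic_t operations are instrumented to trap on signed overflow, while counters that are allowed to wrap (statistics, ID and generation counters, as converted throughout this section) move to the unchecked type, which keeps stock semantics. A rough x86 sketch of the checked increment, modeled on the arch changes elsewhere in the patch; example_atomic_t stands in for atomic_t:

typedef struct { int counter; } example_atomic_t;

static inline void atomic_inc_checked(example_atomic_t *v)
{
        asm volatile("lock incl %0\n\t"
                     "jno 0f\n\t"               /* no signed overflow: done */
                     "lock decl %0\n\t"         /* undo the wrap */
                     "int $4\n"                 /* raise #OF, reported as a
                                                   refcount overflow */
                     "0:"
                     : "+m" (v->counter));
}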
75691diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
75692index 5ca0951..ab496a5 100644
75693--- a/include/linux/uaccess.h
75694+++ b/include/linux/uaccess.h
75695@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
75696 long ret; \
75697 mm_segment_t old_fs = get_fs(); \
75698 \
75699- set_fs(KERNEL_DS); \
75700 pagefault_disable(); \
75701- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
75702- pagefault_enable(); \
75703+ set_fs(KERNEL_DS); \
75704+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
75705 set_fs(old_fs); \
75706+ pagefault_enable(); \
75707 ret; \
75708 })
75709
75710diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
75711index 8e522cbc..aa8572d 100644
75712--- a/include/linux/uidgid.h
75713+++ b/include/linux/uidgid.h
75714@@ -197,4 +197,9 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
75715
75716 #endif /* CONFIG_USER_NS */
75717
75718+#define GR_GLOBAL_UID(x) from_kuid_munged(&init_user_ns, (x))
75719+#define GR_GLOBAL_GID(x) from_kgid_munged(&init_user_ns, (x))
75720+#define gr_is_global_root(x) uid_eq((x), GLOBAL_ROOT_UID)
75721+#define gr_is_global_nonroot(x) (!uid_eq((x), GLOBAL_ROOT_UID))
75722+
75723 #endif /* _LINUX_UIDGID_H */
75724diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
75725index 99c1b4d..562e6f3 100644
75726--- a/include/linux/unaligned/access_ok.h
75727+++ b/include/linux/unaligned/access_ok.h
75728@@ -4,34 +4,34 @@
75729 #include <linux/kernel.h>
75730 #include <asm/byteorder.h>
75731
75732-static inline u16 get_unaligned_le16(const void *p)
75733+static inline u16 __intentional_overflow(-1) get_unaligned_le16(const void *p)
75734 {
75735- return le16_to_cpup((__le16 *)p);
75736+ return le16_to_cpup((const __le16 *)p);
75737 }
75738
75739-static inline u32 get_unaligned_le32(const void *p)
75740+static inline u32 __intentional_overflow(-1) get_unaligned_le32(const void *p)
75741 {
75742- return le32_to_cpup((__le32 *)p);
75743+ return le32_to_cpup((const __le32 *)p);
75744 }
75745
75746-static inline u64 get_unaligned_le64(const void *p)
75747+static inline u64 __intentional_overflow(-1) get_unaligned_le64(const void *p)
75748 {
75749- return le64_to_cpup((__le64 *)p);
75750+ return le64_to_cpup((const __le64 *)p);
75751 }
75752
75753-static inline u16 get_unaligned_be16(const void *p)
75754+static inline u16 __intentional_overflow(-1) get_unaligned_be16(const void *p)
75755 {
75756- return be16_to_cpup((__be16 *)p);
75757+ return be16_to_cpup((const __be16 *)p);
75758 }
75759
75760-static inline u32 get_unaligned_be32(const void *p)
75761+static inline u32 __intentional_overflow(-1) get_unaligned_be32(const void *p)
75762 {
75763- return be32_to_cpup((__be32 *)p);
75764+ return be32_to_cpup((const __be32 *)p);
75765 }
75766
75767-static inline u64 get_unaligned_be64(const void *p)
75768+static inline u64 __intentional_overflow(-1) get_unaligned_be64(const void *p)
75769 {
75770- return be64_to_cpup((__be64 *)p);
75771+ return be64_to_cpup((const __be64 *)p);
75772 }
75773
75774 static inline void put_unaligned_le16(u16 val, void *p)
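The get_unaligned_* helpers above are pure byte reassembly, so any arithmetic wrap in them is by design, and the annotation tells the size_overflow plugin as much. As used in this patch, the argument appears to name what may overflow: 0 the return value, N >= 1 the Nth parameter (compare the (3,5) on dma_skb_copy_datagram_iovec later in this section), and -1 disables the instrumentation for the function altogether. A hypothetical declaration for illustration:

/* frob_buffer_len is made up; the attribute expands to nothing when
 * the size_overflow plugin is not enabled */
extern size_t frob_buffer_len(size_t hint) __intentional_overflow(1);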
75775diff --git a/include/linux/usb.h b/include/linux/usb.h
75776index a0bee5a..5533a52 100644
75777--- a/include/linux/usb.h
75778+++ b/include/linux/usb.h
75779@@ -552,7 +552,7 @@ struct usb_device {
75780 int maxchild;
75781
75782 u32 quirks;
75783- atomic_t urbnum;
75784+ atomic_unchecked_t urbnum;
75785
75786 unsigned long active_duration;
75787
75788@@ -1607,7 +1607,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
75789
75790 extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
75791 __u8 request, __u8 requesttype, __u16 value, __u16 index,
75792- void *data, __u16 size, int timeout);
75793+ void *data, __u16 size, int timeout) __intentional_overflow(-1);
75794 extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
75795 void *data, int len, int *actual_length, int timeout);
75796 extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
75797diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
75798index e452ba6..78f8e80 100644
75799--- a/include/linux/usb/renesas_usbhs.h
75800+++ b/include/linux/usb/renesas_usbhs.h
75801@@ -39,7 +39,7 @@ enum {
75802 */
75803 struct renesas_usbhs_driver_callback {
75804 int (*notify_hotplug)(struct platform_device *pdev);
75805-};
75806+} __no_const;
75807
75808 /*
75809 * callback functions for platform
75810diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
75811index 6f8fbcf..8259001 100644
75812--- a/include/linux/vermagic.h
75813+++ b/include/linux/vermagic.h
75814@@ -25,9 +25,35 @@
75815 #define MODULE_ARCH_VERMAGIC ""
75816 #endif
75817
75818+#ifdef CONFIG_PAX_REFCOUNT
75819+#define MODULE_PAX_REFCOUNT "REFCOUNT "
75820+#else
75821+#define MODULE_PAX_REFCOUNT ""
75822+#endif
75823+
75824+#ifdef CONSTIFY_PLUGIN
75825+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
75826+#else
75827+#define MODULE_CONSTIFY_PLUGIN ""
75828+#endif
75829+
75830+#ifdef STACKLEAK_PLUGIN
75831+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
75832+#else
75833+#define MODULE_STACKLEAK_PLUGIN ""
75834+#endif
75835+
75836+#ifdef CONFIG_GRKERNSEC
75837+#define MODULE_GRSEC "GRSEC "
75838+#else
75839+#define MODULE_GRSEC ""
75840+#endif
75841+
75842 #define VERMAGIC_STRING \
75843 UTS_RELEASE " " \
75844 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
75845 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
75846- MODULE_ARCH_VERMAGIC
75847+ MODULE_ARCH_VERMAGIC \
75848+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
75849+ MODULE_GRSEC
75850
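Folding the hardening options into VERMAGIC_STRING makes module compatibility explicit: a module built without the same plugin set simply fails the vermagic check at load time. For a kernel with CONFIG_PAX_REFCOUNT, both plugins, and CONFIG_GRKERNSEC all enabled, the string would expand along these lines (sketch):

/* UTS_RELEASE " ", the SMP/unload/modversions flags, then the suffixes:
 *   "3.10.7 SMP mod_unload REFCOUNT CONSTIFY_PLUGIN STACKLEAK_PLUGIN GRSEC "
 */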
75851diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
75852index 7d5773a..541c01c 100644
75853--- a/include/linux/vmalloc.h
75854+++ b/include/linux/vmalloc.h
75855@@ -16,6 +16,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
75856 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
75857 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
75858 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
75859+
75860+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
75861+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
75862+#endif
75863+
75864 /* bits [20..32] reserved for arch specific ioremap internals */
75865
75866 /*
75867@@ -75,7 +80,7 @@ extern void *vmalloc_32_user(unsigned long size);
75868 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
75869 extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
75870 unsigned long start, unsigned long end, gfp_t gfp_mask,
75871- pgprot_t prot, int node, const void *caller);
75872+ pgprot_t prot, int node, const void *caller) __size_overflow(1);
75873 extern void vfree(const void *addr);
75874
75875 extern void *vmap(struct page **pages, unsigned int count,
75876@@ -137,8 +142,8 @@ extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
75877 extern void free_vm_area(struct vm_struct *area);
75878
75879 /* for /dev/kmem */
75880-extern long vread(char *buf, char *addr, unsigned long count);
75881-extern long vwrite(char *buf, char *addr, unsigned long count);
75882+extern long vread(char *buf, char *addr, unsigned long count) __size_overflow(3);
75883+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
75884
75885 /*
75886 * Internals. Dont't use..
75887diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
75888index c586679..f06b389 100644
75889--- a/include/linux/vmstat.h
75890+++ b/include/linux/vmstat.h
75891@@ -90,18 +90,18 @@ static inline void vm_events_fold_cpu(int cpu)
75892 /*
75893 * Zone based page accounting with per cpu differentials.
75894 */
75895-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
75896+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
75897
75898 static inline void zone_page_state_add(long x, struct zone *zone,
75899 enum zone_stat_item item)
75900 {
75901- atomic_long_add(x, &zone->vm_stat[item]);
75902- atomic_long_add(x, &vm_stat[item]);
75903+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
75904+ atomic_long_add_unchecked(x, &vm_stat[item]);
75905 }
75906
75907 static inline unsigned long global_page_state(enum zone_stat_item item)
75908 {
75909- long x = atomic_long_read(&vm_stat[item]);
75910+ long x = atomic_long_read_unchecked(&vm_stat[item]);
75911 #ifdef CONFIG_SMP
75912 if (x < 0)
75913 x = 0;
75914@@ -112,7 +112,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
75915 static inline unsigned long zone_page_state(struct zone *zone,
75916 enum zone_stat_item item)
75917 {
75918- long x = atomic_long_read(&zone->vm_stat[item]);
75919+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
75920 #ifdef CONFIG_SMP
75921 if (x < 0)
75922 x = 0;
75923@@ -129,7 +129,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
75924 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
75925 enum zone_stat_item item)
75926 {
75927- long x = atomic_long_read(&zone->vm_stat[item]);
75928+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
75929
75930 #ifdef CONFIG_SMP
75931 int cpu;
75932@@ -221,8 +221,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
75933
75934 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
75935 {
75936- atomic_long_inc(&zone->vm_stat[item]);
75937- atomic_long_inc(&vm_stat[item]);
75938+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
75939+ atomic_long_inc_unchecked(&vm_stat[item]);
75940 }
75941
75942 static inline void __inc_zone_page_state(struct page *page,
75943@@ -233,8 +233,8 @@ static inline void __inc_zone_page_state(struct page *page,
75944
75945 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
75946 {
75947- atomic_long_dec(&zone->vm_stat[item]);
75948- atomic_long_dec(&vm_stat[item]);
75949+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
75950+ atomic_long_dec_unchecked(&vm_stat[item]);
75951 }
75952
75953 static inline void __dec_zone_page_state(struct page *page,
75954diff --git a/include/linux/xattr.h b/include/linux/xattr.h
75955index fdbafc6..49dfe4f 100644
75956--- a/include/linux/xattr.h
75957+++ b/include/linux/xattr.h
75958@@ -28,7 +28,7 @@ struct xattr_handler {
75959 size_t size, int handler_flags);
75960 int (*set)(struct dentry *dentry, const char *name, const void *buffer,
75961 size_t size, int flags, int handler_flags);
75962-};
75963+} __do_const;
75964
75965 struct xattr {
75966 char *name;
75967@@ -37,6 +37,9 @@ struct xattr {
75968 };
75969
75970 ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
75971+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
75972+ssize_t pax_getxattr(struct dentry *, void *, size_t);
75973+#endif
75974 ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
75975 ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
75976 int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
75977diff --git a/include/linux/zlib.h b/include/linux/zlib.h
75978index 9c5a6b4..09c9438 100644
75979--- a/include/linux/zlib.h
75980+++ b/include/linux/zlib.h
75981@@ -31,6 +31,7 @@
75982 #define _ZLIB_H
75983
75984 #include <linux/zconf.h>
75985+#include <linux/compiler.h>
75986
75987 /* zlib deflate based on ZLIB_VERSION "1.1.3" */
75988 /* zlib inflate based on ZLIB_VERSION "1.2.3" */
75989@@ -179,7 +180,7 @@ typedef z_stream *z_streamp;
75990
75991 /* basic functions */
75992
75993-extern int zlib_deflate_workspacesize (int windowBits, int memLevel);
75994+extern int zlib_deflate_workspacesize (int windowBits, int memLevel) __intentional_overflow(0);
75995 /*
75996 Returns the number of bytes that needs to be allocated for a per-
75997 stream workspace with the specified parameters. A pointer to this
75998diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
75999index 95d1c91..6798cca 100644
76000--- a/include/media/v4l2-dev.h
76001+++ b/include/media/v4l2-dev.h
76002@@ -76,7 +76,7 @@ struct v4l2_file_operations {
76003 int (*mmap) (struct file *, struct vm_area_struct *);
76004 int (*open) (struct file *);
76005 int (*release) (struct file *);
76006-};
76007+} __do_const;
76008
76009 /*
76010 * Newer version of video_device, handled by videodev2.c
76011diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
76012index adcbb20..62c2559 100644
76013--- a/include/net/9p/transport.h
76014+++ b/include/net/9p/transport.h
76015@@ -57,7 +57,7 @@ struct p9_trans_module {
76016 int (*cancel) (struct p9_client *, struct p9_req_t *req);
76017 int (*zc_request)(struct p9_client *, struct p9_req_t *,
76018 char *, char *, int , int, int, int);
76019-};
76020+} __do_const;
76021
76022 void v9fs_register_trans(struct p9_trans_module *m);
76023 void v9fs_unregister_trans(struct p9_trans_module *m);
76024diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
76025index fb94cf1..7c0c987 100644
76026--- a/include/net/bluetooth/l2cap.h
76027+++ b/include/net/bluetooth/l2cap.h
76028@@ -551,7 +551,7 @@ struct l2cap_ops {
76029 void (*defer) (struct l2cap_chan *chan);
76030 struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
76031 unsigned long len, int nb);
76032-};
76033+} __do_const;
76034
76035 struct l2cap_conn {
76036 struct hci_conn *hcon;
76037diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
76038index f2ae33d..c457cf0 100644
76039--- a/include/net/caif/cfctrl.h
76040+++ b/include/net/caif/cfctrl.h
76041@@ -52,7 +52,7 @@ struct cfctrl_rsp {
76042 void (*radioset_rsp)(void);
76043 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
76044 struct cflayer *client_layer);
76045-};
76046+} __no_const;
76047
76048 /* Link Setup Parameters for CAIF-Links. */
76049 struct cfctrl_link_param {
76050@@ -101,8 +101,8 @@ struct cfctrl_request_info {
76051 struct cfctrl {
76052 struct cfsrvl serv;
76053 struct cfctrl_rsp res;
76054- atomic_t req_seq_no;
76055- atomic_t rsp_seq_no;
76056+ atomic_unchecked_t req_seq_no;
76057+ atomic_unchecked_t rsp_seq_no;
76058 struct list_head list;
76059 /* Protects from simultaneous access to first_req list */
76060 spinlock_t info_list_lock;
76061diff --git a/include/net/flow.h b/include/net/flow.h
76062index 628e11b..4c475df 100644
76063--- a/include/net/flow.h
76064+++ b/include/net/flow.h
76065@@ -221,6 +221,6 @@ extern struct flow_cache_object *flow_cache_lookup(
76066
76067 extern void flow_cache_flush(void);
76068 extern void flow_cache_flush_deferred(void);
76069-extern atomic_t flow_cache_genid;
76070+extern atomic_unchecked_t flow_cache_genid;
76071
76072 #endif
76073diff --git a/include/net/genetlink.h b/include/net/genetlink.h
76074index 93024a4..eeb6b6e 100644
76075--- a/include/net/genetlink.h
76076+++ b/include/net/genetlink.h
76077@@ -119,7 +119,7 @@ struct genl_ops {
76078 struct netlink_callback *cb);
76079 int (*done)(struct netlink_callback *cb);
76080 struct list_head ops_list;
76081-};
76082+} __do_const;
76083
76084 extern int genl_register_family(struct genl_family *family);
76085 extern int genl_register_family_with_ops(struct genl_family *family,
76086diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
76087index 734d9b5..48a9a4b 100644
76088--- a/include/net/gro_cells.h
76089+++ b/include/net/gro_cells.h
76090@@ -29,7 +29,7 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
76091 cell += skb_get_rx_queue(skb) & gcells->gro_cells_mask;
76092
76093 if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
76094- atomic_long_inc(&dev->rx_dropped);
76095+ atomic_long_inc_unchecked(&dev->rx_dropped);
76096 kfree_skb(skb);
76097 return;
76098 }
76099diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
76100index de2c785..0588a6b 100644
76101--- a/include/net/inet_connection_sock.h
76102+++ b/include/net/inet_connection_sock.h
76103@@ -62,7 +62,7 @@ struct inet_connection_sock_af_ops {
76104 void (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
76105 int (*bind_conflict)(const struct sock *sk,
76106 const struct inet_bind_bucket *tb, bool relax);
76107-};
76108+} __do_const;
76109
76110 /** inet_connection_sock - INET connection oriented sock
76111 *
76112diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
76113index 53f464d..ba76aaa 100644
76114--- a/include/net/inetpeer.h
76115+++ b/include/net/inetpeer.h
76116@@ -47,8 +47,8 @@ struct inet_peer {
76117 */
76118 union {
76119 struct {
76120- atomic_t rid; /* Frag reception counter */
76121- atomic_t ip_id_count; /* IP ID for the next packet */
76122+ atomic_unchecked_t rid; /* Frag reception counter */
76123+ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
76124 };
76125 struct rcu_head rcu;
76126 struct inet_peer *gc_next;
76127@@ -182,11 +182,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
76128 more++;
76129 inet_peer_refcheck(p);
76130 do {
76131- old = atomic_read(&p->ip_id_count);
76132+ old = atomic_read_unchecked(&p->ip_id_count);
76133 new = old + more;
76134 if (!new)
76135 new = 1;
76136- } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
76137+ } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
76138 return new;
76139 }
76140
76141diff --git a/include/net/ip.h b/include/net/ip.h
76142index a68f838..74518ab 100644
76143--- a/include/net/ip.h
76144+++ b/include/net/ip.h
76145@@ -202,7 +202,7 @@ extern struct local_ports {
76146 } sysctl_local_ports;
76147 extern void inet_get_local_port_range(int *low, int *high);
76148
76149-extern unsigned long *sysctl_local_reserved_ports;
76150+extern unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)];
76151 static inline int inet_is_reserved_local_port(int port)
76152 {
76153 return test_bit(port, sysctl_local_reserved_ports);
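Turning sysctl_local_reserved_ports from a runtime-allocated pointer into a fixed-size array trades one globally writable data pointer and an allocation for a static bitmap. The sizing is just the port-space arithmetic:

/* 65536 ports at one bit each = 8192 bytes, i.e.
 *   65536 / 8 / sizeof(unsigned long) = 1024 longs on 64-bit
 *                                     = 2048 longs on 32-bit
 * test_bit()/set_bit() work on the array exactly as they did on the
 * old dynamically allocated bitmap. */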
76154diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
76155index e49db91..76a81de 100644
76156--- a/include/net/ip_fib.h
76157+++ b/include/net/ip_fib.h
76158@@ -167,7 +167,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
76159
76160 #define FIB_RES_SADDR(net, res) \
76161 ((FIB_RES_NH(res).nh_saddr_genid == \
76162- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
76163+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
76164 FIB_RES_NH(res).nh_saddr : \
76165 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
76166 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
76167diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
76168index 4c062cc..3562c31 100644
76169--- a/include/net/ip_vs.h
76170+++ b/include/net/ip_vs.h
76171@@ -612,7 +612,7 @@ struct ip_vs_conn {
76172 struct ip_vs_conn *control; /* Master control connection */
76173 atomic_t n_control; /* Number of controlled ones */
76174 struct ip_vs_dest *dest; /* real server */
76175- atomic_t in_pkts; /* incoming packet counter */
76176+ atomic_unchecked_t in_pkts; /* incoming packet counter */
76177
76178 /* packet transmitter for different forwarding methods. If it
76179 mangles the packet, it must return NF_DROP or better NF_STOLEN,
76180@@ -761,7 +761,7 @@ struct ip_vs_dest {
76181 __be16 port; /* port number of the server */
76182 union nf_inet_addr addr; /* IP address of the server */
76183 volatile unsigned int flags; /* dest status flags */
76184- atomic_t conn_flags; /* flags to copy to conn */
76185+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
76186 atomic_t weight; /* server weight */
76187
76188 atomic_t refcnt; /* reference counter */
76189@@ -1013,11 +1013,11 @@ struct netns_ipvs {
76190 /* ip_vs_lblc */
76191 int sysctl_lblc_expiration;
76192 struct ctl_table_header *lblc_ctl_header;
76193- struct ctl_table *lblc_ctl_table;
76194+ ctl_table_no_const *lblc_ctl_table;
76195 /* ip_vs_lblcr */
76196 int sysctl_lblcr_expiration;
76197 struct ctl_table_header *lblcr_ctl_header;
76198- struct ctl_table *lblcr_ctl_table;
76199+ ctl_table_no_const *lblcr_ctl_table;
76200 /* ip_vs_est */
76201 struct list_head est_list; /* estimator list */
76202 spinlock_t est_lock;
76203diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
76204index 80ffde3..968b0f4 100644
76205--- a/include/net/irda/ircomm_tty.h
76206+++ b/include/net/irda/ircomm_tty.h
76207@@ -35,6 +35,7 @@
76208 #include <linux/termios.h>
76209 #include <linux/timer.h>
76210 #include <linux/tty.h> /* struct tty_struct */
76211+#include <asm/local.h>
76212
76213 #include <net/irda/irias_object.h>
76214 #include <net/irda/ircomm_core.h>
76215diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
76216index 714cc9a..ea05f3e 100644
76217--- a/include/net/iucv/af_iucv.h
76218+++ b/include/net/iucv/af_iucv.h
76219@@ -149,7 +149,7 @@ struct iucv_skb_cb {
76220 struct iucv_sock_list {
76221 struct hlist_head head;
76222 rwlock_t lock;
76223- atomic_t autobind_name;
76224+ atomic_unchecked_t autobind_name;
76225 };
76226
76227 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
76228diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
76229index df83f69..9b640b8 100644
76230--- a/include/net/llc_c_ac.h
76231+++ b/include/net/llc_c_ac.h
76232@@ -87,7 +87,7 @@
76233 #define LLC_CONN_AC_STOP_SENDACK_TMR 70
76234 #define LLC_CONN_AC_START_SENDACK_TMR_IF_NOT_RUNNING 71
76235
76236-typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
76237+typedef int (* const llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
76238
76239 extern int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
76240 extern int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
76241diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
76242index 6ca3113..f8026dd 100644
76243--- a/include/net/llc_c_ev.h
76244+++ b/include/net/llc_c_ev.h
76245@@ -125,8 +125,8 @@ static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
76246 return (struct llc_conn_state_ev *)skb->cb;
76247 }
76248
76249-typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
76250-typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
76251+typedef int (* const llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
76252+typedef int (* const llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
76253
76254 extern int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
76255 extern int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
76256diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
76257index 0e79cfb..f46db31 100644
76258--- a/include/net/llc_c_st.h
76259+++ b/include/net/llc_c_st.h
76260@@ -37,7 +37,7 @@ struct llc_conn_state_trans {
76261 u8 next_state;
76262 llc_conn_ev_qfyr_t *ev_qualifiers;
76263 llc_conn_action_t *ev_actions;
76264-};
76265+} __do_const;
76266
76267 struct llc_conn_state {
76268 u8 current_state;
76269diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h
76270index 37a3bbd..55a4241 100644
76271--- a/include/net/llc_s_ac.h
76272+++ b/include/net/llc_s_ac.h
76273@@ -23,7 +23,7 @@
76274 #define SAP_ACT_TEST_IND 9
76275
76276 /* All action functions must look like this */
76277-typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
76278+typedef int (* const llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
76279
76280 extern int llc_sap_action_unitdata_ind(struct llc_sap *sap,
76281 struct sk_buff *skb);
76282diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
76283index 567c681..cd73ac0 100644
76284--- a/include/net/llc_s_st.h
76285+++ b/include/net/llc_s_st.h
76286@@ -20,7 +20,7 @@ struct llc_sap_state_trans {
76287 llc_sap_ev_t ev;
76288 u8 next_state;
76289 llc_sap_action_t *ev_actions;
76290-};
76291+} __do_const;
76292
76293 struct llc_sap_state {
76294 u8 curr_state;
76295diff --git a/include/net/mac80211.h b/include/net/mac80211.h
76296index 885898a..cdace34 100644
76297--- a/include/net/mac80211.h
76298+++ b/include/net/mac80211.h
76299@@ -4205,7 +4205,7 @@ struct rate_control_ops {
76300 void (*add_sta_debugfs)(void *priv, void *priv_sta,
76301 struct dentry *dir);
76302 void (*remove_sta_debugfs)(void *priv, void *priv_sta);
76303-};
76304+} __do_const;
76305
76306 static inline int rate_supported(struct ieee80211_sta *sta,
76307 enum ieee80211_band band,
76308diff --git a/include/net/neighbour.h b/include/net/neighbour.h
76309index 7e748ad..5c6229b 100644
76310--- a/include/net/neighbour.h
76311+++ b/include/net/neighbour.h
76312@@ -123,7 +123,7 @@ struct neigh_ops {
76313 void (*error_report)(struct neighbour *, struct sk_buff *);
76314 int (*output)(struct neighbour *, struct sk_buff *);
76315 int (*connected_output)(struct neighbour *, struct sk_buff *);
76316-};
76317+} __do_const;
76318
76319 struct pneigh_entry {
76320 struct pneigh_entry *next;
76321diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
76322index b176978..ea169f4 100644
76323--- a/include/net/net_namespace.h
76324+++ b/include/net/net_namespace.h
76325@@ -117,7 +117,7 @@ struct net {
76326 #endif
76327 struct netns_ipvs *ipvs;
76328 struct sock *diag_nlsk;
76329- atomic_t rt_genid;
76330+ atomic_unchecked_t rt_genid;
76331 };
76332
76333 /*
76334@@ -274,7 +274,11 @@ static inline struct net *read_pnet(struct net * const *pnet)
76335 #define __net_init __init
76336 #define __net_exit __exit_refok
76337 #define __net_initdata __initdata
76338+#ifdef CONSTIFY_PLUGIN
76339 #define __net_initconst __initconst
76340+#else
76341+#define __net_initconst __initdata
76342+#endif
76343 #endif
76344
76345 struct pernet_operations {
76346@@ -284,7 +288,7 @@ struct pernet_operations {
76347 void (*exit_batch)(struct list_head *net_exit_list);
76348 int *id;
76349 size_t size;
76350-};
76351+} __do_const;
76352
76353 /*
76354 * Use these carefully. If you implement a network device and it
76355@@ -332,12 +336,12 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
76356
76357 static inline int rt_genid(struct net *net)
76358 {
76359- return atomic_read(&net->rt_genid);
76360+ return atomic_read_unchecked(&net->rt_genid);
76361 }
76362
76363 static inline void rt_genid_bump(struct net *net)
76364 {
76365- atomic_inc(&net->rt_genid);
76366+ atomic_inc_unchecked(&net->rt_genid);
76367 }
76368
76369 #endif /* __NET_NET_NAMESPACE_H */
76370diff --git a/include/net/netdma.h b/include/net/netdma.h
76371index 8ba8ce2..99b7fff 100644
76372--- a/include/net/netdma.h
76373+++ b/include/net/netdma.h
76374@@ -24,7 +24,7 @@
76375 #include <linux/dmaengine.h>
76376 #include <linux/skbuff.h>
76377
76378-int dma_skb_copy_datagram_iovec(struct dma_chan* chan,
76379+int __intentional_overflow(3,5) dma_skb_copy_datagram_iovec(struct dma_chan* chan,
76380 struct sk_buff *skb, int offset, struct iovec *to,
76381 size_t len, struct dma_pinned_list *pinned_list);
76382
76383diff --git a/include/net/netlink.h b/include/net/netlink.h
76384index 9690b0f..87aded7 100644
76385--- a/include/net/netlink.h
76386+++ b/include/net/netlink.h
76387@@ -534,7 +534,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
76388 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
76389 {
76390 if (mark)
76391- skb_trim(skb, (unsigned char *) mark - skb->data);
76392+ skb_trim(skb, (const unsigned char *) mark - skb->data);
76393 }
76394
76395 /**
76396diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
76397index c9c0c53..53f24c3 100644
76398--- a/include/net/netns/conntrack.h
76399+++ b/include/net/netns/conntrack.h
76400@@ -12,10 +12,10 @@ struct nf_conntrack_ecache;
76401 struct nf_proto_net {
76402 #ifdef CONFIG_SYSCTL
76403 struct ctl_table_header *ctl_table_header;
76404- struct ctl_table *ctl_table;
76405+ ctl_table_no_const *ctl_table;
76406 #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
76407 struct ctl_table_header *ctl_compat_header;
76408- struct ctl_table *ctl_compat_table;
76409+ ctl_table_no_const *ctl_compat_table;
76410 #endif
76411 #endif
76412 unsigned int users;
76413@@ -58,7 +58,7 @@ struct nf_ip_net {
76414 struct nf_icmp_net icmpv6;
76415 #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
76416 struct ctl_table_header *ctl_table_header;
76417- struct ctl_table *ctl_table;
76418+ ctl_table_no_const *ctl_table;
76419 #endif
76420 };
76421
76422diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
76423index 2ba9de8..47bd6c7 100644
76424--- a/include/net/netns/ipv4.h
76425+++ b/include/net/netns/ipv4.h
76426@@ -67,7 +67,7 @@ struct netns_ipv4 {
76427 kgid_t sysctl_ping_group_range[2];
76428 long sysctl_tcp_mem[3];
76429
76430- atomic_t dev_addr_genid;
76431+ atomic_unchecked_t dev_addr_genid;
76432
76433 #ifdef CONFIG_IP_MROUTE
76434 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
76435diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
76436index 005e2c2..023d340 100644
76437--- a/include/net/netns/ipv6.h
76438+++ b/include/net/netns/ipv6.h
76439@@ -71,7 +71,7 @@ struct netns_ipv6 {
76440 struct fib_rules_ops *mr6_rules_ops;
76441 #endif
76442 #endif
76443- atomic_t dev_addr_genid;
76444+ atomic_unchecked_t dev_addr_genid;
76445 };
76446
76447 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
76448diff --git a/include/net/protocol.h b/include/net/protocol.h
76449index 047c047..b9dad15 100644
76450--- a/include/net/protocol.h
76451+++ b/include/net/protocol.h
76452@@ -44,7 +44,7 @@ struct net_protocol {
76453 void (*err_handler)(struct sk_buff *skb, u32 info);
76454 unsigned int no_policy:1,
76455 netns_ok:1;
76456-};
76457+} __do_const;
76458
76459 #if IS_ENABLED(CONFIG_IPV6)
76460 struct inet6_protocol {
76461@@ -57,7 +57,7 @@ struct inet6_protocol {
76462 u8 type, u8 code, int offset,
76463 __be32 info);
76464 unsigned int flags; /* INET6_PROTO_xxx */
76465-};
76466+} __do_const;
76467
76468 #define INET6_PROTO_NOPOLICY 0x1
76469 #define INET6_PROTO_FINAL 0x2
76470diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
76471index 7026648..584cc8c 100644
76472--- a/include/net/rtnetlink.h
76473+++ b/include/net/rtnetlink.h
76474@@ -81,7 +81,7 @@ struct rtnl_link_ops {
76475 const struct net_device *dev);
76476 unsigned int (*get_num_tx_queues)(void);
76477 unsigned int (*get_num_rx_queues)(void);
76478-};
76479+} __do_const;
76480
76481 extern int __rtnl_link_register(struct rtnl_link_ops *ops);
76482 extern void __rtnl_link_unregister(struct rtnl_link_ops *ops);
76483diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
76484index cd89510..d67810f 100644
76485--- a/include/net/sctp/sctp.h
76486+++ b/include/net/sctp/sctp.h
76487@@ -330,9 +330,9 @@ do { \
76488
76489 #else /* SCTP_DEBUG */
76490
76491-#define SCTP_DEBUG_PRINTK(whatever...)
76492-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
76493-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
76494+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
76495+#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
76496+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
76497 #define SCTP_ENABLE_DEBUG
76498 #define SCTP_DISABLE_DEBUG
76499 #define SCTP_ASSERT(expr, str, func)
76500diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
76501index 2a82d13..62a31c2 100644
76502--- a/include/net/sctp/sm.h
76503+++ b/include/net/sctp/sm.h
76504@@ -87,7 +87,7 @@ typedef void (sctp_timer_event_t) (unsigned long);
76505 typedef struct {
76506 sctp_state_fn_t *fn;
76507 const char *name;
76508-} sctp_sm_table_entry_t;
76509+} __do_const sctp_sm_table_entry_t;
76510
76511 /* A naming convention of "sctp_sf_xxx" applies to all the state functions
76512 * currently in use.
76513@@ -299,7 +299,7 @@ __u32 sctp_generate_tag(const struct sctp_endpoint *);
76514 __u32 sctp_generate_tsn(const struct sctp_endpoint *);
76515
76516 /* Extern declarations for major data structures. */
76517-extern sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
76518+extern sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
76519
76520
76521 /* Get the size of a DATA chunk payload. */
76522diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
76523index 1bd4c41..9250b5b 100644
76524--- a/include/net/sctp/structs.h
76525+++ b/include/net/sctp/structs.h
76526@@ -516,7 +516,7 @@ struct sctp_pf {
76527 struct sctp_association *asoc);
76528 void (*addr_v4map) (struct sctp_sock *, union sctp_addr *);
76529 struct sctp_af *af;
76530-};
76531+} __do_const;
76532
76533
76534 /* Structure to track chunk fragments that have been acked, but peer
76535diff --git a/include/net/sock.h b/include/net/sock.h
76536index 66772cf..25bc45b 100644
76537--- a/include/net/sock.h
76538+++ b/include/net/sock.h
76539@@ -325,7 +325,7 @@ struct sock {
76540 #ifdef CONFIG_RPS
76541 __u32 sk_rxhash;
76542 #endif
76543- atomic_t sk_drops;
76544+ atomic_unchecked_t sk_drops;
76545 int sk_rcvbuf;
76546
76547 struct sk_filter __rcu *sk_filter;
76548@@ -1797,7 +1797,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
76549 }
76550
76551 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
76552- char __user *from, char *to,
76553+ char __user *from, unsigned char *to,
76554 int copy, int offset)
76555 {
76556 if (skb->ip_summed == CHECKSUM_NONE) {
76557@@ -2056,7 +2056,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
76558 }
76559 }
76560
76561-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
76562+struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
76563
76564 /**
76565 * sk_page_frag - return an appropriate page_frag
76566diff --git a/include/net/tcp.h b/include/net/tcp.h
76567index 5bba80f..8520a82 100644
76568--- a/include/net/tcp.h
76569+++ b/include/net/tcp.h
76570@@ -524,7 +524,7 @@ extern void tcp_retransmit_timer(struct sock *sk);
76571 extern void tcp_xmit_retransmit_queue(struct sock *);
76572 extern void tcp_simple_retransmit(struct sock *);
76573 extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
76574-extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
76575+extern int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
76576
76577 extern void tcp_send_probe0(struct sock *);
76578 extern void tcp_send_partial(struct sock *);
76579@@ -697,8 +697,8 @@ struct tcp_skb_cb {
76580 struct inet6_skb_parm h6;
76581 #endif
76582 } header; /* For incoming frames */
76583- __u32 seq; /* Starting sequence number */
76584- __u32 end_seq; /* SEQ + FIN + SYN + datalen */
76585+ __u32 seq __intentional_overflow(0); /* Starting sequence number */
76586+ __u32 end_seq __intentional_overflow(0); /* SEQ + FIN + SYN + datalen */
76587 __u32 when; /* used to compute rtt's */
76588 __u8 tcp_flags; /* TCP header flags. (tcp[13]) */
76589
76590@@ -712,7 +712,7 @@ struct tcp_skb_cb {
76591
76592 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
76593 /* 1 byte hole */
76594- __u32 ack_seq; /* Sequence number ACK'd */
76595+ __u32 ack_seq __intentional_overflow(0); /* Sequence number ACK'd */
76596 };
76597
76598 #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
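Note: seq, end_seq, and ack_seq are marked __intentional_overflow(0) because TCP sequence arithmetic is modular by design: values wrap at 2^32 and ordering is decided by a signed difference, so the size_overflow plugin must not instrument them. The sketch below mirrors the kernel's before() helper from this same header:

    #include <stdint.h>
    #include <stdio.h>

    /* mirrors before() in include/net/tcp.h: the signed difference
     * handles the 2^32 wrap correctly */
    static int seq_before(uint32_t a, uint32_t b)
    {
        return (int32_t)(a - b) < 0;
    }

    int main(void)
    {
        uint32_t near_wrap = 0xfffffff0u;
        uint32_t wrapped   = near_wrap + 0x20;   /* wraps to 0x00000010 */

        printf("0x%08x before 0x%08x? %d\n",
               near_wrap, wrapped, seq_before(near_wrap, wrapped));  /* 1 */
        return 0;
    }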
76599diff --git a/include/net/xfrm.h b/include/net/xfrm.h
76600index 94ce082..62b278d 100644
76601--- a/include/net/xfrm.h
76602+++ b/include/net/xfrm.h
76603@@ -305,7 +305,7 @@ struct xfrm_policy_afinfo {
76604 struct net_device *dev,
76605 const struct flowi *fl);
76606 struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig);
76607-};
76608+} __do_const;
76609
76610 extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
76611 extern int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
76612@@ -341,7 +341,7 @@ struct xfrm_state_afinfo {
76613 struct sk_buff *skb);
76614 int (*transport_finish)(struct sk_buff *skb,
76615 int async);
76616-};
76617+} __do_const;
76618
76619 extern int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
76620 extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
76621@@ -424,7 +424,7 @@ struct xfrm_mode {
76622 struct module *owner;
76623 unsigned int encap;
76624 int flags;
76625-};
76626+} __do_const;
76627
76628 /* Flags for xfrm_mode. */
76629 enum {
76630@@ -521,7 +521,7 @@ struct xfrm_policy {
76631 struct timer_list timer;
76632
76633 struct flow_cache_object flo;
76634- atomic_t genid;
76635+ atomic_unchecked_t genid;
76636 u32 priority;
76637 u32 index;
76638 struct xfrm_mark mark;
76639diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
76640index 1a046b1..ee0bef0 100644
76641--- a/include/rdma/iw_cm.h
76642+++ b/include/rdma/iw_cm.h
76643@@ -122,7 +122,7 @@ struct iw_cm_verbs {
76644 int backlog);
76645
76646 int (*destroy_listen)(struct iw_cm_id *cm_id);
76647-};
76648+} __no_const;
76649
76650 /**
76651 * iw_create_cm_id - Create an IW CM identifier.
76652diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
76653index e1379b4..67eafbe 100644
76654--- a/include/scsi/libfc.h
76655+++ b/include/scsi/libfc.h
76656@@ -762,6 +762,7 @@ struct libfc_function_template {
76657 */
76658 void (*disc_stop_final) (struct fc_lport *);
76659 };
76660+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
76661
76662 /**
76663 * struct fc_disc - Discovery context
76664@@ -866,7 +867,7 @@ struct fc_lport {
76665 struct fc_vport *vport;
76666
76667 /* Operational Information */
76668- struct libfc_function_template tt;
76669+ libfc_function_template_no_const tt;
76670 u8 link_up;
76671 u8 qfull;
76672 enum fc_lport_state state;
76673diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
76674index cc64587..608f523 100644
76675--- a/include/scsi/scsi_device.h
76676+++ b/include/scsi/scsi_device.h
76677@@ -171,9 +171,9 @@ struct scsi_device {
76678 unsigned int max_device_blocked; /* what device_blocked counts down from */
76679 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
76680
76681- atomic_t iorequest_cnt;
76682- atomic_t iodone_cnt;
76683- atomic_t ioerr_cnt;
76684+ atomic_unchecked_t iorequest_cnt;
76685+ atomic_unchecked_t iodone_cnt;
76686+ atomic_unchecked_t ioerr_cnt;
76687
76688 struct device sdev_gendev,
76689 sdev_dev;
76690diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
76691index b797e8f..8e2c3aa 100644
76692--- a/include/scsi/scsi_transport_fc.h
76693+++ b/include/scsi/scsi_transport_fc.h
76694@@ -751,7 +751,8 @@ struct fc_function_template {
76695 unsigned long show_host_system_hostname:1;
76696
76697 unsigned long disable_target_scan:1;
76698-};
76699+} __do_const;
76700+typedef struct fc_function_template __no_const fc_function_template_no_const;
76701
76702
76703 /**
76704diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
76705index 9031a26..750d592 100644
76706--- a/include/sound/compress_driver.h
76707+++ b/include/sound/compress_driver.h
76708@@ -128,7 +128,7 @@ struct snd_compr_ops {
76709 struct snd_compr_caps *caps);
76710 int (*get_codec_caps) (struct snd_compr_stream *stream,
76711 struct snd_compr_codec_caps *codec);
76712-};
76713+} __no_const;
76714
76715 /**
76716 * struct snd_compr: Compressed device
76717diff --git a/include/sound/soc.h b/include/sound/soc.h
76718index 85c1522..f44bad1 100644
76719--- a/include/sound/soc.h
76720+++ b/include/sound/soc.h
76721@@ -781,7 +781,7 @@ struct snd_soc_codec_driver {
76722 /* probe ordering - for components with runtime dependencies */
76723 int probe_order;
76724 int remove_order;
76725-};
76726+} __do_const;
76727
76728 /* SoC platform interface */
76729 struct snd_soc_platform_driver {
76730@@ -827,7 +827,7 @@ struct snd_soc_platform_driver {
76731 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
76732 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
76733 int (*bespoke_trigger)(struct snd_pcm_substream *, int);
76734-};
76735+} __do_const;
76736
76737 struct snd_soc_platform {
76738 const char *name;
76739diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
76740index 4ea4f98..a63629b 100644
76741--- a/include/target/target_core_base.h
76742+++ b/include/target/target_core_base.h
76743@@ -653,7 +653,7 @@ struct se_device {
76744 spinlock_t stats_lock;
76745 /* Active commands on this virtual SE device */
76746 atomic_t simple_cmds;
76747- atomic_t dev_ordered_id;
76748+ atomic_unchecked_t dev_ordered_id;
76749 atomic_t dev_ordered_sync;
76750 atomic_t dev_qf_count;
76751 int export_count;
76752diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
76753new file mode 100644
76754index 0000000..fb634b7
76755--- /dev/null
76756+++ b/include/trace/events/fs.h
76757@@ -0,0 +1,53 @@
76758+#undef TRACE_SYSTEM
76759+#define TRACE_SYSTEM fs
76760+
76761+#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
76762+#define _TRACE_FS_H
76763+
76764+#include <linux/fs.h>
76765+#include <linux/tracepoint.h>
76766+
76767+TRACE_EVENT(do_sys_open,
76768+
76769+ TP_PROTO(const char *filename, int flags, int mode),
76770+
76771+ TP_ARGS(filename, flags, mode),
76772+
76773+ TP_STRUCT__entry(
76774+ __string( filename, filename )
76775+ __field( int, flags )
76776+ __field( int, mode )
76777+ ),
76778+
76779+ TP_fast_assign(
76780+ __assign_str(filename, filename);
76781+ __entry->flags = flags;
76782+ __entry->mode = mode;
76783+ ),
76784+
76785+ TP_printk("\"%s\" %x %o",
76786+ __get_str(filename), __entry->flags, __entry->mode)
76787+);
76788+
76789+TRACE_EVENT(open_exec,
76790+
76791+ TP_PROTO(const char *filename),
76792+
76793+ TP_ARGS(filename),
76794+
76795+ TP_STRUCT__entry(
76796+ __string( filename, filename )
76797+ ),
76798+
76799+ TP_fast_assign(
76800+ __assign_str(filename, filename);
76801+ ),
76802+
76803+ TP_printk("\"%s\"",
76804+ __get_str(filename))
76805+);
76806+
76807+#endif /* _TRACE_FS_H */
76808+
76809+/* This part must be outside protection */
76810+#include <trace/define_trace.h>
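Note: this new header defines do_sys_open and open_exec tracepoints, presumably fired from the open and exec paths elsewhere in the patch. As a rough illustration of how such a tracepoint is consumed, a module could attach a probe via the register_trace_do_sys_open() helper that the TRACE_EVENT machinery generates; this sketch assumes the tracepoint symbols are exported to modules, which the patch does not necessarily do:

    #include <linux/module.h>
    #include <trace/events/fs.h>

    /* probe signature: void *data first, then the TP_PROTO arguments */
    static void probe_open(void *data, const char *filename, int flags, int mode)
    {
        pr_info("opened: %s flags=%x mode=%o\n", filename, flags, mode);
    }

    static int __init open_watch_init(void)
    {
        return register_trace_do_sys_open(probe_open, NULL);
    }

    static void __exit open_watch_exit(void)
    {
        unregister_trace_do_sys_open(probe_open, NULL);
        tracepoint_synchronize_unregister();
    }

    module_init(open_watch_init);
    module_exit(open_watch_exit);
    MODULE_LICENSE("GPL");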
76811diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
76812index 1c09820..7f5ec79 100644
76813--- a/include/trace/events/irq.h
76814+++ b/include/trace/events/irq.h
76815@@ -36,7 +36,7 @@ struct softirq_action;
76816 */
76817 TRACE_EVENT(irq_handler_entry,
76818
76819- TP_PROTO(int irq, struct irqaction *action),
76820+ TP_PROTO(int irq, const struct irqaction *action),
76821
76822 TP_ARGS(irq, action),
76823
76824@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
76825 */
76826 TRACE_EVENT(irq_handler_exit,
76827
76828- TP_PROTO(int irq, struct irqaction *action, int ret),
76829+ TP_PROTO(int irq, const struct irqaction *action, int ret),
76830
76831 TP_ARGS(irq, action, ret),
76832
76833diff --git a/include/uapi/linux/a.out.h b/include/uapi/linux/a.out.h
76834index 7caf44c..23c6f27 100644
76835--- a/include/uapi/linux/a.out.h
76836+++ b/include/uapi/linux/a.out.h
76837@@ -39,6 +39,14 @@ enum machine_type {
76838 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
76839 };
76840
76841+/* Constants for the N_FLAGS field */
76842+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
76843+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
76844+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
76845+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
76846+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
76847+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
76848+
76849 #if !defined (N_MAGIC)
76850 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
76851 #endif
76852diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h
76853index d876736..ccce5c0 100644
76854--- a/include/uapi/linux/byteorder/little_endian.h
76855+++ b/include/uapi/linux/byteorder/little_endian.h
76856@@ -42,51 +42,51 @@
76857
76858 static inline __le64 __cpu_to_le64p(const __u64 *p)
76859 {
76860- return (__force __le64)*p;
76861+ return (__force const __le64)*p;
76862 }
76863-static inline __u64 __le64_to_cpup(const __le64 *p)
76864+static inline __u64 __intentional_overflow(-1) __le64_to_cpup(const __le64 *p)
76865 {
76866- return (__force __u64)*p;
76867+ return (__force const __u64)*p;
76868 }
76869 static inline __le32 __cpu_to_le32p(const __u32 *p)
76870 {
76871- return (__force __le32)*p;
76872+ return (__force const __le32)*p;
76873 }
76874 static inline __u32 __le32_to_cpup(const __le32 *p)
76875 {
76876- return (__force __u32)*p;
76877+ return (__force const __u32)*p;
76878 }
76879 static inline __le16 __cpu_to_le16p(const __u16 *p)
76880 {
76881- return (__force __le16)*p;
76882+ return (__force const __le16)*p;
76883 }
76884 static inline __u16 __le16_to_cpup(const __le16 *p)
76885 {
76886- return (__force __u16)*p;
76887+ return (__force const __u16)*p;
76888 }
76889 static inline __be64 __cpu_to_be64p(const __u64 *p)
76890 {
76891- return (__force __be64)__swab64p(p);
76892+ return (__force const __be64)__swab64p(p);
76893 }
76894 static inline __u64 __be64_to_cpup(const __be64 *p)
76895 {
76896- return __swab64p((__u64 *)p);
76897+ return __swab64p((const __u64 *)p);
76898 }
76899 static inline __be32 __cpu_to_be32p(const __u32 *p)
76900 {
76901- return (__force __be32)__swab32p(p);
76902+ return (__force const __be32)__swab32p(p);
76903 }
76904-static inline __u32 __be32_to_cpup(const __be32 *p)
76905+static inline __u32 __intentional_overflow(-1) __be32_to_cpup(const __be32 *p)
76906 {
76907- return __swab32p((__u32 *)p);
76908+ return __swab32p((const __u32 *)p);
76909 }
76910 static inline __be16 __cpu_to_be16p(const __u16 *p)
76911 {
76912- return (__force __be16)__swab16p(p);
76913+ return (__force const __be16)__swab16p(p);
76914 }
76915 static inline __u16 __be16_to_cpup(const __be16 *p)
76916 {
76917- return __swab16p((__u16 *)p);
76918+ return __swab16p((const __u16 *)p);
76919 }
76920 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
76921 #define __le64_to_cpus(x) do { (void)(x); } while (0)
76922diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
76923index ef6103b..d4e65dd 100644
76924--- a/include/uapi/linux/elf.h
76925+++ b/include/uapi/linux/elf.h
76926@@ -37,6 +37,17 @@ typedef __s64 Elf64_Sxword;
76927 #define PT_GNU_EH_FRAME 0x6474e550
76928
76929 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
76930+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
76931+
76932+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
76933+
76934+/* Constants for the e_flags field */
76935+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
76936+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
76937+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
76938+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
76939+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
76940+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
76941
76942 /*
76943 * Extended Numbering
76944@@ -94,6 +105,8 @@ typedef __s64 Elf64_Sxword;
76945 #define DT_DEBUG 21
76946 #define DT_TEXTREL 22
76947 #define DT_JMPREL 23
76948+#define DT_FLAGS 30
76949+ #define DF_TEXTREL 0x00000004
76950 #define DT_ENCODING 32
76951 #define OLD_DT_LOOS 0x60000000
76952 #define DT_LOOS 0x6000000d
76953@@ -240,6 +253,19 @@ typedef struct elf64_hdr {
76954 #define PF_W 0x2
76955 #define PF_X 0x1
76956
76957+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
76958+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
76959+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
76960+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
76961+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
76962+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
76963+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
76964+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
76965+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
76966+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
76967+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
76968+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
76969+
76970 typedef struct elf32_phdr{
76971 Elf32_Word p_type;
76972 Elf32_Off p_offset;
76973@@ -332,6 +358,8 @@ typedef struct elf64_shdr {
76974 #define EI_OSABI 7
76975 #define EI_PAD 8
76976
76977+#define EI_PAX 14
76978+
76979 #define ELFMAG0 0x7f /* EI_MAG */
76980 #define ELFMAG1 'E'
76981 #define ELFMAG2 'L'
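Note: PT_PAX_FLAGS and the PF_(NO)* pairs let a program header carry explicit per-feature on/off markings, with both bits clear meaning "kernel default". A hedged userspace sketch of decoding them (reads only the first 64 KiB and assumes a 64-bit native-endian ELF; dump_pax is an illustrative helper, not part of the patch):

    #include <elf.h>
    #include <stdio.h>

    #define PT_PAX_FLAGS  (PT_LOOS + 0x5041580)
    #define PF_PAGEEXEC   (1U << 4)
    #define PF_NOPAGEEXEC (1U << 5)

    static void dump_pax(const unsigned char *img)
    {
        const Elf64_Ehdr *eh = (const Elf64_Ehdr *)img;
        const Elf64_Phdr *ph = (const Elf64_Phdr *)(img + eh->e_phoff);

        for (int i = 0; i < eh->e_phnum; i++) {
            if (ph[i].p_type != PT_PAX_FLAGS)
                continue;
            /* each feature has an explicit on bit and off bit */
            if (ph[i].p_flags & PF_PAGEEXEC)
                puts("PAGEEXEC: forced on");
            else if (ph[i].p_flags & PF_NOPAGEEXEC)
                puts("PAGEEXEC: forced off");
            else
                puts("PAGEEXEC: kernel default");
            return;
        }
        puts("no PT_PAX_FLAGS header");
    }

    int main(int argc, char **argv)
    {
        static unsigned char img[1 << 16];  /* assumes phdrs in first 64 KiB */
        FILE *f;

        if (argc < 2 || !(f = fopen(argv[1], "rb")))
            return 1;
        if (fread(img, 1, sizeof(img), f) < sizeof(Elf64_Ehdr))
            return 1;
        fclose(f);
        dump_pax(img);
        return 0;
    }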
76982diff --git a/include/uapi/linux/personality.h b/include/uapi/linux/personality.h
76983index aa169c4..6a2771d 100644
76984--- a/include/uapi/linux/personality.h
76985+++ b/include/uapi/linux/personality.h
76986@@ -30,6 +30,7 @@ enum {
76987 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
76988 ADDR_NO_RANDOMIZE | \
76989 ADDR_COMPAT_LAYOUT | \
76990+ ADDR_LIMIT_3GB | \
76991 MMAP_PAGE_ZERO)
76992
76993 /*
76994diff --git a/include/uapi/linux/screen_info.h b/include/uapi/linux/screen_info.h
76995index 7530e74..e714828 100644
76996--- a/include/uapi/linux/screen_info.h
76997+++ b/include/uapi/linux/screen_info.h
76998@@ -43,7 +43,8 @@ struct screen_info {
76999 __u16 pages; /* 0x32 */
77000 __u16 vesa_attributes; /* 0x34 */
77001 __u32 capabilities; /* 0x36 */
77002- __u8 _reserved[6]; /* 0x3a */
77003+ __u16 vesapm_size; /* 0x3a */
77004+ __u8 _reserved[4]; /* 0x3c */
77005 } __attribute__((packed));
77006
77007 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
77008diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
77009index 0e011eb..82681b1 100644
77010--- a/include/uapi/linux/swab.h
77011+++ b/include/uapi/linux/swab.h
77012@@ -43,7 +43,7 @@
77013 * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
77014 */
77015
77016-static inline __attribute_const__ __u16 __fswab16(__u16 val)
77017+static inline __intentional_overflow(-1) __attribute_const__ __u16 __fswab16(__u16 val)
77018 {
77019 #ifdef __HAVE_BUILTIN_BSWAP16__
77020 return __builtin_bswap16(val);
77021@@ -54,7 +54,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
77022 #endif
77023 }
77024
77025-static inline __attribute_const__ __u32 __fswab32(__u32 val)
77026+static inline __intentional_overflow(-1) __attribute_const__ __u32 __fswab32(__u32 val)
77027 {
77028 #ifdef __HAVE_BUILTIN_BSWAP32__
77029 return __builtin_bswap32(val);
77030@@ -65,7 +65,7 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
77031 #endif
77032 }
77033
77034-static inline __attribute_const__ __u64 __fswab64(__u64 val)
77035+static inline __intentional_overflow(-1) __attribute_const__ __u64 __fswab64(__u64 val)
77036 {
77037 #ifdef __HAVE_BUILTIN_BSWAP64__
77038 return __builtin_bswap64(val);
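Note: __intentional_overflow(-1) whitelists every argument and the return value for the size_overflow plugin. A byte swap legitimately maps small inputs to huge outputs, which value-range tracking would otherwise treat as an arithmetic overflow. For instance (using the same gcc builtin the header falls back on):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t little  = 1;
        uint32_t swapped = __builtin_bswap32(little);

        /* 1 -> 0x01000000: a huge jump in value with no arithmetic
         * overflow anywhere */
        printf("bswap32(1) = 0x%08" PRIx32 " (%" PRIu32 ")\n", swapped, swapped);
        return 0;
    }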
77039diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
77040index 6d67213..8dab561 100644
77041--- a/include/uapi/linux/sysctl.h
77042+++ b/include/uapi/linux/sysctl.h
77043@@ -155,7 +155,11 @@ enum
77044 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
77045 };
77046
77047-
77048+#ifdef CONFIG_PAX_SOFTMODE
77049+enum {
77050+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
77051+};
77052+#endif
77053
77054 /* CTL_VM names: */
77055 enum
77056diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
77057index e4629b9..6958086 100644
77058--- a/include/uapi/linux/xattr.h
77059+++ b/include/uapi/linux/xattr.h
77060@@ -63,5 +63,9 @@
77061 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
77062 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
77063
77064+/* User namespace */
77065+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
77066+#define XATTR_PAX_FLAGS_SUFFIX "flags"
77067+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
77068
77069 #endif /* _UAPI_LINUX_XATTR_H */
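Note: XATTR_NAME_PAX_FLAGS expands to "user.pax.flags", the user-namespace attribute PaX softmode tooling uses to store per-binary flags. A hedged sketch of writing it with the plain setxattr(2) call (the single-letter flag encoding shown is the paxctl-style convention, not something this header defines):

    #include <stdio.h>
    #include <sys/xattr.h>

    int main(int argc, char **argv)
    {
        if (argc < 2) {
            fprintf(stderr, "usage: %s <binary>\n", argv[0]);
            return 1;
        }
        /* "m" = disable MPROTECT in the paxctl-style letter encoding;
         * the attribute name matches XATTR_NAME_PAX_FLAGS above */
        if (setxattr(argv[1], "user.pax.flags", "m", 1, 0) != 0) {
            perror("setxattr");
            return 1;
        }
        return 0;
    }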
77070diff --git a/include/video/udlfb.h b/include/video/udlfb.h
77071index f9466fa..f4e2b81 100644
77072--- a/include/video/udlfb.h
77073+++ b/include/video/udlfb.h
77074@@ -53,10 +53,10 @@ struct dlfb_data {
77075 u32 pseudo_palette[256];
77076 int blank_mode; /*one of FB_BLANK_ */
77077 /* blit-only rendering path metrics, exposed through sysfs */
77078- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
77079- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
77080- atomic_t bytes_sent; /* to usb, after compression including overhead */
77081- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
77082+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
77083+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
77084+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
77085+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
77086 };
77087
77088 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
77089diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
77090index 1a91850..28573f8 100644
77091--- a/include/video/uvesafb.h
77092+++ b/include/video/uvesafb.h
77093@@ -122,6 +122,7 @@ struct uvesafb_par {
77094 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
77095 u8 pmi_setpal; /* PMI for palette changes */
77096 u16 *pmi_base; /* protected mode interface location */
77097+ u8 *pmi_code; /* protected mode code location */
77098 void *pmi_start;
77099 void *pmi_pal;
77100 u8 *vbe_state_orig; /*
77101diff --git a/init/Kconfig b/init/Kconfig
77102index 2d9b831..ae4c8ac 100644
77103--- a/init/Kconfig
77104+++ b/init/Kconfig
77105@@ -1029,6 +1029,7 @@ endif # CGROUPS
77106
77107 config CHECKPOINT_RESTORE
77108 bool "Checkpoint/restore support" if EXPERT
77109+ depends on !GRKERNSEC
77110 default n
77111 help
77112 Enables additional kernel features in a sake of checkpoint/restore.
77113@@ -1516,7 +1517,7 @@ config SLUB_DEBUG
77114
77115 config COMPAT_BRK
77116 bool "Disable heap randomization"
77117- default y
77118+ default n
77119 help
77120 Randomizing heap placement makes heap exploits harder, but it
77121 also breaks ancient binaries (including anything libc5 based).
77122@@ -1779,7 +1780,7 @@ config INIT_ALL_POSSIBLE
77123 config STOP_MACHINE
77124 bool
77125 default y
77126- depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
77127+ depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
77128 help
77129 Need stop_machine() primitive.
77130
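Note: flipping COMPAT_BRK to default n means heap (brk) randomization is on by default, at the cost of breaking ancient libc5-era binaries. The effect is easy to observe from userspace; run the following twice and compare the printed break addresses (they differ across runs only when brk randomization is active):

    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        /* initial program break; randomized per-exec when COMPAT_BRK=n
         * and randomize_va_space=2 */
        printf("initial brk: %p\n", sbrk(0));
        return 0;
    }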
77131diff --git a/init/Makefile b/init/Makefile
77132index 7bc47ee..6da2dc7 100644
77133--- a/init/Makefile
77134+++ b/init/Makefile
77135@@ -2,6 +2,9 @@
77136 # Makefile for the linux kernel.
77137 #
77138
77139+ccflags-y := $(GCC_PLUGINS_CFLAGS)
77140+asflags-y := $(GCC_PLUGINS_AFLAGS)
77141+
77142 obj-y := main.o version.o mounts.o
77143 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
77144 obj-y += noinitramfs.o
77145diff --git a/init/do_mounts.c b/init/do_mounts.c
77146index a2b49f2..03a0e17c 100644
77147--- a/init/do_mounts.c
77148+++ b/init/do_mounts.c
77149@@ -355,11 +355,11 @@ static void __init get_fs_names(char *page)
77150 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
77151 {
77152 struct super_block *s;
77153- int err = sys_mount(name, "/root", fs, flags, data);
77154+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
77155 if (err)
77156 return err;
77157
77158- sys_chdir("/root");
77159+ sys_chdir((const char __force_user *)"/root");
77160 s = current->fs->pwd.dentry->d_sb;
77161 ROOT_DEV = s->s_dev;
77162 printk(KERN_INFO
77163@@ -480,18 +480,18 @@ void __init change_floppy(char *fmt, ...)
77164 va_start(args, fmt);
77165 vsprintf(buf, fmt, args);
77166 va_end(args);
77167- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
77168+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
77169 if (fd >= 0) {
77170 sys_ioctl(fd, FDEJECT, 0);
77171 sys_close(fd);
77172 }
77173 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
77174- fd = sys_open("/dev/console", O_RDWR, 0);
77175+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
77176 if (fd >= 0) {
77177 sys_ioctl(fd, TCGETS, (long)&termios);
77178 termios.c_lflag &= ~ICANON;
77179 sys_ioctl(fd, TCSETSF, (long)&termios);
77180- sys_read(fd, &c, 1);
77181+ sys_read(fd, (char __user *)&c, 1);
77182 termios.c_lflag |= ICANON;
77183 sys_ioctl(fd, TCSETSF, (long)&termios);
77184 sys_close(fd);
77185@@ -585,6 +585,6 @@ void __init prepare_namespace(void)
77186 mount_root();
77187 out:
77188 devtmpfs_mount("dev");
77189- sys_mount(".", "/", NULL, MS_MOVE, NULL);
77190- sys_chroot(".");
77191+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
77192+ sys_chroot((const char __force_user *)".");
77193 }
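Note: the __force_user casts threaded through do_mounts (and the other early-init files below) exist for sparse and the PaX UDEREF checker: these sys_* entry points are typed to take __user pointers, and early init passes them kernel string literals, so each call needs an explicit address-space override. A userspace-compilable sketch of the annotation mechanics (sys_chdir_demo is a stand-in; run sparse/cgcc over it to see the mismatch warning the casts silence):

    #include <stdio.h>

    #ifdef __CHECKER__                      /* defined when run under sparse */
    # define __user  __attribute__((noderef, address_space(1)))
    # define __force __attribute__((force))
    #else
    # define __user
    # define __force
    #endif

    /* stub standing in for a syscall entry point that takes user memory */
    static long sys_chdir_demo(const char __user *path)
    {
        (void)path;
        return 0;
    }

    int main(void)
    {
        /* "/root" is kernel-side memory here; without the __force __user
         * cast sparse reports an address-space mismatch -- exactly what
         * the casts in do_mounts.c satisfy */
        long ret = sys_chdir_demo((const char __force __user *)"/root");

        printf("ret = %ld\n", ret);
        return 0;
    }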
77194diff --git a/init/do_mounts.h b/init/do_mounts.h
77195index f5b978a..69dbfe8 100644
77196--- a/init/do_mounts.h
77197+++ b/init/do_mounts.h
77198@@ -15,15 +15,15 @@ extern int root_mountflags;
77199
77200 static inline int create_dev(char *name, dev_t dev)
77201 {
77202- sys_unlink(name);
77203- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
77204+ sys_unlink((char __force_user *)name);
77205+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
77206 }
77207
77208 #if BITS_PER_LONG == 32
77209 static inline u32 bstat(char *name)
77210 {
77211 struct stat64 stat;
77212- if (sys_stat64(name, &stat) != 0)
77213+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
77214 return 0;
77215 if (!S_ISBLK(stat.st_mode))
77216 return 0;
77217@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
77218 static inline u32 bstat(char *name)
77219 {
77220 struct stat stat;
77221- if (sys_newstat(name, &stat) != 0)
77222+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
77223 return 0;
77224 if (!S_ISBLK(stat.st_mode))
77225 return 0;
77226diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
77227index 3e0878e..8a9d7a0 100644
77228--- a/init/do_mounts_initrd.c
77229+++ b/init/do_mounts_initrd.c
77230@@ -37,13 +37,13 @@ static int init_linuxrc(struct subprocess_info *info, struct cred *new)
77231 {
77232 sys_unshare(CLONE_FS | CLONE_FILES);
77233 /* stdin/stdout/stderr for /linuxrc */
77234- sys_open("/dev/console", O_RDWR, 0);
77235+ sys_open((const char __force_user *)"/dev/console", O_RDWR, 0);
77236 sys_dup(0);
77237 sys_dup(0);
77238 /* move initrd over / and chdir/chroot in initrd root */
77239- sys_chdir("/root");
77240- sys_mount(".", "/", NULL, MS_MOVE, NULL);
77241- sys_chroot(".");
77242+ sys_chdir((const char __force_user *)"/root");
77243+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
77244+ sys_chroot((const char __force_user *)".");
77245 sys_setsid();
77246 return 0;
77247 }
77248@@ -59,8 +59,8 @@ static void __init handle_initrd(void)
77249 create_dev("/dev/root.old", Root_RAM0);
77250 /* mount initrd on rootfs' /root */
77251 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
77252- sys_mkdir("/old", 0700);
77253- sys_chdir("/old");
77254+ sys_mkdir((const char __force_user *)"/old", 0700);
77255+ sys_chdir((const char __force_user *)"/old");
77256
77257 /* try loading default modules from initrd */
77258 load_default_modules();
77259@@ -80,31 +80,31 @@ static void __init handle_initrd(void)
77260 current->flags &= ~PF_FREEZER_SKIP;
77261
77262 /* move initrd to rootfs' /old */
77263- sys_mount("..", ".", NULL, MS_MOVE, NULL);
77264+ sys_mount((char __force_user *)"..", (char __force_user *)".", NULL, MS_MOVE, NULL);
77265 /* switch root and cwd back to / of rootfs */
77266- sys_chroot("..");
77267+ sys_chroot((const char __force_user *)"..");
77268
77269 if (new_decode_dev(real_root_dev) == Root_RAM0) {
77270- sys_chdir("/old");
77271+ sys_chdir((const char __force_user *)"/old");
77272 return;
77273 }
77274
77275- sys_chdir("/");
77276+ sys_chdir((const char __force_user *)"/");
77277 ROOT_DEV = new_decode_dev(real_root_dev);
77278 mount_root();
77279
77280 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
77281- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
77282+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
77283 if (!error)
77284 printk("okay\n");
77285 else {
77286- int fd = sys_open("/dev/root.old", O_RDWR, 0);
77287+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
77288 if (error == -ENOENT)
77289 printk("/initrd does not exist. Ignored.\n");
77290 else
77291 printk("failed\n");
77292 printk(KERN_NOTICE "Unmounting old root\n");
77293- sys_umount("/old", MNT_DETACH);
77294+ sys_umount((char __force_user *)"/old", MNT_DETACH);
77295 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
77296 if (fd < 0) {
77297 error = fd;
77298@@ -127,11 +127,11 @@ int __init initrd_load(void)
77299 * mounted in the normal path.
77300 */
77301 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
77302- sys_unlink("/initrd.image");
77303+ sys_unlink((const char __force_user *)"/initrd.image");
77304 handle_initrd();
77305 return 1;
77306 }
77307 }
77308- sys_unlink("/initrd.image");
77309+ sys_unlink((const char __force_user *)"/initrd.image");
77310 return 0;
77311 }
77312diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
77313index 8cb6db5..d729f50 100644
77314--- a/init/do_mounts_md.c
77315+++ b/init/do_mounts_md.c
77316@@ -180,7 +180,7 @@ static void __init md_setup_drive(void)
77317 partitioned ? "_d" : "", minor,
77318 md_setup_args[ent].device_names);
77319
77320- fd = sys_open(name, 0, 0);
77321+ fd = sys_open((char __force_user *)name, 0, 0);
77322 if (fd < 0) {
77323 printk(KERN_ERR "md: open failed - cannot start "
77324 "array %s\n", name);
77325@@ -243,7 +243,7 @@ static void __init md_setup_drive(void)
77326 * array without it
77327 */
77328 sys_close(fd);
77329- fd = sys_open(name, 0, 0);
77330+ fd = sys_open((char __force_user *)name, 0, 0);
77331 sys_ioctl(fd, BLKRRPART, 0);
77332 }
77333 sys_close(fd);
77334@@ -293,7 +293,7 @@ static void __init autodetect_raid(void)
77335
77336 wait_for_device_probe();
77337
77338- fd = sys_open("/dev/md0", 0, 0);
77339+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
77340 if (fd >= 0) {
77341 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
77342 sys_close(fd);
77343diff --git a/init/init_task.c b/init/init_task.c
77344index ba0a7f36..2bcf1d5 100644
77345--- a/init/init_task.c
77346+++ b/init/init_task.c
77347@@ -22,5 +22,9 @@ EXPORT_SYMBOL(init_task);
77348 * Initial thread structure. Alignment of this is handled by a special
77349 * linker map entry.
77350 */
77351+#ifdef CONFIG_X86
77352+union thread_union init_thread_union __init_task_data;
77353+#else
77354 union thread_union init_thread_union __init_task_data =
77355 { INIT_THREAD_INFO(init_task) };
77356+#endif
77357diff --git a/init/initramfs.c b/init/initramfs.c
77358index a67ef9d..2d17ed9 100644
77359--- a/init/initramfs.c
77360+++ b/init/initramfs.c
77361@@ -84,7 +84,7 @@ static void __init free_hash(void)
77362 }
77363 }
77364
77365-static long __init do_utime(char *filename, time_t mtime)
77366+static long __init do_utime(char __force_user *filename, time_t mtime)
77367 {
77368 struct timespec t[2];
77369
77370@@ -119,7 +119,7 @@ static void __init dir_utime(void)
77371 struct dir_entry *de, *tmp;
77372 list_for_each_entry_safe(de, tmp, &dir_list, list) {
77373 list_del(&de->list);
77374- do_utime(de->name, de->mtime);
77375+ do_utime((char __force_user *)de->name, de->mtime);
77376 kfree(de->name);
77377 kfree(de);
77378 }
77379@@ -281,7 +281,7 @@ static int __init maybe_link(void)
77380 if (nlink >= 2) {
77381 char *old = find_link(major, minor, ino, mode, collected);
77382 if (old)
77383- return (sys_link(old, collected) < 0) ? -1 : 1;
77384+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
77385 }
77386 return 0;
77387 }
77388@@ -290,11 +290,11 @@ static void __init clean_path(char *path, umode_t mode)
77389 {
77390 struct stat st;
77391
77392- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
77393+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
77394 if (S_ISDIR(st.st_mode))
77395- sys_rmdir(path);
77396+ sys_rmdir((char __force_user *)path);
77397 else
77398- sys_unlink(path);
77399+ sys_unlink((char __force_user *)path);
77400 }
77401 }
77402
77403@@ -315,7 +315,7 @@ static int __init do_name(void)
77404 int openflags = O_WRONLY|O_CREAT;
77405 if (ml != 1)
77406 openflags |= O_TRUNC;
77407- wfd = sys_open(collected, openflags, mode);
77408+ wfd = sys_open((char __force_user *)collected, openflags, mode);
77409
77410 if (wfd >= 0) {
77411 sys_fchown(wfd, uid, gid);
77412@@ -327,17 +327,17 @@ static int __init do_name(void)
77413 }
77414 }
77415 } else if (S_ISDIR(mode)) {
77416- sys_mkdir(collected, mode);
77417- sys_chown(collected, uid, gid);
77418- sys_chmod(collected, mode);
77419+ sys_mkdir((char __force_user *)collected, mode);
77420+ sys_chown((char __force_user *)collected, uid, gid);
77421+ sys_chmod((char __force_user *)collected, mode);
77422 dir_add(collected, mtime);
77423 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
77424 S_ISFIFO(mode) || S_ISSOCK(mode)) {
77425 if (maybe_link() == 0) {
77426- sys_mknod(collected, mode, rdev);
77427- sys_chown(collected, uid, gid);
77428- sys_chmod(collected, mode);
77429- do_utime(collected, mtime);
77430+ sys_mknod((char __force_user *)collected, mode, rdev);
77431+ sys_chown((char __force_user *)collected, uid, gid);
77432+ sys_chmod((char __force_user *)collected, mode);
77433+ do_utime((char __force_user *)collected, mtime);
77434 }
77435 }
77436 return 0;
77437@@ -346,15 +346,15 @@ static int __init do_name(void)
77438 static int __init do_copy(void)
77439 {
77440 if (count >= body_len) {
77441- sys_write(wfd, victim, body_len);
77442+ sys_write(wfd, (char __force_user *)victim, body_len);
77443 sys_close(wfd);
77444- do_utime(vcollected, mtime);
77445+ do_utime((char __force_user *)vcollected, mtime);
77446 kfree(vcollected);
77447 eat(body_len);
77448 state = SkipIt;
77449 return 0;
77450 } else {
77451- sys_write(wfd, victim, count);
77452+ sys_write(wfd, (char __force_user *)victim, count);
77453 body_len -= count;
77454 eat(count);
77455 return 1;
77456@@ -365,9 +365,9 @@ static int __init do_symlink(void)
77457 {
77458 collected[N_ALIGN(name_len) + body_len] = '\0';
77459 clean_path(collected, 0);
77460- sys_symlink(collected + N_ALIGN(name_len), collected);
77461- sys_lchown(collected, uid, gid);
77462- do_utime(collected, mtime);
77463+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
77464+ sys_lchown((char __force_user *)collected, uid, gid);
77465+ do_utime((char __force_user *)collected, mtime);
77466 state = SkipIt;
77467 next_state = Reset;
77468 return 0;
77469@@ -583,7 +583,7 @@ static int __init populate_rootfs(void)
77470 {
77471 char *err = unpack_to_rootfs(__initramfs_start, __initramfs_size);
77472 if (err)
77473- panic(err); /* Failed to decompress INTERNAL initramfs */
77474+ panic("%s", err); /* Failed to decompress INTERNAL initramfs */
77475 if (initrd_start) {
77476 #ifdef CONFIG_BLK_DEV_RAM
77477 int fd;
77478diff --git a/init/main.c b/init/main.c
77479index 9484f4b..0eac7c3 100644
77480--- a/init/main.c
77481+++ b/init/main.c
77482@@ -100,6 +100,8 @@ static inline void mark_rodata_ro(void) { }
77483 extern void tc_init(void);
77484 #endif
77485
77486+extern void grsecurity_init(void);
77487+
77488 /*
77489 * Debug helper: via this flag we know that we are in 'early bootup code'
77490 * where only the boot processor is running with IRQ disabled. This means
77491@@ -153,6 +155,74 @@ static int __init set_reset_devices(char *str)
77492
77493 __setup("reset_devices", set_reset_devices);
77494
77495+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
77496+kgid_t grsec_proc_gid = KGIDT_INIT(CONFIG_GRKERNSEC_PROC_GID);
77497+static int __init setup_grsec_proc_gid(char *str)
77498+{
77499+ grsec_proc_gid = KGIDT_INIT(simple_strtol(str, NULL, 0));
77500+ return 1;
77501+}
77502+__setup("grsec_proc_gid=", setup_grsec_proc_gid);
77503+#endif
77504+
77505+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
77506+unsigned long pax_user_shadow_base __read_only;
77507+EXPORT_SYMBOL(pax_user_shadow_base);
77508+extern char pax_enter_kernel_user[];
77509+extern char pax_exit_kernel_user[];
77510+#endif
77511+
77512+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
77513+static int __init setup_pax_nouderef(char *str)
77514+{
77515+#ifdef CONFIG_X86_32
77516+ unsigned int cpu;
77517+ struct desc_struct *gdt;
77518+
77519+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
77520+ gdt = get_cpu_gdt_table(cpu);
77521+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
77522+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
77523+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
77524+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
77525+ }
77526+ loadsegment(ds, __KERNEL_DS);
77527+ loadsegment(es, __KERNEL_DS);
77528+ loadsegment(ss, __KERNEL_DS);
77529+#else
77530+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
77531+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
77532+ clone_pgd_mask = ~(pgdval_t)0UL;
77533+ pax_user_shadow_base = 0UL;
77534+ setup_clear_cpu_cap(X86_FEATURE_PCID);
77535+#endif
77536+
77537+ return 0;
77538+}
77539+early_param("pax_nouderef", setup_pax_nouderef);
77540+
77541+#ifdef CONFIG_X86_64
77542+static int __init setup_pax_weakuderef(char *str)
77543+{
77544+ if (clone_pgd_mask != ~(pgdval_t)0UL)
77545+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
77546+ return 1;
77547+}
77548+__setup("pax_weakuderef", setup_pax_weakuderef);
77549+#endif
77550+#endif
77551+
77552+#ifdef CONFIG_PAX_SOFTMODE
77553+int pax_softmode;
77554+
77555+static int __init setup_pax_softmode(char *str)
77556+{
77557+ get_option(&str, &pax_softmode);
77558+ return 1;
77559+}
77560+__setup("pax_softmode=", setup_pax_softmode);
77561+#endif
77562+
77563 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
77564 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
77565 static const char *panic_later, *panic_param;
77566@@ -655,8 +725,6 @@ static void __init do_ctors(void)
77567 bool initcall_debug;
77568 core_param(initcall_debug, initcall_debug, bool, 0644);
77569
77570-static char msgbuf[64];
77571-
77572 static int __init_or_module do_one_initcall_debug(initcall_t fn)
77573 {
77574 ktime_t calltime, delta, rettime;
77575@@ -679,23 +747,22 @@ int __init_or_module do_one_initcall(initcall_t fn)
77576 {
77577 int count = preempt_count();
77578 int ret;
77579+ const char *msg1 = "", *msg2 = "";
77580
77581 if (initcall_debug)
77582 ret = do_one_initcall_debug(fn);
77583 else
77584 ret = fn();
77585
77586- msgbuf[0] = 0;
77587-
77588 if (preempt_count() != count) {
77589- sprintf(msgbuf, "preemption imbalance ");
77590+ msg1 = " preemption imbalance";
77591 preempt_count() = count;
77592 }
77593 if (irqs_disabled()) {
77594- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
77595+ msg2 = " disabled interrupts";
77596 local_irq_enable();
77597 }
77598- WARN(msgbuf[0], "initcall %pF returned with %s\n", fn, msgbuf);
77599+ WARN(*msg1 || *msg2, "initcall %pF returned with%s%s\n", fn, msg1, msg2);
77600
77601 return ret;
77602 }
77603@@ -748,8 +815,14 @@ static void __init do_initcall_level(int level)
77604 level, level,
77605 &repair_env_string);
77606
77607- for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++)
77608+ for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++) {
77609 do_one_initcall(*fn);
77610+
77611+#ifdef LATENT_ENTROPY_PLUGIN
77612+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
77613+#endif
77614+
77615+ }
77616 }
77617
77618 static void __init do_initcalls(void)
77619@@ -783,8 +856,14 @@ static void __init do_pre_smp_initcalls(void)
77620 {
77621 initcall_t *fn;
77622
77623- for (fn = __initcall_start; fn < __initcall0_start; fn++)
77624+ for (fn = __initcall_start; fn < __initcall0_start; fn++) {
77625 do_one_initcall(*fn);
77626+
77627+#ifdef LATENT_ENTROPY_PLUGIN
77628+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
77629+#endif
77630+
77631+ }
77632 }
77633
77634 /*
77635@@ -802,8 +881,8 @@ static int run_init_process(const char *init_filename)
77636 {
77637 argv_init[0] = init_filename;
77638 return do_execve(init_filename,
77639- (const char __user *const __user *)argv_init,
77640- (const char __user *const __user *)envp_init);
77641+ (const char __user *const __force_user *)argv_init,
77642+ (const char __user *const __force_user *)envp_init);
77643 }
77644
77645 static noinline void __init kernel_init_freeable(void);
77646@@ -880,7 +959,7 @@ static noinline void __init kernel_init_freeable(void)
77647 do_basic_setup();
77648
77649 /* Open the /dev/console on the rootfs, this should never fail */
77650- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
77651+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
77652 pr_err("Warning: unable to open an initial console.\n");
77653
77654 (void) sys_dup(0);
77655@@ -893,11 +972,13 @@ static noinline void __init kernel_init_freeable(void)
77656 if (!ramdisk_execute_command)
77657 ramdisk_execute_command = "/init";
77658
77659- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
77660+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
77661 ramdisk_execute_command = NULL;
77662 prepare_namespace();
77663 }
77664
77665+ grsecurity_init();
77666+
77667 /*
77668 * Ok, we have completed the initial bootup, and
77669 * we're essentially up and running. Get rid of the
77670diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
77671index 130dfec..cc88451 100644
77672--- a/ipc/ipc_sysctl.c
77673+++ b/ipc/ipc_sysctl.c
77674@@ -30,7 +30,7 @@ static void *get_ipc(ctl_table *table)
77675 static int proc_ipc_dointvec(ctl_table *table, int write,
77676 void __user *buffer, size_t *lenp, loff_t *ppos)
77677 {
77678- struct ctl_table ipc_table;
77679+ ctl_table_no_const ipc_table;
77680
77681 memcpy(&ipc_table, table, sizeof(ipc_table));
77682 ipc_table.data = get_ipc(table);
77683@@ -41,7 +41,7 @@ static int proc_ipc_dointvec(ctl_table *table, int write,
77684 static int proc_ipc_dointvec_minmax(ctl_table *table, int write,
77685 void __user *buffer, size_t *lenp, loff_t *ppos)
77686 {
77687- struct ctl_table ipc_table;
77688+ ctl_table_no_const ipc_table;
77689
77690 memcpy(&ipc_table, table, sizeof(ipc_table));
77691 ipc_table.data = get_ipc(table);
77692@@ -65,7 +65,7 @@ static int proc_ipc_dointvec_minmax_orphans(ctl_table *table, int write,
77693 static int proc_ipc_callback_dointvec(ctl_table *table, int write,
77694 void __user *buffer, size_t *lenp, loff_t *ppos)
77695 {
77696- struct ctl_table ipc_table;
77697+ ctl_table_no_const ipc_table;
77698 size_t lenp_bef = *lenp;
77699 int rc;
77700
77701@@ -88,7 +88,7 @@ static int proc_ipc_callback_dointvec(ctl_table *table, int write,
77702 static int proc_ipc_doulongvec_minmax(ctl_table *table, int write,
77703 void __user *buffer, size_t *lenp, loff_t *ppos)
77704 {
77705- struct ctl_table ipc_table;
77706+ ctl_table_no_const ipc_table;
77707 memcpy(&ipc_table, table, sizeof(ipc_table));
77708 ipc_table.data = get_ipc(table);
77709
77710@@ -122,7 +122,7 @@ static void ipc_auto_callback(int val)
77711 static int proc_ipcauto_dointvec_minmax(ctl_table *table, int write,
77712 void __user *buffer, size_t *lenp, loff_t *ppos)
77713 {
77714- struct ctl_table ipc_table;
77715+ ctl_table_no_const ipc_table;
77716 size_t lenp_bef = *lenp;
77717 int oldval;
77718 int rc;
77719diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
77720index 383d638..943fdbb 100644
77721--- a/ipc/mq_sysctl.c
77722+++ b/ipc/mq_sysctl.c
77723@@ -25,7 +25,7 @@ static void *get_mq(ctl_table *table)
77724 static int proc_mq_dointvec_minmax(ctl_table *table, int write,
77725 void __user *buffer, size_t *lenp, loff_t *ppos)
77726 {
77727- struct ctl_table mq_table;
77728+ ctl_table_no_const mq_table;
77729 memcpy(&mq_table, table, sizeof(mq_table));
77730 mq_table.data = get_mq(table);
77731
77732diff --git a/ipc/mqueue.c b/ipc/mqueue.c
77733index e4e47f6..a85e0ad 100644
77734--- a/ipc/mqueue.c
77735+++ b/ipc/mqueue.c
77736@@ -278,6 +278,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
77737 mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
77738 info->attr.mq_msgsize);
77739
77740+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
77741 spin_lock(&mq_lock);
77742 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
77743 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
77744diff --git a/ipc/msg.c b/ipc/msg.c
77745index d0c6d96..69a893c 100644
77746--- a/ipc/msg.c
77747+++ b/ipc/msg.c
77748@@ -296,18 +296,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
77749 return security_msg_queue_associate(msq, msgflg);
77750 }
77751
77752+static struct ipc_ops msg_ops = {
77753+ .getnew = newque,
77754+ .associate = msg_security,
77755+ .more_checks = NULL
77756+};
77757+
77758 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
77759 {
77760 struct ipc_namespace *ns;
77761- struct ipc_ops msg_ops;
77762 struct ipc_params msg_params;
77763
77764 ns = current->nsproxy->ipc_ns;
77765
77766- msg_ops.getnew = newque;
77767- msg_ops.associate = msg_security;
77768- msg_ops.more_checks = NULL;
77769-
77770 msg_params.key = key;
77771 msg_params.flg = msgflg;
77772
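Note: hoisting msg_ops out of msgget() turns a per-call, writable stack copy into a single file-scope object that the constify plugin can place in read-only memory; the same transformation recurs for sem_ops and shm_ops in the next two files. The pattern in miniature (names are illustrative):

    #include <stdio.h>

    struct demo_ops {
        int (*getnew)(int key);
    };

    static int newque_demo(int key) { return key + 1; }

    /* one read-only instance shared by every call, instead of a struct
     * rebuilt on the stack at each syscall entry */
    static const struct demo_ops msg_ops_demo = { .getnew = newque_demo };

    static int msgget_demo(int key)
    {
        return msg_ops_demo.getnew(key);
    }

    int main(void)
    {
        printf("%d\n", msgget_demo(41));    /* prints 42 */
        return 0;
    }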
77773diff --git a/ipc/sem.c b/ipc/sem.c
77774index 70480a3..f4e8262 100644
77775--- a/ipc/sem.c
77776+++ b/ipc/sem.c
77777@@ -460,10 +460,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
77778 return 0;
77779 }
77780
77781+static struct ipc_ops sem_ops = {
77782+ .getnew = newary,
77783+ .associate = sem_security,
77784+ .more_checks = sem_more_checks
77785+};
77786+
77787 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
77788 {
77789 struct ipc_namespace *ns;
77790- struct ipc_ops sem_ops;
77791 struct ipc_params sem_params;
77792
77793 ns = current->nsproxy->ipc_ns;
77794@@ -471,10 +476,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
77795 if (nsems < 0 || nsems > ns->sc_semmsl)
77796 return -EINVAL;
77797
77798- sem_ops.getnew = newary;
77799- sem_ops.associate = sem_security;
77800- sem_ops.more_checks = sem_more_checks;
77801-
77802 sem_params.key = key;
77803 sem_params.flg = semflg;
77804 sem_params.u.nsems = nsems;
77805diff --git a/ipc/shm.c b/ipc/shm.c
77806index 7e199fa..180a1ca 100644
77807--- a/ipc/shm.c
77808+++ b/ipc/shm.c
77809@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
77810 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
77811 #endif
77812
77813+#ifdef CONFIG_GRKERNSEC
77814+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
77815+ const time_t shm_createtime, const kuid_t cuid,
77816+ const int shmid);
77817+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
77818+ const time_t shm_createtime);
77819+#endif
77820+
77821 void shm_init_ns(struct ipc_namespace *ns)
77822 {
77823 ns->shm_ctlmax = SHMMAX;
77824@@ -531,6 +539,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
77825 shp->shm_lprid = 0;
77826 shp->shm_atim = shp->shm_dtim = 0;
77827 shp->shm_ctim = get_seconds();
77828+#ifdef CONFIG_GRKERNSEC
77829+ {
77830+ struct timespec timeval;
77831+ do_posix_clock_monotonic_gettime(&timeval);
77832+
77833+ shp->shm_createtime = timeval.tv_sec;
77834+ }
77835+#endif
77836 shp->shm_segsz = size;
77837 shp->shm_nattch = 0;
77838 shp->shm_file = file;
77839@@ -582,18 +598,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
77840 return 0;
77841 }
77842
77843+static struct ipc_ops shm_ops = {
77844+ .getnew = newseg,
77845+ .associate = shm_security,
77846+ .more_checks = shm_more_checks
77847+};
77848+
77849 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
77850 {
77851 struct ipc_namespace *ns;
77852- struct ipc_ops shm_ops;
77853 struct ipc_params shm_params;
77854
77855 ns = current->nsproxy->ipc_ns;
77856
77857- shm_ops.getnew = newseg;
77858- shm_ops.associate = shm_security;
77859- shm_ops.more_checks = shm_more_checks;
77860-
77861 shm_params.key = key;
77862 shm_params.flg = shmflg;
77863 shm_params.u.size = size;
77864@@ -1014,6 +1031,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
77865 f_mode = FMODE_READ | FMODE_WRITE;
77866 }
77867 if (shmflg & SHM_EXEC) {
77868+
77869+#ifdef CONFIG_PAX_MPROTECT
77870+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
77871+ goto out;
77872+#endif
77873+
77874 prot |= PROT_EXEC;
77875 acc_mode |= S_IXUGO;
77876 }
77877@@ -1037,9 +1060,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
77878 if (err)
77879 goto out_unlock;
77880
77881+#ifdef CONFIG_GRKERNSEC
77882+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
77883+ shp->shm_perm.cuid, shmid) ||
77884+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
77885+ err = -EACCES;
77886+ goto out_unlock;
77887+ }
77888+#endif
77889+
77890 path = shp->shm_file->f_path;
77891 path_get(&path);
77892 shp->shm_nattch++;
77893+#ifdef CONFIG_GRKERNSEC
77894+ shp->shm_lapid = current->pid;
77895+#endif
77896 size = i_size_read(path.dentry->d_inode);
77897 shm_unlock(shp);
77898
77899diff --git a/kernel/acct.c b/kernel/acct.c
77900index 8d6e145..33e0b1e 100644
77901--- a/kernel/acct.c
77902+++ b/kernel/acct.c
77903@@ -556,7 +556,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
77904 */
77905 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
77906 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
77907- file->f_op->write(file, (char *)&ac,
77908+ file->f_op->write(file, (char __force_user *)&ac,
77909 sizeof(acct_t), &file->f_pos);
77910 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
77911 set_fs(fs);
77912diff --git a/kernel/audit.c b/kernel/audit.c
77913index 91e53d0..d9e3ec4 100644
77914--- a/kernel/audit.c
77915+++ b/kernel/audit.c
77916@@ -118,7 +118,7 @@ u32 audit_sig_sid = 0;
77917 3) suppressed due to audit_rate_limit
77918 4) suppressed due to audit_backlog_limit
77919 */
77920-static atomic_t audit_lost = ATOMIC_INIT(0);
77921+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
77922
77923 /* The netlink socket. */
77924 static struct sock *audit_sock;
77925@@ -240,7 +240,7 @@ void audit_log_lost(const char *message)
77926 unsigned long now;
77927 int print;
77928
77929- atomic_inc(&audit_lost);
77930+ atomic_inc_unchecked(&audit_lost);
77931
77932 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
77933
77934@@ -259,7 +259,7 @@ void audit_log_lost(const char *message)
77935 printk(KERN_WARNING
77936 "audit: audit_lost=%d audit_rate_limit=%d "
77937 "audit_backlog_limit=%d\n",
77938- atomic_read(&audit_lost),
77939+ atomic_read_unchecked(&audit_lost),
77940 audit_rate_limit,
77941 audit_backlog_limit);
77942 audit_panic(message);
77943@@ -664,7 +664,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
77944 status_set.pid = audit_pid;
77945 status_set.rate_limit = audit_rate_limit;
77946 status_set.backlog_limit = audit_backlog_limit;
77947- status_set.lost = atomic_read(&audit_lost);
77948+ status_set.lost = atomic_read_unchecked(&audit_lost);
77949 status_set.backlog = skb_queue_len(&audit_skb_queue);
77950 audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0,
77951 &status_set, sizeof(status_set));
77952diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
77953index 6bd4a90..0ee9eff 100644
77954--- a/kernel/auditfilter.c
77955+++ b/kernel/auditfilter.c
77956@@ -423,7 +423,7 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
77957 f->lsm_rule = NULL;
77958
77959 /* Support legacy tests for a valid loginuid */
77960- if ((f->type == AUDIT_LOGINUID) && (f->val == 4294967295)) {
77961+ if ((f->type == AUDIT_LOGINUID) && (f->val == 4294967295U)) {
77962 f->type = AUDIT_LOGINUID_SET;
77963 f->val = 0;
77964 }
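Note: the U suffix pins down the literal's type. Unsuffixed 4294967295 does not fit in int, so the compiler promotes it to long or long long depending on the ABI, while 4294967295U is an unsigned int everywhere and matches the u32 field it is compared with, avoiding sign-compare surprises. A quick check of the typing (sizes shown are for typical LP64/ILP32 targets):

    #include <stdio.h>

    int main(void)
    {
        /* 4294967295 overflows int, so C99 types it long (LP64) or
         * long long (ILP32): 8 bytes either way; the U-suffixed form
         * fits unsigned int: 4 bytes */
        printf("sizeof 4294967295  = %zu\n", sizeof(4294967295));
        printf("sizeof 4294967295U = %zu\n", sizeof(4294967295U));
        return 0;
    }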
77965diff --git a/kernel/auditsc.c b/kernel/auditsc.c
77966index 3c8a601..3a416f6 100644
77967--- a/kernel/auditsc.c
77968+++ b/kernel/auditsc.c
77969@@ -1956,7 +1956,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
77970 }
77971
77972 /* global counter which is incremented every time something logs in */
77973-static atomic_t session_id = ATOMIC_INIT(0);
77974+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
77975
77976 /**
77977 * audit_set_loginuid - set current task's audit_context loginuid
77978@@ -1980,7 +1980,7 @@ int audit_set_loginuid(kuid_t loginuid)
77979 return -EPERM;
77980 #endif /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
77981
77982- sessionid = atomic_inc_return(&session_id);
77983+ sessionid = atomic_inc_return_unchecked(&session_id);
77984 if (context && context->in_syscall) {
77985 struct audit_buffer *ab;
77986
77987diff --git a/kernel/capability.c b/kernel/capability.c
77988index f6c2ce5..982c0f9 100644
77989--- a/kernel/capability.c
77990+++ b/kernel/capability.c
77991@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
77992 * before modification is attempted and the application
77993 * fails.
77994 */
77995+ if (tocopy > ARRAY_SIZE(kdata))
77996+ return -EFAULT;
77997+
77998 if (copy_to_user(dataptr, kdata, tocopy
77999 * sizeof(struct __user_cap_data_struct))) {
78000 return -EFAULT;
78001@@ -303,10 +306,11 @@ bool has_ns_capability(struct task_struct *t,
78002 int ret;
78003
78004 rcu_read_lock();
78005- ret = security_capable(__task_cred(t), ns, cap);
78006+ ret = security_capable(__task_cred(t), ns, cap) == 0 &&
78007+ gr_task_is_capable(t, __task_cred(t), cap);
78008 rcu_read_unlock();
78009
78010- return (ret == 0);
78011+ return ret;
78012 }
78013
78014 /**
78015@@ -343,10 +347,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
78016 int ret;
78017
78018 rcu_read_lock();
78019- ret = security_capable_noaudit(__task_cred(t), ns, cap);
78020+ ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
78021 rcu_read_unlock();
78022
78023- return (ret == 0);
78024+ return ret;
78025 }
78026
78027 /**
78028@@ -384,7 +388,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
78029 BUG();
78030 }
78031
78032- if (security_capable(current_cred(), ns, cap) == 0) {
78033+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
78034 current->flags |= PF_SUPERPRIV;
78035 return true;
78036 }
78037@@ -392,6 +396,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
78038 }
78039 EXPORT_SYMBOL(ns_capable);
78040
78041+bool ns_capable_nolog(struct user_namespace *ns, int cap)
78042+{
78043+ if (unlikely(!cap_valid(cap))) {
78044+ printk(KERN_CRIT "capable_nolog() called with invalid cap=%u\n", cap);
78045+ BUG();
78046+ }
78047+
78048+ if (security_capable_noaudit(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
78049+ current->flags |= PF_SUPERPRIV;
78050+ return true;
78051+ }
78052+ return false;
78053+}
78054+EXPORT_SYMBOL(ns_capable_nolog);
78055+
78056 /**
78057 * file_ns_capable - Determine if the file's opener had a capability in effect
78058 * @file: The file we want to check
78059@@ -432,6 +451,12 @@ bool capable(int cap)
78060 }
78061 EXPORT_SYMBOL(capable);
78062
78063+bool capable_nolog(int cap)
78064+{
78065+ return ns_capable_nolog(&init_user_ns, cap);
78066+}
78067+EXPORT_SYMBOL(capable_nolog);
78068+
78069 /**
78070 * nsown_capable - Check superior capability to one's own user_ns
78071 * @cap: The capability in question
78072@@ -464,3 +489,10 @@ bool inode_capable(const struct inode *inode, int cap)
78073
78074 return ns_capable(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
78075 }
78076+
78077+bool inode_capable_nolog(const struct inode *inode, int cap)
78078+{
78079+ struct user_namespace *ns = current_user_ns();
78080+
78081+ return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
78082+}
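Note: the new bounds check in capget() caps tocopy, which is derived from the user-supplied header version, at the size of the on-stack kdata array before the copy_to_user(); current header versions yield at most two elements, but validating rather than trusting the derivation is the defensive point. The same pattern in miniature (CAP_U32S mirrors _LINUX_CAPABILITY_U32S_3):

    #include <stdio.h>
    #include <string.h>

    #define CAP_U32S 2   /* mirrors _LINUX_CAPABILITY_U32S_3 */

    struct cap_data { unsigned int effective, permitted, inheritable; };

    static int copy_caps(struct cap_data *dst, unsigned int tocopy)
    {
        static const struct cap_data kdata[CAP_U32S];

        if (tocopy > sizeof(kdata) / sizeof(kdata[0]))
            return -1;                       /* reject before copying */
        memcpy(dst, kdata, tocopy * sizeof(kdata[0]));
        return 0;
    }

    int main(void)
    {
        struct cap_data out[CAP_U32S];

        printf("%d %d\n", copy_caps(out, 2), copy_caps(out, 8));  /* 0 -1 */
        return 0;
    }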
78083diff --git a/kernel/cgroup.c b/kernel/cgroup.c
78084index 2e9b387..61817b1 100644
78085--- a/kernel/cgroup.c
78086+++ b/kernel/cgroup.c
78087@@ -5398,7 +5398,7 @@ static int cgroup_css_links_read(struct cgroup *cont,
78088 struct css_set *cg = link->cg;
78089 struct task_struct *task;
78090 int count = 0;
78091- seq_printf(seq, "css_set %p\n", cg);
78092+ seq_printf(seq, "css_set %pK\n", cg);
78093 list_for_each_entry(task, &cg->tasks, cg_list) {
78094 if (count++ > MAX_TASKS_SHOWN_PER_CSS) {
78095 seq_puts(seq, " ...\n");
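
The one-character change above (%p to %pK) censors the css_set address for readers without CAP_SYSLOG, according to kptr_restrict; the same substitution recurs in the kprobes and lockdep hunks further down. A userspace analogue of the idea, since %pK itself only exists in the kernel's printk:

#include <stdio.h>

/* Print the real pointer only to privileged viewers, a zeroed value
 * to everyone else, which is what %pK does under kptr_restrict. */
static void print_ptr_k(const void *p, int privileged)
{
    if (privileged)
        printf("%p\n", p);
    else
        printf("0000000000000000\n");
}

int main(void)
{
    int x;

    print_ptr_k(&x, 0); /* what an ordinary user sees */
    print_ptr_k(&x, 1); /* what root sees */
    return 0;
}
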
78096diff --git a/kernel/compat.c b/kernel/compat.c
78097index 0a09e48..f44f3f0 100644
78098--- a/kernel/compat.c
78099+++ b/kernel/compat.c
78100@@ -13,6 +13,7 @@
78101
78102 #include <linux/linkage.h>
78103 #include <linux/compat.h>
78104+#include <linux/module.h>
78105 #include <linux/errno.h>
78106 #include <linux/time.h>
78107 #include <linux/signal.h>
78108@@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
78109 mm_segment_t oldfs;
78110 long ret;
78111
78112- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
78113+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
78114 oldfs = get_fs();
78115 set_fs(KERNEL_DS);
78116 ret = hrtimer_nanosleep_restart(restart);
78117@@ -252,7 +253,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
78118 oldfs = get_fs();
78119 set_fs(KERNEL_DS);
78120 ret = hrtimer_nanosleep(&tu,
78121- rmtp ? (struct timespec __user *)&rmt : NULL,
78122+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
78123 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
78124 set_fs(oldfs);
78125
78126@@ -361,7 +362,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
78127 mm_segment_t old_fs = get_fs();
78128
78129 set_fs(KERNEL_DS);
78130- ret = sys_sigpending((old_sigset_t __user *) &s);
78131+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
78132 set_fs(old_fs);
78133 if (ret == 0)
78134 ret = put_user(s, set);
78135@@ -451,7 +452,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
78136 mm_segment_t old_fs = get_fs();
78137
78138 set_fs(KERNEL_DS);
78139- ret = sys_old_getrlimit(resource, &r);
78140+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
78141 set_fs(old_fs);
78142
78143 if (!ret) {
78144@@ -533,8 +534,8 @@ COMPAT_SYSCALL_DEFINE4(wait4,
78145 set_fs (KERNEL_DS);
78146 ret = sys_wait4(pid,
78147 (stat_addr ?
78148- (unsigned int __user *) &status : NULL),
78149- options, (struct rusage __user *) &r);
78150+ (unsigned int __force_user *) &status : NULL),
78151+ options, (struct rusage __force_user *) &r);
78152 set_fs (old_fs);
78153
78154 if (ret > 0) {
78155@@ -560,8 +561,8 @@ COMPAT_SYSCALL_DEFINE5(waitid,
78156 memset(&info, 0, sizeof(info));
78157
78158 set_fs(KERNEL_DS);
78159- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
78160- uru ? (struct rusage __user *)&ru : NULL);
78161+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
78162+ uru ? (struct rusage __force_user *)&ru : NULL);
78163 set_fs(old_fs);
78164
78165 if ((ret < 0) || (info.si_signo == 0))
78166@@ -695,8 +696,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
78167 oldfs = get_fs();
78168 set_fs(KERNEL_DS);
78169 err = sys_timer_settime(timer_id, flags,
78170- (struct itimerspec __user *) &newts,
78171- (struct itimerspec __user *) &oldts);
78172+ (struct itimerspec __force_user *) &newts,
78173+ (struct itimerspec __force_user *) &oldts);
78174 set_fs(oldfs);
78175 if (!err && old && put_compat_itimerspec(old, &oldts))
78176 return -EFAULT;
78177@@ -713,7 +714,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
78178 oldfs = get_fs();
78179 set_fs(KERNEL_DS);
78180 err = sys_timer_gettime(timer_id,
78181- (struct itimerspec __user *) &ts);
78182+ (struct itimerspec __force_user *) &ts);
78183 set_fs(oldfs);
78184 if (!err && put_compat_itimerspec(setting, &ts))
78185 return -EFAULT;
78186@@ -732,7 +733,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
78187 oldfs = get_fs();
78188 set_fs(KERNEL_DS);
78189 err = sys_clock_settime(which_clock,
78190- (struct timespec __user *) &ts);
78191+ (struct timespec __force_user *) &ts);
78192 set_fs(oldfs);
78193 return err;
78194 }
78195@@ -747,7 +748,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
78196 oldfs = get_fs();
78197 set_fs(KERNEL_DS);
78198 err = sys_clock_gettime(which_clock,
78199- (struct timespec __user *) &ts);
78200+ (struct timespec __force_user *) &ts);
78201 set_fs(oldfs);
78202 if (!err && put_compat_timespec(&ts, tp))
78203 return -EFAULT;
78204@@ -767,7 +768,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
78205
78206 oldfs = get_fs();
78207 set_fs(KERNEL_DS);
78208- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
78209+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
78210 set_fs(oldfs);
78211
78212 err = compat_put_timex(utp, &txc);
78213@@ -787,7 +788,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
78214 oldfs = get_fs();
78215 set_fs(KERNEL_DS);
78216 err = sys_clock_getres(which_clock,
78217- (struct timespec __user *) &ts);
78218+ (struct timespec __force_user *) &ts);
78219 set_fs(oldfs);
78220 if (!err && tp && put_compat_timespec(&ts, tp))
78221 return -EFAULT;
78222@@ -799,9 +800,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
78223 long err;
78224 mm_segment_t oldfs;
78225 struct timespec tu;
78226- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
78227+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
78228
78229- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
78230+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
78231 oldfs = get_fs();
78232 set_fs(KERNEL_DS);
78233 err = clock_nanosleep_restart(restart);
78234@@ -833,8 +834,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
78235 oldfs = get_fs();
78236 set_fs(KERNEL_DS);
78237 err = sys_clock_nanosleep(which_clock, flags,
78238- (struct timespec __user *) &in,
78239- (struct timespec __user *) &out);
78240+ (struct timespec __force_user *) &in,
78241+ (struct timespec __force_user *) &out);
78242 set_fs(oldfs);
78243
78244 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
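
Every compat wrapper above points a syscall at an on-stack kernel buffer while running under set_fs(KERNEL_DS); the casts change from __user to __force_user purely so sparse keeps flagging genuine kernel-to-user confusion while these deliberate, audited crossings stay quiet. A self-contained sketch of the annotation machinery, assuming __force_user composes the stock sparse attributes the way the grsecurity compiler.h additions do:

#include <time.h>

/* Only sparse (__CHECKER__) sees the address-space attributes; a real
 * compiler sees empty macros, so the annotation is free at run time. */
#ifdef __CHECKER__
# define __user  __attribute__((noderef, address_space(1)))
# define __force __attribute__((force))
#else
# define __user
# define __force
#endif
#define __force_user __force __user

/* Stub standing in for a syscall that expects a user pointer. */
static long nanosleep_stub(struct timespec __user *rmtp)
{
    (void)rmtp;
    return 0;
}

static long call_with_kernel_buf(void)
{
    struct timespec kbuf = { 0, 0 };

    /* a plain (struct timespec __user *) cast would still warn under
     * sparse; __force_user is the explicit escape hatch */
    return nanosleep_stub((struct timespec __force_user *)&kbuf);
}

int main(void)
{
    return (int)call_with_kernel_buf();
}
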
78245diff --git a/kernel/configs.c b/kernel/configs.c
78246index c18b1f1..b9a0132 100644
78247--- a/kernel/configs.c
78248+++ b/kernel/configs.c
78249@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
78250 struct proc_dir_entry *entry;
78251
78252 /* create the current config file */
78253+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
78254+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
78255+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
78256+ &ikconfig_file_ops);
78257+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
78258+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
78259+ &ikconfig_file_ops);
78260+#endif
78261+#else
78262 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
78263 &ikconfig_file_ops);
78264+#endif
78265+
78266 if (!entry)
78267 return -ENOMEM;
78268
78269diff --git a/kernel/cred.c b/kernel/cred.c
78270index e0573a4..3874e41 100644
78271--- a/kernel/cred.c
78272+++ b/kernel/cred.c
78273@@ -164,6 +164,16 @@ void exit_creds(struct task_struct *tsk)
78274 validate_creds(cred);
78275 alter_cred_subscribers(cred, -1);
78276 put_cred(cred);
78277+
78278+#ifdef CONFIG_GRKERNSEC_SETXID
78279+ cred = (struct cred *) tsk->delayed_cred;
78280+ if (cred != NULL) {
78281+ tsk->delayed_cred = NULL;
78282+ validate_creds(cred);
78283+ alter_cred_subscribers(cred, -1);
78284+ put_cred(cred);
78285+ }
78286+#endif
78287 }
78288
78289 /**
78290@@ -411,7 +421,7 @@ static bool cred_cap_issubset(const struct cred *set, const struct cred *subset)
78291 * Always returns 0 thus allowing this function to be tail-called at the end
78292 * of, say, sys_setgid().
78293 */
78294-int commit_creds(struct cred *new)
78295+static int __commit_creds(struct cred *new)
78296 {
78297 struct task_struct *task = current;
78298 const struct cred *old = task->real_cred;
78299@@ -430,6 +440,8 @@ int commit_creds(struct cred *new)
78300
78301 get_cred(new); /* we will require a ref for the subj creds too */
78302
78303+ gr_set_role_label(task, new->uid, new->gid);
78304+
78305 /* dumpability changes */
78306 if (!uid_eq(old->euid, new->euid) ||
78307 !gid_eq(old->egid, new->egid) ||
78308@@ -479,6 +491,102 @@ int commit_creds(struct cred *new)
78309 put_cred(old);
78310 return 0;
78311 }
78312+#ifdef CONFIG_GRKERNSEC_SETXID
78313+extern int set_user(struct cred *new);
78314+
78315+void gr_delayed_cred_worker(void)
78316+{
78317+ const struct cred *new = current->delayed_cred;
78318+ struct cred *ncred;
78319+
78320+ current->delayed_cred = NULL;
78321+
78322+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) && new != NULL) {
78323+ // from doing get_cred on it when queueing this
78324+ put_cred(new);
78325+ return;
78326+ } else if (new == NULL)
78327+ return;
78328+
78329+ ncred = prepare_creds();
78330+ if (!ncred)
78331+ goto die;
78332+ // uids
78333+ ncred->uid = new->uid;
78334+ ncred->euid = new->euid;
78335+ ncred->suid = new->suid;
78336+ ncred->fsuid = new->fsuid;
78337+ // gids
78338+ ncred->gid = new->gid;
78339+ ncred->egid = new->egid;
78340+ ncred->sgid = new->sgid;
78341+ ncred->fsgid = new->fsgid;
78342+ // groups
78343+ if (set_groups(ncred, new->group_info) < 0) {
78344+ abort_creds(ncred);
78345+ goto die;
78346+ }
78347+ // caps
78348+ ncred->securebits = new->securebits;
78349+ ncred->cap_inheritable = new->cap_inheritable;
78350+ ncred->cap_permitted = new->cap_permitted;
78351+ ncred->cap_effective = new->cap_effective;
78352+ ncred->cap_bset = new->cap_bset;
78353+
78354+ if (set_user(ncred)) {
78355+ abort_creds(ncred);
78356+ goto die;
78357+ }
78358+
78359+ // from doing get_cred on it when queueing this
78360+ put_cred(new);
78361+
78362+ __commit_creds(ncred);
78363+ return;
78364+die:
78365+ // from doing get_cred on it when queueing this
78366+ put_cred(new);
78367+ do_group_exit(SIGKILL);
78368+}
78369+#endif
78370+
78371+int commit_creds(struct cred *new)
78372+{
78373+#ifdef CONFIG_GRKERNSEC_SETXID
78374+ int ret;
78375+ int schedule_it = 0;
78376+ struct task_struct *t;
78377+
78378+ /* we won't get called with tasklist_lock held for writing
78379+ and interrupts disabled as the cred struct in that case is
78380+ init_cred
78381+ */
78382+ if (grsec_enable_setxid && !current_is_single_threaded() &&
78383+ uid_eq(current_uid(), GLOBAL_ROOT_UID) &&
78384+ !uid_eq(new->uid, GLOBAL_ROOT_UID)) {
78385+ schedule_it = 1;
78386+ }
78387+ ret = __commit_creds(new);
78388+ if (schedule_it) {
78389+ rcu_read_lock();
78390+ read_lock(&tasklist_lock);
78391+ for (t = next_thread(current); t != current;
78392+ t = next_thread(t)) {
78393+ if (t->delayed_cred == NULL) {
78394+ t->delayed_cred = get_cred(new);
78395+ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
78396+ set_tsk_need_resched(t);
78397+ }
78398+ }
78399+ read_unlock(&tasklist_lock);
78400+ rcu_read_unlock();
78401+ }
78402+ return ret;
78403+#else
78404+ return __commit_creds(new);
78405+#endif
78406+}
78407+
78408 EXPORT_SYMBOL(commit_creds);
78409
78410 /**
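
The cred.c rework above wraps commit_creds() so that when a root, multithreaded process drops privilege, every sibling thread has the new cred queued on it (delayed_cred, TIF_GRSEC_SETXID, plus a forced resched) and applies it in gr_delayed_cred_worker() at its next safe point, closing the classic race where only one thread drops privilege. A runnable pthreads analogue of that handoff (build with cc -pthread; all names here are illustrative, not the kernel API):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define NTHREADS 3

/* Per-thread mailbox standing in for task->delayed_cred; -1 == empty. */
static _Atomic int delayed_uid[NTHREADS];

static void *worker(void *arg)
{
    int idx = (int)(long)arg;
    int i;

    for (i = 0; i < 100; i++) {
        /* the "safe point": check and apply any queued change, like
         * gr_delayed_cred_worker() on the way back to userland */
        int pending = atomic_exchange(&delayed_uid[idx], -1);
        if (pending != -1)
            printf("thread %d now uid %d\n", idx, pending);
        usleep(1000);
    }
    return NULL;
}

int main(void)
{
    pthread_t tids[NTHREADS];
    long i;

    for (i = 0; i < NTHREADS; i++) {
        atomic_init(&delayed_uid[i], -1);
        pthread_create(&tids[i], NULL, worker, (void *)i);
    }
    /* the "commit_creds" step: one thread queues the drop everywhere */
    for (i = 0; i < NTHREADS; i++)
        atomic_store(&delayed_uid[i], 1000);
    for (i = 0; i < NTHREADS; i++)
        pthread_join(tids[i], NULL);
    return 0;
}
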
78411diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
78412index 0506d44..2c20034 100644
78413--- a/kernel/debug/debug_core.c
78414+++ b/kernel/debug/debug_core.c
78415@@ -123,7 +123,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
78416 */
78417 static atomic_t masters_in_kgdb;
78418 static atomic_t slaves_in_kgdb;
78419-static atomic_t kgdb_break_tasklet_var;
78420+static atomic_unchecked_t kgdb_break_tasklet_var;
78421 atomic_t kgdb_setting_breakpoint;
78422
78423 struct task_struct *kgdb_usethread;
78424@@ -133,7 +133,7 @@ int kgdb_single_step;
78425 static pid_t kgdb_sstep_pid;
78426
78427 /* to keep track of the CPU which is doing the single stepping*/
78428-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
78429+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
78430
78431 /*
78432 * If you are debugging a problem where roundup (the collection of
78433@@ -541,7 +541,7 @@ return_normal:
78434 * kernel will only try for the value of sstep_tries before
78435 * giving up and continuing on.
78436 */
78437- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
78438+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
78439 (kgdb_info[cpu].task &&
78440 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
78441 atomic_set(&kgdb_active, -1);
78442@@ -635,8 +635,8 @@ cpu_master_loop:
78443 }
78444
78445 kgdb_restore:
78446- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
78447- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
78448+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
78449+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
78450 if (kgdb_info[sstep_cpu].task)
78451 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
78452 else
78453@@ -888,18 +888,18 @@ static void kgdb_unregister_callbacks(void)
78454 static void kgdb_tasklet_bpt(unsigned long ing)
78455 {
78456 kgdb_breakpoint();
78457- atomic_set(&kgdb_break_tasklet_var, 0);
78458+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
78459 }
78460
78461 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
78462
78463 void kgdb_schedule_breakpoint(void)
78464 {
78465- if (atomic_read(&kgdb_break_tasklet_var) ||
78466+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
78467 atomic_read(&kgdb_active) != -1 ||
78468 atomic_read(&kgdb_setting_breakpoint))
78469 return;
78470- atomic_inc(&kgdb_break_tasklet_var);
78471+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
78472 tasklet_schedule(&kgdb_tasklet_breakpoint);
78473 }
78474 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
78475diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
78476index 00eb8f7..d7e3244 100644
78477--- a/kernel/debug/kdb/kdb_main.c
78478+++ b/kernel/debug/kdb/kdb_main.c
78479@@ -1974,7 +1974,7 @@ static int kdb_lsmod(int argc, const char **argv)
78480 continue;
78481
78482 kdb_printf("%-20s%8u 0x%p ", mod->name,
78483- mod->core_size, (void *)mod);
78484+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
78485 #ifdef CONFIG_MODULE_UNLOAD
78486 kdb_printf("%4ld ", module_refcount(mod));
78487 #endif
78488@@ -1984,7 +1984,7 @@ static int kdb_lsmod(int argc, const char **argv)
78489 kdb_printf(" (Loading)");
78490 else
78491 kdb_printf(" (Live)");
78492- kdb_printf(" 0x%p", mod->module_core);
78493+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
78494
78495 #ifdef CONFIG_MODULE_UNLOAD
78496 {
78497diff --git a/kernel/events/core.c b/kernel/events/core.c
78498index e76e495..cbfe63a 100644
78499--- a/kernel/events/core.c
78500+++ b/kernel/events/core.c
78501@@ -156,8 +156,15 @@ static struct srcu_struct pmus_srcu;
78502 * 0 - disallow raw tracepoint access for unpriv
78503 * 1 - disallow cpu events for unpriv
78504 * 2 - disallow kernel profiling for unpriv
78505+ * 3 - disallow all unpriv perf event use
78506 */
78507-int sysctl_perf_event_paranoid __read_mostly = 1;
78508+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
78509+int sysctl_perf_event_legitimately_concerned __read_mostly = 3;
78510+#elif defined(CONFIG_GRKERNSEC_HIDESYM)
78511+int sysctl_perf_event_legitimately_concerned __read_mostly = 2;
78512+#else
78513+int sysctl_perf_event_legitimately_concerned __read_mostly = 1;
78514+#endif
78515
78516 /* Minimum for 512 kiB + 1 user control page */
78517 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
78518@@ -184,7 +191,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
78519 return 0;
78520 }
78521
78522-static atomic64_t perf_event_id;
78523+static atomic64_unchecked_t perf_event_id;
78524
78525 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
78526 enum event_type_t event_type);
78527@@ -2747,7 +2754,7 @@ static void __perf_event_read(void *info)
78528
78529 static inline u64 perf_event_count(struct perf_event *event)
78530 {
78531- return local64_read(&event->count) + atomic64_read(&event->child_count);
78532+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
78533 }
78534
78535 static u64 perf_event_read(struct perf_event *event)
78536@@ -3093,9 +3100,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
78537 mutex_lock(&event->child_mutex);
78538 total += perf_event_read(event);
78539 *enabled += event->total_time_enabled +
78540- atomic64_read(&event->child_total_time_enabled);
78541+ atomic64_read_unchecked(&event->child_total_time_enabled);
78542 *running += event->total_time_running +
78543- atomic64_read(&event->child_total_time_running);
78544+ atomic64_read_unchecked(&event->child_total_time_running);
78545
78546 list_for_each_entry(child, &event->child_list, child_list) {
78547 total += perf_event_read(child);
78548@@ -3481,10 +3488,10 @@ void perf_event_update_userpage(struct perf_event *event)
78549 userpg->offset -= local64_read(&event->hw.prev_count);
78550
78551 userpg->time_enabled = enabled +
78552- atomic64_read(&event->child_total_time_enabled);
78553+ atomic64_read_unchecked(&event->child_total_time_enabled);
78554
78555 userpg->time_running = running +
78556- atomic64_read(&event->child_total_time_running);
78557+ atomic64_read_unchecked(&event->child_total_time_running);
78558
78559 arch_perf_update_userpage(userpg, now);
78560
78561@@ -4034,7 +4041,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
78562
78563 /* Data. */
78564 sp = perf_user_stack_pointer(regs);
78565- rem = __output_copy_user(handle, (void *) sp, dump_size);
78566+ rem = __output_copy_user(handle, (void __user *) sp, dump_size);
78567 dyn_size = dump_size - rem;
78568
78569 perf_output_skip(handle, rem);
78570@@ -4122,11 +4129,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
78571 values[n++] = perf_event_count(event);
78572 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
78573 values[n++] = enabled +
78574- atomic64_read(&event->child_total_time_enabled);
78575+ atomic64_read_unchecked(&event->child_total_time_enabled);
78576 }
78577 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
78578 values[n++] = running +
78579- atomic64_read(&event->child_total_time_running);
78580+ atomic64_read_unchecked(&event->child_total_time_running);
78581 }
78582 if (read_format & PERF_FORMAT_ID)
78583 values[n++] = primary_event_id(event);
78584@@ -4835,12 +4842,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
78585 * need to add enough zero bytes after the string to handle
78586 * the 64bit alignment we do later.
78587 */
78588- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
78589+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
78590 if (!buf) {
78591 name = strncpy(tmp, "//enomem", sizeof(tmp));
78592 goto got_name;
78593 }
78594- name = d_path(&file->f_path, buf, PATH_MAX);
78595+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
78596 if (IS_ERR(name)) {
78597 name = strncpy(tmp, "//toolong", sizeof(tmp));
78598 goto got_name;
78599@@ -6262,7 +6269,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
78600 event->parent = parent_event;
78601
78602 event->ns = get_pid_ns(task_active_pid_ns(current));
78603- event->id = atomic64_inc_return(&perf_event_id);
78604+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
78605
78606 event->state = PERF_EVENT_STATE_INACTIVE;
78607
78608@@ -6572,6 +6579,11 @@ SYSCALL_DEFINE5(perf_event_open,
78609 if (flags & ~PERF_FLAG_ALL)
78610 return -EINVAL;
78611
78612+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
78613+ if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
78614+ return -EACCES;
78615+#endif
78616+
78617 err = perf_copy_attr(attr_uptr, &attr);
78618 if (err)
78619 return err;
78620@@ -6904,10 +6916,10 @@ static void sync_child_event(struct perf_event *child_event,
78621 /*
78622 * Add back the child's count to the parent's count:
78623 */
78624- atomic64_add(child_val, &parent_event->child_count);
78625- atomic64_add(child_event->total_time_enabled,
78626+ atomic64_add_unchecked(child_val, &parent_event->child_count);
78627+ atomic64_add_unchecked(child_event->total_time_enabled,
78628 &parent_event->child_total_time_enabled);
78629- atomic64_add(child_event->total_time_running,
78630+ atomic64_add_unchecked(child_event->total_time_running,
78631 &parent_event->child_total_time_running);
78632
78633 /*
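
The perf hunks above add a paranoid level 3 (renaming the variable to sysctl_perf_event_legitimately_concerned, while the /proc path is unchanged in this series) and gate perf_event_open() on CAP_SYS_ADMIN when PERF_HARDEN selects that level. A quick userspace probe of the effective setting:

#include <stdio.h>

int main(void)
{
    FILE *f = fopen("/proc/sys/kernel/perf_event_paranoid", "r");
    int level;

    if (!f) {
        perror("perf_event_paranoid");
        return 1;
    }
    if (fscanf(f, "%d", &level) != 1)
        level = -1;
    fclose(f);

    printf("level %d: unprivileged perf_event_open() %s\n", level,
           level >= 3 ? "fails outright with EACCES"
                      : "is partially allowed");
    return 0;
}
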
78634diff --git a/kernel/events/internal.h b/kernel/events/internal.h
78635index ca65997..cc8cee4 100644
78636--- a/kernel/events/internal.h
78637+++ b/kernel/events/internal.h
78638@@ -81,10 +81,10 @@ static inline unsigned long perf_data_size(struct ring_buffer *rb)
78639 return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
78640 }
78641
78642-#define DEFINE_OUTPUT_COPY(func_name, memcpy_func) \
78643+#define DEFINE_OUTPUT_COPY(func_name, memcpy_func, user) \
78644 static inline unsigned int \
78645 func_name(struct perf_output_handle *handle, \
78646- const void *buf, unsigned int len) \
78647+ const void user *buf, unsigned int len) \
78648 { \
78649 unsigned long size, written; \
78650 \
78651@@ -116,17 +116,17 @@ static inline int memcpy_common(void *dst, const void *src, size_t n)
78652 return n;
78653 }
78654
78655-DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
78656+DEFINE_OUTPUT_COPY(__output_copy, memcpy_common, )
78657
78658 #define MEMCPY_SKIP(dst, src, n) (n)
78659
78660-DEFINE_OUTPUT_COPY(__output_skip, MEMCPY_SKIP)
78661+DEFINE_OUTPUT_COPY(__output_skip, MEMCPY_SKIP, )
78662
78663 #ifndef arch_perf_out_copy_user
78664 #define arch_perf_out_copy_user __copy_from_user_inatomic
78665 #endif
78666
78667-DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
78668+DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user, __user)
78669
78670 /* Callchain handling */
78671 extern struct perf_callchain_entry *
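
DEFINE_OUTPUT_COPY above gains a third parameter so __output_copy_user is stamped out with a __user-qualified source pointer while the kernel-side variants pass an empty token; only sparse ever sees the difference. The token-injection trick in miniature, runnable with __user reduced to a no-op macro:

#include <stdio.h>
#include <string.h>

#define __user /* expands to nothing outside sparse */

/* Same shape as the patched macro: `qual` lands in the parameter
 * list, and may legally be empty. */
#define DEFINE_COPY(name, qual)                                  \
static size_t name(char *dst, const qual char *src, size_t n)    \
{                                                                \
    memcpy(dst, (const char *)src, n);                           \
    return n;                                                    \
}

DEFINE_COPY(copy_plain, )       /* kernel-to-kernel flavour */
DEFINE_COPY(copy_user, __user)  /* user-tagged source flavour */

int main(void)
{
    char buf[8];

    copy_user(buf, "hi", 3);
    copy_plain(buf + 2, "!", 2);
    printf("%s\n", buf);        /* "hi!" */
    return 0;
}
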
78672diff --git a/kernel/exit.c b/kernel/exit.c
78673index 7bb73f9..d7978ed 100644
78674--- a/kernel/exit.c
78675+++ b/kernel/exit.c
78676@@ -172,6 +172,10 @@ void release_task(struct task_struct * p)
78677 struct task_struct *leader;
78678 int zap_leader;
78679 repeat:
78680+#ifdef CONFIG_NET
78681+ gr_del_task_from_ip_table(p);
78682+#endif
78683+
78684 /* don't need to get the RCU readlock here - the process is dead and
78685 * can't be modifying its own credentials. But shut RCU-lockdep up */
78686 rcu_read_lock();
78687@@ -340,7 +344,7 @@ int allow_signal(int sig)
78688 * know it'll be handled, so that they don't get converted to
78689 * SIGKILL or just silently dropped.
78690 */
78691- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
78692+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
78693 recalc_sigpending();
78694 spin_unlock_irq(&current->sighand->siglock);
78695 return 0;
78696@@ -709,6 +713,8 @@ void do_exit(long code)
78697 struct task_struct *tsk = current;
78698 int group_dead;
78699
78700+ set_fs(USER_DS);
78701+
78702 profile_task_exit(tsk);
78703
78704 WARN_ON(blk_needs_flush_plug(tsk));
78705@@ -725,7 +731,6 @@ void do_exit(long code)
78706 * mm_release()->clear_child_tid() from writing to a user-controlled
78707 * kernel address.
78708 */
78709- set_fs(USER_DS);
78710
78711 ptrace_event(PTRACE_EVENT_EXIT, code);
78712
78713@@ -784,6 +789,9 @@ void do_exit(long code)
78714 tsk->exit_code = code;
78715 taskstats_exit(tsk, group_dead);
78716
78717+ gr_acl_handle_psacct(tsk, code);
78718+ gr_acl_handle_exit();
78719+
78720 exit_mm(tsk);
78721
78722 if (group_dead)
78723@@ -905,7 +913,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
78724 * Take down every thread in the group. This is called by fatal signals
78725 * as well as by sys_exit_group (below).
78726 */
78727-void
78728+__noreturn void
78729 do_group_exit(int exit_code)
78730 {
78731 struct signal_struct *sig = current->signal;
78732diff --git a/kernel/fork.c b/kernel/fork.c
78733index 987b28a..11ee8a5 100644
78734--- a/kernel/fork.c
78735+++ b/kernel/fork.c
78736@@ -319,7 +319,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
78737 *stackend = STACK_END_MAGIC; /* for overflow detection */
78738
78739 #ifdef CONFIG_CC_STACKPROTECTOR
78740- tsk->stack_canary = get_random_int();
78741+ tsk->stack_canary = pax_get_random_long();
78742 #endif
78743
78744 /*
78745@@ -345,13 +345,81 @@ free_tsk:
78746 }
78747
78748 #ifdef CONFIG_MMU
78749+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
78750+{
78751+ struct vm_area_struct *tmp;
78752+ unsigned long charge;
78753+ struct mempolicy *pol;
78754+ struct file *file;
78755+
78756+ charge = 0;
78757+ if (mpnt->vm_flags & VM_ACCOUNT) {
78758+ unsigned long len = vma_pages(mpnt);
78759+
78760+ if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
78761+ goto fail_nomem;
78762+ charge = len;
78763+ }
78764+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
78765+ if (!tmp)
78766+ goto fail_nomem;
78767+ *tmp = *mpnt;
78768+ tmp->vm_mm = mm;
78769+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
78770+ pol = mpol_dup(vma_policy(mpnt));
78771+ if (IS_ERR(pol))
78772+ goto fail_nomem_policy;
78773+ vma_set_policy(tmp, pol);
78774+ if (anon_vma_fork(tmp, mpnt))
78775+ goto fail_nomem_anon_vma_fork;
78776+ tmp->vm_flags &= ~VM_LOCKED;
78777+ tmp->vm_next = tmp->vm_prev = NULL;
78778+ tmp->vm_mirror = NULL;
78779+ file = tmp->vm_file;
78780+ if (file) {
78781+ struct inode *inode = file_inode(file);
78782+ struct address_space *mapping = file->f_mapping;
78783+
78784+ get_file(file);
78785+ if (tmp->vm_flags & VM_DENYWRITE)
78786+ atomic_dec(&inode->i_writecount);
78787+ mutex_lock(&mapping->i_mmap_mutex);
78788+ if (tmp->vm_flags & VM_SHARED)
78789+ mapping->i_mmap_writable++;
78790+ flush_dcache_mmap_lock(mapping);
78791+ /* insert tmp into the share list, just after mpnt */
78792+ if (unlikely(tmp->vm_flags & VM_NONLINEAR))
78793+ vma_nonlinear_insert(tmp, &mapping->i_mmap_nonlinear);
78794+ else
78795+ vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap);
78796+ flush_dcache_mmap_unlock(mapping);
78797+ mutex_unlock(&mapping->i_mmap_mutex);
78798+ }
78799+
78800+ /*
78801+ * Clear hugetlb-related page reserves for children. This only
78802+ * affects MAP_PRIVATE mappings. Faults generated by the child
78803+ * are not guaranteed to succeed, even if read-only
78804+ */
78805+ if (is_vm_hugetlb_page(tmp))
78806+ reset_vma_resv_huge_pages(tmp);
78807+
78808+ return tmp;
78809+
78810+fail_nomem_anon_vma_fork:
78811+ mpol_put(pol);
78812+fail_nomem_policy:
78813+ kmem_cache_free(vm_area_cachep, tmp);
78814+fail_nomem:
78815+ vm_unacct_memory(charge);
78816+ return NULL;
78817+}
78818+
78819 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
78820 {
78821 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
78822 struct rb_node **rb_link, *rb_parent;
78823 int retval;
78824- unsigned long charge;
78825- struct mempolicy *pol;
78826
78827 uprobe_start_dup_mmap();
78828 down_write(&oldmm->mmap_sem);
78829@@ -365,8 +433,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
78830 mm->locked_vm = 0;
78831 mm->mmap = NULL;
78832 mm->mmap_cache = NULL;
78833- mm->free_area_cache = oldmm->mmap_base;
78834- mm->cached_hole_size = ~0UL;
78835+ mm->free_area_cache = oldmm->free_area_cache;
78836+ mm->cached_hole_size = oldmm->cached_hole_size;
78837 mm->map_count = 0;
78838 cpumask_clear(mm_cpumask(mm));
78839 mm->mm_rb = RB_ROOT;
78840@@ -382,57 +450,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
78841
78842 prev = NULL;
78843 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
78844- struct file *file;
78845-
78846 if (mpnt->vm_flags & VM_DONTCOPY) {
78847 vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
78848 -vma_pages(mpnt));
78849 continue;
78850 }
78851- charge = 0;
78852- if (mpnt->vm_flags & VM_ACCOUNT) {
78853- unsigned long len = vma_pages(mpnt);
78854-
78855- if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
78856- goto fail_nomem;
78857- charge = len;
78858- }
78859- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
78860- if (!tmp)
78861- goto fail_nomem;
78862- *tmp = *mpnt;
78863- INIT_LIST_HEAD(&tmp->anon_vma_chain);
78864- pol = mpol_dup(vma_policy(mpnt));
78865- retval = PTR_ERR(pol);
78866- if (IS_ERR(pol))
78867- goto fail_nomem_policy;
78868- vma_set_policy(tmp, pol);
78869- tmp->vm_mm = mm;
78870- if (anon_vma_fork(tmp, mpnt))
78871- goto fail_nomem_anon_vma_fork;
78872- tmp->vm_flags &= ~VM_LOCKED;
78873- tmp->vm_next = tmp->vm_prev = NULL;
78874- file = tmp->vm_file;
78875- if (file) {
78876- struct inode *inode = file_inode(file);
78877- struct address_space *mapping = file->f_mapping;
78878-
78879- get_file(file);
78880- if (tmp->vm_flags & VM_DENYWRITE)
78881- atomic_dec(&inode->i_writecount);
78882- mutex_lock(&mapping->i_mmap_mutex);
78883- if (tmp->vm_flags & VM_SHARED)
78884- mapping->i_mmap_writable++;
78885- flush_dcache_mmap_lock(mapping);
78886- /* insert tmp into the share list, just after mpnt */
78887- if (unlikely(tmp->vm_flags & VM_NONLINEAR))
78888- vma_nonlinear_insert(tmp,
78889- &mapping->i_mmap_nonlinear);
78890- else
78891- vma_interval_tree_insert_after(tmp, mpnt,
78892- &mapping->i_mmap);
78893- flush_dcache_mmap_unlock(mapping);
78894- mutex_unlock(&mapping->i_mmap_mutex);
78895+ tmp = dup_vma(mm, oldmm, mpnt);
78896+ if (!tmp) {
78897+ retval = -ENOMEM;
78898+ goto out;
78899 }
78900
78901 /*
78902@@ -464,6 +490,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
78903 if (retval)
78904 goto out;
78905 }
78906+
78907+#ifdef CONFIG_PAX_SEGMEXEC
78908+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
78909+ struct vm_area_struct *mpnt_m;
78910+
78911+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
78912+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
78913+
78914+ if (!mpnt->vm_mirror)
78915+ continue;
78916+
78917+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
78918+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
78919+ mpnt->vm_mirror = mpnt_m;
78920+ } else {
78921+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
78922+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
78923+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
78924+ mpnt->vm_mirror->vm_mirror = mpnt;
78925+ }
78926+ }
78927+ BUG_ON(mpnt_m);
78928+ }
78929+#endif
78930+
78931 /* a new mm has just been created */
78932 arch_dup_mmap(oldmm, mm);
78933 retval = 0;
78934@@ -473,14 +524,6 @@ out:
78935 up_write(&oldmm->mmap_sem);
78936 uprobe_end_dup_mmap();
78937 return retval;
78938-fail_nomem_anon_vma_fork:
78939- mpol_put(pol);
78940-fail_nomem_policy:
78941- kmem_cache_free(vm_area_cachep, tmp);
78942-fail_nomem:
78943- retval = -ENOMEM;
78944- vm_unacct_memory(charge);
78945- goto out;
78946 }
78947
78948 static inline int mm_alloc_pgd(struct mm_struct *mm)
78949@@ -695,8 +738,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
78950 return ERR_PTR(err);
78951
78952 mm = get_task_mm(task);
78953- if (mm && mm != current->mm &&
78954- !ptrace_may_access(task, mode)) {
78955+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
78956+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
78957 mmput(mm);
78958 mm = ERR_PTR(-EACCES);
78959 }
78960@@ -918,13 +961,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
78961 spin_unlock(&fs->lock);
78962 return -EAGAIN;
78963 }
78964- fs->users++;
78965+ atomic_inc(&fs->users);
78966 spin_unlock(&fs->lock);
78967 return 0;
78968 }
78969 tsk->fs = copy_fs_struct(fs);
78970 if (!tsk->fs)
78971 return -ENOMEM;
78972+ /* Carry through gr_chroot_dentry and is_chrooted instead
78973+ of recomputing it here. Already copied when the task struct
78974+ is duplicated. This allows pivot_root to not be treated as
78975+ a chroot
78976+ */
78977+ //gr_set_chroot_entries(tsk, &tsk->fs->root);
78978+
78979 return 0;
78980 }
78981
78982@@ -1197,10 +1247,13 @@ static struct task_struct *copy_process(unsigned long clone_flags,
78983 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
78984 #endif
78985 retval = -EAGAIN;
78986+
78987+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
78988+
78989 if (atomic_read(&p->real_cred->user->processes) >=
78990 task_rlimit(p, RLIMIT_NPROC)) {
78991- if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
78992- p->real_cred->user != INIT_USER)
78993+ if (p->real_cred->user != INIT_USER &&
78994+ !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
78995 goto bad_fork_free;
78996 }
78997 current->flags &= ~PF_NPROC_EXCEEDED;
78998@@ -1446,6 +1499,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
78999 goto bad_fork_free_pid;
79000 }
79001
79002+ /* synchronizes with gr_set_acls()
79003+ we need to call this past the point of no return for fork()
79004+ */
79005+ gr_copy_label(p);
79006+
79007 if (clone_flags & CLONE_THREAD) {
79008 current->signal->nr_threads++;
79009 atomic_inc(&current->signal->live);
79010@@ -1529,6 +1587,8 @@ bad_fork_cleanup_count:
79011 bad_fork_free:
79012 free_task(p);
79013 fork_out:
79014+ gr_log_forkfail(retval);
79015+
79016 return ERR_PTR(retval);
79017 }
79018
79019@@ -1613,6 +1673,8 @@ long do_fork(unsigned long clone_flags,
79020 if (clone_flags & CLONE_PARENT_SETTID)
79021 put_user(nr, parent_tidptr);
79022
79023+ gr_handle_brute_check();
79024+
79025 if (clone_flags & CLONE_VFORK) {
79026 p->vfork_done = &vfork;
79027 init_completion(&vfork);
79028@@ -1723,7 +1785,7 @@ void __init proc_caches_init(void)
79029 mm_cachep = kmem_cache_create("mm_struct",
79030 sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
79031 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
79032- vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
79033+ vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC | SLAB_NO_SANITIZE);
79034 mmap_init();
79035 nsproxy_cache_init();
79036 }
79037@@ -1763,7 +1825,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
79038 return 0;
79039
79040 /* don't need lock here; in the worst case we'll do useless copy */
79041- if (fs->users == 1)
79042+ if (atomic_read(&fs->users) == 1)
79043 return 0;
79044
79045 *new_fsp = copy_fs_struct(fs);
79046@@ -1875,7 +1937,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
79047 fs = current->fs;
79048 spin_lock(&fs->lock);
79049 current->fs = new_fs;
79050- if (--fs->users)
79051+ gr_set_chroot_entries(current, &current->fs->root);
79052+ if (atomic_dec_return(&fs->users))
79053 new_fs = NULL;
79054 else
79055 new_fs = fs;
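
Two threads run through the fork.c diff above: the per-VMA copy loop is hoisted out of dup_mmap() into a dup_vma() helper so the SEGMEXEC mirror fixups have a clean seam, and fs_struct.users goes from a plain int guarded by fs->lock to an atomic_t that can be read without the lock. A minimal C11 analogue of that second conversion (the struct and helpers are illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fs_struct {
    _Atomic int users;
};

static void fs_get(struct fs_struct *fs)
{
    atomic_fetch_add_explicit(&fs->users, 1, memory_order_relaxed);
}

/* Returns true when the caller dropped the final reference, the same
 * shape as the patched `if (atomic_dec_return(&fs->users))` test. */
static bool fs_put(struct fs_struct *fs)
{
    return atomic_fetch_sub_explicit(&fs->users, 1,
                                     memory_order_acq_rel) == 1;
}

int main(void)
{
    struct fs_struct fs;

    atomic_init(&fs.users, 1);
    fs_get(&fs);
    printf("last? %d\n", fs_put(&fs)); /* 0 */
    printf("last? %d\n", fs_put(&fs)); /* 1 */
    return 0;
}
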
79056diff --git a/kernel/futex.c b/kernel/futex.c
79057index 49dacfb..5c6b450 100644
79058--- a/kernel/futex.c
79059+++ b/kernel/futex.c
79060@@ -54,6 +54,7 @@
79061 #include <linux/mount.h>
79062 #include <linux/pagemap.h>
79063 #include <linux/syscalls.h>
79064+#include <linux/ptrace.h>
79065 #include <linux/signal.h>
79066 #include <linux/export.h>
79067 #include <linux/magic.h>
79068@@ -242,6 +243,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
79069 struct page *page, *page_head;
79070 int err, ro = 0;
79071
79072+#ifdef CONFIG_PAX_SEGMEXEC
79073+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
79074+ return -EFAULT;
79075+#endif
79076+
79077 /*
79078 * The futex address must be "naturally" aligned.
79079 */
79080@@ -2733,6 +2739,7 @@ static int __init futex_init(void)
79081 {
79082 u32 curval;
79083 int i;
79084+ mm_segment_t oldfs;
79085
79086 /*
79087 * This will fail and we want it. Some arch implementations do
79088@@ -2744,8 +2751,11 @@ static int __init futex_init(void)
79089 * implementation, the non-functional ones will return
79090 * -ENOSYS.
79091 */
79092+ oldfs = get_fs();
79093+ set_fs(USER_DS);
79094 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
79095 futex_cmpxchg_enabled = 1;
79096+ set_fs(oldfs);
79097
79098 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
79099 plist_head_init(&futex_queues[i].chain);
79100diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
79101index f9f44fd..29885e4 100644
79102--- a/kernel/futex_compat.c
79103+++ b/kernel/futex_compat.c
79104@@ -32,7 +32,7 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
79105 return 0;
79106 }
79107
79108-static void __user *futex_uaddr(struct robust_list __user *entry,
79109+static void __user __intentional_overflow(-1) *futex_uaddr(struct robust_list __user *entry,
79110 compat_long_t futex_offset)
79111 {
79112 compat_uptr_t base = ptr_to_compat(entry);
79113diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
79114index 9b22d03..6295b62 100644
79115--- a/kernel/gcov/base.c
79116+++ b/kernel/gcov/base.c
79117@@ -102,11 +102,6 @@ void gcov_enable_events(void)
79118 }
79119
79120 #ifdef CONFIG_MODULES
79121-static inline int within(void *addr, void *start, unsigned long size)
79122-{
79123- return ((addr >= start) && (addr < start + size));
79124-}
79125-
79126 /* Update list and generate events when modules are unloaded. */
79127 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
79128 void *data)
79129@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
79130 prev = NULL;
79131 /* Remove entries located in module from linked list. */
79132 for (info = gcov_info_head; info; info = info->next) {
79133- if (within(info, mod->module_core, mod->core_size)) {
79134+ if (within_module_core_rw((unsigned long)info, mod)) {
79135 if (prev)
79136 prev->next = info->next;
79137 else
79138diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
79139index 2288fbd..0f3941f 100644
79140--- a/kernel/hrtimer.c
79141+++ b/kernel/hrtimer.c
79142@@ -1435,7 +1435,7 @@ void hrtimer_peek_ahead_timers(void)
79143 local_irq_restore(flags);
79144 }
79145
79146-static void run_hrtimer_softirq(struct softirq_action *h)
79147+static void run_hrtimer_softirq(void)
79148 {
79149 hrtimer_peek_ahead_timers();
79150 }
79151@@ -1770,7 +1770,7 @@ static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
79152 return NOTIFY_OK;
79153 }
79154
79155-static struct notifier_block __cpuinitdata hrtimers_nb = {
79156+static struct notifier_block hrtimers_nb = {
79157 .notifier_call = hrtimer_cpu_notify,
79158 };
79159
79160diff --git a/kernel/irq_work.c b/kernel/irq_work.c
79161index 55fcce6..0e4cf34 100644
79162--- a/kernel/irq_work.c
79163+++ b/kernel/irq_work.c
79164@@ -189,12 +189,13 @@ static int irq_work_cpu_notify(struct notifier_block *self,
79165 return NOTIFY_OK;
79166 }
79167
79168-static struct notifier_block cpu_notify;
79169+static struct notifier_block cpu_notify = {
79170+ .notifier_call = irq_work_cpu_notify,
79171+ .priority = 0,
79172+};
79173
79174 static __init int irq_work_init_cpu_notifier(void)
79175 {
79176- cpu_notify.notifier_call = irq_work_cpu_notify;
79177- cpu_notify.priority = 0;
79178 register_cpu_notifier(&cpu_notify);
79179 return 0;
79180 }
79181diff --git a/kernel/jump_label.c b/kernel/jump_label.c
79182index 60f48fa..7f3a770 100644
79183--- a/kernel/jump_label.c
79184+++ b/kernel/jump_label.c
79185@@ -13,6 +13,7 @@
79186 #include <linux/sort.h>
79187 #include <linux/err.h>
79188 #include <linux/static_key.h>
79189+#include <linux/mm.h>
79190
79191 #ifdef HAVE_JUMP_LABEL
79192
79193@@ -50,7 +51,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
79194
79195 size = (((unsigned long)stop - (unsigned long)start)
79196 / sizeof(struct jump_entry));
79197+ pax_open_kernel();
79198 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
79199+ pax_close_kernel();
79200 }
79201
79202 static void jump_label_update(struct static_key *key, int enable);
79203@@ -357,10 +360,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
79204 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
79205 struct jump_entry *iter;
79206
79207+ pax_open_kernel();
79208 for (iter = iter_start; iter < iter_stop; iter++) {
79209 if (within_module_init(iter->code, mod))
79210 iter->code = 0;
79211 }
79212+ pax_close_kernel();
79213 }
79214
79215 static int
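
Under KERNEXEC the jump-entry tables above sit in read-only kernel memory, so sorting them and zeroing stale init entries is bracketed with pax_open_kernel()/pax_close_kernel(), which briefly lift write protection (on x86, typically by toggling CR0.WP). A userspace analogue of the same open-patch-close discipline using mprotect():

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

static char page[4096] __attribute__((aligned(4096))) = "sealed data";

int main(void)
{
    /* normal state: sealed, like read-only kernel data under KERNEXEC */
    if (mprotect(page, sizeof(page), PROT_READ))
        return 1;

    /* pax_open_kernel() analogue */
    mprotect(page, sizeof(page), PROT_READ | PROT_WRITE);
    memcpy(page, "patched data", 13);
    /* pax_close_kernel() analogue */
    mprotect(page, sizeof(page), PROT_READ);

    puts(page);
    return 0;
}
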
79216diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
79217index 3127ad5..159d880 100644
79218--- a/kernel/kallsyms.c
79219+++ b/kernel/kallsyms.c
79220@@ -11,6 +11,9 @@
79221 * Changed the compression method from stem compression to "table lookup"
79222 * compression (see scripts/kallsyms.c for a more complete description)
79223 */
79224+#ifdef CONFIG_GRKERNSEC_HIDESYM
79225+#define __INCLUDED_BY_HIDESYM 1
79226+#endif
79227 #include <linux/kallsyms.h>
79228 #include <linux/module.h>
79229 #include <linux/init.h>
79230@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
79231
79232 static inline int is_kernel_inittext(unsigned long addr)
79233 {
79234+ if (system_state != SYSTEM_BOOTING)
79235+ return 0;
79236+
79237 if (addr >= (unsigned long)_sinittext
79238 && addr <= (unsigned long)_einittext)
79239 return 1;
79240 return 0;
79241 }
79242
79243+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
79244+#ifdef CONFIG_MODULES
79245+static inline int is_module_text(unsigned long addr)
79246+{
79247+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
79248+ return 1;
79249+
79250+ addr = ktla_ktva(addr);
79251+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
79252+}
79253+#else
79254+static inline int is_module_text(unsigned long addr)
79255+{
79256+ return 0;
79257+}
79258+#endif
79259+#endif
79260+
79261 static inline int is_kernel_text(unsigned long addr)
79262 {
79263 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
79264@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
79265
79266 static inline int is_kernel(unsigned long addr)
79267 {
79268+
79269+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
79270+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
79271+ return 1;
79272+
79273+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
79274+#else
79275 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
79276+#endif
79277+
79278 return 1;
79279 return in_gate_area_no_mm(addr);
79280 }
79281
79282 static int is_ksym_addr(unsigned long addr)
79283 {
79284+
79285+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
79286+ if (is_module_text(addr))
79287+ return 0;
79288+#endif
79289+
79290 if (all_var)
79291 return is_kernel(addr);
79292
79293@@ -480,7 +519,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
79294
79295 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
79296 {
79297- iter->name[0] = '\0';
79298 iter->nameoff = get_symbol_offset(new_pos);
79299 iter->pos = new_pos;
79300 }
79301@@ -528,6 +566,11 @@ static int s_show(struct seq_file *m, void *p)
79302 {
79303 struct kallsym_iter *iter = m->private;
79304
79305+#ifdef CONFIG_GRKERNSEC_HIDESYM
79306+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID))
79307+ return 0;
79308+#endif
79309+
79310 /* Some debugging symbols have no name. Ignore them. */
79311 if (!iter->name[0])
79312 return 0;
79313@@ -541,6 +584,7 @@ static int s_show(struct seq_file *m, void *p)
79314 */
79315 type = iter->exported ? toupper(iter->type) :
79316 tolower(iter->type);
79317+
79318 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
79319 type, iter->name, iter->module_name);
79320 } else
79321@@ -566,7 +610,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
79322 struct kallsym_iter *iter;
79323 int ret;
79324
79325- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
79326+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
79327 if (!iter)
79328 return -ENOMEM;
79329 reset_iter(iter, 0);
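
With GRKERNSEC_HIDESYM, the patched s_show() above returns nothing at all to non-root readers, and the iterator is now zero-allocated (kzalloc) so no stale symbol name can leak through an uninitialised buffer; mainline's softer variant is %pK plus kptr_restrict zeroing just the addresses. A quick reader that shows which regime the running kernel is in:

#include <stdio.h>

int main(void)
{
    FILE *f = fopen("/proc/kallsyms", "r");
    char line[256];
    int n = 0;

    if (!f) {
        perror("/proc/kallsyms");
        return 1;
    }
    while (n < 3 && fgets(line, sizeof(line), f)) {
        n++;
        fputs(line, stdout); /* all-zero addresses => kptr_restrict */
    }
    if (n == 0)
        puts("(no output: symbols fully hidden from this uid)");
    fclose(f);
    return 0;
}
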
79330diff --git a/kernel/kcmp.c b/kernel/kcmp.c
79331index e30ac0f..3528cac 100644
79332--- a/kernel/kcmp.c
79333+++ b/kernel/kcmp.c
79334@@ -99,6 +99,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
79335 struct task_struct *task1, *task2;
79336 int ret;
79337
79338+#ifdef CONFIG_GRKERNSEC
79339+ return -ENOSYS;
79340+#endif
79341+
79342 rcu_read_lock();
79343
79344 /*
79345diff --git a/kernel/kexec.c b/kernel/kexec.c
79346index 59f7b55..4022f65 100644
79347--- a/kernel/kexec.c
79348+++ b/kernel/kexec.c
79349@@ -1041,7 +1041,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
79350 unsigned long flags)
79351 {
79352 struct compat_kexec_segment in;
79353- struct kexec_segment out, __user *ksegments;
79354+ struct kexec_segment out;
79355+ struct kexec_segment __user *ksegments;
79356 unsigned long i, result;
79357
79358 /* Don't allow clients that don't understand the native
79359diff --git a/kernel/kmod.c b/kernel/kmod.c
79360index 8241906..d625f2c 100644
79361--- a/kernel/kmod.c
79362+++ b/kernel/kmod.c
79363@@ -75,7 +75,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
79364 kfree(info->argv);
79365 }
79366
79367-static int call_modprobe(char *module_name, int wait)
79368+static int call_modprobe(char *module_name, char *module_param, int wait)
79369 {
79370 struct subprocess_info *info;
79371 static char *envp[] = {
79372@@ -85,7 +85,7 @@ static int call_modprobe(char *module_name, int wait)
79373 NULL
79374 };
79375
79376- char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
79377+ char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
79378 if (!argv)
79379 goto out;
79380
79381@@ -97,7 +97,8 @@ static int call_modprobe(char *module_name, int wait)
79382 argv[1] = "-q";
79383 argv[2] = "--";
79384 argv[3] = module_name; /* check free_modprobe_argv() */
79385- argv[4] = NULL;
79386+ argv[4] = module_param;
79387+ argv[5] = NULL;
79388
79389 info = call_usermodehelper_setup(modprobe_path, argv, envp, GFP_KERNEL,
79390 NULL, free_modprobe_argv, NULL);
79391@@ -129,9 +130,8 @@ out:
79392 * If module auto-loading support is disabled then this function
79393 * becomes a no-operation.
79394 */
79395-int __request_module(bool wait, const char *fmt, ...)
79396+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
79397 {
79398- va_list args;
79399 char module_name[MODULE_NAME_LEN];
79400 unsigned int max_modprobes;
79401 int ret;
79402@@ -147,9 +147,7 @@ int __request_module(bool wait, const char *fmt, ...)
79403 */
79404 WARN_ON_ONCE(wait && current_is_async());
79405
79406- va_start(args, fmt);
79407- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
79408- va_end(args);
79409+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
79410 if (ret >= MODULE_NAME_LEN)
79411 return -ENAMETOOLONG;
79412
79413@@ -157,6 +155,20 @@ int __request_module(bool wait, const char *fmt, ...)
79414 if (ret)
79415 return ret;
79416
79417+#ifdef CONFIG_GRKERNSEC_MODHARDEN
79418+ if (uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
79419+ /* hack to workaround consolekit/udisks stupidity */
79420+ read_lock(&tasklist_lock);
79421+ if (!strcmp(current->comm, "mount") &&
79422+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
79423+ read_unlock(&tasklist_lock);
79424+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
79425+ return -EPERM;
79426+ }
79427+ read_unlock(&tasklist_lock);
79428+ }
79429+#endif
79430+
79431 /* If modprobe needs a service that is in a module, we get a recursive
79432 * loop. Limit the number of running kmod threads to max_threads/2 or
79433 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
79434@@ -185,11 +197,52 @@ int __request_module(bool wait, const char *fmt, ...)
79435
79436 trace_module_request(module_name, wait, _RET_IP_);
79437
79438- ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
79439+ ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
79440
79441 atomic_dec(&kmod_concurrent);
79442 return ret;
79443 }
79444+
79445+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
79446+{
79447+ va_list args;
79448+ int ret;
79449+
79450+ va_start(args, fmt);
79451+ ret = ____request_module(wait, module_param, fmt, args);
79452+ va_end(args);
79453+
79454+ return ret;
79455+}
79456+
79457+int __request_module(bool wait, const char *fmt, ...)
79458+{
79459+ va_list args;
79460+ int ret;
79461+
79462+#ifdef CONFIG_GRKERNSEC_MODHARDEN
79463+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
79464+ char module_param[MODULE_NAME_LEN];
79465+
79466+ memset(module_param, 0, sizeof(module_param));
79467+
79468+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", GR_GLOBAL_UID(current_uid()));
79469+
79470+ va_start(args, fmt);
79471+ ret = ____request_module(wait, module_param, fmt, args);
79472+ va_end(args);
79473+
79474+ return ret;
79475+ }
79476+#endif
79477+
79478+ va_start(args, fmt);
79479+ ret = ____request_module(wait, NULL, fmt, args);
79480+ va_end(args);
79481+
79482+ return ret;
79483+}
79484+
79485 EXPORT_SYMBOL(__request_module);
79486 #endif /* CONFIG_MODULES */
79487
79488@@ -300,7 +353,7 @@ static int wait_for_helper(void *data)
79489 *
79490 * Thus the __user pointer cast is valid here.
79491 */
79492- sys_wait4(pid, (int __user *)&ret, 0, NULL);
79493+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
79494
79495 /*
79496 * If ret is 0, either ____call_usermodehelper failed and the
79497@@ -651,7 +704,7 @@ EXPORT_SYMBOL(call_usermodehelper);
79498 static int proc_cap_handler(struct ctl_table *table, int write,
79499 void __user *buffer, size_t *lenp, loff_t *ppos)
79500 {
79501- struct ctl_table t;
79502+ ctl_table_no_const t;
79503 unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
79504 kernel_cap_t new_cap;
79505 int err, i;
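
The kmod.c rework above splits __request_module() into a va_list core (____request_module) so a second entry point can append one extra argv entry to modprobe: under MODHARDEN, auto-load requests from non-root carry a grsec_modharden_normal<uid>_ marker for a patched modprobe to act on, and the mount-under-udisks special case is refused outright. The vararg-forwarding refactor in miniature (names illustrative, the uid hardcoded for the demo):

#include <stdarg.h>
#include <stdio.h>

/* Core takes the preformatted extra argument plus a va_list, like the
 * patched ____request_module(wait, module_param, fmt, ap). */
static int request_core(const char *extra, const char *fmt, va_list ap)
{
    char name[64];

    vsnprintf(name, sizeof(name), fmt, ap);
    return printf("modprobe -q -- %s %s\n", name, extra ? extra : "");
}

static int request(const char *fmt, ...)
{
    char tag[32];
    va_list ap;
    int ret;

    /* pretend the caller is an unprivileged uid: tag the request */
    snprintf(tag, sizeof(tag), "grsec_modharden_normal%u_", 1000u);

    va_start(ap, fmt);
    ret = request_core(tag, fmt, ap);
    va_end(ap);
    return ret;
}

int main(void)
{
    request("net-pf-%d", 10);
    return 0;
}
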
79506diff --git a/kernel/kprobes.c b/kernel/kprobes.c
79507index bddf3b2..233bf40 100644
79508--- a/kernel/kprobes.c
79509+++ b/kernel/kprobes.c
79510@@ -31,6 +31,9 @@
79511 * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
79512 * <prasanna@in.ibm.com> added function-return probes.
79513 */
79514+#ifdef CONFIG_GRKERNSEC_HIDESYM
79515+#define __INCLUDED_BY_HIDESYM 1
79516+#endif
79517 #include <linux/kprobes.h>
79518 #include <linux/hash.h>
79519 #include <linux/init.h>
79520@@ -185,7 +188,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
79521 * kernel image and loaded module images reside. This is required
79522 * so x86_64 can correctly handle the %rip-relative fixups.
79523 */
79524- kip->insns = module_alloc(PAGE_SIZE);
79525+ kip->insns = module_alloc_exec(PAGE_SIZE);
79526 if (!kip->insns) {
79527 kfree(kip);
79528 return NULL;
79529@@ -225,7 +228,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
79530 */
79531 if (!list_is_singular(&kip->list)) {
79532 list_del(&kip->list);
79533- module_free(NULL, kip->insns);
79534+ module_free_exec(NULL, kip->insns);
79535 kfree(kip);
79536 }
79537 return 1;
79538@@ -2083,7 +2086,7 @@ static int __init init_kprobes(void)
79539 {
79540 int i, err = 0;
79541 unsigned long offset = 0, size = 0;
79542- char *modname, namebuf[128];
79543+ char *modname, namebuf[KSYM_NAME_LEN];
79544 const char *symbol_name;
79545 void *addr;
79546 struct kprobe_blackpoint *kb;
79547@@ -2168,11 +2171,11 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
79548 kprobe_type = "k";
79549
79550 if (sym)
79551- seq_printf(pi, "%p %s %s+0x%x %s ",
79552+ seq_printf(pi, "%pK %s %s+0x%x %s ",
79553 p->addr, kprobe_type, sym, offset,
79554 (modname ? modname : " "));
79555 else
79556- seq_printf(pi, "%p %s %p ",
79557+ seq_printf(pi, "%pK %s %pK ",
79558 p->addr, kprobe_type, p->addr);
79559
79560 if (!pp)
79561@@ -2209,7 +2212,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
79562 const char *sym = NULL;
79563 unsigned int i = *(loff_t *) v;
79564 unsigned long offset = 0;
79565- char *modname, namebuf[128];
79566+ char *modname, namebuf[KSYM_NAME_LEN];
79567
79568 head = &kprobe_table[i];
79569 preempt_disable();
79570diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
79571index 6ada93c..dce7d5d 100644
79572--- a/kernel/ksysfs.c
79573+++ b/kernel/ksysfs.c
79574@@ -46,6 +46,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
79575 {
79576 if (count+1 > UEVENT_HELPER_PATH_LEN)
79577 return -ENOENT;
79578+ if (!capable(CAP_SYS_ADMIN))
79579+ return -EPERM;
79580 memcpy(uevent_helper, buf, count);
79581 uevent_helper[count] = '\0';
79582 if (count && uevent_helper[count-1] == '\n')
79583@@ -172,7 +174,7 @@ static ssize_t notes_read(struct file *filp, struct kobject *kobj,
79584 return count;
79585 }
79586
79587-static struct bin_attribute notes_attr = {
79588+static bin_attribute_no_const notes_attr __read_only = {
79589 .attr = {
79590 .name = "notes",
79591 .mode = S_IRUGO,
79592diff --git a/kernel/lockdep.c b/kernel/lockdep.c
79593index 1f3186b..bb7dbc6 100644
79594--- a/kernel/lockdep.c
79595+++ b/kernel/lockdep.c
79596@@ -596,6 +596,10 @@ static int static_obj(void *obj)
79597 end = (unsigned long) &_end,
79598 addr = (unsigned long) obj;
79599
79600+#ifdef CONFIG_PAX_KERNEXEC
79601+ start = ktla_ktva(start);
79602+#endif
79603+
79604 /*
79605 * static variable?
79606 */
79607@@ -736,6 +740,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
79608 if (!static_obj(lock->key)) {
79609 debug_locks_off();
79610 printk("INFO: trying to register non-static key.\n");
79611+ printk("lock:%pS key:%pS.\n", lock, lock->key);
79612 printk("the code is fine but needs lockdep annotation.\n");
79613 printk("turning off the locking correctness validator.\n");
79614 dump_stack();
79615@@ -3080,7 +3085,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
79616 if (!class)
79617 return 0;
79618 }
79619- atomic_inc((atomic_t *)&class->ops);
79620+ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
79621 if (very_verbose(class)) {
79622 printk("\nacquire class [%p] %s", class->key, class->name);
79623 if (class->name_version > 1)
79624diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
79625index b2c71c5..7b88d63 100644
79626--- a/kernel/lockdep_proc.c
79627+++ b/kernel/lockdep_proc.c
79628@@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, void *v)
79629 return 0;
79630 }
79631
79632- seq_printf(m, "%p", class->key);
79633+ seq_printf(m, "%pK", class->key);
79634 #ifdef CONFIG_DEBUG_LOCKDEP
79635 seq_printf(m, " OPS:%8ld", class->ops);
79636 #endif
79637@@ -83,7 +83,7 @@ static int l_show(struct seq_file *m, void *v)
79638
79639 list_for_each_entry(entry, &class->locks_after, entry) {
79640 if (entry->distance == 1) {
79641- seq_printf(m, " -> [%p] ", entry->class->key);
79642+ seq_printf(m, " -> [%pK] ", entry->class->key);
79643 print_name(m, entry->class);
79644 seq_puts(m, "\n");
79645 }
79646@@ -152,7 +152,7 @@ static int lc_show(struct seq_file *m, void *v)
79647 if (!class->key)
79648 continue;
79649
79650- seq_printf(m, "[%p] ", class->key);
79651+ seq_printf(m, "[%pK] ", class->key);
79652 print_name(m, class);
79653 seq_puts(m, "\n");
79654 }
79655@@ -495,7 +495,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
79656 if (!i)
79657 seq_line(m, '-', 40-namelen, namelen);
79658
79659- snprintf(ip, sizeof(ip), "[<%p>]",
79660+ snprintf(ip, sizeof(ip), "[<%pK>]",
79661 (void *)class->contention_point[i]);
79662 seq_printf(m, "%40s %14lu %29s %pS\n",
79663 name, stats->contention_point[i],
79664@@ -510,7 +510,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
79665 if (!i)
79666 seq_line(m, '-', 40-namelen, namelen);
79667
79668- snprintf(ip, sizeof(ip), "[<%p>]",
79669+ snprintf(ip, sizeof(ip), "[<%pK>]",
79670 (void *)class->contending_point[i]);
79671 seq_printf(m, "%40s %14lu %29s %pS\n",
79672 name, stats->contending_point[i],
79673diff --git a/kernel/module.c b/kernel/module.c
79674index fa53db8..6f17200 100644
79675--- a/kernel/module.c
79676+++ b/kernel/module.c
79677@@ -61,6 +61,7 @@
79678 #include <linux/pfn.h>
79679 #include <linux/bsearch.h>
79680 #include <linux/fips.h>
79681+#include <linux/grsecurity.h>
79682 #include <uapi/linux/module.h>
79683 #include "module-internal.h"
79684
79685@@ -156,7 +157,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
79686
79687 /* Bounds of module allocation, for speeding __module_address.
79688 * Protected by module_mutex. */
79689-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
79690+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
79691+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
79692
79693 int register_module_notifier(struct notifier_block * nb)
79694 {
79695@@ -323,7 +325,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
79696 return true;
79697
79698 list_for_each_entry_rcu(mod, &modules, list) {
79699- struct symsearch arr[] = {
79700+ struct symsearch modarr[] = {
79701 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
79702 NOT_GPL_ONLY, false },
79703 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
79704@@ -348,7 +350,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
79705 if (mod->state == MODULE_STATE_UNFORMED)
79706 continue;
79707
79708- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
79709+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
79710 return true;
79711 }
79712 return false;
79713@@ -485,7 +487,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
79714 static int percpu_modalloc(struct module *mod,
79715 unsigned long size, unsigned long align)
79716 {
79717- if (align > PAGE_SIZE) {
79718+ if (align-1 >= PAGE_SIZE) {
79719 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
79720 mod->name, align, PAGE_SIZE);
79721 align = PAGE_SIZE;
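
The percpu_modalloc hunk replaces `align > PAGE_SIZE` with `align - 1 >= PAGE_SIZE`. Since align is unsigned, align == 0 makes align - 1 wrap to ULONG_MAX, so the one comparison now catches both over-large and zero alignments and clamps them to PAGE_SIZE; previously an alignment of 0 slipped past the warning and reached the allocator unchanged. A small demonstration of the unsigned-wrap idiom:

#include <stdio.h>

#define PAGE_SIZE 4096UL

static int warns_old(unsigned long align) { return align > PAGE_SIZE; }
static int warns_new(unsigned long align) { return align - 1 >= PAGE_SIZE; }

int main(void)
{
	unsigned long cases[] = { 0, 1, PAGE_SIZE, PAGE_SIZE + 1 };

	for (unsigned int i = 0; i < sizeof(cases) / sizeof(cases[0]); i++)
		printf("align=%5lu old-warns=%d new-warns=%d\n",
		       cases[i], warns_old(cases[i]), warns_new(cases[i]));
	/* align=0: old=0 (silently accepted), new=1 (clamped to PAGE_SIZE) */
	return 0;
}
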
79722@@ -1089,7 +1091,7 @@ struct module_attribute module_uevent =
79723 static ssize_t show_coresize(struct module_attribute *mattr,
79724 struct module_kobject *mk, char *buffer)
79725 {
79726- return sprintf(buffer, "%u\n", mk->mod->core_size);
79727+ return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
79728 }
79729
79730 static struct module_attribute modinfo_coresize =
79731@@ -1098,7 +1100,7 @@ static struct module_attribute modinfo_coresize =
79732 static ssize_t show_initsize(struct module_attribute *mattr,
79733 struct module_kobject *mk, char *buffer)
79734 {
79735- return sprintf(buffer, "%u\n", mk->mod->init_size);
79736+ return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
79737 }
79738
79739 static struct module_attribute modinfo_initsize =
79740@@ -1313,7 +1315,7 @@ resolve_symbol_wait(struct module *mod,
79741 */
79742 #ifdef CONFIG_SYSFS
79743
79744-#ifdef CONFIG_KALLSYMS
79745+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
79746 static inline bool sect_empty(const Elf_Shdr *sect)
79747 {
79748 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
79749@@ -1453,7 +1455,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
79750 {
79751 unsigned int notes, loaded, i;
79752 struct module_notes_attrs *notes_attrs;
79753- struct bin_attribute *nattr;
79754+ bin_attribute_no_const *nattr;
79755
79756 /* failed to create section attributes, so can't create notes */
79757 if (!mod->sect_attrs)
79758@@ -1565,7 +1567,7 @@ static void del_usage_links(struct module *mod)
79759 static int module_add_modinfo_attrs(struct module *mod)
79760 {
79761 struct module_attribute *attr;
79762- struct module_attribute *temp_attr;
79763+ module_attribute_no_const *temp_attr;
79764 int error = 0;
79765 int i;
79766
79767@@ -1779,21 +1781,21 @@ static void set_section_ro_nx(void *base,
79768
79769 static void unset_module_core_ro_nx(struct module *mod)
79770 {
79771- set_page_attributes(mod->module_core + mod->core_text_size,
79772- mod->module_core + mod->core_size,
79773+ set_page_attributes(mod->module_core_rw,
79774+ mod->module_core_rw + mod->core_size_rw,
79775 set_memory_x);
79776- set_page_attributes(mod->module_core,
79777- mod->module_core + mod->core_ro_size,
79778+ set_page_attributes(mod->module_core_rx,
79779+ mod->module_core_rx + mod->core_size_rx,
79780 set_memory_rw);
79781 }
79782
79783 static void unset_module_init_ro_nx(struct module *mod)
79784 {
79785- set_page_attributes(mod->module_init + mod->init_text_size,
79786- mod->module_init + mod->init_size,
79787+ set_page_attributes(mod->module_init_rw,
79788+ mod->module_init_rw + mod->init_size_rw,
79789 set_memory_x);
79790- set_page_attributes(mod->module_init,
79791- mod->module_init + mod->init_ro_size,
79792+ set_page_attributes(mod->module_init_rx,
79793+ mod->module_init_rx + mod->init_size_rx,
79794 set_memory_rw);
79795 }
79796
79797@@ -1806,14 +1808,14 @@ void set_all_modules_text_rw(void)
79798 list_for_each_entry_rcu(mod, &modules, list) {
79799 if (mod->state == MODULE_STATE_UNFORMED)
79800 continue;
79801- if ((mod->module_core) && (mod->core_text_size)) {
79802- set_page_attributes(mod->module_core,
79803- mod->module_core + mod->core_text_size,
79804+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
79805+ set_page_attributes(mod->module_core_rx,
79806+ mod->module_core_rx + mod->core_size_rx,
79807 set_memory_rw);
79808 }
79809- if ((mod->module_init) && (mod->init_text_size)) {
79810- set_page_attributes(mod->module_init,
79811- mod->module_init + mod->init_text_size,
79812+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
79813+ set_page_attributes(mod->module_init_rx,
79814+ mod->module_init_rx + mod->init_size_rx,
79815 set_memory_rw);
79816 }
79817 }
79818@@ -1829,14 +1831,14 @@ void set_all_modules_text_ro(void)
79819 list_for_each_entry_rcu(mod, &modules, list) {
79820 if (mod->state == MODULE_STATE_UNFORMED)
79821 continue;
79822- if ((mod->module_core) && (mod->core_text_size)) {
79823- set_page_attributes(mod->module_core,
79824- mod->module_core + mod->core_text_size,
79825+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
79826+ set_page_attributes(mod->module_core_rx,
79827+ mod->module_core_rx + mod->core_size_rx,
79828 set_memory_ro);
79829 }
79830- if ((mod->module_init) && (mod->init_text_size)) {
79831- set_page_attributes(mod->module_init,
79832- mod->module_init + mod->init_text_size,
79833+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
79834+ set_page_attributes(mod->module_init_rx,
79835+ mod->module_init_rx + mod->init_size_rx,
79836 set_memory_ro);
79837 }
79838 }
79839@@ -1887,16 +1889,19 @@ static void free_module(struct module *mod)
79840
79841 /* This may be NULL, but that's OK */
79842 unset_module_init_ro_nx(mod);
79843- module_free(mod, mod->module_init);
79844+ module_free(mod, mod->module_init_rw);
79845+ module_free_exec(mod, mod->module_init_rx);
79846 kfree(mod->args);
79847 percpu_modfree(mod);
79848
79849 /* Free lock-classes: */
79850- lockdep_free_key_range(mod->module_core, mod->core_size);
79851+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
79852+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
79853
79854 /* Finally, free the core (containing the module structure) */
79855 unset_module_core_ro_nx(mod);
79856- module_free(mod, mod->module_core);
79857+ module_free_exec(mod, mod->module_core_rx);
79858+ module_free(mod, mod->module_core_rw);
79859
79860 #ifdef CONFIG_MPU
79861 update_protections(current->mm);
79862@@ -1966,9 +1971,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
79863 int ret = 0;
79864 const struct kernel_symbol *ksym;
79865
79866+#ifdef CONFIG_GRKERNSEC_MODHARDEN
79867+ int is_fs_load = 0;
79868+ int register_filesystem_found = 0;
79869+ char *p;
79870+
79871+ p = strstr(mod->args, "grsec_modharden_fs");
79872+ if (p) {
79873+ char *endptr = p + sizeof("grsec_modharden_fs") - 1;
79874+ /* copy \0 as well */
79875+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
79876+ is_fs_load = 1;
79877+ }
79878+#endif
79879+
79880 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
79881 const char *name = info->strtab + sym[i].st_name;
79882
79883+#ifdef CONFIG_GRKERNSEC_MODHARDEN
79884+ /* it's a real shame this will never get ripped and copied
79885+ upstream! ;(
79886+ */
79887+ if (is_fs_load && !strcmp(name, "register_filesystem"))
79888+ register_filesystem_found = 1;
79889+#endif
79890+
79891 switch (sym[i].st_shndx) {
79892 case SHN_COMMON:
79893 /* We compiled with -fno-common. These are not
79894@@ -1989,7 +2016,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
79895 ksym = resolve_symbol_wait(mod, info, name);
79896 /* Ok if resolved. */
79897 if (ksym && !IS_ERR(ksym)) {
79898+ pax_open_kernel();
79899 sym[i].st_value = ksym->value;
79900+ pax_close_kernel();
79901 break;
79902 }
79903
79904@@ -2008,11 +2037,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
79905 secbase = (unsigned long)mod_percpu(mod);
79906 else
79907 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
79908+ pax_open_kernel();
79909 sym[i].st_value += secbase;
79910+ pax_close_kernel();
79911 break;
79912 }
79913 }
79914
79915+#ifdef CONFIG_GRKERNSEC_MODHARDEN
79916+ if (is_fs_load && !register_filesystem_found) {
79917+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
79918+ ret = -EPERM;
79919+ }
79920+#endif
79921+
79922 return ret;
79923 }
79924
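
The MODHARDEN fragments above cooperate: when modprobe was invoked on behalf of mount(), grsecurity appends a synthetic "grsec_modharden_fs" token to the module arguments; simplify_symbols() strips the token, then walks the symbol table and fails the load with -EPERM unless the module references register_filesystem, i.e. unless it plausibly is a filesystem driver. A condensed userspace sketch of that gate (the symbol walk is a stand-in for the real ELF iteration):

#include <stdio.h>
#include <string.h>

static int references_register_filesystem(const char *const *syms, int n)
{
	for (int i = 0; i < n; i++)
		if (strcmp(syms[i], "register_filesystem") == 0)
			return 1;
	return 0;
}

/* Returns 0 if the load may proceed, -1 (kernel: -EPERM) otherwise. */
static int modharden_fs_check(const char *args, const char *const *syms, int n)
{
	if (!strstr(args, "grsec_modharden_fs"))
		return 0;		/* not a mount-triggered load */
	return references_register_filesystem(syms, n) ? 0 : -1;
}

int main(void)
{
	const char *fs_syms[]  = { "register_filesystem", "kfree" };
	const char *net_syms[] = { "netif_rx", "kfree" };

	printf("fs module:  %d\n", modharden_fs_check("grsec_modharden_fs", fs_syms, 2));
	printf("net module: %d\n", modharden_fs_check("grsec_modharden_fs", net_syms, 2));
	return 0;
}
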
79925@@ -2096,22 +2134,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
79926 || s->sh_entsize != ~0UL
79927 || strstarts(sname, ".init"))
79928 continue;
79929- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
79930+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
79931+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
79932+ else
79933+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
79934 pr_debug("\t%s\n", sname);
79935 }
79936- switch (m) {
79937- case 0: /* executable */
79938- mod->core_size = debug_align(mod->core_size);
79939- mod->core_text_size = mod->core_size;
79940- break;
79941- case 1: /* RO: text and ro-data */
79942- mod->core_size = debug_align(mod->core_size);
79943- mod->core_ro_size = mod->core_size;
79944- break;
79945- case 3: /* whole core */
79946- mod->core_size = debug_align(mod->core_size);
79947- break;
79948- }
79949 }
79950
79951 pr_debug("Init section allocation order:\n");
79952@@ -2125,23 +2153,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
79953 || s->sh_entsize != ~0UL
79954 || !strstarts(sname, ".init"))
79955 continue;
79956- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
79957- | INIT_OFFSET_MASK);
79958+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
79959+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
79960+ else
79961+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
79962+ s->sh_entsize |= INIT_OFFSET_MASK;
79963 pr_debug("\t%s\n", sname);
79964 }
79965- switch (m) {
79966- case 0: /* executable */
79967- mod->init_size = debug_align(mod->init_size);
79968- mod->init_text_size = mod->init_size;
79969- break;
79970- case 1: /* RO: text and ro-data */
79971- mod->init_size = debug_align(mod->init_size);
79972- mod->init_ro_size = mod->init_size;
79973- break;
79974- case 3: /* whole init */
79975- mod->init_size = debug_align(mod->init_size);
79976- break;
79977- }
79978 }
79979 }
79980
79981@@ -2314,7 +2332,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
79982
79983 /* Put symbol section at end of init part of module. */
79984 symsect->sh_flags |= SHF_ALLOC;
79985- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
79986+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
79987 info->index.sym) | INIT_OFFSET_MASK;
79988 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
79989
79990@@ -2331,13 +2349,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
79991 }
79992
79993 /* Append room for core symbols at end of core part. */
79994- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
79995- info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
79996- mod->core_size += strtab_size;
79997+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
79998+ info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
79999+ mod->core_size_rx += strtab_size;
80000
80001 /* Put string table section at end of init part of module. */
80002 strsect->sh_flags |= SHF_ALLOC;
80003- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
80004+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
80005 info->index.str) | INIT_OFFSET_MASK;
80006 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
80007 }
80008@@ -2355,12 +2373,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
80009 /* Make sure we get permanent strtab: don't use info->strtab. */
80010 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
80011
80012+ pax_open_kernel();
80013+
80014 /* Set types up while we still have access to sections. */
80015 for (i = 0; i < mod->num_symtab; i++)
80016 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
80017
80018- mod->core_symtab = dst = mod->module_core + info->symoffs;
80019- mod->core_strtab = s = mod->module_core + info->stroffs;
80020+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
80021+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
80022 src = mod->symtab;
80023 for (ndst = i = 0; i < mod->num_symtab; i++) {
80024 if (i == 0 ||
80025@@ -2372,6 +2392,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
80026 }
80027 }
80028 mod->core_num_syms = ndst;
80029+
80030+ pax_close_kernel();
80031 }
80032 #else
80033 static inline void layout_symtab(struct module *mod, struct load_info *info)
80034@@ -2405,17 +2427,33 @@ void * __weak module_alloc(unsigned long size)
80035 return vmalloc_exec(size);
80036 }
80037
80038-static void *module_alloc_update_bounds(unsigned long size)
80039+static void *module_alloc_update_bounds_rw(unsigned long size)
80040 {
80041 void *ret = module_alloc(size);
80042
80043 if (ret) {
80044 mutex_lock(&module_mutex);
80045 /* Update module bounds. */
80046- if ((unsigned long)ret < module_addr_min)
80047- module_addr_min = (unsigned long)ret;
80048- if ((unsigned long)ret + size > module_addr_max)
80049- module_addr_max = (unsigned long)ret + size;
80050+ if ((unsigned long)ret < module_addr_min_rw)
80051+ module_addr_min_rw = (unsigned long)ret;
80052+ if ((unsigned long)ret + size > module_addr_max_rw)
80053+ module_addr_max_rw = (unsigned long)ret + size;
80054+ mutex_unlock(&module_mutex);
80055+ }
80056+ return ret;
80057+}
80058+
80059+static void *module_alloc_update_bounds_rx(unsigned long size)
80060+{
80061+ void *ret = module_alloc_exec(size);
80062+
80063+ if (ret) {
80064+ mutex_lock(&module_mutex);
80065+ /* Update module bounds. */
80066+ if ((unsigned long)ret < module_addr_min_rx)
80067+ module_addr_min_rx = (unsigned long)ret;
80068+ if ((unsigned long)ret + size > module_addr_max_rx)
80069+ module_addr_max_rx = (unsigned long)ret + size;
80070 mutex_unlock(&module_mutex);
80071 }
80072 return ret;
80073@@ -2691,8 +2729,14 @@ static struct module *setup_load_info(struct load_info *info, int flags)
80074 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
80075 {
80076 const char *modmagic = get_modinfo(info, "vermagic");
80077+ const char *license = get_modinfo(info, "license");
80078 int err;
80079
80080+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
80081+ if (!license || !license_is_gpl_compatible(license))
80082+ return -ENOEXEC;
80083+#endif
80084+
80085 if (flags & MODULE_INIT_IGNORE_VERMAGIC)
80086 modmagic = NULL;
80087
80088@@ -2718,7 +2762,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
80089 }
80090
80091 /* Set up license info based on the info section */
80092- set_license(mod, get_modinfo(info, "license"));
80093+ set_license(mod, license);
80094
80095 return 0;
80096 }
80097@@ -2799,7 +2843,7 @@ static int move_module(struct module *mod, struct load_info *info)
80098 void *ptr;
80099
80100 /* Do the allocs. */
80101- ptr = module_alloc_update_bounds(mod->core_size);
80102+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
80103 /*
80104 * The pointer to this block is stored in the module structure
80105 * which is inside the block. Just mark it as not being a
80106@@ -2809,11 +2853,11 @@ static int move_module(struct module *mod, struct load_info *info)
80107 if (!ptr)
80108 return -ENOMEM;
80109
80110- memset(ptr, 0, mod->core_size);
80111- mod->module_core = ptr;
80112+ memset(ptr, 0, mod->core_size_rw);
80113+ mod->module_core_rw = ptr;
80114
80115- if (mod->init_size) {
80116- ptr = module_alloc_update_bounds(mod->init_size);
80117+ if (mod->init_size_rw) {
80118+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
80119 /*
80120 * The pointer to this block is stored in the module structure
80121 * which is inside the block. This block doesn't need to be
80122@@ -2822,13 +2866,45 @@ static int move_module(struct module *mod, struct load_info *info)
80123 */
80124 kmemleak_ignore(ptr);
80125 if (!ptr) {
80126- module_free(mod, mod->module_core);
80127+ module_free(mod, mod->module_core_rw);
80128 return -ENOMEM;
80129 }
80130- memset(ptr, 0, mod->init_size);
80131- mod->module_init = ptr;
80132+ memset(ptr, 0, mod->init_size_rw);
80133+ mod->module_init_rw = ptr;
80134 } else
80135- mod->module_init = NULL;
80136+ mod->module_init_rw = NULL;
80137+
80138+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
80139+ kmemleak_not_leak(ptr);
80140+ if (!ptr) {
80141+ if (mod->module_init_rw)
80142+ module_free(mod, mod->module_init_rw);
80143+ module_free(mod, mod->module_core_rw);
80144+ return -ENOMEM;
80145+ }
80146+
80147+ pax_open_kernel();
80148+ memset(ptr, 0, mod->core_size_rx);
80149+ pax_close_kernel();
80150+ mod->module_core_rx = ptr;
80151+
80152+ if (mod->init_size_rx) {
80153+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
80154+ kmemleak_ignore(ptr);
80155+ if (!ptr && mod->init_size_rx) {
80156+ module_free_exec(mod, mod->module_core_rx);
80157+ if (mod->module_init_rw)
80158+ module_free(mod, mod->module_init_rw);
80159+ module_free(mod, mod->module_core_rw);
80160+ return -ENOMEM;
80161+ }
80162+
80163+ pax_open_kernel();
80164+ memset(ptr, 0, mod->init_size_rx);
80165+ pax_close_kernel();
80166+ mod->module_init_rx = ptr;
80167+ } else
80168+ mod->module_init_rx = NULL;
80169
80170 /* Transfer each section which specifies SHF_ALLOC */
80171 pr_debug("final section addresses:\n");
80172@@ -2839,16 +2915,45 @@ static int move_module(struct module *mod, struct load_info *info)
80173 if (!(shdr->sh_flags & SHF_ALLOC))
80174 continue;
80175
80176- if (shdr->sh_entsize & INIT_OFFSET_MASK)
80177- dest = mod->module_init
80178- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
80179- else
80180- dest = mod->module_core + shdr->sh_entsize;
80181+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
80182+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
80183+ dest = mod->module_init_rw
80184+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
80185+ else
80186+ dest = mod->module_init_rx
80187+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
80188+ } else {
80189+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
80190+ dest = mod->module_core_rw + shdr->sh_entsize;
80191+ else
80192+ dest = mod->module_core_rx + shdr->sh_entsize;
80193+ }
80194+
80195+ if (shdr->sh_type != SHT_NOBITS) {
80196+
80197+#ifdef CONFIG_PAX_KERNEXEC
80198+#ifdef CONFIG_X86_64
80199+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
80200+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
80201+#endif
80202+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
80203+ pax_open_kernel();
80204+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
80205+ pax_close_kernel();
80206+ } else
80207+#endif
80208
80209- if (shdr->sh_type != SHT_NOBITS)
80210 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
80211+ }
80212 /* Update sh_addr to point to copy in image. */
80213- shdr->sh_addr = (unsigned long)dest;
80214+
80215+#ifdef CONFIG_PAX_KERNEXEC
80216+ if (shdr->sh_flags & SHF_EXECINSTR)
80217+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
80218+ else
80219+#endif
80220+
80221+ shdr->sh_addr = (unsigned long)dest;
80222 pr_debug("\t0x%lx %s\n",
80223 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
80224 }
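
With the core and init areas each split into RW and RX halves, move_module's single destination computation becomes a four-way choice: INIT_OFFSET_MASK in sh_entsize selects init versus core, and the SHF_WRITE/SHF_ALLOC flags select the writable versus the read-only/executable mapping, mirroring how layout_sections assigned the offsets. The routing, extracted as a sketch (field and flag names follow the patch; the helper itself is illustrative):

#include <stdio.h>

#define SHF_WRITE        0x1UL
#define SHF_ALLOC        0x2UL
#define INIT_OFFSET_MASK (1UL << 63)	/* high bit tags init-only sections */

struct mod_layout {
	char *module_init_rw, *module_init_rx;
	char *module_core_rw, *module_core_rx;
};

/* Pick which of the four mappings a section is copied into. */
static char *dest_for(const struct mod_layout *m,
		      unsigned long sh_flags, unsigned long sh_entsize)
{
	int rw   = (sh_flags & SHF_WRITE) || !(sh_flags & SHF_ALLOC);
	int init = (sh_entsize & INIT_OFFSET_MASK) != 0;
	unsigned long off = sh_entsize & ~INIT_OFFSET_MASK;

	if (init)
		return (rw ? m->module_init_rw : m->module_init_rx) + off;
	return (rw ? m->module_core_rw : m->module_core_rx) + off;
}

int main(void)
{
	static char core_rw[64], core_rx[64], init_rw[64], init_rx[64];
	struct mod_layout m = { init_rw, init_rx, core_rw, core_rx };

	/* .text: allocated, not writable -> core RX mapping */
	printf("%d\n", dest_for(&m, SHF_ALLOC, 16) == core_rx + 16);
	/* .data: allocated and writable   -> core RW mapping */
	printf("%d\n", dest_for(&m, SHF_ALLOC | SHF_WRITE, 16) == core_rw + 16);
	return 0;
}
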
80225@@ -2905,12 +3010,12 @@ static void flush_module_icache(const struct module *mod)
80226 * Do it before processing of module parameters, so the module
80227 * can provide parameter accessor functions of its own.
80228 */
80229- if (mod->module_init)
80230- flush_icache_range((unsigned long)mod->module_init,
80231- (unsigned long)mod->module_init
80232- + mod->init_size);
80233- flush_icache_range((unsigned long)mod->module_core,
80234- (unsigned long)mod->module_core + mod->core_size);
80235+ if (mod->module_init_rx)
80236+ flush_icache_range((unsigned long)mod->module_init_rx,
80237+ (unsigned long)mod->module_init_rx
80238+ + mod->init_size_rx);
80239+ flush_icache_range((unsigned long)mod->module_core_rx,
80240+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
80241
80242 set_fs(old_fs);
80243 }
80244@@ -2977,8 +3082,10 @@ static int alloc_module_percpu(struct module *mod, struct load_info *info)
80245 static void module_deallocate(struct module *mod, struct load_info *info)
80246 {
80247 percpu_modfree(mod);
80248- module_free(mod, mod->module_init);
80249- module_free(mod, mod->module_core);
80250+ module_free_exec(mod, mod->module_init_rx);
80251+ module_free_exec(mod, mod->module_core_rx);
80252+ module_free(mod, mod->module_init_rw);
80253+ module_free(mod, mod->module_core_rw);
80254 }
80255
80256 int __weak module_finalize(const Elf_Ehdr *hdr,
80257@@ -2991,7 +3098,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
80258 static int post_relocation(struct module *mod, const struct load_info *info)
80259 {
80260 /* Sort exception table now relocations are done. */
80261+ pax_open_kernel();
80262 sort_extable(mod->extable, mod->extable + mod->num_exentries);
80263+ pax_close_kernel();
80264
80265 /* Copy relocated percpu area over. */
80266 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
80267@@ -3045,16 +3154,16 @@ static int do_init_module(struct module *mod)
80268 MODULE_STATE_COMING, mod);
80269
80270 /* Set RO and NX regions for core */
80271- set_section_ro_nx(mod->module_core,
80272- mod->core_text_size,
80273- mod->core_ro_size,
80274- mod->core_size);
80275+ set_section_ro_nx(mod->module_core_rx,
80276+ mod->core_size_rx,
80277+ mod->core_size_rx,
80278+ mod->core_size_rx);
80279
80280 /* Set RO and NX regions for init */
80281- set_section_ro_nx(mod->module_init,
80282- mod->init_text_size,
80283- mod->init_ro_size,
80284- mod->init_size);
80285+ set_section_ro_nx(mod->module_init_rx,
80286+ mod->init_size_rx,
80287+ mod->init_size_rx,
80288+ mod->init_size_rx);
80289
80290 do_mod_ctors(mod);
80291 /* Start the module */
80292@@ -3116,11 +3225,12 @@ static int do_init_module(struct module *mod)
80293 mod->strtab = mod->core_strtab;
80294 #endif
80295 unset_module_init_ro_nx(mod);
80296- module_free(mod, mod->module_init);
80297- mod->module_init = NULL;
80298- mod->init_size = 0;
80299- mod->init_ro_size = 0;
80300- mod->init_text_size = 0;
80301+ module_free(mod, mod->module_init_rw);
80302+ module_free_exec(mod, mod->module_init_rx);
80303+ mod->module_init_rw = NULL;
80304+ mod->module_init_rx = NULL;
80305+ mod->init_size_rw = 0;
80306+ mod->init_size_rx = 0;
80307 mutex_unlock(&module_mutex);
80308 wake_up_all(&module_wq);
80309
80310@@ -3252,9 +3362,38 @@ static int load_module(struct load_info *info, const char __user *uargs,
80311 if (err)
80312 goto free_unload;
80313
80314+ /* Now copy in args */
80315+ mod->args = strndup_user(uargs, ~0UL >> 1);
80316+ if (IS_ERR(mod->args)) {
80317+ err = PTR_ERR(mod->args);
80318+ goto free_unload;
80319+ }
80320+
80321 /* Set up MODINFO_ATTR fields */
80322 setup_modinfo(mod, info);
80323
80324+#ifdef CONFIG_GRKERNSEC_MODHARDEN
80325+ {
80326+ char *p, *p2;
80327+
80328+ if (strstr(mod->args, "grsec_modharden_netdev")) {
80329+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
80330+ err = -EPERM;
80331+ goto free_modinfo;
80332+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
80333+ p += sizeof("grsec_modharden_normal") - 1;
80334+ p2 = strstr(p, "_");
80335+ if (p2) {
80336+ *p2 = '\0';
80337+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
80338+ *p2 = '_';
80339+ }
80340+ err = -EPERM;
80341+ goto free_modinfo;
80342+ }
80343+ }
80344+#endif
80345+
80346 /* Fix up syms, so that st_value is a pointer to location. */
80347 err = simplify_symbols(mod, info);
80348 if (err < 0)
80349@@ -3270,13 +3409,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
80350
80351 flush_module_icache(mod);
80352
80353- /* Now copy in args */
80354- mod->args = strndup_user(uargs, ~0UL >> 1);
80355- if (IS_ERR(mod->args)) {
80356- err = PTR_ERR(mod->args);
80357- goto free_arch_cleanup;
80358- }
80359-
80360 dynamic_debug_setup(info->debug, info->num_debug);
80361
80362 /* Finally it's fully formed, ready to start executing. */
80363@@ -3311,11 +3443,10 @@ static int load_module(struct load_info *info, const char __user *uargs,
80364 ddebug_cleanup:
80365 dynamic_debug_remove(info->debug);
80366 synchronize_sched();
80367- kfree(mod->args);
80368- free_arch_cleanup:
80369 module_arch_cleanup(mod);
80370 free_modinfo:
80371 free_modinfo(mod);
80372+ kfree(mod->args);
80373 free_unload:
80374 module_unload_free(mod);
80375 unlink_mod:
80376@@ -3398,10 +3529,16 @@ static const char *get_ksymbol(struct module *mod,
80377 unsigned long nextval;
80378
80379 	/* At worst, next value is at end of module */
80380- if (within_module_init(addr, mod))
80381- nextval = (unsigned long)mod->module_init+mod->init_text_size;
80382+ if (within_module_init_rx(addr, mod))
80383+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
80384+ else if (within_module_init_rw(addr, mod))
80385+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
80386+ else if (within_module_core_rx(addr, mod))
80387+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
80388+ else if (within_module_core_rw(addr, mod))
80389+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
80390 else
80391- nextval = (unsigned long)mod->module_core+mod->core_text_size;
80392+ return NULL;
80393
80394 /* Scan for closest preceding symbol, and next symbol. (ELF
80395 starts real symbols at 1). */
80396@@ -3654,7 +3791,7 @@ static int m_show(struct seq_file *m, void *p)
80397 return 0;
80398
80399 seq_printf(m, "%s %u",
80400- mod->name, mod->init_size + mod->core_size);
80401+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
80402 print_unload_info(m, mod);
80403
80404 /* Informative for users. */
80405@@ -3663,7 +3800,7 @@ static int m_show(struct seq_file *m, void *p)
80406 mod->state == MODULE_STATE_COMING ? "Loading":
80407 "Live");
80408 /* Used by oprofile and other similar tools. */
80409- seq_printf(m, " 0x%pK", mod->module_core);
80410+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
80411
80412 /* Taints info */
80413 if (mod->taints)
80414@@ -3699,7 +3836,17 @@ static const struct file_operations proc_modules_operations = {
80415
80416 static int __init proc_modules_init(void)
80417 {
80418+#ifndef CONFIG_GRKERNSEC_HIDESYM
80419+#ifdef CONFIG_GRKERNSEC_PROC_USER
80420+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
80421+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
80422+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
80423+#else
80424 proc_create("modules", 0, NULL, &proc_modules_operations);
80425+#endif
80426+#else
80427+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
80428+#endif
80429 return 0;
80430 }
80431 module_init(proc_modules_init);
80432@@ -3760,14 +3907,14 @@ struct module *__module_address(unsigned long addr)
80433 {
80434 struct module *mod;
80435
80436- if (addr < module_addr_min || addr > module_addr_max)
80437+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
80438+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
80439 return NULL;
80440
80441 list_for_each_entry_rcu(mod, &modules, list) {
80442 if (mod->state == MODULE_STATE_UNFORMED)
80443 continue;
80444- if (within_module_core(addr, mod)
80445- || within_module_init(addr, mod))
80446+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
80447 return mod;
80448 }
80449 return NULL;
80450@@ -3802,11 +3949,20 @@ bool is_module_text_address(unsigned long addr)
80451 */
80452 struct module *__module_text_address(unsigned long addr)
80453 {
80454- struct module *mod = __module_address(addr);
80455+ struct module *mod;
80456+
80457+#ifdef CONFIG_X86_32
80458+ addr = ktla_ktva(addr);
80459+#endif
80460+
80461+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
80462+ return NULL;
80463+
80464+ mod = __module_address(addr);
80465+
80466 if (mod) {
80467 /* Make sure it's within the text section. */
80468- if (!within(addr, mod->module_init, mod->init_text_size)
80469- && !within(addr, mod->module_core, mod->core_text_size))
80470+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
80471 mod = NULL;
80472 }
80473 return mod;
80474diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
80475index 7e3443f..b2a1e6b 100644
80476--- a/kernel/mutex-debug.c
80477+++ b/kernel/mutex-debug.c
80478@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
80479 }
80480
80481 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
80482- struct thread_info *ti)
80483+ struct task_struct *task)
80484 {
80485 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
80486
80487 /* Mark the current thread as blocked on the lock: */
80488- ti->task->blocked_on = waiter;
80489+ task->blocked_on = waiter;
80490 }
80491
80492 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
80493- struct thread_info *ti)
80494+ struct task_struct *task)
80495 {
80496 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
80497- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
80498- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
80499- ti->task->blocked_on = NULL;
80500+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
80501+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
80502+ task->blocked_on = NULL;
80503
80504 list_del_init(&waiter->list);
80505 waiter->task = NULL;
80506diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
80507index 0799fd3..d06ae3b 100644
80508--- a/kernel/mutex-debug.h
80509+++ b/kernel/mutex-debug.h
80510@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
80511 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
80512 extern void debug_mutex_add_waiter(struct mutex *lock,
80513 struct mutex_waiter *waiter,
80514- struct thread_info *ti);
80515+ struct task_struct *task);
80516 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
80517- struct thread_info *ti);
80518+ struct task_struct *task);
80519 extern void debug_mutex_unlock(struct mutex *lock);
80520 extern void debug_mutex_init(struct mutex *lock, const char *name,
80521 struct lock_class_key *key);
80522diff --git a/kernel/mutex.c b/kernel/mutex.c
80523index ad53a66..f1bf8bc 100644
80524--- a/kernel/mutex.c
80525+++ b/kernel/mutex.c
80526@@ -134,7 +134,7 @@ void mspin_lock(struct mspin_node **lock, struct mspin_node *node)
80527 node->locked = 1;
80528 return;
80529 }
80530- ACCESS_ONCE(prev->next) = node;
80531+ ACCESS_ONCE_RW(prev->next) = node;
80532 smp_wmb();
80533 /* Wait until the lock holder passes the lock down */
80534 while (!ACCESS_ONCE(node->locked))
80535@@ -155,7 +155,7 @@ static void mspin_unlock(struct mspin_node **lock, struct mspin_node *node)
80536 while (!(next = ACCESS_ONCE(node->next)))
80537 arch_mutex_cpu_relax();
80538 }
80539- ACCESS_ONCE(next->locked) = 1;
80540+ ACCESS_ONCE_RW(next->locked) = 1;
80541 smp_wmb();
80542 }
80543
80544@@ -341,7 +341,7 @@ slowpath:
80545 spin_lock_mutex(&lock->wait_lock, flags);
80546
80547 debug_mutex_lock_common(lock, &waiter);
80548- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
80549+ debug_mutex_add_waiter(lock, &waiter, task);
80550
80551 /* add waiting tasks to the end of the waitqueue (FIFO): */
80552 list_add_tail(&waiter.list, &lock->wait_list);
80553@@ -371,8 +371,7 @@ slowpath:
80554 * TASK_UNINTERRUPTIBLE case.)
80555 */
80556 if (unlikely(signal_pending_state(state, task))) {
80557- mutex_remove_waiter(lock, &waiter,
80558- task_thread_info(task));
80559+ mutex_remove_waiter(lock, &waiter, task);
80560 mutex_release(&lock->dep_map, 1, ip);
80561 spin_unlock_mutex(&lock->wait_lock, flags);
80562
80563@@ -391,7 +390,7 @@ slowpath:
80564 done:
80565 lock_acquired(&lock->dep_map, ip);
80566 /* got the lock - rejoice! */
80567- mutex_remove_waiter(lock, &waiter, current_thread_info());
80568+ mutex_remove_waiter(lock, &waiter, task);
80569 mutex_set_owner(lock);
80570
80571 /* set it to 0 if there are no waiters left: */
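
ACCESS_ONCE_RW is grsecurity's writable counterpart to ACCESS_ONCE: the patch set redefines ACCESS_ONCE to read through a pointer-to-const, so any store accidentally routed through it fails to compile, and writes that are genuinely intended, like this MCS-node handoff, must say so explicitly. A sketch of the macro pair and its compile-time effect (simplified from the patch's compiler.h changes):

#include <stdio.h>

/* Reads go through const volatile; writes must use the RW variant. */
#define ACCESS_ONCE(x)    (*(const volatile __typeof__(x) *)&(x))
#define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

int main(void)
{
	int locked = 0;

	ACCESS_ONCE_RW(locked) = 1;		/* fine: non-const lvalue */
	printf("%d\n", ACCESS_ONCE(locked));	/* volatile read */
	/* ACCESS_ONCE(locked) = 2; -- would not compile: assignment to const */
	return 0;
}
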
80572diff --git a/kernel/notifier.c b/kernel/notifier.c
80573index 2d5cc4c..d9ea600 100644
80574--- a/kernel/notifier.c
80575+++ b/kernel/notifier.c
80576@@ -5,6 +5,7 @@
80577 #include <linux/rcupdate.h>
80578 #include <linux/vmalloc.h>
80579 #include <linux/reboot.h>
80580+#include <linux/mm.h>
80581
80582 /*
80583 * Notifier list for kernel code which wants to be called
80584@@ -24,10 +25,12 @@ static int notifier_chain_register(struct notifier_block **nl,
80585 while ((*nl) != NULL) {
80586 if (n->priority > (*nl)->priority)
80587 break;
80588- nl = &((*nl)->next);
80589+ nl = (struct notifier_block **)&((*nl)->next);
80590 }
80591- n->next = *nl;
80592+ pax_open_kernel();
80593+ *(const void **)&n->next = *nl;
80594 rcu_assign_pointer(*nl, n);
80595+ pax_close_kernel();
80596 return 0;
80597 }
80598
80599@@ -39,10 +42,12 @@ static int notifier_chain_cond_register(struct notifier_block **nl,
80600 return 0;
80601 if (n->priority > (*nl)->priority)
80602 break;
80603- nl = &((*nl)->next);
80604+ nl = (struct notifier_block **)&((*nl)->next);
80605 }
80606- n->next = *nl;
80607+ pax_open_kernel();
80608+ *(const void **)&n->next = *nl;
80609 rcu_assign_pointer(*nl, n);
80610+ pax_close_kernel();
80611 return 0;
80612 }
80613
80614@@ -51,10 +56,12 @@ static int notifier_chain_unregister(struct notifier_block **nl,
80615 {
80616 while ((*nl) != NULL) {
80617 if ((*nl) == n) {
80618+ pax_open_kernel();
80619 rcu_assign_pointer(*nl, n->next);
80620+ pax_close_kernel();
80621 return 0;
80622 }
80623- nl = &((*nl)->next);
80624+ nl = (struct notifier_block **)&((*nl)->next);
80625 }
80626 return -ENOENT;
80627 }
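
These notifier hunks show the standard pattern for structures that grsecurity's constify plugin has made read-only: the next-pointer walk casts away the enforced const, and the actual stores are bracketed by pax_open_kernel()/pax_close_kernel(), which briefly lift kernel write protection (on x86, by toggling CR0.WP) so the otherwise read-only field can be updated. A toy model of the bracket (the real implementation is per-arch and preemption-safe; this only conveys the shape):

#include <stdio.h>

static int write_protect = 1;	/* stand-in for CR0.WP on x86 */

static void pax_open_kernel(void)  { write_protect = 0; }
static void pax_close_kernel(void) { write_protect = 1; }

static void set_next(int *next_slot, int val)
{
	pax_open_kernel();	/* window where read-only data is writable */
	*next_slot = val;
	pax_close_kernel();
}

int main(void)
{
	int next = 0;

	set_next(&next, 42);
	printf("next=%d wp=%d\n", next, write_protect);
	return 0;
}
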
80628diff --git a/kernel/panic.c b/kernel/panic.c
80629index 167ec09..0dda5f9 100644
80630--- a/kernel/panic.c
80631+++ b/kernel/panic.c
80632@@ -400,7 +400,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
80633 unsigned taint, struct slowpath_args *args)
80634 {
80635 printk(KERN_WARNING "------------[ cut here ]------------\n");
80636- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
80637+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
80638
80639 if (args)
80640 vprintk(args->fmt, args->args);
80641@@ -453,7 +453,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
80642 */
80643 void __stack_chk_fail(void)
80644 {
80645- panic("stack-protector: Kernel stack is corrupted in: %p\n",
80646+ dump_stack();
80647+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
80648 __builtin_return_address(0));
80649 }
80650 EXPORT_SYMBOL(__stack_chk_fail);
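
__stack_chk_fail() is the routine gcc's -fstack-protector emits a call to when a function's stack canary is found clobbered at return; the patch adds a dump_stack() before panicking and uses %pA, a grsecurity printk extension that symbolizes the address, so the report names the corrupted frame. What the compiler-generated check amounts to, written out by hand in userspace (layout-dependent and illustrative; the kernel randomizes the guard at boot, and the handler is renamed here to avoid the libc symbol):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static unsigned long stack_chk_guard = 0xdeadbeefUL;	/* normally randomized */

static void my_stack_chk_fail(void)
{
	fprintf(stderr, "stack-protector: stack is corrupted\n");
	abort();	/* the kernel panics here instead */
}

static void victim(const char *input)
{
	unsigned long canary = stack_chk_guard;	/* prologue: plant canary */
	char buf[8];

	strcpy(buf, input);	/* an overflow here would smash 'canary' first */

	if (canary != stack_chk_guard)		/* epilogue: verify canary */
		my_stack_chk_fail();
}

int main(void)
{
	victim("ok");	/* short input: canary intact, no report */
	return 0;
}
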
80651diff --git a/kernel/pid.c b/kernel/pid.c
80652index 0db3e79..95b9dc2 100644
80653--- a/kernel/pid.c
80654+++ b/kernel/pid.c
80655@@ -33,6 +33,7 @@
80656 #include <linux/rculist.h>
80657 #include <linux/bootmem.h>
80658 #include <linux/hash.h>
80659+#include <linux/security.h>
80660 #include <linux/pid_namespace.h>
80661 #include <linux/init_task.h>
80662 #include <linux/syscalls.h>
80663@@ -47,7 +48,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
80664
80665 int pid_max = PID_MAX_DEFAULT;
80666
80667-#define RESERVED_PIDS 300
80668+#define RESERVED_PIDS 500
80669
80670 int pid_max_min = RESERVED_PIDS + 1;
80671 int pid_max_max = PID_MAX_LIMIT;
80672@@ -442,10 +443,18 @@ EXPORT_SYMBOL(pid_task);
80673 */
80674 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
80675 {
80676+ struct task_struct *task;
80677+
80678 rcu_lockdep_assert(rcu_read_lock_held(),
80679 "find_task_by_pid_ns() needs rcu_read_lock()"
80680 " protection");
80681- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
80682+
80683+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
80684+
80685+ if (gr_pid_is_chrooted(task))
80686+ return NULL;
80687+
80688+ return task;
80689 }
80690
80691 struct task_struct *find_task_by_vpid(pid_t vnr)
80692@@ -453,6 +462,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
80693 return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
80694 }
80695
80696+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
80697+{
80698+ rcu_lockdep_assert(rcu_read_lock_held(),
80699+ "find_task_by_pid_ns() needs rcu_read_lock()"
80700+ " protection");
80701+ return pid_task(find_pid_ns(vnr, task_active_pid_ns(current)), PIDTYPE_PID);
80702+}
80703+
80704 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
80705 {
80706 struct pid *pid;
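
find_task_by_pid_ns() now filters its result through gr_pid_is_chrooted(), so a chrooted process simply cannot resolve PIDs of tasks outside its chroot, while the new find_task_by_vpid_unrestricted() preserves an unfiltered lookup for the few internal callers that need one. The wrapper pattern in isolation (the predicate is a stand-in for grsecurity's real chroot visibility check):

#include <stddef.h>
#include <stdio.h>

struct task { int pid; int outside_callers_chroot; };

/* Stand-in for gr_pid_is_chrooted(): hide tasks beyond the chroot. */
static int gr_pid_is_chrooted(const struct task *t)
{
	return t && t->outside_callers_chroot;
}

static struct task *raw_lookup(struct task *tbl, int n, int pid)
{
	for (int i = 0; i < n; i++)
		if (tbl[i].pid == pid)
			return &tbl[i];
	return NULL;
}

/* Filtered lookup, as the patched find_task_by_pid_ns() behaves. */
static struct task *find_task(struct task *tbl, int n, int pid)
{
	struct task *t = raw_lookup(tbl, n, pid);

	return gr_pid_is_chrooted(t) ? NULL : t;
}

int main(void)
{
	struct task tbl[] = { { 1, 1 }, { 100, 0 } };

	printf("pid 1:   %s\n", find_task(tbl, 2, 1) ? "visible" : "hidden");
	printf("pid 100: %s\n", find_task(tbl, 2, 100) ? "visible" : "hidden");
	return 0;
}
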
80707diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
80708index 6917e8e..9909aeb 100644
80709--- a/kernel/pid_namespace.c
80710+++ b/kernel/pid_namespace.c
80711@@ -247,7 +247,7 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write,
80712 void __user *buffer, size_t *lenp, loff_t *ppos)
80713 {
80714 struct pid_namespace *pid_ns = task_active_pid_ns(current);
80715- struct ctl_table tmp = *table;
80716+ ctl_table_no_const tmp = *table;
80717
80718 if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
80719 return -EPERM;
80720diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
80721index 42670e9..8719c2f 100644
80722--- a/kernel/posix-cpu-timers.c
80723+++ b/kernel/posix-cpu-timers.c
80724@@ -1636,14 +1636,14 @@ struct k_clock clock_posix_cpu = {
80725
80726 static __init int init_posix_cpu_timers(void)
80727 {
80728- struct k_clock process = {
80729+ static struct k_clock process = {
80730 .clock_getres = process_cpu_clock_getres,
80731 .clock_get = process_cpu_clock_get,
80732 .timer_create = process_cpu_timer_create,
80733 .nsleep = process_cpu_nsleep,
80734 .nsleep_restart = process_cpu_nsleep_restart,
80735 };
80736- struct k_clock thread = {
80737+ static struct k_clock thread = {
80738 .clock_getres = thread_cpu_clock_getres,
80739 .clock_get = thread_cpu_clock_get,
80740 .timer_create = thread_cpu_timer_create,
80741diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
80742index 424c2d4..a9194f7 100644
80743--- a/kernel/posix-timers.c
80744+++ b/kernel/posix-timers.c
80745@@ -43,6 +43,7 @@
80746 #include <linux/hash.h>
80747 #include <linux/posix-clock.h>
80748 #include <linux/posix-timers.h>
80749+#include <linux/grsecurity.h>
80750 #include <linux/syscalls.h>
80751 #include <linux/wait.h>
80752 #include <linux/workqueue.h>
80753@@ -122,7 +123,7 @@ static DEFINE_SPINLOCK(hash_lock);
80754 * which we beg off on and pass to do_sys_settimeofday().
80755 */
80756
80757-static struct k_clock posix_clocks[MAX_CLOCKS];
80758+static struct k_clock *posix_clocks[MAX_CLOCKS];
80759
80760 /*
80761 * These ones are defined below.
80762@@ -275,7 +276,7 @@ static int posix_get_tai(clockid_t which_clock, struct timespec *tp)
80763 */
80764 static __init int init_posix_timers(void)
80765 {
80766- struct k_clock clock_realtime = {
80767+ static struct k_clock clock_realtime = {
80768 .clock_getres = hrtimer_get_res,
80769 .clock_get = posix_clock_realtime_get,
80770 .clock_set = posix_clock_realtime_set,
80771@@ -287,7 +288,7 @@ static __init int init_posix_timers(void)
80772 .timer_get = common_timer_get,
80773 .timer_del = common_timer_del,
80774 };
80775- struct k_clock clock_monotonic = {
80776+ static struct k_clock clock_monotonic = {
80777 .clock_getres = hrtimer_get_res,
80778 .clock_get = posix_ktime_get_ts,
80779 .nsleep = common_nsleep,
80780@@ -297,19 +298,19 @@ static __init int init_posix_timers(void)
80781 .timer_get = common_timer_get,
80782 .timer_del = common_timer_del,
80783 };
80784- struct k_clock clock_monotonic_raw = {
80785+ static struct k_clock clock_monotonic_raw = {
80786 .clock_getres = hrtimer_get_res,
80787 .clock_get = posix_get_monotonic_raw,
80788 };
80789- struct k_clock clock_realtime_coarse = {
80790+ static struct k_clock clock_realtime_coarse = {
80791 .clock_getres = posix_get_coarse_res,
80792 .clock_get = posix_get_realtime_coarse,
80793 };
80794- struct k_clock clock_monotonic_coarse = {
80795+ static struct k_clock clock_monotonic_coarse = {
80796 .clock_getres = posix_get_coarse_res,
80797 .clock_get = posix_get_monotonic_coarse,
80798 };
80799- struct k_clock clock_tai = {
80800+ static struct k_clock clock_tai = {
80801 .clock_getres = hrtimer_get_res,
80802 .clock_get = posix_get_tai,
80803 .nsleep = common_nsleep,
80804@@ -319,7 +320,7 @@ static __init int init_posix_timers(void)
80805 .timer_get = common_timer_get,
80806 .timer_del = common_timer_del,
80807 };
80808- struct k_clock clock_boottime = {
80809+ static struct k_clock clock_boottime = {
80810 .clock_getres = hrtimer_get_res,
80811 .clock_get = posix_get_boottime,
80812 .nsleep = common_nsleep,
80813@@ -531,7 +532,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
80814 return;
80815 }
80816
80817- posix_clocks[clock_id] = *new_clock;
80818+ posix_clocks[clock_id] = new_clock;
80819 }
80820 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
80821
80822@@ -577,9 +578,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
80823 return (id & CLOCKFD_MASK) == CLOCKFD ?
80824 &clock_posix_dynamic : &clock_posix_cpu;
80825
80826- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
80827+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
80828 return NULL;
80829- return &posix_clocks[id];
80830+ return posix_clocks[id];
80831 }
80832
80833 static int common_timer_create(struct k_itimer *new_timer)
80834@@ -1011,6 +1012,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
80835 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
80836 return -EFAULT;
80837
80838+	/* Only the CLOCK_REALTIME clock can be set; all other clocks
80839+	   have their clock_set fptr pointed at a dummy nosettime function.
80840+	   CLOCK_REALTIME has a NULL clock_set fptr, which causes it to
80841+	   fall through to common_clock_set, which calls do_sys_settimeofday,
80842+	   which we hook.
80843+	*/
80844+
80845 return kc->clock_set(which_clock, &new_tp);
80846 }
80847
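
The posix-timers conversion replaces a table of struct k_clock values with a table of pointers: registration used to copy a caller-supplied, often on-stack, struct into a writable global array, whereas now every clock definition is a function-local static and only its address is stored, so the definitions themselves can sit in protected storage and lookup gains a NULL check for never-registered slots. The before/after registration shape, abbreviated:

#include <stddef.h>

struct k_clock { int (*clock_getres)(void); };

#define MAX_CLOCKS 16

static struct k_clock *posix_clocks[MAX_CLOCKS];	/* was an array of structs */

static void posix_timers_register_clock(int id, struct k_clock *new_clock)
{
	posix_clocks[id] = new_clock;	/* was: posix_clocks[id] = *new_clock; */
}

static struct k_clock *clockid_to_kclock(int id)
{
	/* The NULL test is new; before, an all-zero slot was probed via
	 * its clock_getres member alone. */
	if (id < 0 || id >= MAX_CLOCKS || !posix_clocks[id] ||
	    !posix_clocks[id]->clock_getres)
		return NULL;
	return posix_clocks[id];
}

static int dummy_getres(void) { return 0; }

int main(void)
{
	static struct k_clock clock_realtime = { .clock_getres = dummy_getres };

	posix_timers_register_clock(0, &clock_realtime);
	return clockid_to_kclock(0) == NULL;	/* exit status 0 on success */
}
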
80848diff --git a/kernel/power/process.c b/kernel/power/process.c
80849index 98088e0..aaf95c0 100644
80850--- a/kernel/power/process.c
80851+++ b/kernel/power/process.c
80852@@ -33,6 +33,7 @@ static int try_to_freeze_tasks(bool user_only)
80853 u64 elapsed_csecs64;
80854 unsigned int elapsed_csecs;
80855 bool wakeup = false;
80856+ bool timedout = false;
80857
80858 do_gettimeofday(&start);
80859
80860@@ -43,13 +44,20 @@ static int try_to_freeze_tasks(bool user_only)
80861
80862 while (true) {
80863 todo = 0;
80864+ if (time_after(jiffies, end_time))
80865+ timedout = true;
80866 read_lock(&tasklist_lock);
80867 do_each_thread(g, p) {
80868 if (p == current || !freeze_task(p))
80869 continue;
80870
80871- if (!freezer_should_skip(p))
80872+ if (!freezer_should_skip(p)) {
80873 todo++;
80874+ if (timedout) {
80875+ printk(KERN_ERR "Task refusing to freeze:\n");
80876+ sched_show_task(p);
80877+ }
80878+ }
80879 } while_each_thread(g, p);
80880 read_unlock(&tasklist_lock);
80881
80882@@ -58,7 +66,7 @@ static int try_to_freeze_tasks(bool user_only)
80883 todo += wq_busy;
80884 }
80885
80886- if (!todo || time_after(jiffies, end_time))
80887+ if (!todo || timedout)
80888 break;
80889
80890 if (pm_wakeup_pending()) {
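
try_to_freeze_tasks() previously noticed the timeout only at the bottom-of-loop exit test, so it never said which tasks were stuck; the patch latches a timedout flag before the task scan, letting the final pass report every task that still refuses to freeze via sched_show_task() before breaking out. The restructured loop as a sketch (the scan and the reporter are stand-ins):

#include <stdio.h>

static int  scan_unfrozen(void) { return 1; }	/* one permanently stuck task */
static void report_stuck(void)  { printf("Task refusing to freeze\n"); }

static int try_to_freeze_tasks(int max_passes)
{
	int timedout = 0;

	for (int pass = 0; ; pass++) {
		if (pass >= max_passes)
			timedout = 1;		/* latch BEFORE the scan ... */

		int todo = scan_unfrozen();
		if (todo && timedout)
			report_stuck();		/* ... so culprits get named */

		if (!todo || timedout)		/* old code tested time here */
			break;
	}
	return timedout ? -1 : 0;
}

int main(void)
{
	return try_to_freeze_tasks(3) == -1 ? 0 : 1;
}
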
80891diff --git a/kernel/printk.c b/kernel/printk.c
80892index d37d45c..ab918b3 100644
80893--- a/kernel/printk.c
80894+++ b/kernel/printk.c
80895@@ -390,6 +390,11 @@ static int check_syslog_permissions(int type, bool from_file)
80896 if (from_file && type != SYSLOG_ACTION_OPEN)
80897 return 0;
80898
80899+#ifdef CONFIG_GRKERNSEC_DMESG
80900+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
80901+ return -EPERM;
80902+#endif
80903+
80904 if (syslog_action_restricted(type)) {
80905 if (capable(CAP_SYSLOG))
80906 return 0;
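
The CONFIG_GRKERNSEC_DMESG gate sits in front of the stock dmesg_restrict logic: with grsec_enable_dmesg set, any syslog action from a caller holding neither CAP_SYSLOG nor CAP_SYS_ADMIN is refused outright with -EPERM (capable_nolog() is the grsecurity variant that probes a capability without writing an audit entry). A compact model of the gate:

#include <stdbool.h>
#include <stdio.h>

#define EPERM 1

static bool grsec_enable_dmesg = true;

static int check_syslog_permissions(bool cap_syslog, bool cap_sys_admin)
{
	if (grsec_enable_dmesg && !cap_syslog && !cap_sys_admin)
		return -EPERM;	/* unprivileged dmesg access denied early */
	return 0;		/* fall through to the stock checks */
}

int main(void)
{
	printf("unprivileged: %d\n", check_syslog_permissions(false, false));
	printf("privileged:   %d\n", check_syslog_permissions(true, false));
	return 0;
}
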
80907diff --git a/kernel/profile.c b/kernel/profile.c
80908index 0bf4007..6234708 100644
80909--- a/kernel/profile.c
80910+++ b/kernel/profile.c
80911@@ -37,7 +37,7 @@ struct profile_hit {
80912 #define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
80913 #define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
80914
80915-static atomic_t *prof_buffer;
80916+static atomic_unchecked_t *prof_buffer;
80917 static unsigned long prof_len, prof_shift;
80918
80919 int prof_on __read_mostly;
80920@@ -260,7 +260,7 @@ static void profile_flip_buffers(void)
80921 hits[i].pc = 0;
80922 continue;
80923 }
80924- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
80925+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
80926 hits[i].hits = hits[i].pc = 0;
80927 }
80928 }
80929@@ -321,9 +321,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
80930 * Add the current hit(s) and flush the write-queue out
80931 * to the global buffer:
80932 */
80933- atomic_add(nr_hits, &prof_buffer[pc]);
80934+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
80935 for (i = 0; i < NR_PROFILE_HIT; ++i) {
80936- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
80937+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
80938 hits[i].pc = hits[i].hits = 0;
80939 }
80940 out:
80941@@ -398,7 +398,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
80942 {
80943 unsigned long pc;
80944 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
80945- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
80946+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
80947 }
80948 #endif /* !CONFIG_SMP */
80949
80950@@ -494,7 +494,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
80951 return -EFAULT;
80952 buf++; p++; count--; read++;
80953 }
80954- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
80955+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
80956 if (copy_to_user(buf, (void *)pnt, count))
80957 return -EFAULT;
80958 read += count;
80959@@ -525,7 +525,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
80960 }
80961 #endif
80962 profile_discard_flip_buffers();
80963- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
80964+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
80965 return count;
80966 }
80967
80968diff --git a/kernel/ptrace.c b/kernel/ptrace.c
80969index 335a7ae..3bbbceb 100644
80970--- a/kernel/ptrace.c
80971+++ b/kernel/ptrace.c
80972@@ -326,7 +326,7 @@ static int ptrace_attach(struct task_struct *task, long request,
80973 if (seize)
80974 flags |= PT_SEIZED;
80975 rcu_read_lock();
80976- if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
80977+ if (ns_capable_nolog(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
80978 flags |= PT_PTRACE_CAP;
80979 rcu_read_unlock();
80980 task->ptrace = flags;
80981@@ -537,7 +537,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
80982 break;
80983 return -EIO;
80984 }
80985- if (copy_to_user(dst, buf, retval))
80986+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
80987 return -EFAULT;
80988 copied += retval;
80989 src += retval;
80990@@ -805,7 +805,7 @@ int ptrace_request(struct task_struct *child, long request,
80991 bool seized = child->ptrace & PT_SEIZED;
80992 int ret = -EIO;
80993 siginfo_t siginfo, *si;
80994- void __user *datavp = (void __user *) data;
80995+ void __user *datavp = (__force void __user *) data;
80996 unsigned long __user *datalp = datavp;
80997 unsigned long flags;
80998
80999@@ -1011,14 +1011,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
81000 goto out;
81001 }
81002
81003+ if (gr_handle_ptrace(child, request)) {
81004+ ret = -EPERM;
81005+ goto out_put_task_struct;
81006+ }
81007+
81008 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
81009 ret = ptrace_attach(child, request, addr, data);
81010 /*
81011 * Some architectures need to do book-keeping after
81012 * a ptrace attach.
81013 */
81014- if (!ret)
81015+ if (!ret) {
81016 arch_ptrace_attach(child);
81017+ gr_audit_ptrace(child);
81018+ }
81019 goto out_put_task_struct;
81020 }
81021
81022@@ -1046,7 +1053,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
81023 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
81024 if (copied != sizeof(tmp))
81025 return -EIO;
81026- return put_user(tmp, (unsigned long __user *)data);
81027+ return put_user(tmp, (__force unsigned long __user *)data);
81028 }
81029
81030 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
81031@@ -1140,7 +1147,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
81032 }
81033
81034 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
81035- compat_long_t addr, compat_long_t data)
81036+ compat_ulong_t addr, compat_ulong_t data)
81037 {
81038 struct task_struct *child;
81039 long ret;
81040@@ -1156,14 +1163,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
81041 goto out;
81042 }
81043
81044+ if (gr_handle_ptrace(child, request)) {
81045+ ret = -EPERM;
81046+ goto out_put_task_struct;
81047+ }
81048+
81049 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
81050 ret = ptrace_attach(child, request, addr, data);
81051 /*
81052 * Some architectures need to do book-keeping after
81053 * a ptrace attach.
81054 */
81055- if (!ret)
81056+ if (!ret) {
81057 arch_ptrace_attach(child);
81058+ gr_audit_ptrace(child);
81059+ }
81060 goto out_put_task_struct;
81061 }
81062
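
In ptrace_readdata the added `retval > sizeof(buf)` test is defense in depth: access_process_vm() should never return more than the chunk it was asked for, but if it ever did, retval would become the length of a copy_to_user() out of a fixed-size stack buffer, disclosing adjacent kernel stack. The hardened copy pattern in isolation (the backend is a hypothetical stand-in):

#include <stdio.h>
#include <string.h>

/* Hypothetical backend: returns bytes written into buf (should be <= len). */
static long backend_read(char *buf, long len)
{
	memset(buf, 'A', (size_t)len);
	return len;
}

static long copy_chunk(char *dst, long want)
{
	char buf[128];
	long this_len = want > (long)sizeof(buf) ? (long)sizeof(buf) : want;
	long retval = backend_read(buf, this_len);

	if (retval <= 0)
		return -1;
	/* Hardened: never trust retval to stay inside the stack buffer. */
	if (retval > (long)sizeof(buf))
		return -1;			/* kernel: -EFAULT */
	memcpy(dst, buf, (size_t)retval);	/* kernel: copy_to_user() */
	return retval;
}

int main(void)
{
	char out[128];

	printf("copied %ld bytes\n", copy_chunk(out, 64));
	return 0;
}
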
81063diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
81064index 48ab703..07561d4 100644
81065--- a/kernel/rcupdate.c
81066+++ b/kernel/rcupdate.c
81067@@ -439,10 +439,10 @@ int rcu_jiffies_till_stall_check(void)
81068 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
81069 */
81070 if (till_stall_check < 3) {
81071- ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
81072+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
81073 till_stall_check = 3;
81074 } else if (till_stall_check > 300) {
81075- ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
81076+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
81077 till_stall_check = 300;
81078 }
81079 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
81080diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
81081index a0714a5..2ab5e34 100644
81082--- a/kernel/rcutiny.c
81083+++ b/kernel/rcutiny.c
81084@@ -46,7 +46,7 @@
81085 struct rcu_ctrlblk;
81086 static void invoke_rcu_callbacks(void);
81087 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
81088-static void rcu_process_callbacks(struct softirq_action *unused);
81089+static void rcu_process_callbacks(void);
81090 static void __call_rcu(struct rcu_head *head,
81091 void (*func)(struct rcu_head *rcu),
81092 struct rcu_ctrlblk *rcp);
81093@@ -312,7 +312,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
81094 rcu_is_callbacks_kthread()));
81095 }
81096
81097-static void rcu_process_callbacks(struct softirq_action *unused)
81098+static void rcu_process_callbacks(void)
81099 {
81100 __rcu_process_callbacks(&rcu_sched_ctrlblk);
81101 __rcu_process_callbacks(&rcu_bh_ctrlblk);
81102diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
81103index 8a23300..4255818 100644
81104--- a/kernel/rcutiny_plugin.h
81105+++ b/kernel/rcutiny_plugin.h
81106@@ -945,7 +945,7 @@ static int rcu_kthread(void *arg)
81107 have_rcu_kthread_work = morework;
81108 local_irq_restore(flags);
81109 if (work)
81110- rcu_process_callbacks(NULL);
81111+ rcu_process_callbacks();
81112 schedule_timeout_interruptible(1); /* Leave CPU for others. */
81113 }
81114
81115diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
81116index e1f3a8c..42c94a2 100644
81117--- a/kernel/rcutorture.c
81118+++ b/kernel/rcutorture.c
81119@@ -164,12 +164,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
81120 { 0 };
81121 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
81122 { 0 };
81123-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
81124-static atomic_t n_rcu_torture_alloc;
81125-static atomic_t n_rcu_torture_alloc_fail;
81126-static atomic_t n_rcu_torture_free;
81127-static atomic_t n_rcu_torture_mberror;
81128-static atomic_t n_rcu_torture_error;
81129+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
81130+static atomic_unchecked_t n_rcu_torture_alloc;
81131+static atomic_unchecked_t n_rcu_torture_alloc_fail;
81132+static atomic_unchecked_t n_rcu_torture_free;
81133+static atomic_unchecked_t n_rcu_torture_mberror;
81134+static atomic_unchecked_t n_rcu_torture_error;
81135 static long n_rcu_torture_barrier_error;
81136 static long n_rcu_torture_boost_ktrerror;
81137 static long n_rcu_torture_boost_rterror;
81138@@ -287,11 +287,11 @@ rcu_torture_alloc(void)
81139
81140 spin_lock_bh(&rcu_torture_lock);
81141 if (list_empty(&rcu_torture_freelist)) {
81142- atomic_inc(&n_rcu_torture_alloc_fail);
81143+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
81144 spin_unlock_bh(&rcu_torture_lock);
81145 return NULL;
81146 }
81147- atomic_inc(&n_rcu_torture_alloc);
81148+ atomic_inc_unchecked(&n_rcu_torture_alloc);
81149 p = rcu_torture_freelist.next;
81150 list_del_init(p);
81151 spin_unlock_bh(&rcu_torture_lock);
81152@@ -304,7 +304,7 @@ rcu_torture_alloc(void)
81153 static void
81154 rcu_torture_free(struct rcu_torture *p)
81155 {
81156- atomic_inc(&n_rcu_torture_free);
81157+ atomic_inc_unchecked(&n_rcu_torture_free);
81158 spin_lock_bh(&rcu_torture_lock);
81159 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
81160 spin_unlock_bh(&rcu_torture_lock);
81161@@ -424,7 +424,7 @@ rcu_torture_cb(struct rcu_head *p)
81162 i = rp->rtort_pipe_count;
81163 if (i > RCU_TORTURE_PIPE_LEN)
81164 i = RCU_TORTURE_PIPE_LEN;
81165- atomic_inc(&rcu_torture_wcount[i]);
81166+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
81167 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
81168 rp->rtort_mbtest = 0;
81169 rcu_torture_free(rp);
81170@@ -472,7 +472,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
81171 i = rp->rtort_pipe_count;
81172 if (i > RCU_TORTURE_PIPE_LEN)
81173 i = RCU_TORTURE_PIPE_LEN;
81174- atomic_inc(&rcu_torture_wcount[i]);
81175+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
81176 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
81177 rp->rtort_mbtest = 0;
81178 list_del(&rp->rtort_free);
81179@@ -990,7 +990,7 @@ rcu_torture_writer(void *arg)
81180 i = old_rp->rtort_pipe_count;
81181 if (i > RCU_TORTURE_PIPE_LEN)
81182 i = RCU_TORTURE_PIPE_LEN;
81183- atomic_inc(&rcu_torture_wcount[i]);
81184+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
81185 old_rp->rtort_pipe_count++;
81186 cur_ops->deferred_free(old_rp);
81187 }
81188@@ -1076,7 +1076,7 @@ static void rcu_torture_timer(unsigned long unused)
81189 return;
81190 }
81191 if (p->rtort_mbtest == 0)
81192- atomic_inc(&n_rcu_torture_mberror);
81193+ atomic_inc_unchecked(&n_rcu_torture_mberror);
81194 spin_lock(&rand_lock);
81195 cur_ops->read_delay(&rand);
81196 n_rcu_torture_timers++;
81197@@ -1146,7 +1146,7 @@ rcu_torture_reader(void *arg)
81198 continue;
81199 }
81200 if (p->rtort_mbtest == 0)
81201- atomic_inc(&n_rcu_torture_mberror);
81202+ atomic_inc_unchecked(&n_rcu_torture_mberror);
81203 cur_ops->read_delay(&rand);
81204 preempt_disable();
81205 pipe_count = p->rtort_pipe_count;
81206@@ -1209,11 +1209,11 @@ rcu_torture_printk(char *page)
81207 rcu_torture_current,
81208 rcu_torture_current_version,
81209 list_empty(&rcu_torture_freelist),
81210- atomic_read(&n_rcu_torture_alloc),
81211- atomic_read(&n_rcu_torture_alloc_fail),
81212- atomic_read(&n_rcu_torture_free));
81213+ atomic_read_unchecked(&n_rcu_torture_alloc),
81214+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
81215+ atomic_read_unchecked(&n_rcu_torture_free));
81216 cnt += sprintf(&page[cnt], "rtmbe: %d rtbke: %ld rtbre: %ld ",
81217- atomic_read(&n_rcu_torture_mberror),
81218+ atomic_read_unchecked(&n_rcu_torture_mberror),
81219 n_rcu_torture_boost_ktrerror,
81220 n_rcu_torture_boost_rterror);
81221 cnt += sprintf(&page[cnt], "rtbf: %ld rtb: %ld nt: %ld ",
81222@@ -1232,14 +1232,14 @@ rcu_torture_printk(char *page)
81223 n_barrier_attempts,
81224 n_rcu_torture_barrier_error);
81225 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
81226- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
81227+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
81228 n_rcu_torture_barrier_error != 0 ||
81229 n_rcu_torture_boost_ktrerror != 0 ||
81230 n_rcu_torture_boost_rterror != 0 ||
81231 n_rcu_torture_boost_failure != 0 ||
81232 i > 1) {
81233 cnt += sprintf(&page[cnt], "!!! ");
81234- atomic_inc(&n_rcu_torture_error);
81235+ atomic_inc_unchecked(&n_rcu_torture_error);
81236 WARN_ON_ONCE(1);
81237 }
81238 cnt += sprintf(&page[cnt], "Reader Pipe: ");
81239@@ -1253,7 +1253,7 @@ rcu_torture_printk(char *page)
81240 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
81241 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
81242 cnt += sprintf(&page[cnt], " %d",
81243- atomic_read(&rcu_torture_wcount[i]));
81244+ atomic_read_unchecked(&rcu_torture_wcount[i]));
81245 }
81246 cnt += sprintf(&page[cnt], "\n");
81247 if (cur_ops->stats)
81248@@ -1962,7 +1962,7 @@ rcu_torture_cleanup(void)
81249
81250 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
81251
81252- if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
81253+ if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
81254 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
81255 else if (n_online_successes != n_online_attempts ||
81256 n_offline_successes != n_offline_attempts)
81257@@ -2031,18 +2031,18 @@ rcu_torture_init(void)
81258
81259 rcu_torture_current = NULL;
81260 rcu_torture_current_version = 0;
81261- atomic_set(&n_rcu_torture_alloc, 0);
81262- atomic_set(&n_rcu_torture_alloc_fail, 0);
81263- atomic_set(&n_rcu_torture_free, 0);
81264- atomic_set(&n_rcu_torture_mberror, 0);
81265- atomic_set(&n_rcu_torture_error, 0);
81266+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
81267+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
81268+ atomic_set_unchecked(&n_rcu_torture_free, 0);
81269+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
81270+ atomic_set_unchecked(&n_rcu_torture_error, 0);
81271 n_rcu_torture_barrier_error = 0;
81272 n_rcu_torture_boost_ktrerror = 0;
81273 n_rcu_torture_boost_rterror = 0;
81274 n_rcu_torture_boost_failure = 0;
81275 n_rcu_torture_boosts = 0;
81276 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
81277- atomic_set(&rcu_torture_wcount[i], 0);
81278+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
81279 for_each_possible_cpu(cpu) {
81280 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
81281 per_cpu(rcu_torture_count, cpu)[i] = 0;
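The rcutorture hunks above convert pure statistics counters (allocation, free, and error tallies) from atomic_t to atomic_unchecked_t. Under the PaX REFCOUNT feature this patch carries, ordinary atomic_t increments trap on overflow so that reference-count wraps cannot turn into use-after-free bugs; counters whose wrap is harmless opt out through the *_unchecked API. A minimal userspace sketch of the distinction, assuming C11 atomics (the mock_* names are illustrative, not kernel primitives):

/* Sketch: models PaX REFCOUNT semantics with C11 atomics.
 * mock_* names are illustrative; the real kernel primitives are
 * arch-specific and trap via an overflow-checked asm sequence. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

typedef struct { atomic_int v; } mock_atomic_t;           /* checked */
typedef struct { atomic_int v; } mock_atomic_unchecked_t; /* exempt  */

static void mock_atomic_inc(mock_atomic_t *a)
{
	/* checked variant: a wrap past INT_MAX is treated as a bug */
	int old = atomic_fetch_add(&a->v, 1);
	if (old == INT_MAX) {
		fprintf(stderr, "refcount overflow detected\n");
		abort(); /* PaX would kill the offending task instead */
	}
}

static void mock_atomic_inc_unchecked(mock_atomic_unchecked_t *a)
{
	/* unchecked variant: wrapping is permitted, e.g. for statistics */
	atomic_fetch_add(&a->v, 1);
}

int main(void)
{
	mock_atomic_unchecked_t n_alloc = { 0 };
	mock_atomic_inc_unchecked(&n_alloc); /* torture-test tallies land here */
	printf("n_alloc = %d\n", atomic_load(&n_alloc.v));
	return 0;
}

The same conversion recurs throughout the patch wherever a counter is informational only; counters that guard object lifetimes stay on the checked type.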
81282diff --git a/kernel/rcutree.c b/kernel/rcutree.c
81283index 3538001..e379e0b 100644
81284--- a/kernel/rcutree.c
81285+++ b/kernel/rcutree.c
81286@@ -358,9 +358,9 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
81287 rcu_prepare_for_idle(smp_processor_id());
81288 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
81289 smp_mb__before_atomic_inc(); /* See above. */
81290- atomic_inc(&rdtp->dynticks);
81291+ atomic_inc_unchecked(&rdtp->dynticks);
81292 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
81293- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
81294+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
81295
81296 /*
81297 * It is illegal to enter an extended quiescent state while
81298@@ -496,10 +496,10 @@ static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
81299 int user)
81300 {
81301 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
81302- atomic_inc(&rdtp->dynticks);
81303+ atomic_inc_unchecked(&rdtp->dynticks);
81304 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
81305 smp_mb__after_atomic_inc(); /* See above. */
81306- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
81307+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
81308 rcu_cleanup_after_idle(smp_processor_id());
81309 trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
81310 if (!user && !is_idle_task(current)) {
81311@@ -638,14 +638,14 @@ void rcu_nmi_enter(void)
81312 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
81313
81314 if (rdtp->dynticks_nmi_nesting == 0 &&
81315- (atomic_read(&rdtp->dynticks) & 0x1))
81316+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
81317 return;
81318 rdtp->dynticks_nmi_nesting++;
81319 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
81320- atomic_inc(&rdtp->dynticks);
81321+ atomic_inc_unchecked(&rdtp->dynticks);
81322 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
81323 smp_mb__after_atomic_inc(); /* See above. */
81324- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
81325+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
81326 }
81327
81328 /**
81329@@ -664,9 +664,9 @@ void rcu_nmi_exit(void)
81330 return;
81331 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
81332 smp_mb__before_atomic_inc(); /* See above. */
81333- atomic_inc(&rdtp->dynticks);
81334+ atomic_inc_unchecked(&rdtp->dynticks);
81335 smp_mb__after_atomic_inc(); /* Force delay to next write. */
81336- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
81337+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
81338 }
81339
81340 /**
81341@@ -680,7 +680,7 @@ int rcu_is_cpu_idle(void)
81342 int ret;
81343
81344 preempt_disable();
81345- ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
81346+ ret = (atomic_read_unchecked(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
81347 preempt_enable();
81348 return ret;
81349 }
81350@@ -748,7 +748,7 @@ static int rcu_is_cpu_rrupt_from_idle(void)
81351 */
81352 static int dyntick_save_progress_counter(struct rcu_data *rdp)
81353 {
81354- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
81355+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
81356 return (rdp->dynticks_snap & 0x1) == 0;
81357 }
81358
81359@@ -763,7 +763,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
81360 unsigned int curr;
81361 unsigned int snap;
81362
81363- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
81364+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
81365 snap = (unsigned int)rdp->dynticks_snap;
81366
81367 /*
81368@@ -1440,9 +1440,9 @@ static int rcu_gp_init(struct rcu_state *rsp)
81369 rdp = this_cpu_ptr(rsp->rda);
81370 rcu_preempt_check_blocked_tasks(rnp);
81371 rnp->qsmask = rnp->qsmaskinit;
81372- ACCESS_ONCE(rnp->gpnum) = rsp->gpnum;
81373+ ACCESS_ONCE_RW(rnp->gpnum) = rsp->gpnum;
81374 WARN_ON_ONCE(rnp->completed != rsp->completed);
81375- ACCESS_ONCE(rnp->completed) = rsp->completed;
81376+ ACCESS_ONCE_RW(rnp->completed) = rsp->completed;
81377 if (rnp == rdp->mynode)
81378 rcu_start_gp_per_cpu(rsp, rnp, rdp);
81379 rcu_preempt_boost_start_gp(rnp);
81380@@ -1524,7 +1524,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
81381 */
81382 rcu_for_each_node_breadth_first(rsp, rnp) {
81383 raw_spin_lock_irq(&rnp->lock);
81384- ACCESS_ONCE(rnp->completed) = rsp->gpnum;
81385+ ACCESS_ONCE_RW(rnp->completed) = rsp->gpnum;
81386 rdp = this_cpu_ptr(rsp->rda);
81387 if (rnp == rdp->mynode)
81388 __rcu_process_gp_end(rsp, rnp, rdp);
81389@@ -1855,7 +1855,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
81390 rsp->qlen += rdp->qlen;
81391 rdp->n_cbs_orphaned += rdp->qlen;
81392 rdp->qlen_lazy = 0;
81393- ACCESS_ONCE(rdp->qlen) = 0;
81394+ ACCESS_ONCE_RW(rdp->qlen) = 0;
81395 }
81396
81397 /*
81398@@ -2101,7 +2101,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
81399 }
81400 smp_mb(); /* List handling before counting for rcu_barrier(). */
81401 rdp->qlen_lazy -= count_lazy;
81402- ACCESS_ONCE(rdp->qlen) -= count;
81403+ ACCESS_ONCE_RW(rdp->qlen) -= count;
81404 rdp->n_cbs_invoked += count;
81405
81406 /* Reinstate batch limit if we have worked down the excess. */
81407@@ -2295,7 +2295,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
81408 /*
81409 * Do RCU core processing for the current CPU.
81410 */
81411-static void rcu_process_callbacks(struct softirq_action *unused)
81412+static void rcu_process_callbacks(void)
81413 {
81414 struct rcu_state *rsp;
81415
81416@@ -2419,7 +2419,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
81417 local_irq_restore(flags);
81418 return;
81419 }
81420- ACCESS_ONCE(rdp->qlen)++;
81421+ ACCESS_ONCE_RW(rdp->qlen)++;
81422 if (lazy)
81423 rdp->qlen_lazy++;
81424 else
81425@@ -2628,11 +2628,11 @@ void synchronize_sched_expedited(void)
81426 * counter wrap on a 32-bit system. Quite a few more CPUs would of
81427 * course be required on a 64-bit system.
81428 */
81429- if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
81430+ if (ULONG_CMP_GE((ulong)atomic_long_read_unchecked(&rsp->expedited_start),
81431 (ulong)atomic_long_read(&rsp->expedited_done) +
81432 ULONG_MAX / 8)) {
81433 synchronize_sched();
81434- atomic_long_inc(&rsp->expedited_wrap);
81435+ atomic_long_inc_unchecked(&rsp->expedited_wrap);
81436 return;
81437 }
81438
81439@@ -2640,7 +2640,7 @@ void synchronize_sched_expedited(void)
81440 * Take a ticket. Note that atomic_inc_return() implies a
81441 * full memory barrier.
81442 */
81443- snap = atomic_long_inc_return(&rsp->expedited_start);
81444+ snap = atomic_long_inc_return_unchecked(&rsp->expedited_start);
81445 firstsnap = snap;
81446 get_online_cpus();
81447 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
81448@@ -2653,14 +2653,14 @@ void synchronize_sched_expedited(void)
81449 synchronize_sched_expedited_cpu_stop,
81450 NULL) == -EAGAIN) {
81451 put_online_cpus();
81452- atomic_long_inc(&rsp->expedited_tryfail);
81453+ atomic_long_inc_unchecked(&rsp->expedited_tryfail);
81454
81455 /* Check to see if someone else did our work for us. */
81456 s = atomic_long_read(&rsp->expedited_done);
81457 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
81458 /* ensure test happens before caller kfree */
81459 smp_mb__before_atomic_inc(); /* ^^^ */
81460- atomic_long_inc(&rsp->expedited_workdone1);
81461+ atomic_long_inc_unchecked(&rsp->expedited_workdone1);
81462 return;
81463 }
81464
81465@@ -2669,7 +2669,7 @@ void synchronize_sched_expedited(void)
81466 udelay(trycount * num_online_cpus());
81467 } else {
81468 wait_rcu_gp(call_rcu_sched);
81469- atomic_long_inc(&rsp->expedited_normal);
81470+ atomic_long_inc_unchecked(&rsp->expedited_normal);
81471 return;
81472 }
81473
81474@@ -2678,7 +2678,7 @@ void synchronize_sched_expedited(void)
81475 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
81476 /* ensure test happens before caller kfree */
81477 smp_mb__before_atomic_inc(); /* ^^^ */
81478- atomic_long_inc(&rsp->expedited_workdone2);
81479+ atomic_long_inc_unchecked(&rsp->expedited_workdone2);
81480 return;
81481 }
81482
81483@@ -2690,10 +2690,10 @@ void synchronize_sched_expedited(void)
81484 * period works for us.
81485 */
81486 get_online_cpus();
81487- snap = atomic_long_read(&rsp->expedited_start);
81488+ snap = atomic_long_read_unchecked(&rsp->expedited_start);
81489 smp_mb(); /* ensure read is before try_stop_cpus(). */
81490 }
81491- atomic_long_inc(&rsp->expedited_stoppedcpus);
81492+ atomic_long_inc_unchecked(&rsp->expedited_stoppedcpus);
81493
81494 /*
81495 * Everyone up to our most recent fetch is covered by our grace
81496@@ -2702,16 +2702,16 @@ void synchronize_sched_expedited(void)
81497 * than we did already did their update.
81498 */
81499 do {
81500- atomic_long_inc(&rsp->expedited_done_tries);
81501+ atomic_long_inc_unchecked(&rsp->expedited_done_tries);
81502 s = atomic_long_read(&rsp->expedited_done);
81503 if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
81504 /* ensure test happens before caller kfree */
81505 smp_mb__before_atomic_inc(); /* ^^^ */
81506- atomic_long_inc(&rsp->expedited_done_lost);
81507+ atomic_long_inc_unchecked(&rsp->expedited_done_lost);
81508 break;
81509 }
81510 } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
81511- atomic_long_inc(&rsp->expedited_done_exit);
81512+ atomic_long_inc_unchecked(&rsp->expedited_done_exit);
81513
81514 put_online_cpus();
81515 }
81516@@ -2893,7 +2893,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
81517 * ACCESS_ONCE() to prevent the compiler from speculating
81518 * the increment to precede the early-exit check.
81519 */
81520- ACCESS_ONCE(rsp->n_barrier_done)++;
81521+ ACCESS_ONCE_RW(rsp->n_barrier_done)++;
81522 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
81523 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
81524 smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
81525@@ -2943,7 +2943,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
81526
81527 /* Increment ->n_barrier_done to prevent duplicate work. */
81528 smp_mb(); /* Keep increment after above mechanism. */
81529- ACCESS_ONCE(rsp->n_barrier_done)++;
81530+ ACCESS_ONCE_RW(rsp->n_barrier_done)++;
81531 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
81532 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
81533 smp_mb(); /* Keep increment before caller's subsequent code. */
81534@@ -2988,10 +2988,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
81535 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
81536 init_callback_list(rdp);
81537 rdp->qlen_lazy = 0;
81538- ACCESS_ONCE(rdp->qlen) = 0;
81539+ ACCESS_ONCE_RW(rdp->qlen) = 0;
81540 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
81541 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
81542- WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
81543+ WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
81544 rdp->cpu = cpu;
81545 rdp->rsp = rsp;
81546 rcu_boot_init_nocb_percpu_data(rdp);
81547@@ -3024,8 +3024,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
81548 rdp->blimit = blimit;
81549 init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
81550 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
81551- atomic_set(&rdp->dynticks->dynticks,
81552- (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
81553+ atomic_set_unchecked(&rdp->dynticks->dynticks,
81554+ (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
81555 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
81556
81557 /* Add CPU to rcu_node bitmasks. */
81558@@ -3120,7 +3120,7 @@ static int __init rcu_spawn_gp_kthread(void)
81559 struct task_struct *t;
81560
81561 for_each_rcu_flavor(rsp) {
81562- t = kthread_run(rcu_gp_kthread, rsp, rsp->name);
81563+ t = kthread_run(rcu_gp_kthread, rsp, "%s", rsp->name);
81564 BUG_ON(IS_ERR(t));
81565 rnp = rcu_get_root(rsp);
81566 raw_spin_lock_irqsave(&rnp->lock, flags);
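In the rcutree.c hunks, every write through ACCESS_ONCE() becomes ACCESS_ONCE_RW(). The series const-qualifies ACCESS_ONCE() so that stray writes through it fail to compile, and provides the RW form for deliberate writes. A sketch of the two macros as this patch family defines them elsewhere in the tree; treat the exact definitions as an approximation to be checked against the full patch:

/* Sketch: const- vs write-capable once-accessors.
 * Approximates the compiler.h definitions used by this patch
 * series; verify against the full patch before relying on them. */
#include <stdio.h>

#define ACCESS_ONCE(x)    (*(volatile const __typeof__(x) *)&(x))
#define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

int main(void)
{
	unsigned long qlen = 5;

	unsigned long snap = ACCESS_ONCE(qlen); /* read: const is fine  */
	ACCESS_ONCE_RW(qlen) = 0;               /* write: needs RW form */
	/* ACCESS_ONCE(qlen) = 0;  would not compile: assignment to const */

	printf("snap=%lu qlen=%lu\n", snap, ACCESS_ONCE(qlen));
	return 0;
}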
81567diff --git a/kernel/rcutree.h b/kernel/rcutree.h
81568index 4df5034..5ee93f2 100644
81569--- a/kernel/rcutree.h
81570+++ b/kernel/rcutree.h
81571@@ -87,7 +87,7 @@ struct rcu_dynticks {
81572 long long dynticks_nesting; /* Track irq/process nesting level. */
81573 /* Process level is worth LLONG_MAX/2. */
81574 int dynticks_nmi_nesting; /* Track NMI nesting level. */
81575- atomic_t dynticks; /* Even value for idle, else odd. */
81576+ atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
81577 #ifdef CONFIG_RCU_FAST_NO_HZ
81578 bool all_lazy; /* Are all CPU's CBs lazy? */
81579 unsigned long nonlazy_posted;
81580@@ -414,17 +414,17 @@ struct rcu_state {
81581 /* _rcu_barrier(). */
81582 /* End of fields guarded by barrier_mutex. */
81583
81584- atomic_long_t expedited_start; /* Starting ticket. */
81585- atomic_long_t expedited_done; /* Done ticket. */
81586- atomic_long_t expedited_wrap; /* # near-wrap incidents. */
81587- atomic_long_t expedited_tryfail; /* # acquisition failures. */
81588- atomic_long_t expedited_workdone1; /* # done by others #1. */
81589- atomic_long_t expedited_workdone2; /* # done by others #2. */
81590- atomic_long_t expedited_normal; /* # fallbacks to normal. */
81591- atomic_long_t expedited_stoppedcpus; /* # successful stop_cpus. */
81592- atomic_long_t expedited_done_tries; /* # tries to update _done. */
81593- atomic_long_t expedited_done_lost; /* # times beaten to _done. */
81594- atomic_long_t expedited_done_exit; /* # times exited _done loop. */
81595+ atomic_long_unchecked_t expedited_start; /* Starting ticket. */
81596+ atomic_long_t expedited_done; /* Done ticket. */
81597+ atomic_long_unchecked_t expedited_wrap; /* # near-wrap incidents. */
81598+ atomic_long_unchecked_t expedited_tryfail; /* # acquisition failures. */
81599+ atomic_long_unchecked_t expedited_workdone1; /* # done by others #1. */
81600+ atomic_long_unchecked_t expedited_workdone2; /* # done by others #2. */
81601+ atomic_long_unchecked_t expedited_normal; /* # fallbacks to normal. */
81602+ atomic_long_unchecked_t expedited_stoppedcpus; /* # successful stop_cpus. */
81603+ atomic_long_unchecked_t expedited_done_tries; /* # tries to update _done. */
81604+ atomic_long_unchecked_t expedited_done_lost; /* # times beaten to _done. */
81605+ atomic_long_unchecked_t expedited_done_exit; /* # times exited _done loop. */
81606
81607 unsigned long jiffies_force_qs; /* Time at which to invoke */
81608 /* force_quiescent_state(). */
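One detail in the rcutree.h hunk is worth calling out: expedited_done keeps its checked atomic_long_t type while the neighboring counters become unchecked, because expedited_done is advanced with atomic_long_cmpxchg() and carries algorithmic state, whereas the others are statistics. A compressed model of the start/done ticket protocol from synchronize_sched_expedited(), reduced to C11 atomics (names are illustrative, and the kernel uses wrap-safe ULONG_CMP_GE() comparisons that this sketch omits):

/* Sketch: the start/done ticket pattern used by
 * synchronize_sched_expedited(), reduced to C11 atomics. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_long start; /* ticket counter: statistics-grade   */
static atomic_long done;  /* progress marker: algorithmic state */

static void expedited(void)
{
	long snap = atomic_fetch_add(&start, 1) + 1; /* take a ticket */

	/* ... grace-period machinery would run here ... */

	for (;;) {
		long s = atomic_load(&done);
		if (s >= snap)
			break; /* someone newer already published progress */
		if (atomic_compare_exchange_weak(&done, &s, snap))
			break; /* we published our own ticket */
	}
}

int main(void)
{
	expedited();
	printf("start=%ld done=%ld\n",
	       atomic_load(&start), atomic_load(&done));
	return 0;
}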
81609diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
81610index 3db5a37..b395fb35 100644
81611--- a/kernel/rcutree_plugin.h
81612+++ b/kernel/rcutree_plugin.h
81613@@ -903,7 +903,7 @@ void synchronize_rcu_expedited(void)
81614
81615 /* Clean up and exit. */
81616 smp_mb(); /* ensure expedited GP seen before counter increment. */
81617- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
81618+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
81619 unlock_mb_ret:
81620 mutex_unlock(&sync_rcu_preempt_exp_mutex);
81621 mb_ret:
81622@@ -1451,7 +1451,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
81623 free_cpumask_var(cm);
81624 }
81625
81626-static struct smp_hotplug_thread rcu_cpu_thread_spec = {
81627+static struct smp_hotplug_thread rcu_cpu_thread_spec __read_only = {
81628 .store = &rcu_cpu_kthread_task,
81629 .thread_should_run = rcu_cpu_kthread_should_run,
81630 .thread_fn = rcu_cpu_kthread,
81631@@ -1916,7 +1916,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
81632 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
81633 printk(KERN_ERR "\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u %s\n",
81634 cpu, ticks_value, ticks_title,
81635- atomic_read(&rdtp->dynticks) & 0xfff,
81636+ atomic_read_unchecked(&rdtp->dynticks) & 0xfff,
81637 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
81638 rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
81639 fast_no_hz);
81640@@ -2079,7 +2079,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
81641
81642 /* Enqueue the callback on the nocb list and update counts. */
81643 old_rhpp = xchg(&rdp->nocb_tail, rhtp);
81644- ACCESS_ONCE(*old_rhpp) = rhp;
81645+ ACCESS_ONCE_RW(*old_rhpp) = rhp;
81646 atomic_long_add(rhcount, &rdp->nocb_q_count);
81647 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
81648
81649@@ -2219,12 +2219,12 @@ static int rcu_nocb_kthread(void *arg)
81650 * Extract queued callbacks, update counts, and wait
81651 * for a grace period to elapse.
81652 */
81653- ACCESS_ONCE(rdp->nocb_head) = NULL;
81654+ ACCESS_ONCE_RW(rdp->nocb_head) = NULL;
81655 tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
81656 c = atomic_long_xchg(&rdp->nocb_q_count, 0);
81657 cl = atomic_long_xchg(&rdp->nocb_q_count_lazy, 0);
81658- ACCESS_ONCE(rdp->nocb_p_count) += c;
81659- ACCESS_ONCE(rdp->nocb_p_count_lazy) += cl;
81660+ ACCESS_ONCE_RW(rdp->nocb_p_count) += c;
81661+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) += cl;
81662 rcu_nocb_wait_gp(rdp);
81663
81664 /* Each pass through the following loop invokes a callback. */
81665@@ -2246,8 +2246,8 @@ static int rcu_nocb_kthread(void *arg)
81666 list = next;
81667 }
81668 trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
81669- ACCESS_ONCE(rdp->nocb_p_count) -= c;
81670- ACCESS_ONCE(rdp->nocb_p_count_lazy) -= cl;
81671+ ACCESS_ONCE_RW(rdp->nocb_p_count) -= c;
81672+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) -= cl;
81673 rdp->n_nocbs_invoked += c;
81674 }
81675 return 0;
81676@@ -2274,7 +2274,7 @@ static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
81677 t = kthread_run(rcu_nocb_kthread, rdp,
81678 "rcuo%c/%d", rsp->abbr, cpu);
81679 BUG_ON(IS_ERR(t));
81680- ACCESS_ONCE(rdp->nocb_kthread) = t;
81681+ ACCESS_ONCE_RW(rdp->nocb_kthread) = t;
81682 }
81683 }
81684
81685diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
81686index cf6c174..a8f4b50 100644
81687--- a/kernel/rcutree_trace.c
81688+++ b/kernel/rcutree_trace.c
81689@@ -121,7 +121,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
81690 ulong2long(rdp->completed), ulong2long(rdp->gpnum),
81691 rdp->passed_quiesce, rdp->qs_pending);
81692 seq_printf(m, " dt=%d/%llx/%d df=%lu",
81693- atomic_read(&rdp->dynticks->dynticks),
81694+ atomic_read_unchecked(&rdp->dynticks->dynticks),
81695 rdp->dynticks->dynticks_nesting,
81696 rdp->dynticks->dynticks_nmi_nesting,
81697 rdp->dynticks_fqs);
81698@@ -182,17 +182,17 @@ static int show_rcuexp(struct seq_file *m, void *v)
81699 struct rcu_state *rsp = (struct rcu_state *)m->private;
81700
81701 seq_printf(m, "s=%lu d=%lu w=%lu tf=%lu wd1=%lu wd2=%lu n=%lu sc=%lu dt=%lu dl=%lu dx=%lu\n",
81702- atomic_long_read(&rsp->expedited_start),
81703+ atomic_long_read_unchecked(&rsp->expedited_start),
81704 atomic_long_read(&rsp->expedited_done),
81705- atomic_long_read(&rsp->expedited_wrap),
81706- atomic_long_read(&rsp->expedited_tryfail),
81707- atomic_long_read(&rsp->expedited_workdone1),
81708- atomic_long_read(&rsp->expedited_workdone2),
81709- atomic_long_read(&rsp->expedited_normal),
81710- atomic_long_read(&rsp->expedited_stoppedcpus),
81711- atomic_long_read(&rsp->expedited_done_tries),
81712- atomic_long_read(&rsp->expedited_done_lost),
81713- atomic_long_read(&rsp->expedited_done_exit));
81714+ atomic_long_read_unchecked(&rsp->expedited_wrap),
81715+ atomic_long_read_unchecked(&rsp->expedited_tryfail),
81716+ atomic_long_read_unchecked(&rsp->expedited_workdone1),
81717+ atomic_long_read_unchecked(&rsp->expedited_workdone2),
81718+ atomic_long_read_unchecked(&rsp->expedited_normal),
81719+ atomic_long_read_unchecked(&rsp->expedited_stoppedcpus),
81720+ atomic_long_read_unchecked(&rsp->expedited_done_tries),
81721+ atomic_long_read_unchecked(&rsp->expedited_done_lost),
81722+ atomic_long_read_unchecked(&rsp->expedited_done_exit));
81723 return 0;
81724 }
81725
81726diff --git a/kernel/resource.c b/kernel/resource.c
81727index d738698..5f8e60a 100644
81728--- a/kernel/resource.c
81729+++ b/kernel/resource.c
81730@@ -152,8 +152,18 @@ static const struct file_operations proc_iomem_operations = {
81731
81732 static int __init ioresources_init(void)
81733 {
81734+#ifdef CONFIG_GRKERNSEC_PROC_ADD
81735+#ifdef CONFIG_GRKERNSEC_PROC_USER
81736+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
81737+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
81738+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
81739+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
81740+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
81741+#endif
81742+#else
81743 proc_create("ioports", 0, NULL, &proc_ioports_operations);
81744 proc_create("iomem", 0, NULL, &proc_iomem_operations);
81745+#endif
81746 return 0;
81747 }
81748 __initcall(ioresources_init);
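The resource.c hunk narrows /proc/ioports and /proc/iomem from the proc default (world-readable) to root-only or root-plus-group when the corresponding GRKERNSEC_PROC_* options are set. The same mode selection, hoisted out of the preprocessor into a run-time helper for readability (choose_proc_mode is illustrative, not kernel API):

/* Sketch: the mode selection from the ioresources_init() hunk,
 * expressed as a run-time switch instead of #ifdefs. */
#include <stdio.h>
#include <sys/stat.h>

enum proc_policy { PROC_DEFAULT, PROC_USER, PROC_USERGROUP };

static mode_t choose_proc_mode(enum proc_policy p)
{
	switch (p) {
	case PROC_USER:      return S_IRUSR;           /* 0400 */
	case PROC_USERGROUP: return S_IRUSR | S_IRGRP; /* 0440 */
	default:             return 0; /* proc fills in its default */
	}
}

int main(void)
{
	printf("mode=%04o\n", (unsigned)choose_proc_mode(PROC_USERGROUP));
	return 0;
}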
81749diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
81750index 1d96dd0..994ff19 100644
81751--- a/kernel/rtmutex-tester.c
81752+++ b/kernel/rtmutex-tester.c
81753@@ -22,7 +22,7 @@
81754 #define MAX_RT_TEST_MUTEXES 8
81755
81756 static spinlock_t rttest_lock;
81757-static atomic_t rttest_event;
81758+static atomic_unchecked_t rttest_event;
81759
81760 struct test_thread_data {
81761 int opcode;
81762@@ -63,7 +63,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
81763
81764 case RTTEST_LOCKCONT:
81765 td->mutexes[td->opdata] = 1;
81766- td->event = atomic_add_return(1, &rttest_event);
81767+ td->event = atomic_add_return_unchecked(1, &rttest_event);
81768 return 0;
81769
81770 case RTTEST_RESET:
81771@@ -76,7 +76,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
81772 return 0;
81773
81774 case RTTEST_RESETEVENT:
81775- atomic_set(&rttest_event, 0);
81776+ atomic_set_unchecked(&rttest_event, 0);
81777 return 0;
81778
81779 default:
81780@@ -93,9 +93,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
81781 return ret;
81782
81783 td->mutexes[id] = 1;
81784- td->event = atomic_add_return(1, &rttest_event);
81785+ td->event = atomic_add_return_unchecked(1, &rttest_event);
81786 rt_mutex_lock(&mutexes[id]);
81787- td->event = atomic_add_return(1, &rttest_event);
81788+ td->event = atomic_add_return_unchecked(1, &rttest_event);
81789 td->mutexes[id] = 4;
81790 return 0;
81791
81792@@ -106,9 +106,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
81793 return ret;
81794
81795 td->mutexes[id] = 1;
81796- td->event = atomic_add_return(1, &rttest_event);
81797+ td->event = atomic_add_return_unchecked(1, &rttest_event);
81798 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
81799- td->event = atomic_add_return(1, &rttest_event);
81800+ td->event = atomic_add_return_unchecked(1, &rttest_event);
81801 td->mutexes[id] = ret ? 0 : 4;
81802 return ret ? -EINTR : 0;
81803
81804@@ -117,9 +117,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
81805 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
81806 return ret;
81807
81808- td->event = atomic_add_return(1, &rttest_event);
81809+ td->event = atomic_add_return_unchecked(1, &rttest_event);
81810 rt_mutex_unlock(&mutexes[id]);
81811- td->event = atomic_add_return(1, &rttest_event);
81812+ td->event = atomic_add_return_unchecked(1, &rttest_event);
81813 td->mutexes[id] = 0;
81814 return 0;
81815
81816@@ -166,7 +166,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
81817 break;
81818
81819 td->mutexes[dat] = 2;
81820- td->event = atomic_add_return(1, &rttest_event);
81821+ td->event = atomic_add_return_unchecked(1, &rttest_event);
81822 break;
81823
81824 default:
81825@@ -186,7 +186,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
81826 return;
81827
81828 td->mutexes[dat] = 3;
81829- td->event = atomic_add_return(1, &rttest_event);
81830+ td->event = atomic_add_return_unchecked(1, &rttest_event);
81831 break;
81832
81833 case RTTEST_LOCKNOWAIT:
81834@@ -198,7 +198,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
81835 return;
81836
81837 td->mutexes[dat] = 1;
81838- td->event = atomic_add_return(1, &rttest_event);
81839+ td->event = atomic_add_return_unchecked(1, &rttest_event);
81840 return;
81841
81842 default:
81843diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
81844index 64de5f8..7735e12 100644
81845--- a/kernel/sched/auto_group.c
81846+++ b/kernel/sched/auto_group.c
81847@@ -11,7 +11,7 @@
81848
81849 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
81850 static struct autogroup autogroup_default;
81851-static atomic_t autogroup_seq_nr;
81852+static atomic_unchecked_t autogroup_seq_nr;
81853
81854 void __init autogroup_init(struct task_struct *init_task)
81855 {
81856@@ -81,7 +81,7 @@ static inline struct autogroup *autogroup_create(void)
81857
81858 kref_init(&ag->kref);
81859 init_rwsem(&ag->lock);
81860- ag->id = atomic_inc_return(&autogroup_seq_nr);
81861+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
81862 ag->tg = tg;
81863 #ifdef CONFIG_RT_GROUP_SCHED
81864 /*
81865diff --git a/kernel/sched/core.c b/kernel/sched/core.c
81866index e8b3350..d83d44e 100644
81867--- a/kernel/sched/core.c
81868+++ b/kernel/sched/core.c
81869@@ -3440,7 +3440,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
81870 * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
81871 * positive (at least 1, or number of jiffies left till timeout) if completed.
81872 */
81873-long __sched
81874+long __sched __intentional_overflow(-1)
81875 wait_for_completion_interruptible_timeout(struct completion *x,
81876 unsigned long timeout)
81877 {
81878@@ -3457,7 +3457,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
81879 *
81880 * The return value is -ERESTARTSYS if interrupted, 0 if completed.
81881 */
81882-int __sched wait_for_completion_killable(struct completion *x)
81883+int __sched __intentional_overflow(-1) wait_for_completion_killable(struct completion *x)
81884 {
81885 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
81886 if (t == -ERESTARTSYS)
81887@@ -3478,7 +3478,7 @@ EXPORT_SYMBOL(wait_for_completion_killable);
81888 * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
81889 * positive (at least 1, or number of jiffies left till timeout) if completed.
81890 */
81891-long __sched
81892+long __sched __intentional_overflow(-1)
81893 wait_for_completion_killable_timeout(struct completion *x,
81894 unsigned long timeout)
81895 {
81896@@ -3704,6 +3704,8 @@ int can_nice(const struct task_struct *p, const int nice)
81897 /* convert nice value [19,-20] to rlimit style value [1,40] */
81898 int nice_rlim = 20 - nice;
81899
81900+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
81901+
81902 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
81903 capable(CAP_SYS_NICE));
81904 }
81905@@ -3737,7 +3739,8 @@ SYSCALL_DEFINE1(nice, int, increment)
81906 if (nice > 19)
81907 nice = 19;
81908
81909- if (increment < 0 && !can_nice(current, nice))
81910+ if (increment < 0 && (!can_nice(current, nice) ||
81911+ gr_handle_chroot_nice()))
81912 return -EPERM;
81913
81914 retval = security_task_setnice(current, nice);
81915@@ -3891,6 +3894,7 @@ recheck:
81916 unsigned long rlim_rtprio =
81917 task_rlimit(p, RLIMIT_RTPRIO);
81918
81919+ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
81920 /* can't set/change the rt policy */
81921 if (policy != p->policy && !rlim_rtprio)
81922 return -EPERM;
81923@@ -4988,7 +4992,7 @@ static void migrate_tasks(unsigned int dead_cpu)
81924
81925 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
81926
81927-static struct ctl_table sd_ctl_dir[] = {
81928+static ctl_table_no_const sd_ctl_dir[] __read_only = {
81929 {
81930 .procname = "sched_domain",
81931 .mode = 0555,
81932@@ -5005,17 +5009,17 @@ static struct ctl_table sd_ctl_root[] = {
81933 {}
81934 };
81935
81936-static struct ctl_table *sd_alloc_ctl_entry(int n)
81937+static ctl_table_no_const *sd_alloc_ctl_entry(int n)
81938 {
81939- struct ctl_table *entry =
81940+ ctl_table_no_const *entry =
81941 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
81942
81943 return entry;
81944 }
81945
81946-static void sd_free_ctl_entry(struct ctl_table **tablep)
81947+static void sd_free_ctl_entry(ctl_table_no_const *tablep)
81948 {
81949- struct ctl_table *entry;
81950+ ctl_table_no_const *entry;
81951
81952 /*
81953 * In the intermediate directories, both the child directory and
81954@@ -5023,22 +5027,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
81955 * will always be set. In the lowest directory the names are
81956 * static strings and all have proc handlers.
81957 */
81958- for (entry = *tablep; entry->mode; entry++) {
81959- if (entry->child)
81960- sd_free_ctl_entry(&entry->child);
81961+ for (entry = tablep; entry->mode; entry++) {
81962+ if (entry->child) {
81963+ sd_free_ctl_entry(entry->child);
81964+ pax_open_kernel();
81965+ entry->child = NULL;
81966+ pax_close_kernel();
81967+ }
81968 if (entry->proc_handler == NULL)
81969 kfree(entry->procname);
81970 }
81971
81972- kfree(*tablep);
81973- *tablep = NULL;
81974+ kfree(tablep);
81975 }
81976
81977 static int min_load_idx = 0;
81978 static int max_load_idx = CPU_LOAD_IDX_MAX-1;
81979
81980 static void
81981-set_table_entry(struct ctl_table *entry,
81982+set_table_entry(ctl_table_no_const *entry,
81983 const char *procname, void *data, int maxlen,
81984 umode_t mode, proc_handler *proc_handler,
81985 bool load_idx)
81986@@ -5058,7 +5065,7 @@ set_table_entry(struct ctl_table *entry,
81987 static struct ctl_table *
81988 sd_alloc_ctl_domain_table(struct sched_domain *sd)
81989 {
81990- struct ctl_table *table = sd_alloc_ctl_entry(13);
81991+ ctl_table_no_const *table = sd_alloc_ctl_entry(13);
81992
81993 if (table == NULL)
81994 return NULL;
81995@@ -5093,9 +5100,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
81996 return table;
81997 }
81998
81999-static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
82000+static ctl_table_no_const *sd_alloc_ctl_cpu_table(int cpu)
82001 {
82002- struct ctl_table *entry, *table;
82003+ ctl_table_no_const *entry, *table;
82004 struct sched_domain *sd;
82005 int domain_num = 0, i;
82006 char buf[32];
82007@@ -5122,11 +5129,13 @@ static struct ctl_table_header *sd_sysctl_header;
82008 static void register_sched_domain_sysctl(void)
82009 {
82010 int i, cpu_num = num_possible_cpus();
82011- struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
82012+ ctl_table_no_const *entry = sd_alloc_ctl_entry(cpu_num + 1);
82013 char buf[32];
82014
82015 WARN_ON(sd_ctl_dir[0].child);
82016+ pax_open_kernel();
82017 sd_ctl_dir[0].child = entry;
82018+ pax_close_kernel();
82019
82020 if (entry == NULL)
82021 return;
82022@@ -5149,8 +5158,12 @@ static void unregister_sched_domain_sysctl(void)
82023 if (sd_sysctl_header)
82024 unregister_sysctl_table(sd_sysctl_header);
82025 sd_sysctl_header = NULL;
82026- if (sd_ctl_dir[0].child)
82027- sd_free_ctl_entry(&sd_ctl_dir[0].child);
82028+ if (sd_ctl_dir[0].child) {
82029+ sd_free_ctl_entry(sd_ctl_dir[0].child);
82030+ pax_open_kernel();
82031+ sd_ctl_dir[0].child = NULL;
82032+ pax_close_kernel();
82033+ }
82034 }
82035 #else
82036 static void register_sched_domain_sysctl(void)
82037@@ -5249,7 +5262,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
82038 * happens before everything else. This has to be lower priority than
82039 * the notifier in the perf_event subsystem, though.
82040 */
82041-static struct notifier_block __cpuinitdata migration_notifier = {
82042+static struct notifier_block migration_notifier = {
82043 .notifier_call = migration_call,
82044 .priority = CPU_PRI_MIGRATION,
82045 };
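The sched/core.c sysctl changes mark the domain tables read-only (ctl_table_no_const, __read_only) and bracket the two pointer stores that must still happen, installing and clearing sd_ctl_dir[0].child, with pax_open_kernel()/pax_close_kernel(), which temporarily lift write protection on otherwise read-only kernel data. A userspace analogy using mprotect(); the pax_*_sim names are stand-ins, the real implementation uses per-architecture mechanisms such as toggling CR0.WP rather than page protections on a heap object, and error checks are omitted for brevity:

/* Sketch: userspace analogy for pax_open_kernel()/pax_close_kernel()
 * using mprotect() on a page-aligned object. Illustrative only. */
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

static long *ro_slot; /* stands in for sd_ctl_dir[0].child */

static void pax_open_kernel_sim(void)
{
	mprotect(ro_slot, sysconf(_SC_PAGESIZE), PROT_READ | PROT_WRITE);
}

static void pax_close_kernel_sim(void)
{
	mprotect(ro_slot, sysconf(_SC_PAGESIZE), PROT_READ);
}

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);

	ro_slot = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	*ro_slot = 0;
	mprotect(ro_slot, pagesz, PROT_READ); /* now "__read_only" */

	pax_open_kernel_sim();
	*ro_slot = 42;            /* the guarded write */
	pax_close_kernel_sim();

	printf("slot=%ld\n", *ro_slot);
	return 0;
}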
82046diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
82047index c61a614..d7f3d7e 100644
82048--- a/kernel/sched/fair.c
82049+++ b/kernel/sched/fair.c
82050@@ -831,7 +831,7 @@ void task_numa_fault(int node, int pages, bool migrated)
82051
82052 static void reset_ptenuma_scan(struct task_struct *p)
82053 {
82054- ACCESS_ONCE(p->mm->numa_scan_seq)++;
82055+ ACCESS_ONCE_RW(p->mm->numa_scan_seq)++;
82056 p->mm->numa_scan_offset = 0;
82057 }
82058
82059@@ -5686,7 +5686,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
82060 * run_rebalance_domains is triggered when needed from the scheduler tick.
82061 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
82062 */
82063-static void run_rebalance_domains(struct softirq_action *h)
82064+static void run_rebalance_domains(void)
82065 {
82066 int this_cpu = smp_processor_id();
82067 struct rq *this_rq = cpu_rq(this_cpu);
82068diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
82069index ce39224d..0e09343 100644
82070--- a/kernel/sched/sched.h
82071+++ b/kernel/sched/sched.h
82072@@ -1009,7 +1009,7 @@ struct sched_class {
82073 #ifdef CONFIG_FAIR_GROUP_SCHED
82074 void (*task_move_group) (struct task_struct *p, int on_rq);
82075 #endif
82076-};
82077+} __do_const;
82078
82079 #define sched_class_highest (&stop_sched_class)
82080 #define for_each_class(class) \
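Tagging struct sched_class with __do_const feeds the constify GCC plugin, which moves instances of function-pointer "ops" structures into rodata so their hooks cannot be rewritten at run time. The effect is what you would get by declaring every instance const by hand, as in this sketch (mock names; in the real tree the plugin does the annotation):

/* Sketch: what constify achieves, written out by hand.
 * A function-pointer "ops" table placed in .rodata. */
#include <stdio.h>

struct mock_sched_class {
	void (*enqueue_task)(int cpu);
	void (*dequeue_task)(int cpu);
};

static void enq(int cpu) { printf("enqueue on %d\n", cpu); }
static void deq(int cpu) { printf("dequeue on %d\n", cpu); }

/* const => lives in .rodata; overwriting a hook now requires a
 * write to a read-only mapping instead of a plain store. */
static const struct mock_sched_class fair_class = {
	.enqueue_task = enq,
	.dequeue_task = deq,
};

int main(void)
{
	fair_class.enqueue_task(0);
	/* fair_class.enqueue_task = deq;  would not compile */
	return 0;
}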
82081diff --git a/kernel/signal.c b/kernel/signal.c
82082index 113411b..20d0a99 100644
82083--- a/kernel/signal.c
82084+++ b/kernel/signal.c
82085@@ -51,12 +51,12 @@ static struct kmem_cache *sigqueue_cachep;
82086
82087 int print_fatal_signals __read_mostly;
82088
82089-static void __user *sig_handler(struct task_struct *t, int sig)
82090+static __sighandler_t sig_handler(struct task_struct *t, int sig)
82091 {
82092 return t->sighand->action[sig - 1].sa.sa_handler;
82093 }
82094
82095-static int sig_handler_ignored(void __user *handler, int sig)
82096+static int sig_handler_ignored(__sighandler_t handler, int sig)
82097 {
82098 /* Is it explicitly or implicitly ignored? */
82099 return handler == SIG_IGN ||
82100@@ -65,7 +65,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
82101
82102 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
82103 {
82104- void __user *handler;
82105+ __sighandler_t handler;
82106
82107 handler = sig_handler(t, sig);
82108
82109@@ -369,6 +369,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
82110 atomic_inc(&user->sigpending);
82111 rcu_read_unlock();
82112
82113+ if (!override_rlimit)
82114+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
82115+
82116 if (override_rlimit ||
82117 atomic_read(&user->sigpending) <=
82118 task_rlimit(t, RLIMIT_SIGPENDING)) {
82119@@ -496,7 +499,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
82120
82121 int unhandled_signal(struct task_struct *tsk, int sig)
82122 {
82123- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
82124+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
82125 if (is_global_init(tsk))
82126 return 1;
82127 if (handler != SIG_IGN && handler != SIG_DFL)
82128@@ -816,6 +819,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
82129 }
82130 }
82131
82132+ /* allow glibc communication via tgkill to other threads in our
82133+ thread group */
82134+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
82135+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
82136+ && gr_handle_signal(t, sig))
82137+ return -EPERM;
82138+
82139 return security_task_kill(t, info, sig, 0);
82140 }
82141
82142@@ -1199,7 +1209,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
82143 return send_signal(sig, info, p, 1);
82144 }
82145
82146-static int
82147+int
82148 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
82149 {
82150 return send_signal(sig, info, t, 0);
82151@@ -1236,6 +1246,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
82152 unsigned long int flags;
82153 int ret, blocked, ignored;
82154 struct k_sigaction *action;
82155+ int is_unhandled = 0;
82156
82157 spin_lock_irqsave(&t->sighand->siglock, flags);
82158 action = &t->sighand->action[sig-1];
82159@@ -1250,9 +1261,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
82160 }
82161 if (action->sa.sa_handler == SIG_DFL)
82162 t->signal->flags &= ~SIGNAL_UNKILLABLE;
82163+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
82164+ is_unhandled = 1;
82165 ret = specific_send_sig_info(sig, info, t);
82166 spin_unlock_irqrestore(&t->sighand->siglock, flags);
82167
82168+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
82169+ normal operation */
82170+ if (is_unhandled) {
82171+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
82172+ gr_handle_crash(t, sig);
82173+ }
82174+
82175 return ret;
82176 }
82177
82178@@ -1319,8 +1339,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
82179 ret = check_kill_permission(sig, info, p);
82180 rcu_read_unlock();
82181
82182- if (!ret && sig)
82183+ if (!ret && sig) {
82184 ret = do_send_sig_info(sig, info, p, true);
82185+ if (!ret)
82186+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
82187+ }
82188
82189 return ret;
82190 }
82191@@ -2926,7 +2949,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
82192 int error = -ESRCH;
82193
82194 rcu_read_lock();
82195- p = find_task_by_vpid(pid);
82196+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
82197+ /* allow glibc communication via tgkill to other threads in our
82198+ thread group */
82199+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
82200+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
82201+ p = find_task_by_vpid_unrestricted(pid);
82202+ else
82203+#endif
82204+ p = find_task_by_vpid(pid);
82205 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
82206 error = check_kill_permission(sig, info, p);
82207 /*
82208@@ -3219,6 +3250,16 @@ int __save_altstack(stack_t __user *uss, unsigned long sp)
82209 __put_user(t->sas_ss_size, &uss->ss_size);
82210 }
82211
82212+#ifdef CONFIG_X86
82213+void __save_altstack_ex(stack_t __user *uss, unsigned long sp)
82214+{
82215+ struct task_struct *t = current;
82216+ put_user_ex((void __user *)t->sas_ss_sp, &uss->ss_sp);
82217+ put_user_ex(sas_ss_flags(sp), &uss->ss_flags);
82218+ put_user_ex(t->sas_ss_size, &uss->ss_size);
82219+}
82220+#endif
82221+
82222 #ifdef CONFIG_COMPAT
82223 COMPAT_SYSCALL_DEFINE2(sigaltstack,
82224 const compat_stack_t __user *, uss_ptr,
82225@@ -3240,8 +3281,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
82226 }
82227 seg = get_fs();
82228 set_fs(KERNEL_DS);
82229- ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
82230- (stack_t __force __user *) &uoss,
82231+ ret = do_sigaltstack((stack_t __force_user *) (uss_ptr ? &uss : NULL),
82232+ (stack_t __force_user *) &uoss,
82233 compat_user_stack_pointer());
82234 set_fs(seg);
82235 if (ret >= 0 && uoss_ptr) {
82236@@ -3268,6 +3309,16 @@ int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
82237 __put_user(sas_ss_flags(sp), &uss->ss_flags) |
82238 __put_user(t->sas_ss_size, &uss->ss_size);
82239 }
82240+
82241+#ifdef CONFIG_X86
82242+void __compat_save_altstack_ex(compat_stack_t __user *uss, unsigned long sp)
82243+{
82244+ struct task_struct *t = current;
82245+ put_user_ex(ptr_to_compat((void __user *)t->sas_ss_sp), &uss->ss_sp);
82246+ put_user_ex(sas_ss_flags(sp), &uss->ss_flags);
82247+ put_user_ex(t->sas_ss_size, &uss->ss_size);
82248+}
82249+#endif
82250 #endif
82251
82252 #ifdef __ARCH_WANT_SYS_SIGPENDING
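The new __save_altstack_ex() and __compat_save_altstack_ex() variants appear to exist for x86 callers that run inside a put_user_try/put_user_catch region, where stores use the batched put_user_ex() accessors and faults are checked once at the end rather than per store. A self-contained model of that batching idea in plain C (try_put and the sticky flag are illustrative, not the kernel macros):

/* Sketch: models the put_user_try/put_user_ex/put_user_catch idea:
 * batch several user stores, test for a fault once at the end. */
#include <stdio.h>

static int uaccess_err; /* sticky fault flag for the whole batch */

static void try_put(long *dst, long val)
{
	if (!dst) {             /* stand-in for a faulting store */
		uaccess_err = 1;
		return;
	}
	*dst = val;
}

int main(void)
{
	long ss_sp, ss_flags, ss_size;

	uaccess_err = 0;
	try_put(&ss_sp,    0x1000); /* three stores, no branch per store */
	try_put(&ss_flags, 0);
	try_put(&ss_size,  8192);

	if (uaccess_err)            /* single check at the end */
		return 1;

	printf("sp=%ld flags=%ld size=%ld\n", ss_sp, ss_flags, ss_size);
	return 0;
}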
82253diff --git a/kernel/smp.c b/kernel/smp.c
82254index 4dba0f7..fe9f773 100644
82255--- a/kernel/smp.c
82256+++ b/kernel/smp.c
82257@@ -73,7 +73,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
82258 return NOTIFY_OK;
82259 }
82260
82261-static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
82262+static struct notifier_block hotplug_cfd_notifier = {
82263 .notifier_call = hotplug_cfd,
82264 };
82265
82266diff --git a/kernel/smpboot.c b/kernel/smpboot.c
82267index 02fc5c9..e54c335 100644
82268--- a/kernel/smpboot.c
82269+++ b/kernel/smpboot.c
82270@@ -288,7 +288,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
82271 }
82272 smpboot_unpark_thread(plug_thread, cpu);
82273 }
82274- list_add(&plug_thread->list, &hotplug_threads);
82275+ pax_list_add(&plug_thread->list, &hotplug_threads);
82276 out:
82277 mutex_unlock(&smpboot_threads_lock);
82278 return ret;
82279@@ -305,7 +305,7 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
82280 {
82281 get_online_cpus();
82282 mutex_lock(&smpboot_threads_lock);
82283- list_del(&plug_thread->list);
82284+ pax_list_del(&plug_thread->list);
82285 smpboot_destroy_threads(plug_thread);
82286 mutex_unlock(&smpboot_threads_lock);
82287 put_online_cpus();
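smpboot.c switches the hotplug-thread list operations to pax_list_add()/pax_list_del(). As best as can be told from the rest of the series, these are list primitives for nodes embedded in __read_only objects (such as the smp_hotplug_thread descriptors made __read_only in rcutree_plugin.h and softirq.c): they perform the ordinary list pointer writes inside an open-kernel window. A self-contained model with the kernel list type and the protection toggles stubbed out:

/* Sketch: what pax_list_add()/pax_list_del() plausibly do, with the
 * kernel list type and write-protection toggles stubbed out so the
 * model compiles on its own. Illustrative only. */
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void pax_open_kernel_stub(void)  { /* lift write protection */ }
static void pax_close_kernel_stub(void) { /* restore protection    */ }

static void list_add_stub(struct list_head *new, struct list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

static void pax_list_add_demo(struct list_head *new, struct list_head *head)
{
	pax_open_kernel_stub();   /* node may sit in a read-only object */
	list_add_stub(new, head);
	pax_close_kernel_stub();
}

int main(void)
{
	struct list_head threads = { &threads, &threads }; /* empty list */
	struct list_head plug    = { 0 };

	pax_list_add_demo(&plug, &threads);
	printf("linked: %d\n", threads.next == &plug);
	return 0;
}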
82288diff --git a/kernel/softirq.c b/kernel/softirq.c
82289index 3d6833f..da6d93d 100644
82290--- a/kernel/softirq.c
82291+++ b/kernel/softirq.c
82292@@ -53,11 +53,11 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
82293 EXPORT_SYMBOL(irq_stat);
82294 #endif
82295
82296-static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
82297+static struct softirq_action softirq_vec[NR_SOFTIRQS] __read_only __aligned(PAGE_SIZE);
82298
82299 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
82300
82301-char *softirq_to_name[NR_SOFTIRQS] = {
82302+const char * const softirq_to_name[NR_SOFTIRQS] = {
82303 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
82304 "TASKLET", "SCHED", "HRTIMER", "RCU"
82305 };
82306@@ -250,7 +250,7 @@ restart:
82307 kstat_incr_softirqs_this_cpu(vec_nr);
82308
82309 trace_softirq_entry(vec_nr);
82310- h->action(h);
82311+ h->action();
82312 trace_softirq_exit(vec_nr);
82313 if (unlikely(prev_count != preempt_count())) {
82314 printk(KERN_ERR "huh, entered softirq %u %s %p"
82315@@ -405,7 +405,7 @@ void __raise_softirq_irqoff(unsigned int nr)
82316 or_softirq_pending(1UL << nr);
82317 }
82318
82319-void open_softirq(int nr, void (*action)(struct softirq_action *))
82320+void __init open_softirq(int nr, void (*action)(void))
82321 {
82322 softirq_vec[nr].action = action;
82323 }
82324@@ -461,7 +461,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
82325
82326 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
82327
82328-static void tasklet_action(struct softirq_action *a)
82329+static void tasklet_action(void)
82330 {
82331 struct tasklet_struct *list;
82332
82333@@ -496,7 +496,7 @@ static void tasklet_action(struct softirq_action *a)
82334 }
82335 }
82336
82337-static void tasklet_hi_action(struct softirq_action *a)
82338+static void tasklet_hi_action(void)
82339 {
82340 struct tasklet_struct *list;
82341
82342@@ -730,7 +730,7 @@ static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
82343 return NOTIFY_OK;
82344 }
82345
82346-static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
82347+static struct notifier_block remote_softirq_cpu_notifier = {
82348 .notifier_call = remote_softirq_cpu_notify,
82349 };
82350
82351@@ -847,11 +847,11 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
82352 return NOTIFY_OK;
82353 }
82354
82355-static struct notifier_block __cpuinitdata cpu_nfb = {
82356+static struct notifier_block cpu_nfb = {
82357 .notifier_call = cpu_callback
82358 };
82359
82360-static struct smp_hotplug_thread softirq_threads = {
82361+static struct smp_hotplug_thread softirq_threads __read_only = {
82362 .store = &ksoftirqd,
82363 .thread_should_run = ksoftirqd_should_run,
82364 .thread_fn = run_ksoftirqd,
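The softirq.c hunks drop the unused struct softirq_action * argument from every handler, so the vector entries have the exact type void (*)(void), and make the vector itself __read_only with open_softirq() demoted to __init, confining registration to boot-time code while the table is still writable. A compact model of the resulting shape:

/* Sketch: a fixed-size table of void (*)(void) handlers that is
 * populated once at startup and treated as read-only afterwards. */
#include <stdio.h>

#define NR_SOFTIRQS_DEMO 2

static void (*vec_demo[NR_SOFTIRQS_DEMO])(void);

static void timer_action(void)        { puts("TIMER"); }
static void tasklet_action_demo(void) { puts("TASKLET"); }

static void open_softirq_demo(int nr, void (*action)(void))
{
	vec_demo[nr] = action; /* init-time only in the hardened kernel */
}

int main(void)
{
	open_softirq_demo(0, timer_action);
	open_softirq_demo(1, tasklet_action_demo);

	for (int i = 0; i < NR_SOFTIRQS_DEMO; i++)
		vec_demo[i]();     /* h->action() with no dummy argument */
	return 0;
}

Narrowing the prototype means a corrupted vector entry can only ever reach functions of this exact type, which shrinks the target set for function-pointer overwrite attacks.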
82365diff --git a/kernel/srcu.c b/kernel/srcu.c
82366index 01d5ccb..cdcbee6 100644
82367--- a/kernel/srcu.c
82368+++ b/kernel/srcu.c
82369@@ -300,9 +300,9 @@ int __srcu_read_lock(struct srcu_struct *sp)
82370
82371 idx = ACCESS_ONCE(sp->completed) & 0x1;
82372 preempt_disable();
82373- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
82374+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
82375 smp_mb(); /* B */ /* Avoid leaking the critical section. */
82376- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
82377+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
82378 preempt_enable();
82379 return idx;
82380 }
82381diff --git a/kernel/sys.c b/kernel/sys.c
82382index 2bbd9a7..0875671 100644
82383--- a/kernel/sys.c
82384+++ b/kernel/sys.c
82385@@ -163,6 +163,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
82386 error = -EACCES;
82387 goto out;
82388 }
82389+
82390+ if (gr_handle_chroot_setpriority(p, niceval)) {
82391+ error = -EACCES;
82392+ goto out;
82393+ }
82394+
82395 no_nice = security_task_setnice(p, niceval);
82396 if (no_nice) {
82397 error = no_nice;
82398@@ -626,6 +632,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
82399 goto error;
82400 }
82401
82402+ if (gr_check_group_change(new->gid, new->egid, INVALID_GID))
82403+ goto error;
82404+
82405 if (rgid != (gid_t) -1 ||
82406 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
82407 new->sgid = new->egid;
82408@@ -661,6 +670,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
82409 old = current_cred();
82410
82411 retval = -EPERM;
82412+
82413+ if (gr_check_group_change(kgid, kgid, kgid))
82414+ goto error;
82415+
82416 if (nsown_capable(CAP_SETGID))
82417 new->gid = new->egid = new->sgid = new->fsgid = kgid;
82418 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
82419@@ -678,7 +691,7 @@ error:
82420 /*
82421 * change the user struct in a credentials set to match the new UID
82422 */
82423-static int set_user(struct cred *new)
82424+int set_user(struct cred *new)
82425 {
82426 struct user_struct *new_user;
82427
82428@@ -758,6 +771,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
82429 goto error;
82430 }
82431
82432+ if (gr_check_user_change(new->uid, new->euid, INVALID_UID))
82433+ goto error;
82434+
82435 if (!uid_eq(new->uid, old->uid)) {
82436 retval = set_user(new);
82437 if (retval < 0)
82438@@ -808,6 +824,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
82439 old = current_cred();
82440
82441 retval = -EPERM;
82442+
82443+ if (gr_check_crash_uid(kuid))
82444+ goto error;
82445+ if (gr_check_user_change(kuid, kuid, kuid))
82446+ goto error;
82447+
82448 if (nsown_capable(CAP_SETUID)) {
82449 new->suid = new->uid = kuid;
82450 if (!uid_eq(kuid, old->uid)) {
82451@@ -877,6 +899,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
82452 goto error;
82453 }
82454
82455+ if (gr_check_user_change(kruid, keuid, INVALID_UID))
82456+ goto error;
82457+
82458 if (ruid != (uid_t) -1) {
82459 new->uid = kruid;
82460 if (!uid_eq(kruid, old->uid)) {
82461@@ -959,6 +984,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
82462 goto error;
82463 }
82464
82465+ if (gr_check_group_change(krgid, kegid, INVALID_GID))
82466+ goto error;
82467+
82468 if (rgid != (gid_t) -1)
82469 new->gid = krgid;
82470 if (egid != (gid_t) -1)
82471@@ -1020,12 +1048,16 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
82472 uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
82473 nsown_capable(CAP_SETUID)) {
82474 if (!uid_eq(kuid, old->fsuid)) {
82475+ if (gr_check_user_change(INVALID_UID, INVALID_UID, kuid))
82476+ goto error;
82477+
82478 new->fsuid = kuid;
82479 if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
82480 goto change_okay;
82481 }
82482 }
82483
82484+error:
82485 abort_creds(new);
82486 return old_fsuid;
82487
82488@@ -1058,12 +1090,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
82489 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
82490 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
82491 nsown_capable(CAP_SETGID)) {
82492+ if (gr_check_group_change(INVALID_GID, INVALID_GID, kgid))
82493+ goto error;
82494+
82495 if (!gid_eq(kgid, old->fsgid)) {
82496 new->fsgid = kgid;
82497 goto change_okay;
82498 }
82499 }
82500
82501+error:
82502 abort_creds(new);
82503 return old_fsgid;
82504
82505@@ -1432,19 +1468,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
82506 return -EFAULT;
82507
82508 down_read(&uts_sem);
82509- error = __copy_to_user(&name->sysname, &utsname()->sysname,
82510+ error = __copy_to_user(name->sysname, &utsname()->sysname,
82511 __OLD_UTS_LEN);
82512 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
82513- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
82514+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
82515 __OLD_UTS_LEN);
82516 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
82517- error |= __copy_to_user(&name->release, &utsname()->release,
82518+ error |= __copy_to_user(name->release, &utsname()->release,
82519 __OLD_UTS_LEN);
82520 error |= __put_user(0, name->release + __OLD_UTS_LEN);
82521- error |= __copy_to_user(&name->version, &utsname()->version,
82522+ error |= __copy_to_user(name->version, &utsname()->version,
82523 __OLD_UTS_LEN);
82524 error |= __put_user(0, name->version + __OLD_UTS_LEN);
82525- error |= __copy_to_user(&name->machine, &utsname()->machine,
82526+ error |= __copy_to_user(name->machine, &utsname()->machine,
82527 __OLD_UTS_LEN);
82528 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
82529 up_read(&uts_sem);
82530@@ -1646,6 +1682,13 @@ int do_prlimit(struct task_struct *tsk, unsigned int resource,
82531 */
82532 new_rlim->rlim_cur = 1;
82533 }
82534+ /* Handle the case where a fork and setuid occur and then RLIMIT_NPROC
82535+ is changed to a lower value. Since tasks can be created by the same
82536+ user in between this limit change and an execve by this task, force
82537+ a recheck only for this task by setting PF_NPROC_EXCEEDED
82538+ */
82539+ if (resource == RLIMIT_NPROC && tsk->real_cred->user != INIT_USER)
82540+ tsk->flags |= PF_NPROC_EXCEEDED;
82541 }
82542 if (!retval) {
82543 if (old_rlim)
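The do_prlimit() hunk closes a known race: a process can fork up to the old RLIMIT_NPROC, have its limit lowered, and still execve() without any recheck. Setting PF_NPROC_EXCEEDED forces the next execve() of this task to re-test the limit, mirroring the flag the kernel already sets in set_user(). A reduced model of the flag's lifecycle (names mirror the kernel's, but this is a sketch):

/* Sketch: the PF_NPROC_EXCEEDED recheck, reduced to flags and
 * counters. Names mirror the kernel's, but this is a model. */
#include <stdio.h>

#define PF_NPROC_EXCEEDED 0x1000

struct task_demo {
	unsigned flags;
	long nproc_limit;
};

static long user_processes = 40; /* processes the user already owns */

static void lower_rlimit_nproc(struct task_demo *t, long new_limit)
{
	t->nproc_limit = new_limit;
	t->flags |= PF_NPROC_EXCEEDED; /* force a recheck at execve time */
}

static int execve_check(struct task_demo *t)
{
	if ((t->flags & PF_NPROC_EXCEEDED) && user_processes > t->nproc_limit)
		return -1;             /* -EAGAIN in the kernel */
	t->flags &= ~PF_NPROC_EXCEEDED;
	return 0;
}

int main(void)
{
	struct task_demo t = { 0, 64 };

	lower_rlimit_nproc(&t, 32);    /* limit drops below current count */
	printf("execve: %s\n", execve_check(&t) ? "refused" : "allowed");
	return 0;
}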
82544diff --git a/kernel/sysctl.c b/kernel/sysctl.c
82545index 9edcf45..713c960 100644
82546--- a/kernel/sysctl.c
82547+++ b/kernel/sysctl.c
82548@@ -93,7 +93,6 @@
82549
82550
82551 #if defined(CONFIG_SYSCTL)
82552-
82553 /* External variables not in a header file. */
82554 extern int sysctl_overcommit_memory;
82555 extern int sysctl_overcommit_ratio;
82556@@ -119,18 +118,18 @@ extern int blk_iopoll_enabled;
82557
82558 /* Constants used for minimum and maximum */
82559 #ifdef CONFIG_LOCKUP_DETECTOR
82560-static int sixty = 60;
82561-static int neg_one = -1;
82562+static int sixty __read_only = 60;
82563 #endif
82564
82565-static int zero;
82566-static int __maybe_unused one = 1;
82567-static int __maybe_unused two = 2;
82568-static int __maybe_unused three = 3;
82569-static unsigned long one_ul = 1;
82570-static int one_hundred = 100;
82571+static int neg_one __read_only = -1;
82572+static int zero __read_only = 0;
82573+static int __maybe_unused one __read_only = 1;
82574+static int __maybe_unused two __read_only = 2;
82575+static int __maybe_unused three __read_only = 3;
82576+static unsigned long one_ul __read_only = 1;
82577+static int one_hundred __read_only = 100;
82578 #ifdef CONFIG_PRINTK
82579-static int ten_thousand = 10000;
82580+static int ten_thousand __read_only = 10000;
82581 #endif
82582
82583 /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
82584@@ -177,10 +176,8 @@ static int proc_taint(struct ctl_table *table, int write,
82585 void __user *buffer, size_t *lenp, loff_t *ppos);
82586 #endif
82587
82588-#ifdef CONFIG_PRINTK
82589 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
82590 void __user *buffer, size_t *lenp, loff_t *ppos);
82591-#endif
82592
82593 static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
82594 void __user *buffer, size_t *lenp, loff_t *ppos);
82595@@ -211,6 +208,8 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
82596
82597 #endif
82598
82599+extern struct ctl_table grsecurity_table[];
82600+
82601 static struct ctl_table kern_table[];
82602 static struct ctl_table vm_table[];
82603 static struct ctl_table fs_table[];
82604@@ -225,6 +224,20 @@ extern struct ctl_table epoll_table[];
82605 int sysctl_legacy_va_layout;
82606 #endif
82607
82608+#ifdef CONFIG_PAX_SOFTMODE
82609+static ctl_table pax_table[] = {
82610+ {
82611+ .procname = "softmode",
82612+ .data = &pax_softmode,
82613+ .maxlen = sizeof(unsigned int),
82614+ .mode = 0600,
82615+ .proc_handler = &proc_dointvec,
82616+ },
82617+
82618+ { }
82619+};
82620+#endif
82621+
82622 /* The default sysctl tables: */
82623
82624 static struct ctl_table sysctl_base_table[] = {
82625@@ -273,6 +286,22 @@ static int max_extfrag_threshold = 1000;
82626 #endif
82627
82628 static struct ctl_table kern_table[] = {
82629+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
82630+ {
82631+ .procname = "grsecurity",
82632+ .mode = 0500,
82633+ .child = grsecurity_table,
82634+ },
82635+#endif
82636+
82637+#ifdef CONFIG_PAX_SOFTMODE
82638+ {
82639+ .procname = "pax",
82640+ .mode = 0500,
82641+ .child = pax_table,
82642+ },
82643+#endif
82644+
82645 {
82646 .procname = "sched_child_runs_first",
82647 .data = &sysctl_sched_child_runs_first,
82648@@ -607,7 +636,7 @@ static struct ctl_table kern_table[] = {
82649 .data = &modprobe_path,
82650 .maxlen = KMOD_PATH_LEN,
82651 .mode = 0644,
82652- .proc_handler = proc_dostring,
82653+ .proc_handler = proc_dostring_modpriv,
82654 },
82655 {
82656 .procname = "modules_disabled",
82657@@ -774,16 +803,20 @@ static struct ctl_table kern_table[] = {
82658 .extra1 = &zero,
82659 .extra2 = &one,
82660 },
82661+#endif
82662 {
82663 .procname = "kptr_restrict",
82664 .data = &kptr_restrict,
82665 .maxlen = sizeof(int),
82666 .mode = 0644,
82667 .proc_handler = proc_dointvec_minmax_sysadmin,
82668+#ifdef CONFIG_GRKERNSEC_HIDESYM
82669+ .extra1 = &two,
82670+#else
82671 .extra1 = &zero,
82672+#endif
82673 .extra2 = &two,
82674 },
82675-#endif
82676 {
82677 .procname = "ngroups_max",
82678 .data = &ngroups_max,
82679@@ -1025,10 +1058,17 @@ static struct ctl_table kern_table[] = {
82680 */
82681 {
82682 .procname = "perf_event_paranoid",
82683- .data = &sysctl_perf_event_paranoid,
82684- .maxlen = sizeof(sysctl_perf_event_paranoid),
82685+ .data = &sysctl_perf_event_legitimately_concerned,
82686+ .maxlen = sizeof(sysctl_perf_event_legitimately_concerned),
82687 .mode = 0644,
82688- .proc_handler = proc_dointvec,
82689+ /* go ahead, be a hero */
82690+ .proc_handler = proc_dointvec_minmax_sysadmin,
82691+ .extra1 = &neg_one,
82692+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
82693+ .extra2 = &three,
82694+#else
82695+ .extra2 = &two,
82696+#endif
82697 },
82698 {
82699 .procname = "perf_event_mlock_kb",
82700@@ -1282,6 +1322,13 @@ static struct ctl_table vm_table[] = {
82701 .proc_handler = proc_dointvec_minmax,
82702 .extra1 = &zero,
82703 },
82704+ {
82705+ .procname = "heap_stack_gap",
82706+ .data = &sysctl_heap_stack_gap,
82707+ .maxlen = sizeof(sysctl_heap_stack_gap),
82708+ .mode = 0644,
82709+ .proc_handler = proc_doulongvec_minmax,
82710+ },
82711 #else
82712 {
82713 .procname = "nr_trim_pages",
82714@@ -1746,6 +1793,16 @@ int proc_dostring(struct ctl_table *table, int write,
82715 buffer, lenp, ppos);
82716 }
82717
82718+int proc_dostring_modpriv(struct ctl_table *table, int write,
82719+ void __user *buffer, size_t *lenp, loff_t *ppos)
82720+{
82721+ if (write && !capable(CAP_SYS_MODULE))
82722+ return -EPERM;
82723+
82724+ return _proc_do_string(table->data, table->maxlen, write,
82725+ buffer, lenp, ppos);
82726+}
82727+
82728 static size_t proc_skip_spaces(char **buf)
82729 {
82730 size_t ret;
82731@@ -1851,6 +1908,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
82732 len = strlen(tmp);
82733 if (len > *size)
82734 len = *size;
82735+ if (len > sizeof(tmp))
82736+ len = sizeof(tmp);
82737 if (copy_to_user(*buf, tmp, len))
82738 return -EFAULT;
82739 *size -= len;
82740@@ -2015,7 +2074,7 @@ int proc_dointvec(struct ctl_table *table, int write,
82741 static int proc_taint(struct ctl_table *table, int write,
82742 void __user *buffer, size_t *lenp, loff_t *ppos)
82743 {
82744- struct ctl_table t;
82745+ ctl_table_no_const t;
82746 unsigned long tmptaint = get_taint();
82747 int err;
82748
82749@@ -2043,7 +2102,6 @@ static int proc_taint(struct ctl_table *table, int write,
82750 return err;
82751 }
82752
82753-#ifdef CONFIG_PRINTK
82754 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
82755 void __user *buffer, size_t *lenp, loff_t *ppos)
82756 {
82757@@ -2052,7 +2110,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
82758
82759 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
82760 }
82761-#endif
82762
82763 struct do_proc_dointvec_minmax_conv_param {
82764 int *min;
82765@@ -2199,8 +2256,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
82766 *i = val;
82767 } else {
82768 val = convdiv * (*i) / convmul;
82769- if (!first)
82770+ if (!first) {
82771 err = proc_put_char(&buffer, &left, '\t');
82772+ if (err)
82773+ break;
82774+ }
82775 err = proc_put_long(&buffer, &left, val, false);
82776 if (err)
82777 break;
82778@@ -2592,6 +2652,12 @@ int proc_dostring(struct ctl_table *table, int write,
82779 return -ENOSYS;
82780 }
82781
82782+int proc_dostring_modpriv(struct ctl_table *table, int write,
82783+ void __user *buffer, size_t *lenp, loff_t *ppos)
82784+{
82785+ return -ENOSYS;
82786+}
82787+
82788 int proc_dointvec(struct ctl_table *table, int write,
82789 void __user *buffer, size_t *lenp, loff_t *ppos)
82790 {
82791@@ -2648,5 +2714,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
82792 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
82793 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
82794 EXPORT_SYMBOL(proc_dostring);
82795+EXPORT_SYMBOL(proc_dostring_modpriv);
82796 EXPORT_SYMBOL(proc_doulongvec_minmax);
82797 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
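The sysctl.c hunks above share one pattern: writes to sensitive knobs are gated on a capability (proc_dostring_modpriv refuses writes without CAP_SYS_MODULE before delegating to _proc_do_string), and integer knobs such as kptr_restrict and perf_event_paranoid gain hard bounds through .extra1/.extra2 with proc_dointvec_minmax_sysadmin. A minimal sketch of the same capability gate in an out-of-tree table follows; the "demo" names are hypothetical, and proc_dostring is used for the pass-through because, unlike _proc_do_string, it is exported (see the EXPORT_SYMBOL list above).

    #include <linux/sysctl.h>
    #include <linux/capability.h>

    static char demo_path[256] = "/sbin/demo";

    static int proc_dostring_capped(struct ctl_table *table, int write,
                                    void __user *buffer, size_t *lenp,
                                    loff_t *ppos)
    {
            /* same shape as proc_dostring_modpriv above: writes need
             * CAP_SYS_MODULE, reads pass straight through */
            if (write && !capable(CAP_SYS_MODULE))
                    return -EPERM;
            return proc_dostring(table, write, buffer, lenp, ppos);
    }

    static struct ctl_table demo_table[] = {
            {
                    .procname     = "demo_path",
                    .data         = demo_path,
                    .maxlen       = sizeof(demo_path),
                    .mode         = 0644,
                    .proc_handler = proc_dostring_capped,
            },
            { }
    };

Registering this with register_sysctl("kernel", demo_table) would publish it as /proc/sys/kernel/demo_path, writable only by CAP_SYS_MODULE holders.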
82798diff --git a/kernel/taskstats.c b/kernel/taskstats.c
82799index 145bb4d..b2aa969 100644
82800--- a/kernel/taskstats.c
82801+++ b/kernel/taskstats.c
82802@@ -28,9 +28,12 @@
82803 #include <linux/fs.h>
82804 #include <linux/file.h>
82805 #include <linux/pid_namespace.h>
82806+#include <linux/grsecurity.h>
82807 #include <net/genetlink.h>
82808 #include <linux/atomic.h>
82809
82810+extern int gr_is_taskstats_denied(int pid);
82811+
82812 /*
82813 * Maximum length of a cpumask that can be specified in
82814 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
82815@@ -570,6 +573,9 @@ err:
82816
82817 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
82818 {
82819+ if (gr_is_taskstats_denied(current->pid))
82820+ return -EACCES;
82821+
82822 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
82823 return cmd_attr_register_cpumask(info);
82824 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])

82825diff --git a/kernel/time.c b/kernel/time.c
82826index d3617db..c98bbe9 100644
82827--- a/kernel/time.c
82828+++ b/kernel/time.c
82829@@ -172,6 +172,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
82830 return error;
82831
82832 if (tz) {
82833+	/* do_settimeofday(), called below when tv is set, already logs;
82834+	   log here only for a timezone-only change */
82835+ if (!tv)
82836+ gr_log_timechange();
82837+
82838 sys_tz = *tz;
82839 update_vsyscall_tz();
82840 if (firsttime) {
82841@@ -502,7 +507,7 @@ EXPORT_SYMBOL(usecs_to_jiffies);
82842 * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec
82843 * value to a scaled second value.
82844 */
82845-unsigned long
82846+unsigned long __intentional_overflow(-1)
82847 timespec_to_jiffies(const struct timespec *value)
82848 {
82849 unsigned long sec = value->tv_sec;
82850diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
82851index f11d83b..d016d91 100644
82852--- a/kernel/time/alarmtimer.c
82853+++ b/kernel/time/alarmtimer.c
82854@@ -750,7 +750,7 @@ static int __init alarmtimer_init(void)
82855 struct platform_device *pdev;
82856 int error = 0;
82857 int i;
82858- struct k_clock alarm_clock = {
82859+ static struct k_clock alarm_clock = {
82860 .clock_getres = alarm_clock_getres,
82861 .clock_get = alarm_clock_get,
82862 .timer_create = alarm_timer_create,
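The single alarmtimer change gives alarm_clock static storage duration. Under the PaX constify plugin enabled elsewhere in this patch, all-function-pointer structures such as struct k_clock are treated as read-only, so an instance assembled on the stack at runtime cannot be protected; a static, compile-time-initialized object can. A hypothetical before/after sketch of the same idea:

    /* hypothetical ops table, for illustration only */
    struct demo_ops {
            int (*probe)(void);
    };

    static int demo_probe(void) { return 0; }

    /* runtime, on-stack construction: the object stays writable */
    static void demo_stack_form(void)
    {
            struct demo_ops ops = { .probe = demo_probe };
            (void)ops;
    }

    /* compile-time construction: the object can live in R/O memory */
    static struct demo_ops demo_static_form = {
            .probe = demo_probe,
    };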
82863diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
82864index baeeb5c..c22704a 100644
82865--- a/kernel/time/timekeeping.c
82866+++ b/kernel/time/timekeeping.c
82867@@ -15,6 +15,7 @@
82868 #include <linux/init.h>
82869 #include <linux/mm.h>
82870 #include <linux/sched.h>
82871+#include <linux/grsecurity.h>
82872 #include <linux/syscore_ops.h>
82873 #include <linux/clocksource.h>
82874 #include <linux/jiffies.h>
82875@@ -495,6 +496,8 @@ int do_settimeofday(const struct timespec *tv)
82876 if (!timespec_valid_strict(tv))
82877 return -EINVAL;
82878
82879+ gr_log_timechange();
82880+
82881 raw_spin_lock_irqsave(&timekeeper_lock, flags);
82882 write_seqcount_begin(&timekeeper_seq);
82883
82884diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
82885index 3bdf283..cc68d83 100644
82886--- a/kernel/time/timer_list.c
82887+++ b/kernel/time/timer_list.c
82888@@ -45,12 +45,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
82889
82890 static void print_name_offset(struct seq_file *m, void *sym)
82891 {
82892+#ifdef CONFIG_GRKERNSEC_HIDESYM
82893+ SEQ_printf(m, "<%p>", NULL);
82894+#else
82895 char symname[KSYM_NAME_LEN];
82896
82897 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
82898 SEQ_printf(m, "<%pK>", sym);
82899 else
82900 SEQ_printf(m, "%s", symname);
82901+#endif
82902 }
82903
82904 static void
82905@@ -119,7 +123,11 @@ next_one:
82906 static void
82907 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
82908 {
82909+#ifdef CONFIG_GRKERNSEC_HIDESYM
82910+ SEQ_printf(m, " .base: %p\n", NULL);
82911+#else
82912 SEQ_printf(m, " .base: %pK\n", base);
82913+#endif
82914 SEQ_printf(m, " .index: %d\n",
82915 base->index);
82916 SEQ_printf(m, " .resolution: %Lu nsecs\n",
82917@@ -355,7 +363,11 @@ static int __init init_timer_list_procfs(void)
82918 {
82919 struct proc_dir_entry *pe;
82920
82921+#ifdef CONFIG_GRKERNSEC_PROC_ADD
82922+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
82923+#else
82924 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
82925+#endif
82926 if (!pe)
82927 return -ENOMEM;
82928 return 0;
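Both timer_list.c hunks scrub kernel addresses from /proc/timer_list under GRKERNSEC_HIDESYM, and the init hunk tightens the file from world-readable (0444) to root-only (0400) under GRKERNSEC_PROC_ADD. The same two-mode registration, sketched for a hypothetical procfs file:

    #include <linux/proc_fs.h>

    static const struct file_operations demo_fops;      /* elided */

    static int __init demo_proc_init(void)
    {
    #ifdef CONFIG_GRKERNSEC_PROC_ADD
            /* root-only: the contents may reveal addresses or layout */
            if (!proc_create("demo", 0400, NULL, &demo_fops))
    #else
            if (!proc_create("demo", 0444, NULL, &demo_fops))
    #endif
                    return -ENOMEM;
            return 0;
    }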
82929diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
82930index 0b537f2..40d6c20 100644
82931--- a/kernel/time/timer_stats.c
82932+++ b/kernel/time/timer_stats.c
82933@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
82934 static unsigned long nr_entries;
82935 static struct entry entries[MAX_ENTRIES];
82936
82937-static atomic_t overflow_count;
82938+static atomic_unchecked_t overflow_count;
82939
82940 /*
82941 * The entries are in a hash-table, for fast lookup:
82942@@ -140,7 +140,7 @@ static void reset_entries(void)
82943 nr_entries = 0;
82944 memset(entries, 0, sizeof(entries));
82945 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
82946- atomic_set(&overflow_count, 0);
82947+ atomic_set_unchecked(&overflow_count, 0);
82948 }
82949
82950 static struct entry *alloc_entry(void)
82951@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
82952 if (likely(entry))
82953 entry->count++;
82954 else
82955- atomic_inc(&overflow_count);
82956+ atomic_inc_unchecked(&overflow_count);
82957
82958 out_unlock:
82959 raw_spin_unlock_irqrestore(lock, flags);
82960@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
82961
82962 static void print_name_offset(struct seq_file *m, unsigned long addr)
82963 {
82964+#ifdef CONFIG_GRKERNSEC_HIDESYM
82965+ seq_printf(m, "<%p>", NULL);
82966+#else
82967 char symname[KSYM_NAME_LEN];
82968
82969 if (lookup_symbol_name(addr, symname) < 0)
82970- seq_printf(m, "<%p>", (void *)addr);
82971+ seq_printf(m, "<%pK>", (void *)addr);
82972 else
82973 seq_printf(m, "%s", symname);
82974+#endif
82975 }
82976
82977 static int tstats_show(struct seq_file *m, void *v)
82978@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
82979
82980 seq_puts(m, "Timer Stats Version: v0.2\n");
82981 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
82982- if (atomic_read(&overflow_count))
82983+ if (atomic_read_unchecked(&overflow_count))
82984 seq_printf(m, "Overflow: %d entries\n",
82985- atomic_read(&overflow_count));
82986+ atomic_read_unchecked(&overflow_count));
82987
82988 for (i = 0; i < nr_entries; i++) {
82989 entry = entries + i;
82990@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
82991 {
82992 struct proc_dir_entry *pe;
82993
82994+#ifdef CONFIG_GRKERNSEC_PROC_ADD
82995+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
82996+#else
82997 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
82998+#endif
82999 if (!pe)
83000 return -ENOMEM;
83001 return 0;
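The overflow_count conversion shows the other half of the PaX REFCOUNT scheme: once atomic_t overflows trap, counters whose wraparound is harmless (pure statistics, never tied to object lifetime) must opt out explicitly via atomic_unchecked_t and the *_unchecked accessors this patch introduces. The split in sketch form; ATOMIC_INIT is assumed to initialize both, since each wraps a plain int:

    static atomic_t demo_refs = ATOMIC_INIT(1);             /* lifetime count:
                                                             * overflow is a bug */
    static atomic_unchecked_t demo_drops = ATOMIC_INIT(0);  /* statistic: wrap
                                                             * is cosmetic      */

    static void demo_account(void)
    {
            atomic_inc(&demo_refs);              /* checked: may trap on wrap */
            atomic_inc_unchecked(&demo_drops);   /* free-running              */
    }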
83002diff --git a/kernel/timer.c b/kernel/timer.c
83003index 15bc1b4..32da49c 100644
83004--- a/kernel/timer.c
83005+++ b/kernel/timer.c
83006@@ -1366,7 +1366,7 @@ void update_process_times(int user_tick)
83007 /*
83008 * This function runs timers and the timer-tq in bottom half context.
83009 */
83010-static void run_timer_softirq(struct softirq_action *h)
83011+static void run_timer_softirq(void)
83012 {
83013 struct tvec_base *base = __this_cpu_read(tvec_bases);
83014
83015@@ -1429,7 +1429,7 @@ static void process_timeout(unsigned long __data)
83016 *
83017 * In all cases the return value is guaranteed to be non-negative.
83018 */
83019-signed long __sched schedule_timeout(signed long timeout)
83020+signed long __sched __intentional_overflow(-1) schedule_timeout(signed long timeout)
83021 {
83022 struct timer_list timer;
83023 unsigned long expire;
83024@@ -1635,7 +1635,7 @@ static int __cpuinit timer_cpu_notify(struct notifier_block *self,
83025 return NOTIFY_OK;
83026 }
83027
83028-static struct notifier_block __cpuinitdata timers_nb = {
83029+static struct notifier_block timers_nb = {
83030 .notifier_call = timer_cpu_notify,
83031 };
83032
83033diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
83034index b8b8560..75b1a09 100644
83035--- a/kernel/trace/blktrace.c
83036+++ b/kernel/trace/blktrace.c
83037@@ -317,7 +317,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
83038 struct blk_trace *bt = filp->private_data;
83039 char buf[16];
83040
83041- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
83042+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
83043
83044 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
83045 }
83046@@ -375,7 +375,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
83047 return 1;
83048
83049 bt = buf->chan->private_data;
83050- atomic_inc(&bt->dropped);
83051+ atomic_inc_unchecked(&bt->dropped);
83052 return 0;
83053 }
83054
83055@@ -476,7 +476,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
83056
83057 bt->dir = dir;
83058 bt->dev = dev;
83059- atomic_set(&bt->dropped, 0);
83060+ atomic_set_unchecked(&bt->dropped, 0);
83061
83062 ret = -EIO;
83063 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
83064diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
83065index 6c508ff..ee55a13 100644
83066--- a/kernel/trace/ftrace.c
83067+++ b/kernel/trace/ftrace.c
83068@@ -1915,12 +1915,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
83069 if (unlikely(ftrace_disabled))
83070 return 0;
83071
83072+ ret = ftrace_arch_code_modify_prepare();
83073+ FTRACE_WARN_ON(ret);
83074+ if (ret)
83075+ return 0;
83076+
83077 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
83078+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
83079 if (ret) {
83080 ftrace_bug(ret, ip);
83081- return 0;
83082 }
83083- return 1;
83084+ return ret ? 0 : 1;
83085 }
83086
83087 /*
83088@@ -3931,8 +3936,10 @@ static int ftrace_process_locs(struct module *mod,
83089 if (!count)
83090 return 0;
83091
83092+ pax_open_kernel();
83093 sort(start, count, sizeof(*start),
83094 ftrace_cmp_ips, ftrace_swap_ips);
83095+ pax_close_kernel();
83096
83097 start_pg = ftrace_allocate_pages(count);
83098 if (!start_pg)
83099@@ -4655,8 +4662,6 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
83100 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
83101
83102 static int ftrace_graph_active;
83103-static struct notifier_block ftrace_suspend_notifier;
83104-
83105 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
83106 {
83107 return 0;
83108@@ -4800,6 +4805,10 @@ ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
83109 return NOTIFY_DONE;
83110 }
83111
83112+static struct notifier_block ftrace_suspend_notifier = {
83113+ .notifier_call = ftrace_suspend_notifier_call
83114+};
83115+
83116 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
83117 trace_func_graph_ent_t entryfunc)
83118 {
83119@@ -4813,7 +4822,6 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
83120 goto out;
83121 }
83122
83123- ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
83124 register_pm_notifier(&ftrace_suspend_notifier);
83125
83126 ftrace_graph_active++;
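Besides bracketing ftrace_make_nop() with the previously missing ftrace_arch_code_modify_prepare()/post_process() calls, the ftrace hunks turn ftrace_suspend_notifier from a runtime-assigned object into one with a static initializer, so the notifier_block is fully formed at build time and can be made read-only. The same transformation on a hypothetical PM notifier:

    #include <linux/notifier.h>
    #include <linux/suspend.h>

    static int demo_pm_call(struct notifier_block *nb,
                            unsigned long event, void *data)
    {
            return NOTIFY_DONE;
    }

    /* .notifier_call is bound here, not inside the init function */
    static struct notifier_block demo_pm_nb = {
            .notifier_call = demo_pm_call,
    };

    static int __init demo_init(void)
    {
            return register_pm_notifier(&demo_pm_nb);
    }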
83127diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
83128index e444ff8..438b8f4 100644
83129--- a/kernel/trace/ring_buffer.c
83130+++ b/kernel/trace/ring_buffer.c
83131@@ -352,9 +352,9 @@ struct buffer_data_page {
83132 */
83133 struct buffer_page {
83134 struct list_head list; /* list of buffer pages */
83135- local_t write; /* index for next write */
83136+ local_unchecked_t write; /* index for next write */
83137 unsigned read; /* index for next read */
83138- local_t entries; /* entries on this page */
83139+ local_unchecked_t entries; /* entries on this page */
83140 unsigned long real_end; /* real end of data */
83141 struct buffer_data_page *page; /* Actual data page */
83142 };
83143@@ -473,8 +473,8 @@ struct ring_buffer_per_cpu {
83144 unsigned long last_overrun;
83145 local_t entries_bytes;
83146 local_t entries;
83147- local_t overrun;
83148- local_t commit_overrun;
83149+ local_unchecked_t overrun;
83150+ local_unchecked_t commit_overrun;
83151 local_t dropped_events;
83152 local_t committing;
83153 local_t commits;
83154@@ -992,8 +992,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
83155 *
83156 * We add a counter to the write field to denote this.
83157 */
83158- old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
83159- old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
83160+ old_write = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->write);
83161+ old_entries = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->entries);
83162
83163 /*
83164 * Just make sure we have seen our old_write and synchronize
83165@@ -1021,8 +1021,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
83166 * cmpxchg to only update if an interrupt did not already
83167 * do it for us. If the cmpxchg fails, we don't care.
83168 */
83169- (void)local_cmpxchg(&next_page->write, old_write, val);
83170- (void)local_cmpxchg(&next_page->entries, old_entries, eval);
83171+ (void)local_cmpxchg_unchecked(&next_page->write, old_write, val);
83172+ (void)local_cmpxchg_unchecked(&next_page->entries, old_entries, eval);
83173
83174 /*
83175 * No need to worry about races with clearing out the commit.
83176@@ -1386,12 +1386,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
83177
83178 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
83179 {
83180- return local_read(&bpage->entries) & RB_WRITE_MASK;
83181+ return local_read_unchecked(&bpage->entries) & RB_WRITE_MASK;
83182 }
83183
83184 static inline unsigned long rb_page_write(struct buffer_page *bpage)
83185 {
83186- return local_read(&bpage->write) & RB_WRITE_MASK;
83187+ return local_read_unchecked(&bpage->write) & RB_WRITE_MASK;
83188 }
83189
83190 static int
83191@@ -1486,7 +1486,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
83192 * bytes consumed in ring buffer from here.
83193 * Increment overrun to account for the lost events.
83194 */
83195- local_add(page_entries, &cpu_buffer->overrun);
83196+ local_add_unchecked(page_entries, &cpu_buffer->overrun);
83197 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
83198 }
83199
83200@@ -2063,7 +2063,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
83201 * it is our responsibility to update
83202 * the counters.
83203 */
83204- local_add(entries, &cpu_buffer->overrun);
83205+ local_add_unchecked(entries, &cpu_buffer->overrun);
83206 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
83207
83208 /*
83209@@ -2213,7 +2213,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
83210 if (tail == BUF_PAGE_SIZE)
83211 tail_page->real_end = 0;
83212
83213- local_sub(length, &tail_page->write);
83214+ local_sub_unchecked(length, &tail_page->write);
83215 return;
83216 }
83217
83218@@ -2248,7 +2248,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
83219 rb_event_set_padding(event);
83220
83221 /* Set the write back to the previous setting */
83222- local_sub(length, &tail_page->write);
83223+ local_sub_unchecked(length, &tail_page->write);
83224 return;
83225 }
83226
83227@@ -2260,7 +2260,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
83228
83229 /* Set write to end of buffer */
83230 length = (tail + length) - BUF_PAGE_SIZE;
83231- local_sub(length, &tail_page->write);
83232+ local_sub_unchecked(length, &tail_page->write);
83233 }
83234
83235 /*
83236@@ -2286,7 +2286,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
83237 * about it.
83238 */
83239 if (unlikely(next_page == commit_page)) {
83240- local_inc(&cpu_buffer->commit_overrun);
83241+ local_inc_unchecked(&cpu_buffer->commit_overrun);
83242 goto out_reset;
83243 }
83244
83245@@ -2342,7 +2342,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
83246 cpu_buffer->tail_page) &&
83247 (cpu_buffer->commit_page ==
83248 cpu_buffer->reader_page))) {
83249- local_inc(&cpu_buffer->commit_overrun);
83250+ local_inc_unchecked(&cpu_buffer->commit_overrun);
83251 goto out_reset;
83252 }
83253 }
83254@@ -2390,7 +2390,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
83255 length += RB_LEN_TIME_EXTEND;
83256
83257 tail_page = cpu_buffer->tail_page;
83258- write = local_add_return(length, &tail_page->write);
83259+ write = local_add_return_unchecked(length, &tail_page->write);
83260
83261 /* set write to only the index of the write */
83262 write &= RB_WRITE_MASK;
83263@@ -2407,7 +2407,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
83264 kmemcheck_annotate_bitfield(event, bitfield);
83265 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
83266
83267- local_inc(&tail_page->entries);
83268+ local_inc_unchecked(&tail_page->entries);
83269
83270 /*
83271 * If this is the first commit on the page, then update
83272@@ -2440,7 +2440,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
83273
83274 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
83275 unsigned long write_mask =
83276- local_read(&bpage->write) & ~RB_WRITE_MASK;
83277+ local_read_unchecked(&bpage->write) & ~RB_WRITE_MASK;
83278 unsigned long event_length = rb_event_length(event);
83279 /*
83280 * This is on the tail page. It is possible that
83281@@ -2450,7 +2450,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
83282 */
83283 old_index += write_mask;
83284 new_index += write_mask;
83285- index = local_cmpxchg(&bpage->write, old_index, new_index);
83286+ index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index);
83287 if (index == old_index) {
83288 /* update counters */
83289 local_sub(event_length, &cpu_buffer->entries_bytes);
83290@@ -2842,7 +2842,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
83291
83292 /* Do the likely case first */
83293 if (likely(bpage->page == (void *)addr)) {
83294- local_dec(&bpage->entries);
83295+ local_dec_unchecked(&bpage->entries);
83296 return;
83297 }
83298
83299@@ -2854,7 +2854,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
83300 start = bpage;
83301 do {
83302 if (bpage->page == (void *)addr) {
83303- local_dec(&bpage->entries);
83304+ local_dec_unchecked(&bpage->entries);
83305 return;
83306 }
83307 rb_inc_page(cpu_buffer, &bpage);
83308@@ -3138,7 +3138,7 @@ static inline unsigned long
83309 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
83310 {
83311 return local_read(&cpu_buffer->entries) -
83312- (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
83313+ (local_read_unchecked(&cpu_buffer->overrun) + cpu_buffer->read);
83314 }
83315
83316 /**
83317@@ -3227,7 +3227,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
83318 return 0;
83319
83320 cpu_buffer = buffer->buffers[cpu];
83321- ret = local_read(&cpu_buffer->overrun);
83322+ ret = local_read_unchecked(&cpu_buffer->overrun);
83323
83324 return ret;
83325 }
83326@@ -3250,7 +3250,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
83327 return 0;
83328
83329 cpu_buffer = buffer->buffers[cpu];
83330- ret = local_read(&cpu_buffer->commit_overrun);
83331+ ret = local_read_unchecked(&cpu_buffer->commit_overrun);
83332
83333 return ret;
83334 }
83335@@ -3335,7 +3335,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
83336 /* if you care about this being correct, lock the buffer */
83337 for_each_buffer_cpu(buffer, cpu) {
83338 cpu_buffer = buffer->buffers[cpu];
83339- overruns += local_read(&cpu_buffer->overrun);
83340+ overruns += local_read_unchecked(&cpu_buffer->overrun);
83341 }
83342
83343 return overruns;
83344@@ -3511,8 +3511,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
83345 /*
83346 * Reset the reader page to size zero.
83347 */
83348- local_set(&cpu_buffer->reader_page->write, 0);
83349- local_set(&cpu_buffer->reader_page->entries, 0);
83350+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
83351+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
83352 local_set(&cpu_buffer->reader_page->page->commit, 0);
83353 cpu_buffer->reader_page->real_end = 0;
83354
83355@@ -3546,7 +3546,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
83356 * want to compare with the last_overrun.
83357 */
83358 smp_mb();
83359- overwrite = local_read(&(cpu_buffer->overrun));
83360+ overwrite = local_read_unchecked(&(cpu_buffer->overrun));
83361
83362 /*
83363 * Here's the tricky part.
83364@@ -4116,8 +4116,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
83365
83366 cpu_buffer->head_page
83367 = list_entry(cpu_buffer->pages, struct buffer_page, list);
83368- local_set(&cpu_buffer->head_page->write, 0);
83369- local_set(&cpu_buffer->head_page->entries, 0);
83370+ local_set_unchecked(&cpu_buffer->head_page->write, 0);
83371+ local_set_unchecked(&cpu_buffer->head_page->entries, 0);
83372 local_set(&cpu_buffer->head_page->page->commit, 0);
83373
83374 cpu_buffer->head_page->read = 0;
83375@@ -4127,14 +4127,14 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
83376
83377 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
83378 INIT_LIST_HEAD(&cpu_buffer->new_pages);
83379- local_set(&cpu_buffer->reader_page->write, 0);
83380- local_set(&cpu_buffer->reader_page->entries, 0);
83381+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
83382+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
83383 local_set(&cpu_buffer->reader_page->page->commit, 0);
83384 cpu_buffer->reader_page->read = 0;
83385
83386 local_set(&cpu_buffer->entries_bytes, 0);
83387- local_set(&cpu_buffer->overrun, 0);
83388- local_set(&cpu_buffer->commit_overrun, 0);
83389+ local_set_unchecked(&cpu_buffer->overrun, 0);
83390+ local_set_unchecked(&cpu_buffer->commit_overrun, 0);
83391 local_set(&cpu_buffer->dropped_events, 0);
83392 local_set(&cpu_buffer->entries, 0);
83393 local_set(&cpu_buffer->committing, 0);
83394@@ -4538,8 +4538,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
83395 rb_init_page(bpage);
83396 bpage = reader->page;
83397 reader->page = *data_page;
83398- local_set(&reader->write, 0);
83399- local_set(&reader->entries, 0);
83400+ local_set_unchecked(&reader->write, 0);
83401+ local_set_unchecked(&reader->entries, 0);
83402 reader->read = 0;
83403 *data_page = bpage;
83404
83405diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
83406index 06a5bce..53ad6e7 100644
83407--- a/kernel/trace/trace.c
83408+++ b/kernel/trace/trace.c
83409@@ -3347,7 +3347,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
83410 return 0;
83411 }
83412
83413-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
83414+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled)
83415 {
83416 /* do nothing if flag is already set */
83417 if (!!(trace_flags & mask) == !!enabled)
83418diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
83419index 51b4448..7be601f 100644
83420--- a/kernel/trace/trace.h
83421+++ b/kernel/trace/trace.h
83422@@ -1035,7 +1035,7 @@ extern const char *__stop___trace_bprintk_fmt[];
83423 void trace_printk_init_buffers(void);
83424 void trace_printk_start_comm(void);
83425 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
83426-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
83427+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled);
83428
83429 /*
83430 * Normal trace_printk() and friends allocates special buffers
83431diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
83432index 6953263..2004e16 100644
83433--- a/kernel/trace/trace_events.c
83434+++ b/kernel/trace/trace_events.c
83435@@ -1748,10 +1748,6 @@ static LIST_HEAD(ftrace_module_file_list);
83436 struct ftrace_module_file_ops {
83437 struct list_head list;
83438 struct module *mod;
83439- struct file_operations id;
83440- struct file_operations enable;
83441- struct file_operations format;
83442- struct file_operations filter;
83443 };
83444
83445 static struct ftrace_module_file_ops *
83446@@ -1792,17 +1788,12 @@ trace_create_file_ops(struct module *mod)
83447
83448 file_ops->mod = mod;
83449
83450- file_ops->id = ftrace_event_id_fops;
83451- file_ops->id.owner = mod;
83452-
83453- file_ops->enable = ftrace_enable_fops;
83454- file_ops->enable.owner = mod;
83455-
83456- file_ops->filter = ftrace_event_filter_fops;
83457- file_ops->filter.owner = mod;
83458-
83459- file_ops->format = ftrace_event_format_fops;
83460- file_ops->format.owner = mod;
83461+ pax_open_kernel();
83462+ mod->trace_id.owner = mod;
83463+ mod->trace_enable.owner = mod;
83464+ mod->trace_filter.owner = mod;
83465+ mod->trace_format.owner = mod;
83466+ pax_close_kernel();
83467
83468 list_add(&file_ops->list, &ftrace_module_file_list);
83469
83470@@ -1895,8 +1886,8 @@ __trace_add_new_mod_event(struct ftrace_event_call *call,
83471 struct ftrace_module_file_ops *file_ops)
83472 {
83473 return __trace_add_new_event(call, tr,
83474- &file_ops->id, &file_ops->enable,
83475- &file_ops->filter, &file_ops->format);
83476+ &file_ops->mod->trace_id, &file_ops->mod->trace_enable,
83477+ &file_ops->mod->trace_filter, &file_ops->mod->trace_format);
83478 }
83479
83480 #else
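With file_operations constified, the writable per-module copies that trace_create_file_ops() kept (four structs per module, existing only so .owner could be set) no longer work; the hunk relies on fields added to struct module elsewhere in this patch and stamps their .owner once through pax_open_kernel(). The .owner field is what pins a module while one of its files is open, as in this hypothetical fragment:

    #include <linux/fs.h>
    #include <linux/module.h>

    static const struct file_operations demo_fops = {
            .owner  = THIS_MODULE,  /* an open file holds a module reference,
                                     * so the ops cannot be unloaded away    */
            .llseek = noop_llseek,
    };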
83481diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
83482index a5e8f48..a9690d2 100644
83483--- a/kernel/trace/trace_mmiotrace.c
83484+++ b/kernel/trace/trace_mmiotrace.c
83485@@ -24,7 +24,7 @@ struct header_iter {
83486 static struct trace_array *mmio_trace_array;
83487 static bool overrun_detected;
83488 static unsigned long prev_overruns;
83489-static atomic_t dropped_count;
83490+static atomic_unchecked_t dropped_count;
83491
83492 static void mmio_reset_data(struct trace_array *tr)
83493 {
83494@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
83495
83496 static unsigned long count_overruns(struct trace_iterator *iter)
83497 {
83498- unsigned long cnt = atomic_xchg(&dropped_count, 0);
83499+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
83500 unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);
83501
83502 if (over > prev_overruns)
83503@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
83504 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
83505 sizeof(*entry), 0, pc);
83506 if (!event) {
83507- atomic_inc(&dropped_count);
83508+ atomic_inc_unchecked(&dropped_count);
83509 return;
83510 }
83511 entry = ring_buffer_event_data(event);
83512@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
83513 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
83514 sizeof(*entry), 0, pc);
83515 if (!event) {
83516- atomic_inc(&dropped_count);
83517+ atomic_inc_unchecked(&dropped_count);
83518 return;
83519 }
83520 entry = ring_buffer_event_data(event);
83521diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
83522index bb922d9..2a54a257 100644
83523--- a/kernel/trace/trace_output.c
83524+++ b/kernel/trace/trace_output.c
83525@@ -294,7 +294,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
83526
83527 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
83528 if (!IS_ERR(p)) {
83529- p = mangle_path(s->buffer + s->len, p, "\n");
83530+ p = mangle_path(s->buffer + s->len, p, "\n\\");
83531 if (p) {
83532 s->len = p - s->buffer;
83533 return 1;
83534@@ -893,14 +893,16 @@ int register_ftrace_event(struct trace_event *event)
83535 goto out;
83536 }
83537
83538+ pax_open_kernel();
83539 if (event->funcs->trace == NULL)
83540- event->funcs->trace = trace_nop_print;
83541+ *(void **)&event->funcs->trace = trace_nop_print;
83542 if (event->funcs->raw == NULL)
83543- event->funcs->raw = trace_nop_print;
83544+ *(void **)&event->funcs->raw = trace_nop_print;
83545 if (event->funcs->hex == NULL)
83546- event->funcs->hex = trace_nop_print;
83547+ *(void **)&event->funcs->hex = trace_nop_print;
83548 if (event->funcs->binary == NULL)
83549- event->funcs->binary = trace_nop_print;
83550+ *(void **)&event->funcs->binary = trace_nop_print;
83551+ pax_close_kernel();
83552
83553 key = event->type & (EVENT_HASHSIZE - 1);
83554
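register_ftrace_event() fills in missing callbacks, but once function-pointer structs are constified those members are const-qualified and live in read-only memory; the hunk therefore wraps the assignments in pax_open_kernel()/pax_close_kernel() and writes through a *(void **) cast that strips the qualifier. The pattern in isolation (demo names hypothetical; the pax_*_kernel() helpers are provided by this patch):

    struct demo_event_funcs {
            int (*trace)(void);
    };

    static int demo_nop_print(void) { return 0; }

    static void demo_fill_defaults(const struct demo_event_funcs *funcs)
    {
            pax_open_kernel();      /* briefly lift write protection */
            if (funcs->trace == NULL)
                    *(void **)&funcs->trace = demo_nop_print;
            pax_close_kernel();
    }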
83555diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
83556index b20428c..4845a10 100644
83557--- a/kernel/trace/trace_stack.c
83558+++ b/kernel/trace/trace_stack.c
83559@@ -68,7 +68,7 @@ check_stack(unsigned long ip, unsigned long *stack)
83560 return;
83561
83562 /* we do not handle interrupt stacks yet */
83563- if (!object_is_on_stack(stack))
83564+ if (!object_starts_on_stack(stack))
83565 return;
83566
83567 local_irq_save(flags);
83568diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
83569index 9064b91..1f5d2f8 100644
83570--- a/kernel/user_namespace.c
83571+++ b/kernel/user_namespace.c
83572@@ -82,6 +82,21 @@ int create_user_ns(struct cred *new)
83573 !kgid_has_mapping(parent_ns, group))
83574 return -EPERM;
83575
83576+#ifdef CONFIG_GRKERNSEC
83577+ /*
83578+ * This doesn't really inspire confidence:
83579+ * http://marc.info/?l=linux-kernel&m=135543612731939&w=2
83580+ * http://marc.info/?l=linux-kernel&m=135545831607095&w=2
83581+ * Increases kernel attack surface in areas developers
83582+ * previously cared little about ("low importance due
83583+ * to requiring "root" capability")
83584+ * To be removed when this code receives *proper* review
83585+ */
83586+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
83587+ !capable(CAP_SETGID))
83588+ return -EPERM;
83589+#endif
83590+
83591 ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
83592 if (!ns)
83593 return -ENOMEM;
83594@@ -862,7 +877,7 @@ static int userns_install(struct nsproxy *nsproxy, void *ns)
83595 if (atomic_read(&current->mm->mm_users) > 1)
83596 return -EINVAL;
83597
83598- if (current->fs->users != 1)
83599+ if (atomic_read(&current->fs->users) != 1)
83600 return -EINVAL;
83601
83602 if (!ns_capable(user_ns, CAP_SYS_ADMIN))
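The create_user_ns() hunk is the bluntest change in this stretch: under CONFIG_GRKERNSEC, creating a user namespace additionally requires CAP_SYS_ADMIN, CAP_SETUID and CAP_SETGID, closing the unprivileged path the two linked lkml threads worry about. The effect is directly observable from userspace; on a kernel built with this option, an unprivileged run of the sketch below is expected to print EPERM:

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    int main(void)
    {
            if (unshare(CLONE_NEWUSER) != 0)
                    perror("unshare(CLONE_NEWUSER)");   /* EPERM expected */
            else
                    puts("user namespace created");
            return 0;
    }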
83603diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
83604index 4f69f9a..7c6f8f8 100644
83605--- a/kernel/utsname_sysctl.c
83606+++ b/kernel/utsname_sysctl.c
83607@@ -47,7 +47,7 @@ static void put_uts(ctl_table *table, int write, void *which)
83608 static int proc_do_uts_string(ctl_table *table, int write,
83609 void __user *buffer, size_t *lenp, loff_t *ppos)
83610 {
83611- struct ctl_table uts_table;
83612+ ctl_table_no_const uts_table;
83613 int r;
83614 memcpy(&uts_table, table, sizeof(uts_table));
83615 uts_table.data = get_uts(table, write);
83616diff --git a/kernel/watchdog.c b/kernel/watchdog.c
83617index 05039e3..17490c7 100644
83618--- a/kernel/watchdog.c
83619+++ b/kernel/watchdog.c
83620@@ -531,7 +531,7 @@ int proc_dowatchdog(struct ctl_table *table, int write,
83621 }
83622 #endif /* CONFIG_SYSCTL */
83623
83624-static struct smp_hotplug_thread watchdog_threads = {
83625+static struct smp_hotplug_thread watchdog_threads __read_only = {
83626 .store = &softlockup_watchdog,
83627 .thread_should_run = watchdog_should_run,
83628 .thread_fn = watchdog,
83629diff --git a/kernel/workqueue.c b/kernel/workqueue.c
83630index 6f01921..139869b 100644
83631--- a/kernel/workqueue.c
83632+++ b/kernel/workqueue.c
83633@@ -4596,7 +4596,7 @@ static void rebind_workers(struct worker_pool *pool)
83634 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
83635 worker_flags |= WORKER_REBOUND;
83636 worker_flags &= ~WORKER_UNBOUND;
83637- ACCESS_ONCE(worker->flags) = worker_flags;
83638+ ACCESS_ONCE_RW(worker->flags) = worker_flags;
83639 }
83640
83641 spin_unlock_irq(&pool->lock);
83642diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
83643index 74fdc5c..3310593 100644
83644--- a/lib/Kconfig.debug
83645+++ b/lib/Kconfig.debug
83646@@ -549,7 +549,7 @@ config DEBUG_MUTEXES
83647
83648 config DEBUG_LOCK_ALLOC
83649 bool "Lock debugging: detect incorrect freeing of live locks"
83650- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
83651+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
83652 select DEBUG_SPINLOCK
83653 select DEBUG_MUTEXES
83654 select LOCKDEP
83655@@ -563,7 +563,7 @@ config DEBUG_LOCK_ALLOC
83656
83657 config PROVE_LOCKING
83658 bool "Lock debugging: prove locking correctness"
83659- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
83660+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
83661 select LOCKDEP
83662 select DEBUG_SPINLOCK
83663 select DEBUG_MUTEXES
83664@@ -614,7 +614,7 @@ config LOCKDEP
83665
83666 config LOCK_STAT
83667 bool "Lock usage statistics"
83668- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
83669+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
83670 select LOCKDEP
83671 select DEBUG_SPINLOCK
83672 select DEBUG_MUTEXES
83673@@ -1282,6 +1282,7 @@ config LATENCYTOP
83674 depends on DEBUG_KERNEL
83675 depends on STACKTRACE_SUPPORT
83676 depends on PROC_FS
83677+ depends on !GRKERNSEC_HIDESYM
83678 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
83679 select KALLSYMS
83680 select KALLSYMS_ALL
83681@@ -1298,7 +1299,7 @@ config ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
83682 config DEBUG_STRICT_USER_COPY_CHECKS
83683 bool "Strict user copy size checks"
83684 depends on ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
83685- depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
83686+ depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
83687 help
83688 Enabling this option turns a certain set of sanity checks for user
83689 copy operations into compile time failures.
83690@@ -1328,7 +1329,7 @@ config INTERVAL_TREE_TEST
83691
83692 config PROVIDE_OHCI1394_DMA_INIT
83693 bool "Remote debugging over FireWire early on boot"
83694- depends on PCI && X86
83695+ depends on PCI && X86 && !GRKERNSEC
83696 help
83697 If you want to debug problems which hang or crash the kernel early
83698 on boot and the crashing machine has a FireWire port, you can use
83699@@ -1357,7 +1358,7 @@ config PROVIDE_OHCI1394_DMA_INIT
83700
83701 config FIREWIRE_OHCI_REMOTE_DMA
83702 bool "Remote debugging over FireWire with firewire-ohci"
83703- depends on FIREWIRE_OHCI
83704+ depends on FIREWIRE_OHCI && !GRKERNSEC
83705 help
83706 This option lets you use the FireWire bus for remote debugging
83707 with help of the firewire-ohci driver. It enables unfiltered
83708diff --git a/lib/Makefile b/lib/Makefile
83709index c55a037..fb46e3b 100644
83710--- a/lib/Makefile
83711+++ b/lib/Makefile
83712@@ -50,7 +50,7 @@ obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
83713
83714 obj-$(CONFIG_BTREE) += btree.o
83715 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
83716-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
83717+obj-y += list_debug.o
83718 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
83719
83720 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
83721diff --git a/lib/bitmap.c b/lib/bitmap.c
83722index 06f7e4f..f3cf2b0 100644
83723--- a/lib/bitmap.c
83724+++ b/lib/bitmap.c
83725@@ -422,7 +422,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
83726 {
83727 int c, old_c, totaldigits, ndigits, nchunks, nbits;
83728 u32 chunk;
83729- const char __user __force *ubuf = (const char __user __force *)buf;
83730+ const char __user *ubuf = (const char __force_user *)buf;
83731
83732 bitmap_zero(maskp, nmaskbits);
83733
83734@@ -507,7 +507,7 @@ int bitmap_parse_user(const char __user *ubuf,
83735 {
83736 if (!access_ok(VERIFY_READ, ubuf, ulen))
83737 return -EFAULT;
83738- return __bitmap_parse((const char __force *)ubuf,
83739+ return __bitmap_parse((const char __force_kernel *)ubuf,
83740 ulen, 1, maskp, nmaskbits);
83741
83742 }
83743@@ -598,7 +598,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
83744 {
83745 unsigned a, b;
83746 int c, old_c, totaldigits;
83747- const char __user __force *ubuf = (const char __user __force *)buf;
83748+ const char __user *ubuf = (const char __force_user *)buf;
83749 int exp_digit, in_range;
83750
83751 totaldigits = c = 0;
83752@@ -698,7 +698,7 @@ int bitmap_parselist_user(const char __user *ubuf,
83753 {
83754 if (!access_ok(VERIFY_READ, ubuf, ulen))
83755 return -EFAULT;
83756- return __bitmap_parselist((const char __force *)ubuf,
83757+ return __bitmap_parselist((const char __force_kernel *)ubuf,
83758 ulen, 1, maskp, nmaskbits);
83759 }
83760 EXPORT_SYMBOL(bitmap_parselist_user);
83761diff --git a/lib/bug.c b/lib/bug.c
83762index 1686034..a9c00c8 100644
83763--- a/lib/bug.c
83764+++ b/lib/bug.c
83765@@ -134,6 +134,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
83766 return BUG_TRAP_TYPE_NONE;
83767
83768 bug = find_bug(bugaddr);
83769+ if (!bug)
83770+ return BUG_TRAP_TYPE_NONE;
83771
83772 file = NULL;
83773 line = 0;
83774diff --git a/lib/debugobjects.c b/lib/debugobjects.c
83775index 37061ed..da83f48 100644
83776--- a/lib/debugobjects.c
83777+++ b/lib/debugobjects.c
83778@@ -286,7 +286,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
83779 if (limit > 4)
83780 return;
83781
83782- is_on_stack = object_is_on_stack(addr);
83783+ is_on_stack = object_starts_on_stack(addr);
83784 if (is_on_stack == onstack)
83785 return;
83786
83787diff --git a/lib/devres.c b/lib/devres.c
83788index 8235331..5881053 100644
83789--- a/lib/devres.c
83790+++ b/lib/devres.c
83791@@ -81,7 +81,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
83792 void devm_iounmap(struct device *dev, void __iomem *addr)
83793 {
83794 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
83795- (void *)addr));
83796+ (void __force *)addr));
83797 iounmap(addr);
83798 }
83799 EXPORT_SYMBOL(devm_iounmap);
83800@@ -224,7 +224,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
83801 {
83802 ioport_unmap(addr);
83803 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
83804- devm_ioport_map_match, (void *)addr));
83805+ devm_ioport_map_match, (void __force *)addr));
83806 }
83807 EXPORT_SYMBOL(devm_ioport_unmap);
83808 #endif /* CONFIG_HAS_IOPORT */
83809diff --git a/lib/div64.c b/lib/div64.c
83810index a163b6c..9618fa5 100644
83811--- a/lib/div64.c
83812+++ b/lib/div64.c
83813@@ -59,7 +59,7 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
83814 EXPORT_SYMBOL(__div64_32);
83815
83816 #ifndef div_s64_rem
83817-s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
83818+s64 __intentional_overflow(-1) div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
83819 {
83820 u64 quotient;
83821
83822@@ -90,7 +90,7 @@ EXPORT_SYMBOL(div_s64_rem);
83823 * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
83824 */
83825 #ifndef div64_u64
83826-u64 div64_u64(u64 dividend, u64 divisor)
83827+u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
83828 {
83829 u32 high = divisor >> 32;
83830 u64 quot;
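div_s64_rem() and div64_u64() gain __intentional_overflow(-1), the marker this patch's size-overflow plugin reads as "wraparound here is deliberate, do not instrument". A hypothetical helper showing where the annotation belongs:

    /* multiplicative hash: the unsigned wrap is the whole point */
    static unsigned int __intentional_overflow(-1) demo_hash(unsigned int x)
    {
            return x * 2654435761u;
    }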
83831diff --git a/lib/dma-debug.c b/lib/dma-debug.c
83832index d87a17a..ac0d79a 100644
83833--- a/lib/dma-debug.c
83834+++ b/lib/dma-debug.c
83835@@ -768,7 +768,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
83836
83837 void dma_debug_add_bus(struct bus_type *bus)
83838 {
83839- struct notifier_block *nb;
83840+ notifier_block_no_const *nb;
83841
83842 if (global_disable)
83843 return;
83844@@ -945,7 +945,7 @@ static void check_unmap(struct dma_debug_entry *ref)
83845
83846 static void check_for_stack(struct device *dev, void *addr)
83847 {
83848- if (object_is_on_stack(addr))
83849+ if (object_starts_on_stack(addr))
83850 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
83851 "stack [addr=%p]\n", addr);
83852 }
83853diff --git a/lib/inflate.c b/lib/inflate.c
83854index 013a761..c28f3fc 100644
83855--- a/lib/inflate.c
83856+++ b/lib/inflate.c
83857@@ -269,7 +269,7 @@ static void free(void *where)
83858 malloc_ptr = free_mem_ptr;
83859 }
83860 #else
83861-#define malloc(a) kmalloc(a, GFP_KERNEL)
83862+#define malloc(a) kmalloc((a), GFP_KERNEL)
83863 #define free(a) kfree(a)
83864 #endif
83865
83866diff --git a/lib/ioremap.c b/lib/ioremap.c
83867index 0c9216c..863bd89 100644
83868--- a/lib/ioremap.c
83869+++ b/lib/ioremap.c
83870@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
83871 unsigned long next;
83872
83873 phys_addr -= addr;
83874- pmd = pmd_alloc(&init_mm, pud, addr);
83875+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
83876 if (!pmd)
83877 return -ENOMEM;
83878 do {
83879@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
83880 unsigned long next;
83881
83882 phys_addr -= addr;
83883- pud = pud_alloc(&init_mm, pgd, addr);
83884+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
83885 if (!pud)
83886 return -ENOMEM;
83887 do {
83888diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
83889index bd2bea9..6b3c95e 100644
83890--- a/lib/is_single_threaded.c
83891+++ b/lib/is_single_threaded.c
83892@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
83893 struct task_struct *p, *t;
83894 bool ret;
83895
83896+ if (!mm)
83897+ return true;
83898+
83899 if (atomic_read(&task->signal->live) != 1)
83900 return false;
83901
83902diff --git a/lib/kobject.c b/lib/kobject.c
83903index b7e29a6..2f3ca75 100644
83904--- a/lib/kobject.c
83905+++ b/lib/kobject.c
83906@@ -805,7 +805,7 @@ static struct kset *kset_create(const char *name,
83907 kset = kzalloc(sizeof(*kset), GFP_KERNEL);
83908 if (!kset)
83909 return NULL;
83910- retval = kobject_set_name(&kset->kobj, name);
83911+ retval = kobject_set_name(&kset->kobj, "%s", name);
83912 if (retval) {
83913 kfree(kset);
83914 return NULL;
83915@@ -859,9 +859,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
83916
83917
83918 static DEFINE_SPINLOCK(kobj_ns_type_lock);
83919-static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
83920+static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __read_only;
83921
83922-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
83923+int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
83924 {
83925 enum kobj_ns_type type = ops->type;
83926 int error;
83927diff --git a/lib/list_debug.c b/lib/list_debug.c
83928index c24c2f7..06e070b 100644
83929--- a/lib/list_debug.c
83930+++ b/lib/list_debug.c
83931@@ -11,7 +11,9 @@
83932 #include <linux/bug.h>
83933 #include <linux/kernel.h>
83934 #include <linux/rculist.h>
83935+#include <linux/mm.h>
83936
83937+#ifdef CONFIG_DEBUG_LIST
83938 /*
83939 * Insert a new entry between two known consecutive entries.
83940 *
83941@@ -19,21 +21,32 @@
83942 * the prev/next entries already!
83943 */
83944
83945-void __list_add(struct list_head *new,
83946- struct list_head *prev,
83947- struct list_head *next)
83948+static bool __list_add_debug(struct list_head *new,
83949+ struct list_head *prev,
83950+ struct list_head *next)
83951 {
83952- WARN(next->prev != prev,
83953+ if (WARN(next->prev != prev,
83954 "list_add corruption. next->prev should be "
83955 "prev (%p), but was %p. (next=%p).\n",
83956- prev, next->prev, next);
83957- WARN(prev->next != next,
83958+ prev, next->prev, next) ||
83959+ WARN(prev->next != next,
83960 "list_add corruption. prev->next should be "
83961 "next (%p), but was %p. (prev=%p).\n",
83962- next, prev->next, prev);
83963- WARN(new == prev || new == next,
83964- "list_add double add: new=%p, prev=%p, next=%p.\n",
83965- new, prev, next);
83966+ next, prev->next, prev) ||
83967+ WARN(new == prev || new == next,
83968+ "list_add double add: new=%p, prev=%p, next=%p.\n",
83969+ new, prev, next))
83970+ return false;
83971+ return true;
83972+}
83973+
83974+void __list_add(struct list_head *new,
83975+ struct list_head *prev,
83976+ struct list_head *next)
83977+{
83978+ if (!__list_add_debug(new, prev, next))
83979+ return;
83980+
83981 next->prev = new;
83982 new->next = next;
83983 new->prev = prev;
83984@@ -41,7 +54,7 @@ void __list_add(struct list_head *new,
83985 }
83986 EXPORT_SYMBOL(__list_add);
83987
83988-void __list_del_entry(struct list_head *entry)
83989+static bool __list_del_entry_debug(struct list_head *entry)
83990 {
83991 struct list_head *prev, *next;
83992
83993@@ -60,9 +73,16 @@ void __list_del_entry(struct list_head *entry)
83994 WARN(next->prev != entry,
83995 "list_del corruption. next->prev should be %p, "
83996 "but was %p\n", entry, next->prev))
83997+ return false;
83998+ return true;
83999+}
84000+
84001+void __list_del_entry(struct list_head *entry)
84002+{
84003+ if (!__list_del_entry_debug(entry))
84004 return;
84005
84006- __list_del(prev, next);
84007+ __list_del(entry->prev, entry->next);
84008 }
84009 EXPORT_SYMBOL(__list_del_entry);
84010
84011@@ -86,15 +106,85 @@ EXPORT_SYMBOL(list_del);
84012 void __list_add_rcu(struct list_head *new,
84013 struct list_head *prev, struct list_head *next)
84014 {
84015- WARN(next->prev != prev,
84016- "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
84017- prev, next->prev, next);
84018- WARN(prev->next != next,
84019- "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
84020- next, prev->next, prev);
84021+ if (!__list_add_debug(new, prev, next))
84022+ return;
84023+
84024 new->next = next;
84025 new->prev = prev;
84026 rcu_assign_pointer(list_next_rcu(prev), new);
84027 next->prev = new;
84028 }
84029 EXPORT_SYMBOL(__list_add_rcu);
84030+#endif
84031+
84032+void __pax_list_add(struct list_head *new, struct list_head *prev, struct list_head *next)
84033+{
84034+#ifdef CONFIG_DEBUG_LIST
84035+ if (!__list_add_debug(new, prev, next))
84036+ return;
84037+#endif
84038+
84039+ pax_open_kernel();
84040+ next->prev = new;
84041+ new->next = next;
84042+ new->prev = prev;
84043+ prev->next = new;
84044+ pax_close_kernel();
84045+}
84046+EXPORT_SYMBOL(__pax_list_add);
84047+
84048+void pax_list_del(struct list_head *entry)
84049+{
84050+#ifdef CONFIG_DEBUG_LIST
84051+ if (!__list_del_entry_debug(entry))
84052+ return;
84053+#endif
84054+
84055+ pax_open_kernel();
84056+ __list_del(entry->prev, entry->next);
84057+ entry->next = LIST_POISON1;
84058+ entry->prev = LIST_POISON2;
84059+ pax_close_kernel();
84060+}
84061+EXPORT_SYMBOL(pax_list_del);
84062+
84063+void pax_list_del_init(struct list_head *entry)
84064+{
84065+ pax_open_kernel();
84066+ __list_del(entry->prev, entry->next);
84067+ INIT_LIST_HEAD(entry);
84068+ pax_close_kernel();
84069+}
84070+EXPORT_SYMBOL(pax_list_del_init);
84071+
84072+void __pax_list_add_rcu(struct list_head *new,
84073+ struct list_head *prev, struct list_head *next)
84074+{
84075+#ifdef CONFIG_DEBUG_LIST
84076+ if (!__list_add_debug(new, prev, next))
84077+ return;
84078+#endif
84079+
84080+ pax_open_kernel();
84081+ new->next = next;
84082+ new->prev = prev;
84083+ rcu_assign_pointer(list_next_rcu(prev), new);
84084+ next->prev = new;
84085+ pax_close_kernel();
84086+}
84087+EXPORT_SYMBOL(__pax_list_add_rcu);
84088+
84089+void pax_list_del_rcu(struct list_head *entry)
84090+{
84091+#ifdef CONFIG_DEBUG_LIST
84092+ if (!__list_del_entry_debug(entry))
84093+ return;
84094+#endif
84095+
84096+ pax_open_kernel();
84097+ __list_del(entry->prev, entry->next);
84098+ entry->next = LIST_POISON1;
84099+ entry->prev = LIST_POISON2;
84100+ pax_close_kernel();
84101+}
84102+EXPORT_SYMBOL(pax_list_del_rcu);
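The new pax_list_* primitives reuse the existing corruption checks (now factored into __list_add_debug()/__list_del_entry_debug()) but bracket the actual pointer writes with pax_open_kernel()/pax_close_kernel(), so list nodes embedded in read-only objects can still be linked and unlinked. A usage sketch; the demo names are hypothetical, while __read_only and __pax_list_add() come from this patch:

    #include <linux/list.h>

    static LIST_HEAD(demo_list);

    struct demo_entry {
            struct list_head node;
            int value;
    };

    static struct demo_entry demo_node __read_only = { .value = 42 };

    static void demo_attach(void)
    {
            /* plain list_add() would fault writing demo_node.node;
             * the pax variant spells out head insertion instead */
            __pax_list_add(&demo_node.node, &demo_list, demo_list.next);
    }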
84103diff --git a/lib/radix-tree.c b/lib/radix-tree.c
84104index e796429..6e38f9f 100644
84105--- a/lib/radix-tree.c
84106+++ b/lib/radix-tree.c
84107@@ -92,7 +92,7 @@ struct radix_tree_preload {
84108 int nr;
84109 struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
84110 };
84111-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
84112+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
84113
84114 static inline void *ptr_to_indirect(void *ptr)
84115 {
84116diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
84117index bb2b201..46abaf9 100644
84118--- a/lib/strncpy_from_user.c
84119+++ b/lib/strncpy_from_user.c
84120@@ -21,7 +21,7 @@
84121 */
84122 static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
84123 {
84124- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
84125+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
84126 long res = 0;
84127
84128 /*
84129diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
84130index a28df52..3d55877 100644
84131--- a/lib/strnlen_user.c
84132+++ b/lib/strnlen_user.c
84133@@ -26,7 +26,7 @@
84134 */
84135 static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
84136 {
84137- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
84138+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
84139 long align, res = 0;
84140 unsigned long c;
84141
84142diff --git a/lib/swiotlb.c b/lib/swiotlb.c
84143index d23762e..e21eab2 100644
84144--- a/lib/swiotlb.c
84145+++ b/lib/swiotlb.c
84146@@ -664,7 +664,7 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent);
84147
84148 void
84149 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
84150- dma_addr_t dev_addr)
84151+ dma_addr_t dev_addr, struct dma_attrs *attrs)
84152 {
84153 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
84154
84155diff --git a/lib/usercopy.c b/lib/usercopy.c
84156index 4f5b1dd..7cab418 100644
84157--- a/lib/usercopy.c
84158+++ b/lib/usercopy.c
84159@@ -7,3 +7,9 @@ void copy_from_user_overflow(void)
84160 WARN(1, "Buffer overflow detected!\n");
84161 }
84162 EXPORT_SYMBOL(copy_from_user_overflow);
84163+
84164+void copy_to_user_overflow(void)
84165+{
84166+ WARN(1, "Buffer overflow detected!\n");
84167+}
84168+EXPORT_SYMBOL(copy_to_user_overflow);
84169diff --git a/lib/vsprintf.c b/lib/vsprintf.c
84170index e149c64..24aa71a 100644
84171--- a/lib/vsprintf.c
84172+++ b/lib/vsprintf.c
84173@@ -16,6 +16,9 @@
84174 * - scnprintf and vscnprintf
84175 */
84176
84177+#ifdef CONFIG_GRKERNSEC_HIDESYM
84178+#define __INCLUDED_BY_HIDESYM 1
84179+#endif
84180 #include <stdarg.h>
84181 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
84182 #include <linux/types.h>
84183@@ -981,7 +984,11 @@ char *netdev_feature_string(char *buf, char *end, const u8 *addr,
84184 return number(buf, end, *(const netdev_features_t *)addr, spec);
84185 }
84186
84187+#ifdef CONFIG_GRKERNSEC_HIDESYM
84188+int kptr_restrict __read_mostly = 2;
84189+#else
84190 int kptr_restrict __read_mostly;
84191+#endif
84192
84193 /*
84194 * Show a '%p' thing. A kernel extension is that the '%p' is followed
84195@@ -994,6 +1001,7 @@ int kptr_restrict __read_mostly;
84196 * - 'f' For simple symbolic function names without offset
84197 * - 'S' For symbolic direct pointers with offset
84198 * - 's' For symbolic direct pointers without offset
84199+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
84200 * - '[FfSs]R' as above with __builtin_extract_return_addr() translation
84201 * - 'B' For backtraced symbolic direct pointers with offset
84202 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
84203@@ -1052,12 +1060,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
84204
84205 if (!ptr && *fmt != 'K') {
84206 /*
84207- * Print (null) with the same width as a pointer so it makes
84208+ * Print (nil) with the same width as a pointer so it makes
84209 * tabular output look nice.
84210 */
84211 if (spec.field_width == -1)
84212 spec.field_width = default_width;
84213- return string(buf, end, "(null)", spec);
84214+ return string(buf, end, "(nil)", spec);
84215 }
84216
84217 switch (*fmt) {
84218@@ -1067,6 +1075,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
84219 /* Fallthrough */
84220 case 'S':
84221 case 's':
84222+#ifdef CONFIG_GRKERNSEC_HIDESYM
84223+ break;
84224+#else
84225+ return symbol_string(buf, end, ptr, spec, fmt);
84226+#endif
84227+ case 'A':
84228 case 'B':
84229 return symbol_string(buf, end, ptr, spec, fmt);
84230 case 'R':
84231@@ -1107,6 +1121,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
84232 va_end(va);
84233 return buf;
84234 }
84235+ case 'P':
84236+ break;
84237 case 'K':
84238 /*
84239 * %pK cannot be used in IRQ context because its test
84240@@ -1136,6 +1152,21 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
84241 return number(buf, end,
84242 (unsigned long long) *((phys_addr_t *)ptr), spec);
84243 }
84244+
84245+#ifdef CONFIG_GRKERNSEC_HIDESYM
84246+	/* 'P' = approved pointers to copy to userland,
84247+	   as in the /proc/kallsyms case, where we make it display nothing
84248+	   for non-root users, and the real contents for root users.
84249+	   Also ignore 'K' pointers, since we force their NULLing for
84250+	   non-root users above.
84251+ */
84252+ if ((unsigned long)ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'K' && is_usercopy_object(buf)) {
84253+ printk(KERN_ALERT "grsec: kernel infoleak detected! Please report this log to spender@grsecurity.net.\n");
84254+ dump_stack();
84255+ ptr = NULL;
84256+ }
84257+#endif
84258+
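The check added just above is the HIDESYM infoleak trap: if a raw %p-family pointer holds a kernel address (above TASK_SIZE), is not one of the approved 'P'/'K' forms, and the destination buffer lives in a slab cache flagged for user copies, the pointer is NULLed and the event logged with a stack dump. Reduced to a predicate (the TASK_SIZE value is illustrative, assuming an x86-64 host):

#include <stdbool.h>
#include <stdio.h>

#define TASK_SIZE 0x7ffffffff000UL   /* x86-64 user/kernel split, for the demo */

static bool leaks(unsigned long ptr, char fmt, bool usercopy_buf)
{
    return ptr > TASK_SIZE && fmt != 'P' && fmt != 'K' && usercopy_buf;
}

int main(void)
{
    printf("%d\n", leaks(0xffffffff81000000UL, 'x', true)); /* 1: leak */
    printf("%d\n", leaks(0xffffffff81000000UL, 'K', true)); /* 0: %pK is nulled anyway */
    printf("%d\n", leaks(0x400000UL, 'x', true));           /* 0: user pointer */
    return 0;
}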
84259 spec.flags |= SMALL;
84260 if (spec.field_width == -1) {
84261 spec.field_width = default_width;
84262@@ -1857,11 +1888,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
84263 typeof(type) value; \
84264 if (sizeof(type) == 8) { \
84265 args = PTR_ALIGN(args, sizeof(u32)); \
84266- *(u32 *)&value = *(u32 *)args; \
84267- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
84268+ *(u32 *)&value = *(const u32 *)args; \
84269+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
84270 } else { \
84271 args = PTR_ALIGN(args, sizeof(type)); \
84272- value = *(typeof(type) *)args; \
84273+ value = *(const typeof(type) *)args; \
84274 } \
84275 args += sizeof(type); \
84276 value; \
84277@@ -1924,7 +1955,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
84278 case FORMAT_TYPE_STR: {
84279 const char *str_arg = args;
84280 args += strlen(str_arg) + 1;
84281- str = string(str, end, (char *)str_arg, spec);
84282+ str = string(str, end, str_arg, spec);
84283 break;
84284 }
84285
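The bstr_printf() hunk only adds const to the loads, but the surrounding get_arg() macro is worth unpacking: 64-bit arguments are fetched as two u32 halves after aligning to u32, so the packed binary buffer never needs 8-byte alignment. A standalone sketch of the same fetch (little-endian output assumed):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PTR_ALIGN(p, a) \
    ((const unsigned char *)(((uintptr_t)(p) + (a) - 1) & ~(uintptr_t)((a) - 1)))

int main(void)
{
    _Alignas(8) unsigned char buf[16];
    uint64_t in = 0x1122334455667788ull, out;
    memcpy(buf + 4, &in, sizeof(in));   /* deliberately only 4-byte aligned */

    const unsigned char *args = PTR_ALIGN(buf + 4, sizeof(uint32_t));
    *(uint32_t *)&out = *(const uint32_t *)args;             /* low half  */
    *((uint32_t *)&out + 1) = *(const uint32_t *)(args + 4); /* high half */
    printf("%llx\n", (unsigned long long)out);  /* 1122334455667788 */
    return 0;
}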
84286diff --git a/localversion-grsec b/localversion-grsec
84287new file mode 100644
84288index 0000000..7cd6065
84289--- /dev/null
84290+++ b/localversion-grsec
84291@@ -0,0 +1 @@
84292+-grsec
84293diff --git a/mm/Kconfig b/mm/Kconfig
84294index e742d06..c56fdd8 100644
84295--- a/mm/Kconfig
84296+++ b/mm/Kconfig
84297@@ -317,10 +317,10 @@ config KSM
84298 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
84299
84300 config DEFAULT_MMAP_MIN_ADDR
84301- int "Low address space to protect from user allocation"
84302+ int "Low address space to protect from user allocation"
84303 depends on MMU
84304- default 4096
84305- help
84306+ default 65536
84307+ help
84308 This is the portion of low virtual memory which should be protected
84309 from userspace allocation. Keeping a user from writing to low pages
84310 can help reduce the impact of kernel NULL pointer bugs.
84311@@ -351,7 +351,7 @@ config MEMORY_FAILURE
84312
84313 config HWPOISON_INJECT
84314 tristate "HWPoison pages injector"
84315- depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
84316+ depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
84317 select PROC_PAGE_MONITOR
84318
84319 config NOMMU_INITIAL_TRIM_EXCESS
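DEFAULT_MMAP_MIN_ADDR goes from one page to 64 KB, widening the guard zone that makes kernel NULL-pointer dereferences non-exploitable from userspace, and HWPOISON_INJECT is disabled outright under grsecurity. The effect of mmap_min_addr is easy to observe from userspace: mapping below the limit with MAP_FIXED fails (assuming an unprivileged process and a non-zero vm.mmap_min_addr):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
    /* try to claim the page at 0x1000, inside the protected low region */
    void *p = mmap((void *)0x1000, 4096, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
    if (p == MAP_FAILED)
        printf("mmap at 0x1000: %s\n", strerror(errno));  /* EPERM */
    else
        munmap(p, 4096);
    return 0;
}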
84320diff --git a/mm/backing-dev.c b/mm/backing-dev.c
84321index 5025174..9fc1c5c 100644
84322--- a/mm/backing-dev.c
84323+++ b/mm/backing-dev.c
84324@@ -515,7 +515,6 @@ EXPORT_SYMBOL(bdi_destroy);
84325 int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
84326 unsigned int cap)
84327 {
84328- char tmp[32];
84329 int err;
84330
84331 bdi->name = name;
84332@@ -524,8 +523,7 @@ int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
84333 if (err)
84334 return err;
84335
84336- sprintf(tmp, "%.28s%s", name, "-%d");
84337- err = bdi_register(bdi, NULL, tmp, atomic_long_inc_return(&bdi_seq));
84338+ err = bdi_register(bdi, NULL, "%.28s-%ld", name, atomic_long_inc_return(&bdi_seq));
84339 if (err) {
84340 bdi_destroy(bdi);
84341 return err;
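The bdi fix is subtle: the old code spliced the caller-supplied name into a 32-byte stack buffer and then handed that buffer to bdi_register() as a format string, so a '%' in the name would be parsed as a conversion (and the %d/%ld mismatch with atomic_long_inc_return() is fixed in passing). Passing the name as an argument to a constant format removes both the truncation buffer and the format-string hazard. The two patterns side by side, with the dangerous call left commented out:

#include <stdio.h>

int main(void)
{
    const char *name = "evil%n%sdev";   /* hostile device name */
    char tmp[32];

    /* old pattern: splice the name into a format string, then format again */
    snprintf(tmp, sizeof(tmp), "%.28s%s", name, "-%d");
    /* printf(tmp, 0);   <- would parse %n/%s out of the name: undefined behaviour */

    /* new pattern: the name stays data, never becomes a format */
    printf("%.28s-%d\n", name, 0);
    return 0;
}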
84342diff --git a/mm/filemap.c b/mm/filemap.c
84343index 7905fe7..e60faa8 100644
84344--- a/mm/filemap.c
84345+++ b/mm/filemap.c
84346@@ -1766,7 +1766,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
84347 struct address_space *mapping = file->f_mapping;
84348
84349 if (!mapping->a_ops->readpage)
84350- return -ENOEXEC;
84351+ return -ENODEV;
84352 file_accessed(file);
84353 vma->vm_ops = &generic_file_vm_ops;
84354 return 0;
84355@@ -2106,6 +2106,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
84356 *pos = i_size_read(inode);
84357
84358 if (limit != RLIM_INFINITY) {
84359+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
84360 if (*pos >= limit) {
84361 send_sig(SIGXFSZ, current, 0);
84362 return -EFBIG;
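gr_learn_resource() feeds the attempted file size into grsecurity's RBAC learning mode before the stock RLIMIT_FSIZE enforcement runs. What that limit does is visible from userspace: a write crossing it raises SIGXFSZ, or fails with EFBIG when the signal is ignored:

#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/resource.h>
#include <unistd.h>

int main(void)
{
    struct rlimit rl = { 4096, 4096 };
    setrlimit(RLIMIT_FSIZE, &rl);
    signal(SIGXFSZ, SIG_IGN);        /* get EFBIG instead of being killed */

    int fd = open("/tmp/fsize-demo", O_CREAT | O_WRONLY | O_TRUNC, 0600);
    char page[4096];
    memset(page, 'x', sizeof(page));
    write(fd, page, sizeof(page));   /* fills the limit exactly */
    if (write(fd, page, 1) < 0)
        printf("second write: %s\n", strerror(errno));  /* EFBIG */
    close(fd);
    unlink("/tmp/fsize-demo");
    return 0;
}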
84363diff --git a/mm/fremap.c b/mm/fremap.c
84364index 87da359..3f41cb1 100644
84365--- a/mm/fremap.c
84366+++ b/mm/fremap.c
84367@@ -158,6 +158,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
84368 retry:
84369 vma = find_vma(mm, start);
84370
84371+#ifdef CONFIG_PAX_SEGMEXEC
84372+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
84373+ goto out;
84374+#endif
84375+
84376 /*
84377 * Make sure the vma is shared, that it supports prefaulting,
84378 * and that the remapped range is valid and fully within
84379diff --git a/mm/highmem.c b/mm/highmem.c
84380index b32b70c..e512eb0 100644
84381--- a/mm/highmem.c
84382+++ b/mm/highmem.c
84383@@ -138,8 +138,9 @@ static void flush_all_zero_pkmaps(void)
84384 * So no dangers, even with speculative execution.
84385 */
84386 page = pte_page(pkmap_page_table[i]);
84387+ pax_open_kernel();
84388 pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
84389-
84390+ pax_close_kernel();
84391 set_page_address(page, NULL);
84392 need_flush = 1;
84393 }
84394@@ -198,9 +199,11 @@ start:
84395 }
84396 }
84397 vaddr = PKMAP_ADDR(last_pkmap_nr);
84398+
84399+ pax_open_kernel();
84400 set_pte_at(&init_mm, vaddr,
84401 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
84402-
84403+ pax_close_kernel();
84404 pkmap_count[last_pkmap_nr] = 1;
84405 set_page_address(page, (void *)vaddr);
84406
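Under PAX_KERNEXEC the kernel's page tables are write-protected, so the pkmap updates must be bracketed by pax_open_kernel()/pax_close_kernel(), which briefly lift the protection on the current CPU. A rough userspace analogue, with mprotect() standing in for the open/close pair:

#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    long pagesz = sysconf(_SC_PAGESIZE);
    uint32_t *table = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    table[0] = 0xdeadbeef;
    mprotect(table, pagesz, PROT_READ);              /* normal state: read-only */

    mprotect(table, pagesz, PROT_READ | PROT_WRITE); /* "pax_open_kernel()"  */
    table[0] = 0;                                    /* pte_clear()          */
    mprotect(table, pagesz, PROT_READ);              /* "pax_close_kernel()" */

    printf("%x\n", table[0]);
    return 0;
}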
84407diff --git a/mm/hugetlb.c b/mm/hugetlb.c
84408index 5cf99bf..5c01c2f 100644
84409--- a/mm/hugetlb.c
84410+++ b/mm/hugetlb.c
84411@@ -2022,15 +2022,17 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
84412 struct hstate *h = &default_hstate;
84413 unsigned long tmp;
84414 int ret;
84415+ ctl_table_no_const hugetlb_table;
84416
84417 tmp = h->max_huge_pages;
84418
84419 if (write && h->order >= MAX_ORDER)
84420 return -EINVAL;
84421
84422- table->data = &tmp;
84423- table->maxlen = sizeof(unsigned long);
84424- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
84425+ hugetlb_table = *table;
84426+ hugetlb_table.data = &tmp;
84427+ hugetlb_table.maxlen = sizeof(unsigned long);
84428+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
84429 if (ret)
84430 goto out;
84431
84432@@ -2087,15 +2089,17 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
84433 struct hstate *h = &default_hstate;
84434 unsigned long tmp;
84435 int ret;
84436+ ctl_table_no_const hugetlb_table;
84437
84438 tmp = h->nr_overcommit_huge_pages;
84439
84440 if (write && h->order >= MAX_ORDER)
84441 return -EINVAL;
84442
84443- table->data = &tmp;
84444- table->maxlen = sizeof(unsigned long);
84445- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
84446+ hugetlb_table = *table;
84447+ hugetlb_table.data = &tmp;
84448+ hugetlb_table.maxlen = sizeof(unsigned long);
84449+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
84450 if (ret)
84451 goto out;
84452
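Both hugetlb sysctl handlers get the same treatment: rather than writing through the passed-in ctl_table (which grsecurity moves into read-only memory), they take a stack copy, point its data/maxlen at the local temporary, and pass the copy on. ctl_table_no_const is the grsec-added mutable typedef; the shape of the pattern, with a stand-in struct:

#include <stddef.h>
#include <stdio.h>

struct ctl_entry { const char *name; void *data; size_t maxlen; };

static void handler(const struct ctl_entry *table, unsigned long *tmp)
{
    struct ctl_entry local = *table;   /* hugetlb_table = *table;      */
    local.data = tmp;                  /* only the stack copy mutates  */
    local.maxlen = sizeof(*tmp);
    printf("%s -> %zu bytes at %p\n", local.name, local.maxlen, local.data);
}

int main(void)
{
    static const struct ctl_entry nr_hugepages = { "nr_hugepages", NULL, 0 };
    unsigned long tmp = 0;
    handler(&nr_hugepages, &tmp);
    return 0;
}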
84453@@ -2490,7 +2494,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
84454
84455 mm = vma->vm_mm;
84456
84457- tlb_gather_mmu(&tlb, mm, 0);
84458+ tlb_gather_mmu(&tlb, mm, start, end);
84459 __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
84460 tlb_finish_mmu(&tlb, start, end);
84461 }
84462@@ -2545,6 +2549,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
84463 return 1;
84464 }
84465
84466+#ifdef CONFIG_PAX_SEGMEXEC
84467+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
84468+{
84469+ struct mm_struct *mm = vma->vm_mm;
84470+ struct vm_area_struct *vma_m;
84471+ unsigned long address_m;
84472+ pte_t *ptep_m;
84473+
84474+ vma_m = pax_find_mirror_vma(vma);
84475+ if (!vma_m)
84476+ return;
84477+
84478+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
84479+ address_m = address + SEGMEXEC_TASK_SIZE;
84480+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
84481+ get_page(page_m);
84482+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
84483+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
84484+}
84485+#endif
84486+
84487 /*
84488 * Hugetlb_cow() should be called with page lock of the original hugepage held.
84489 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
84490@@ -2663,6 +2688,11 @@ retry_avoidcopy:
84491 make_huge_pte(vma, new_page, 1));
84492 page_remove_rmap(old_page);
84493 hugepage_add_new_anon_rmap(new_page, vma, address);
84494+
84495+#ifdef CONFIG_PAX_SEGMEXEC
84496+ pax_mirror_huge_pte(vma, address, new_page);
84497+#endif
84498+
84499 /* Make the old page be freed below */
84500 new_page = old_page;
84501 }
84502@@ -2821,6 +2851,10 @@ retry:
84503 && (vma->vm_flags & VM_SHARED)));
84504 set_huge_pte_at(mm, address, ptep, new_pte);
84505
84506+#ifdef CONFIG_PAX_SEGMEXEC
84507+ pax_mirror_huge_pte(vma, address, page);
84508+#endif
84509+
84510 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
84511 /* Optimization, do the COW without a second fault */
84512 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
84513@@ -2850,6 +2884,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
84514 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
84515 struct hstate *h = hstate_vma(vma);
84516
84517+#ifdef CONFIG_PAX_SEGMEXEC
84518+ struct vm_area_struct *vma_m;
84519+#endif
84520+
84521 address &= huge_page_mask(h);
84522
84523 ptep = huge_pte_offset(mm, address);
84524@@ -2863,6 +2901,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
84525 VM_FAULT_SET_HINDEX(hstate_index(h));
84526 }
84527
84528+#ifdef CONFIG_PAX_SEGMEXEC
84529+ vma_m = pax_find_mirror_vma(vma);
84530+ if (vma_m) {
84531+ unsigned long address_m;
84532+
84533+ if (vma->vm_start > vma_m->vm_start) {
84534+ address_m = address;
84535+ address -= SEGMEXEC_TASK_SIZE;
84536+ vma = vma_m;
84537+ h = hstate_vma(vma);
84538+ } else
84539+ address_m = address + SEGMEXEC_TASK_SIZE;
84540+
84541+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
84542+ return VM_FAULT_OOM;
84543+ address_m &= HPAGE_MASK;
84544+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
84545+ }
84546+#endif
84547+
84548 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
84549 if (!ptep)
84550 return VM_FAULT_OOM;
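The hugetlb fault path now participates in SEGMEXEC mirroring: under PAX_SEGMEXEC the address space is split in half, mappings that need an executable mirror get a twin vma exactly SEGMEXEC_TASK_SIZE higher, and faults must populate or invalidate both halves together. The address arithmetic used throughout these hunks, with the i386 constants for illustration:

#include <stdio.h>

#define TASK_SIZE          0xC0000000UL        /* i386 3G/1G split, demo only */
#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)     /* 1.5 GB per half             */

int main(void)
{
    unsigned long address = 0x08048000UL;      /* classic i386 text address */
    unsigned long address_m = address + SEGMEXEC_TASK_SIZE;
    printf("lower half: %#lx\n", address);
    printf("mirror:     %#lx\n", address_m);   /* 0x68048000 */
    return 0;
}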
84551diff --git a/mm/internal.h b/mm/internal.h
84552index 8562de0..92b2073 100644
84553--- a/mm/internal.h
84554+++ b/mm/internal.h
84555@@ -100,6 +100,7 @@ extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
84556 * in mm/page_alloc.c
84557 */
84558 extern void __free_pages_bootmem(struct page *page, unsigned int order);
84559+extern void free_compound_page(struct page *page);
84560 extern void prep_compound_page(struct page *page, unsigned long order);
84561 #ifdef CONFIG_MEMORY_FAILURE
84562 extern bool is_free_buddy_page(struct page *page);
84563@@ -355,7 +356,7 @@ extern u32 hwpoison_filter_enable;
84564
84565 extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
84566 unsigned long, unsigned long,
84567- unsigned long, unsigned long);
84568+ unsigned long, unsigned long) __intentional_overflow(-1);
84569
84570 extern void set_pageblock_order(void);
84571 unsigned long reclaim_clean_pages_from_list(struct zone *zone,
84572diff --git a/mm/kmemleak.c b/mm/kmemleak.c
84573index c8d7f31..2dbeffd 100644
84574--- a/mm/kmemleak.c
84575+++ b/mm/kmemleak.c
84576@@ -363,7 +363,7 @@ static void print_unreferenced(struct seq_file *seq,
84577
84578 for (i = 0; i < object->trace_len; i++) {
84579 void *ptr = (void *)object->trace[i];
84580- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
84581+ seq_printf(seq, " [<%pP>] %pA\n", ptr, ptr);
84582 }
84583 }
84584
84585@@ -1851,7 +1851,7 @@ static int __init kmemleak_late_init(void)
84586 return -ENOMEM;
84587 }
84588
84589- dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
84590+ dentry = debugfs_create_file("kmemleak", S_IRUSR, NULL, NULL,
84591 &kmemleak_fops);
84592 if (!dentry)
84593 pr_warning("Failed to create the debugfs kmemleak file\n");
84594diff --git a/mm/maccess.c b/mm/maccess.c
84595index d53adf9..03a24bf 100644
84596--- a/mm/maccess.c
84597+++ b/mm/maccess.c
84598@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
84599 set_fs(KERNEL_DS);
84600 pagefault_disable();
84601 ret = __copy_from_user_inatomic(dst,
84602- (__force const void __user *)src, size);
84603+ (const void __force_user *)src, size);
84604 pagefault_enable();
84605 set_fs(old_fs);
84606
84607@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
84608
84609 set_fs(KERNEL_DS);
84610 pagefault_disable();
84611- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
84612+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
84613 pagefault_enable();
84614 set_fs(old_fs);
84615
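__force_user is a grsec shorthand that combines sparse's __force with the __user address-space attribute, so one cast both overrides the checker and lands the pointer in the right address space. How those annotations look with plain sparse macros (they compile to nothing under gcc, so this builds and runs as ordinary C):

#ifdef __CHECKER__
# define __user  __attribute__((noderef, address_space(1)))
# define __force __attribute__((force))
#else
# define __user
# define __force
#endif

#include <stdio.h>
#include <string.h>

static long probe_read(void *dst, const void *src, size_t size)
{
    /* tell sparse we really mean to treat a kernel pointer as a user
     * pointer for this one call, as __probe_kernel_read() does */
    const void __user *usrc = (__force const void __user *)src;
    memcpy(dst, (__force const void *)usrc, size);
    return 0;
}

int main(void)
{
    char a[4] = "abc", b[4];
    probe_read(b, a, sizeof(a));
    puts(b);
    return 0;
}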
84616diff --git a/mm/madvise.c b/mm/madvise.c
84617index 7055883..aafb1ed 100644
84618--- a/mm/madvise.c
84619+++ b/mm/madvise.c
84620@@ -51,6 +51,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
84621 pgoff_t pgoff;
84622 unsigned long new_flags = vma->vm_flags;
84623
84624+#ifdef CONFIG_PAX_SEGMEXEC
84625+ struct vm_area_struct *vma_m;
84626+#endif
84627+
84628 switch (behavior) {
84629 case MADV_NORMAL:
84630 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
84631@@ -126,6 +130,13 @@ success:
84632 /*
84633 * vm_flags is protected by the mmap_sem held in write mode.
84634 */
84635+
84636+#ifdef CONFIG_PAX_SEGMEXEC
84637+ vma_m = pax_find_mirror_vma(vma);
84638+ if (vma_m)
84639+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
84640+#endif
84641+
84642 vma->vm_flags = new_flags;
84643
84644 out:
84645@@ -274,6 +285,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
84646 struct vm_area_struct ** prev,
84647 unsigned long start, unsigned long end)
84648 {
84649+
84650+#ifdef CONFIG_PAX_SEGMEXEC
84651+ struct vm_area_struct *vma_m;
84652+#endif
84653+
84654 *prev = vma;
84655 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
84656 return -EINVAL;
84657@@ -286,6 +302,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
84658 zap_page_range(vma, start, end - start, &details);
84659 } else
84660 zap_page_range(vma, start, end - start, NULL);
84661+
84662+#ifdef CONFIG_PAX_SEGMEXEC
84663+ vma_m = pax_find_mirror_vma(vma);
84664+ if (vma_m) {
84665+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
84666+ struct zap_details details = {
84667+ .nonlinear_vma = vma_m,
84668+ .last_index = ULONG_MAX,
84669+ };
84670+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
84671+ } else
84672+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
84673+ }
84674+#endif
84675+
84676 return 0;
84677 }
84678
84679@@ -485,6 +516,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
84680 if (end < start)
84681 return error;
84682
84683+#ifdef CONFIG_PAX_SEGMEXEC
84684+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
84685+ if (end > SEGMEXEC_TASK_SIZE)
84686+ return error;
84687+ } else
84688+#endif
84689+
84690+ if (end > TASK_SIZE)
84691+ return error;
84692+
84693 error = 0;
84694 if (end == start)
84695 return error;
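madvise() gains the same range clamp that mbind() gets further down: under SEGMEXEC only the lower half of the address space is addressable by userland, otherwise TASK_SIZE is the ceiling, and a range reaching past the applicable limit is rejected up front. The check as a standalone predicate (constants are the i386 values, for illustration):

#include <stdbool.h>
#include <stdio.h>

#define TASK_SIZE          0xC0000000UL
#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)

static bool range_ok(unsigned long end, bool segmexec)
{
    if (segmexec)
        return end <= SEGMEXEC_TASK_SIZE;
    return end <= TASK_SIZE;
}

int main(void)
{
    printf("%d %d\n", range_ok(0x50000000UL, true),   /* 1: below the split  */
                      range_ok(0x70000000UL, true));  /* 0: would cross it   */
    return 0;
}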
84696diff --git a/mm/memory-failure.c b/mm/memory-failure.c
84697index ceb0c7f..b2b8e94 100644
84698--- a/mm/memory-failure.c
84699+++ b/mm/memory-failure.c
84700@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
84701
84702 int sysctl_memory_failure_recovery __read_mostly = 1;
84703
84704-atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
84705+atomic_long_unchecked_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
84706
84707 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
84708
84709@@ -202,7 +202,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
84710 pfn, t->comm, t->pid);
84711 si.si_signo = SIGBUS;
84712 si.si_errno = 0;
84713- si.si_addr = (void *)addr;
84714+ si.si_addr = (void __user *)addr;
84715 #ifdef __ARCH_SI_TRAPNO
84716 si.si_trapno = trapno;
84717 #endif
84718@@ -760,7 +760,7 @@ static struct page_state {
84719 unsigned long res;
84720 char *msg;
84721 int (*action)(struct page *p, unsigned long pfn);
84722-} error_states[] = {
84723+} __do_const error_states[] = {
84724 { reserved, reserved, "reserved kernel", me_kernel },
84725 /*
84726 * free pages are specially detected outside this table:
84727@@ -1051,7 +1051,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
84728 nr_pages = 1 << compound_order(hpage);
84729 else /* normal page or thp */
84730 nr_pages = 1;
84731- atomic_long_add(nr_pages, &num_poisoned_pages);
84732+ atomic_long_add_unchecked(nr_pages, &num_poisoned_pages);
84733
84734 /*
84735 * We need/can do nothing about count=0 pages.
84736@@ -1081,7 +1081,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
84737 if (!PageHWPoison(hpage)
84738 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
84739 || (p != hpage && TestSetPageHWPoison(hpage))) {
84740- atomic_long_sub(nr_pages, &num_poisoned_pages);
84741+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
84742 return 0;
84743 }
84744 set_page_hwpoison_huge_page(hpage);
84745@@ -1148,7 +1148,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
84746 }
84747 if (hwpoison_filter(p)) {
84748 if (TestClearPageHWPoison(p))
84749- atomic_long_sub(nr_pages, &num_poisoned_pages);
84750+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
84751 unlock_page(hpage);
84752 put_page(hpage);
84753 return 0;
84754@@ -1350,7 +1350,7 @@ int unpoison_memory(unsigned long pfn)
84755 return 0;
84756 }
84757 if (TestClearPageHWPoison(p))
84758- atomic_long_sub(nr_pages, &num_poisoned_pages);
84759+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
84760 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
84761 return 0;
84762 }
84763@@ -1364,7 +1364,7 @@ int unpoison_memory(unsigned long pfn)
84764 */
84765 if (TestClearPageHWPoison(page)) {
84766 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
84767- atomic_long_sub(nr_pages, &num_poisoned_pages);
84768+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
84769 freeit = 1;
84770 if (PageHuge(page))
84771 clear_page_hwpoison_huge_page(page);
84772@@ -1491,7 +1491,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
84773 } else {
84774 set_page_hwpoison_huge_page(hpage);
84775 dequeue_hwpoisoned_huge_page(hpage);
84776- atomic_long_add(1 << compound_trans_order(hpage),
84777+ atomic_long_add_unchecked(1 << compound_trans_order(hpage),
84778 &num_poisoned_pages);
84779 }
84780 /* keep elevated page count for bad page */
84781@@ -1552,11 +1552,11 @@ int soft_offline_page(struct page *page, int flags)
84782 if (PageHuge(page)) {
84783 set_page_hwpoison_huge_page(hpage);
84784 dequeue_hwpoisoned_huge_page(hpage);
84785- atomic_long_add(1 << compound_trans_order(hpage),
84786+ atomic_long_add_unchecked(1 << compound_trans_order(hpage),
84787 &num_poisoned_pages);
84788 } else {
84789 SetPageHWPoison(page);
84790- atomic_long_inc(&num_poisoned_pages);
84791+ atomic_long_inc_unchecked(&num_poisoned_pages);
84792 }
84793 }
84794 /* keep elevated page count for bad page */
84795@@ -1596,7 +1596,7 @@ static int __soft_offline_page(struct page *page, int flags)
84796 put_page(page);
84797 pr_info("soft_offline: %#lx: invalidated\n", pfn);
84798 SetPageHWPoison(page);
84799- atomic_long_inc(&num_poisoned_pages);
84800+ atomic_long_inc_unchecked(&num_poisoned_pages);
84801 return 0;
84802 }
84803
84804@@ -1626,7 +1626,7 @@ static int __soft_offline_page(struct page *page, int flags)
84805 ret = -EIO;
84806 } else {
84807 SetPageHWPoison(page);
84808- atomic_long_inc(&num_poisoned_pages);
84809+ atomic_long_inc_unchecked(&num_poisoned_pages);
84810 }
84811 } else {
84812 pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
84813diff --git a/mm/memory.c b/mm/memory.c
84814index 5e50800..7c0340f 100644
84815--- a/mm/memory.c
84816+++ b/mm/memory.c
84817@@ -211,14 +211,15 @@ static int tlb_next_batch(struct mmu_gather *tlb)
84818 * tear-down from @mm. The @fullmm argument is used when @mm is without
84819 * users and we're going to destroy the full address space (exit/execve).
84820 */
84821-void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
84822+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
84823 {
84824 tlb->mm = mm;
84825
84826- tlb->fullmm = fullmm;
84827+ /* Is it from 0 to ~0? */
84828+ tlb->fullmm = !(start | (end+1));
84829 tlb->need_flush_all = 0;
84830- tlb->start = -1UL;
84831- tlb->end = 0;
84832+ tlb->start = start;
84833+ tlb->end = end;
84834 tlb->need_flush = 0;
84835 tlb->local.next = NULL;
84836 tlb->local.nr = 0;
84837@@ -258,8 +259,6 @@ void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long e
84838 {
84839 struct mmu_gather_batch *batch, *next;
84840
84841- tlb->start = start;
84842- tlb->end = end;
84843 tlb_flush_mmu(tlb);
84844
84845 /* keep the page table cache within bounds */
84846@@ -429,6 +428,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
84847 free_pte_range(tlb, pmd, addr);
84848 } while (pmd++, addr = next, addr != end);
84849
84850+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
84851 start &= PUD_MASK;
84852 if (start < floor)
84853 return;
84854@@ -443,6 +443,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
84855 pmd = pmd_offset(pud, start);
84856 pud_clear(pud);
84857 pmd_free_tlb(tlb, pmd, start);
84858+#endif
84859+
84860 }
84861
84862 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
84863@@ -462,6 +464,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
84864 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
84865 } while (pud++, addr = next, addr != end);
84866
84867+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
84868 start &= PGDIR_MASK;
84869 if (start < floor)
84870 return;
84871@@ -476,6 +479,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
84872 pud = pud_offset(pgd, start);
84873 pgd_clear(pgd);
84874 pud_free_tlb(tlb, pud, start);
84875+#endif
84876+
84877 }
84878
84879 /*
84880@@ -1101,7 +1106,6 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
84881 spinlock_t *ptl;
84882 pte_t *start_pte;
84883 pte_t *pte;
84884- unsigned long range_start = addr;
84885
84886 again:
84887 init_rss_vec(rss);
84888@@ -1204,17 +1208,25 @@ again:
84889 * and page-free while holding it.
84890 */
84891 if (force_flush) {
84892+ unsigned long old_end;
84893+
84894 force_flush = 0;
84895
84896-#ifdef HAVE_GENERIC_MMU_GATHER
84897- tlb->start = range_start;
84898+ /*
84899+ * Flush the TLB just for the previous segment,
84900+ * then update the range to be the remaining
84901+ * TLB range.
84902+ */
84903+ old_end = tlb->end;
84904 tlb->end = addr;
84905-#endif
84906+
84907 tlb_flush_mmu(tlb);
84908- if (addr != end) {
84909- range_start = addr;
84910+
84911+ tlb->start = addr;
84912+ tlb->end = old_end;
84913+
84914+ if (addr != end)
84915 goto again;
84916- }
84917 }
84918
84919 return addr;
84920@@ -1399,7 +1411,7 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
84921 unsigned long end = start + size;
84922
84923 lru_add_drain();
84924- tlb_gather_mmu(&tlb, mm, 0);
84925+ tlb_gather_mmu(&tlb, mm, start, end);
84926 update_hiwater_rss(mm);
84927 mmu_notifier_invalidate_range_start(mm, start, end);
84928 for ( ; vma && vma->vm_start < end; vma = vma->vm_next)
84929@@ -1425,7 +1437,7 @@ static void zap_page_range_single(struct vm_area_struct *vma, unsigned long addr
84930 unsigned long end = address + size;
84931
84932 lru_add_drain();
84933- tlb_gather_mmu(&tlb, mm, 0);
84934+ tlb_gather_mmu(&tlb, mm, address, end);
84935 update_hiwater_rss(mm);
84936 mmu_notifier_invalidate_range_start(mm, address, end);
84937 unmap_single_vma(&tlb, vma, address, end, details);
84938@@ -1638,12 +1650,6 @@ no_page_table:
84939 return page;
84940 }
84941
84942-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
84943-{
84944- return stack_guard_page_start(vma, addr) ||
84945- stack_guard_page_end(vma, addr+PAGE_SIZE);
84946-}
84947-
84948 /**
84949 * __get_user_pages() - pin user pages in memory
84950 * @tsk: task_struct of target task
84951@@ -1730,10 +1736,10 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
84952
84953 i = 0;
84954
84955- do {
84956+ while (nr_pages) {
84957 struct vm_area_struct *vma;
84958
84959- vma = find_extend_vma(mm, start);
84960+ vma = find_vma(mm, start);
84961 if (!vma && in_gate_area(mm, start)) {
84962 unsigned long pg = start & PAGE_MASK;
84963 pgd_t *pgd;
84964@@ -1782,7 +1788,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
84965 goto next_page;
84966 }
84967
84968- if (!vma ||
84969+ if (!vma || start < vma->vm_start ||
84970 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
84971 !(vm_flags & vma->vm_flags))
84972 return i ? : -EFAULT;
84973@@ -1811,11 +1817,6 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
84974 int ret;
84975 unsigned int fault_flags = 0;
84976
84977- /* For mlock, just skip the stack guard page. */
84978- if (foll_flags & FOLL_MLOCK) {
84979- if (stack_guard_page(vma, start))
84980- goto next_page;
84981- }
84982 if (foll_flags & FOLL_WRITE)
84983 fault_flags |= FAULT_FLAG_WRITE;
84984 if (nonblocking)
84985@@ -1895,7 +1896,7 @@ next_page:
84986 start += page_increm * PAGE_SIZE;
84987 nr_pages -= page_increm;
84988 } while (nr_pages && start < vma->vm_end);
84989- } while (nr_pages);
84990+ }
84991 return i;
84992 }
84993 EXPORT_SYMBOL(__get_user_pages);
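With PaX enforcing its own heap-stack gap elsewhere in this patch, __get_user_pages() stops using find_extend_vma() (which would grow a GROWSDOWN vma to cover the address) and the guard-page special cases go away. Plain find_vma() can return a vma that starts above the requested address, hence the new start < vma->vm_start test. A toy model of why that test is needed:

#include <stdbool.h>
#include <stdio.h>

struct vma { unsigned long start, end; };

/* first vma with end > addr, or NULL: the find_vma() contract */
static struct vma *find_vma(struct vma *v, int n, unsigned long addr)
{
    for (int i = 0; i < n; i++)
        if (addr < v[i].end)
            return &v[i];
    return NULL;
}

int main(void)
{
    struct vma map[] = { { 0x1000, 0x2000 }, { 0x8000, 0x9000 } };
    unsigned long addr = 0x3000;              /* hole between the two vmas */
    struct vma *vma = find_vma(map, 2, addr);
    /* a hit that starts above addr is a hole, not a mapping */
    bool fault = !vma || addr < vma->start;
    printf("EFAULT=%d\n", fault);             /* 1 */
    return 0;
}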
84994@@ -2102,6 +2103,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
84995 page_add_file_rmap(page);
84996 set_pte_at(mm, addr, pte, mk_pte(page, prot));
84997
84998+#ifdef CONFIG_PAX_SEGMEXEC
84999+ pax_mirror_file_pte(vma, addr, page, ptl);
85000+#endif
85001+
85002 retval = 0;
85003 pte_unmap_unlock(pte, ptl);
85004 return retval;
85005@@ -2146,9 +2151,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
85006 if (!page_count(page))
85007 return -EINVAL;
85008 if (!(vma->vm_flags & VM_MIXEDMAP)) {
85009+
85010+#ifdef CONFIG_PAX_SEGMEXEC
85011+ struct vm_area_struct *vma_m;
85012+#endif
85013+
85014 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
85015 BUG_ON(vma->vm_flags & VM_PFNMAP);
85016 vma->vm_flags |= VM_MIXEDMAP;
85017+
85018+#ifdef CONFIG_PAX_SEGMEXEC
85019+ vma_m = pax_find_mirror_vma(vma);
85020+ if (vma_m)
85021+ vma_m->vm_flags |= VM_MIXEDMAP;
85022+#endif
85023+
85024 }
85025 return insert_page(vma, addr, page, vma->vm_page_prot);
85026 }
85027@@ -2231,6 +2248,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
85028 unsigned long pfn)
85029 {
85030 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
85031+ BUG_ON(vma->vm_mirror);
85032
85033 if (addr < vma->vm_start || addr >= vma->vm_end)
85034 return -EFAULT;
85035@@ -2478,7 +2496,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
85036
85037 BUG_ON(pud_huge(*pud));
85038
85039- pmd = pmd_alloc(mm, pud, addr);
85040+ pmd = (mm == &init_mm) ?
85041+ pmd_alloc_kernel(mm, pud, addr) :
85042+ pmd_alloc(mm, pud, addr);
85043 if (!pmd)
85044 return -ENOMEM;
85045 do {
85046@@ -2498,7 +2518,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
85047 unsigned long next;
85048 int err;
85049
85050- pud = pud_alloc(mm, pgd, addr);
85051+ pud = (mm == &init_mm) ?
85052+ pud_alloc_kernel(mm, pgd, addr) :
85053+ pud_alloc(mm, pgd, addr);
85054 if (!pud)
85055 return -ENOMEM;
85056 do {
85057@@ -2586,6 +2608,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
85058 copy_user_highpage(dst, src, va, vma);
85059 }
85060
85061+#ifdef CONFIG_PAX_SEGMEXEC
85062+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
85063+{
85064+ struct mm_struct *mm = vma->vm_mm;
85065+ spinlock_t *ptl;
85066+ pte_t *pte, entry;
85067+
85068+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
85069+ entry = *pte;
85070+ if (!pte_present(entry)) {
85071+ if (!pte_none(entry)) {
85072+ BUG_ON(pte_file(entry));
85073+ free_swap_and_cache(pte_to_swp_entry(entry));
85074+ pte_clear_not_present_full(mm, address, pte, 0);
85075+ }
85076+ } else {
85077+ struct page *page;
85078+
85079+ flush_cache_page(vma, address, pte_pfn(entry));
85080+ entry = ptep_clear_flush(vma, address, pte);
85081+ BUG_ON(pte_dirty(entry));
85082+ page = vm_normal_page(vma, address, entry);
85083+ if (page) {
85084+ update_hiwater_rss(mm);
85085+ if (PageAnon(page))
85086+ dec_mm_counter_fast(mm, MM_ANONPAGES);
85087+ else
85088+ dec_mm_counter_fast(mm, MM_FILEPAGES);
85089+ page_remove_rmap(page);
85090+ page_cache_release(page);
85091+ }
85092+ }
85093+ pte_unmap_unlock(pte, ptl);
85094+}
85095+
85096+/* PaX: if vma is mirrored, synchronize the mirror's PTE
85097+ *
85098+ * the ptl of the lower mapped page is held on entry and is not released on exit
85099+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
85100+ */
85101+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
85102+{
85103+ struct mm_struct *mm = vma->vm_mm;
85104+ unsigned long address_m;
85105+ spinlock_t *ptl_m;
85106+ struct vm_area_struct *vma_m;
85107+ pmd_t *pmd_m;
85108+ pte_t *pte_m, entry_m;
85109+
85110+ BUG_ON(!page_m || !PageAnon(page_m));
85111+
85112+ vma_m = pax_find_mirror_vma(vma);
85113+ if (!vma_m)
85114+ return;
85115+
85116+ BUG_ON(!PageLocked(page_m));
85117+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
85118+ address_m = address + SEGMEXEC_TASK_SIZE;
85119+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
85120+ pte_m = pte_offset_map(pmd_m, address_m);
85121+ ptl_m = pte_lockptr(mm, pmd_m);
85122+ if (ptl != ptl_m) {
85123+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
85124+ if (!pte_none(*pte_m))
85125+ goto out;
85126+ }
85127+
85128+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
85129+ page_cache_get(page_m);
85130+ page_add_anon_rmap(page_m, vma_m, address_m);
85131+ inc_mm_counter_fast(mm, MM_ANONPAGES);
85132+ set_pte_at(mm, address_m, pte_m, entry_m);
85133+ update_mmu_cache(vma_m, address_m, pte_m);
85134+out:
85135+ if (ptl != ptl_m)
85136+ spin_unlock(ptl_m);
85137+ pte_unmap(pte_m);
85138+ unlock_page(page_m);
85139+}
85140+
85141+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
85142+{
85143+ struct mm_struct *mm = vma->vm_mm;
85144+ unsigned long address_m;
85145+ spinlock_t *ptl_m;
85146+ struct vm_area_struct *vma_m;
85147+ pmd_t *pmd_m;
85148+ pte_t *pte_m, entry_m;
85149+
85150+ BUG_ON(!page_m || PageAnon(page_m));
85151+
85152+ vma_m = pax_find_mirror_vma(vma);
85153+ if (!vma_m)
85154+ return;
85155+
85156+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
85157+ address_m = address + SEGMEXEC_TASK_SIZE;
85158+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
85159+ pte_m = pte_offset_map(pmd_m, address_m);
85160+ ptl_m = pte_lockptr(mm, pmd_m);
85161+ if (ptl != ptl_m) {
85162+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
85163+ if (!pte_none(*pte_m))
85164+ goto out;
85165+ }
85166+
85167+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
85168+ page_cache_get(page_m);
85169+ page_add_file_rmap(page_m);
85170+ inc_mm_counter_fast(mm, MM_FILEPAGES);
85171+ set_pte_at(mm, address_m, pte_m, entry_m);
85172+ update_mmu_cache(vma_m, address_m, pte_m);
85173+out:
85174+ if (ptl != ptl_m)
85175+ spin_unlock(ptl_m);
85176+ pte_unmap(pte_m);
85177+}
85178+
85179+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
85180+{
85181+ struct mm_struct *mm = vma->vm_mm;
85182+ unsigned long address_m;
85183+ spinlock_t *ptl_m;
85184+ struct vm_area_struct *vma_m;
85185+ pmd_t *pmd_m;
85186+ pte_t *pte_m, entry_m;
85187+
85188+ vma_m = pax_find_mirror_vma(vma);
85189+ if (!vma_m)
85190+ return;
85191+
85192+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
85193+ address_m = address + SEGMEXEC_TASK_SIZE;
85194+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
85195+ pte_m = pte_offset_map(pmd_m, address_m);
85196+ ptl_m = pte_lockptr(mm, pmd_m);
85197+ if (ptl != ptl_m) {
85198+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
85199+ if (!pte_none(*pte_m))
85200+ goto out;
85201+ }
85202+
85203+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
85204+ set_pte_at(mm, address_m, pte_m, entry_m);
85205+out:
85206+ if (ptl != ptl_m)
85207+ spin_unlock(ptl_m);
85208+ pte_unmap(pte_m);
85209+}
85210+
85211+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
85212+{
85213+ struct page *page_m;
85214+ pte_t entry;
85215+
85216+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
85217+ goto out;
85218+
85219+ entry = *pte;
85220+ page_m = vm_normal_page(vma, address, entry);
85221+ if (!page_m)
85222+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
85223+ else if (PageAnon(page_m)) {
85224+ if (pax_find_mirror_vma(vma)) {
85225+ pte_unmap_unlock(pte, ptl);
85226+ lock_page(page_m);
85227+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
85228+ if (pte_same(entry, *pte))
85229+ pax_mirror_anon_pte(vma, address, page_m, ptl);
85230+ else
85231+ unlock_page(page_m);
85232+ }
85233+ } else
85234+ pax_mirror_file_pte(vma, address, page_m, ptl);
85235+
85236+out:
85237+ pte_unmap_unlock(pte, ptl);
85238+}
85239+#endif
85240+
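pax_mirror_anon_pte(), pax_mirror_file_pte() and pax_mirror_pfn_pte() above share one locking rule: the pte lock of the lower mapping is held on entry, and if the mirror pte hashes to a different lock it is taken nested (SINGLE_DEPTH_NESTING) so lockdep accepts the second acquisition. The shape of that, with pthreads standing in for the pte spinlocks (build with -lpthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ptl_low  = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t ptl_high = PTHREAD_MUTEX_INITIALIZER;

static void mirror_update(pthread_mutex_t *ptl, pthread_mutex_t *ptl_m)
{
    /* caller already holds ptl, the lower mapping's lock */
    if (ptl != ptl_m)
        pthread_mutex_lock(ptl_m);   /* spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING) */
    puts("mirror pte updated");
    if (ptl != ptl_m)
        pthread_mutex_unlock(ptl_m);
}

int main(void)
{
    pthread_mutex_lock(&ptl_low);
    mirror_update(&ptl_low, &ptl_high);
    pthread_mutex_unlock(&ptl_low);
    return 0;
}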
85241 /*
85242 * This routine handles present pages, when users try to write
85243 * to a shared page. It is done by copying the page to a new address
85244@@ -2802,6 +3004,12 @@ gotten:
85245 */
85246 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
85247 if (likely(pte_same(*page_table, orig_pte))) {
85248+
85249+#ifdef CONFIG_PAX_SEGMEXEC
85250+ if (pax_find_mirror_vma(vma))
85251+ BUG_ON(!trylock_page(new_page));
85252+#endif
85253+
85254 if (old_page) {
85255 if (!PageAnon(old_page)) {
85256 dec_mm_counter_fast(mm, MM_FILEPAGES);
85257@@ -2853,6 +3061,10 @@ gotten:
85258 page_remove_rmap(old_page);
85259 }
85260
85261+#ifdef CONFIG_PAX_SEGMEXEC
85262+ pax_mirror_anon_pte(vma, address, new_page, ptl);
85263+#endif
85264+
85265 /* Free the old page.. */
85266 new_page = old_page;
85267 ret |= VM_FAULT_WRITE;
85268@@ -3128,6 +3340,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
85269 swap_free(entry);
85270 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
85271 try_to_free_swap(page);
85272+
85273+#ifdef CONFIG_PAX_SEGMEXEC
85274+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
85275+#endif
85276+
85277 unlock_page(page);
85278 if (page != swapcache) {
85279 /*
85280@@ -3151,6 +3368,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
85281
85282 /* No need to invalidate - it was non-present before */
85283 update_mmu_cache(vma, address, page_table);
85284+
85285+#ifdef CONFIG_PAX_SEGMEXEC
85286+ pax_mirror_anon_pte(vma, address, page, ptl);
85287+#endif
85288+
85289 unlock:
85290 pte_unmap_unlock(page_table, ptl);
85291 out:
85292@@ -3170,40 +3392,6 @@ out_release:
85293 }
85294
85295 /*
85296- * This is like a special single-page "expand_{down|up}wards()",
85297- * except we must first make sure that 'address{-|+}PAGE_SIZE'
85298- * doesn't hit another vma.
85299- */
85300-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
85301-{
85302- address &= PAGE_MASK;
85303- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
85304- struct vm_area_struct *prev = vma->vm_prev;
85305-
85306- /*
85307- * Is there a mapping abutting this one below?
85308- *
85309- * That's only ok if it's the same stack mapping
85310- * that has gotten split..
85311- */
85312- if (prev && prev->vm_end == address)
85313- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
85314-
85315- expand_downwards(vma, address - PAGE_SIZE);
85316- }
85317- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
85318- struct vm_area_struct *next = vma->vm_next;
85319-
85320- /* As VM_GROWSDOWN but s/below/above/ */
85321- if (next && next->vm_start == address + PAGE_SIZE)
85322- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
85323-
85324- expand_upwards(vma, address + PAGE_SIZE);
85325- }
85326- return 0;
85327-}
85328-
85329-/*
85330 * We enter with non-exclusive mmap_sem (to exclude vma changes,
85331 * but allow concurrent faults), and pte mapped but not yet locked.
85332 * We return with mmap_sem still held, but pte unmapped and unlocked.
85333@@ -3212,27 +3400,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
85334 unsigned long address, pte_t *page_table, pmd_t *pmd,
85335 unsigned int flags)
85336 {
85337- struct page *page;
85338+ struct page *page = NULL;
85339 spinlock_t *ptl;
85340 pte_t entry;
85341
85342- pte_unmap(page_table);
85343-
85344- /* Check if we need to add a guard page to the stack */
85345- if (check_stack_guard_page(vma, address) < 0)
85346- return VM_FAULT_SIGBUS;
85347-
85348- /* Use the zero-page for reads */
85349 if (!(flags & FAULT_FLAG_WRITE)) {
85350 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
85351 vma->vm_page_prot));
85352- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
85353+ ptl = pte_lockptr(mm, pmd);
85354+ spin_lock(ptl);
85355 if (!pte_none(*page_table))
85356 goto unlock;
85357 goto setpte;
85358 }
85359
85360 /* Allocate our own private page. */
85361+ pte_unmap(page_table);
85362+
85363 if (unlikely(anon_vma_prepare(vma)))
85364 goto oom;
85365 page = alloc_zeroed_user_highpage_movable(vma, address);
85366@@ -3256,6 +3440,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
85367 if (!pte_none(*page_table))
85368 goto release;
85369
85370+#ifdef CONFIG_PAX_SEGMEXEC
85371+ if (pax_find_mirror_vma(vma))
85372+ BUG_ON(!trylock_page(page));
85373+#endif
85374+
85375 inc_mm_counter_fast(mm, MM_ANONPAGES);
85376 page_add_new_anon_rmap(page, vma, address);
85377 setpte:
85378@@ -3263,6 +3452,12 @@ setpte:
85379
85380 /* No need to invalidate - it was non-present before */
85381 update_mmu_cache(vma, address, page_table);
85382+
85383+#ifdef CONFIG_PAX_SEGMEXEC
85384+ if (page)
85385+ pax_mirror_anon_pte(vma, address, page, ptl);
85386+#endif
85387+
85388 unlock:
85389 pte_unmap_unlock(page_table, ptl);
85390 return 0;
85391@@ -3406,6 +3601,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
85392 */
85393 /* Only go through if we didn't race with anybody else... */
85394 if (likely(pte_same(*page_table, orig_pte))) {
85395+
85396+#ifdef CONFIG_PAX_SEGMEXEC
85397+ if (anon && pax_find_mirror_vma(vma))
85398+ BUG_ON(!trylock_page(page));
85399+#endif
85400+
85401 flush_icache_page(vma, page);
85402 entry = mk_pte(page, vma->vm_page_prot);
85403 if (flags & FAULT_FLAG_WRITE)
85404@@ -3425,6 +3626,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
85405
85406 /* no need to invalidate: a not-present page won't be cached */
85407 update_mmu_cache(vma, address, page_table);
85408+
85409+#ifdef CONFIG_PAX_SEGMEXEC
85410+ if (anon)
85411+ pax_mirror_anon_pte(vma, address, page, ptl);
85412+ else
85413+ pax_mirror_file_pte(vma, address, page, ptl);
85414+#endif
85415+
85416 } else {
85417 if (cow_page)
85418 mem_cgroup_uncharge_page(cow_page);
85419@@ -3746,6 +3955,12 @@ int handle_pte_fault(struct mm_struct *mm,
85420 if (flags & FAULT_FLAG_WRITE)
85421 flush_tlb_fix_spurious_fault(vma, address);
85422 }
85423+
85424+#ifdef CONFIG_PAX_SEGMEXEC
85425+ pax_mirror_pte(vma, address, pte, pmd, ptl);
85426+ return 0;
85427+#endif
85428+
85429 unlock:
85430 pte_unmap_unlock(pte, ptl);
85431 return 0;
85432@@ -3762,6 +3977,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
85433 pmd_t *pmd;
85434 pte_t *pte;
85435
85436+#ifdef CONFIG_PAX_SEGMEXEC
85437+ struct vm_area_struct *vma_m;
85438+#endif
85439+
85440 __set_current_state(TASK_RUNNING);
85441
85442 count_vm_event(PGFAULT);
85443@@ -3773,6 +3992,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
85444 if (unlikely(is_vm_hugetlb_page(vma)))
85445 return hugetlb_fault(mm, vma, address, flags);
85446
85447+#ifdef CONFIG_PAX_SEGMEXEC
85448+ vma_m = pax_find_mirror_vma(vma);
85449+ if (vma_m) {
85450+ unsigned long address_m;
85451+ pgd_t *pgd_m;
85452+ pud_t *pud_m;
85453+ pmd_t *pmd_m;
85454+
85455+ if (vma->vm_start > vma_m->vm_start) {
85456+ address_m = address;
85457+ address -= SEGMEXEC_TASK_SIZE;
85458+ vma = vma_m;
85459+ } else
85460+ address_m = address + SEGMEXEC_TASK_SIZE;
85461+
85462+ pgd_m = pgd_offset(mm, address_m);
85463+ pud_m = pud_alloc(mm, pgd_m, address_m);
85464+ if (!pud_m)
85465+ return VM_FAULT_OOM;
85466+ pmd_m = pmd_alloc(mm, pud_m, address_m);
85467+ if (!pmd_m)
85468+ return VM_FAULT_OOM;
85469+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
85470+ return VM_FAULT_OOM;
85471+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
85472+ }
85473+#endif
85474+
85475 retry:
85476 pgd = pgd_offset(mm, address);
85477 pud = pud_alloc(mm, pgd, address);
85478@@ -3871,6 +4118,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
85479 spin_unlock(&mm->page_table_lock);
85480 return 0;
85481 }
85482+
85483+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
85484+{
85485+ pud_t *new = pud_alloc_one(mm, address);
85486+ if (!new)
85487+ return -ENOMEM;
85488+
85489+ smp_wmb(); /* See comment in __pte_alloc */
85490+
85491+ spin_lock(&mm->page_table_lock);
85492+ if (pgd_present(*pgd)) /* Another has populated it */
85493+ pud_free(mm, new);
85494+ else
85495+ pgd_populate_kernel(mm, pgd, new);
85496+ spin_unlock(&mm->page_table_lock);
85497+ return 0;
85498+}
85499 #endif /* __PAGETABLE_PUD_FOLDED */
85500
85501 #ifndef __PAGETABLE_PMD_FOLDED
85502@@ -3901,6 +4165,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
85503 spin_unlock(&mm->page_table_lock);
85504 return 0;
85505 }
85506+
85507+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
85508+{
85509+ pmd_t *new = pmd_alloc_one(mm, address);
85510+ if (!new)
85511+ return -ENOMEM;
85512+
85513+ smp_wmb(); /* See comment in __pte_alloc */
85514+
85515+ spin_lock(&mm->page_table_lock);
85516+#ifndef __ARCH_HAS_4LEVEL_HACK
85517+ if (pud_present(*pud)) /* Another has populated it */
85518+ pmd_free(mm, new);
85519+ else
85520+ pud_populate_kernel(mm, pud, new);
85521+#else
85522+ if (pgd_present(*pud)) /* Another has populated it */
85523+ pmd_free(mm, new);
85524+ else
85525+ pgd_populate_kernel(mm, pud, new);
85526+#endif /* __ARCH_HAS_4LEVEL_HACK */
85527+ spin_unlock(&mm->page_table_lock);
85528+ return 0;
85529+}
85530 #endif /* __PAGETABLE_PMD_FOLDED */
85531
85532 #if !defined(__HAVE_ARCH_GATE_AREA)
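The new _kernel allocators duplicate __pud_alloc()/__pmd_alloc() but populate with the pgd_populate_kernel()/pud_populate_kernel() variants that init_mm needs under PAX_PER_CPU_PGD. They keep the usual populate-race pattern: allocate outside the lock, then either publish the new table or free the loser's copy. The generic shape (build with -lpthread; the kernel additionally issues an smp_wmb() before publishing):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t page_table_lock = PTHREAD_MUTEX_INITIALIZER;
static void *slot;                   /* the pud/pmd slot being populated */

static int alloc_slot(void)
{
    void *new = malloc(64);          /* pud_alloc_one()/pmd_alloc_one() */
    if (!new)
        return -1;

    pthread_mutex_lock(&page_table_lock);
    if (slot)                        /* another caller populated it */
        free(new);
    else
        slot = new;                  /* *_populate_kernel() */
    pthread_mutex_unlock(&page_table_lock);
    return 0;
}

int main(void)
{
    alloc_slot();
    alloc_slot();                    /* second call frees its copy */
    printf("slot=%p\n", slot);
    return 0;
}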
85533@@ -3914,7 +4202,7 @@ static int __init gate_vma_init(void)
85534 gate_vma.vm_start = FIXADDR_USER_START;
85535 gate_vma.vm_end = FIXADDR_USER_END;
85536 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
85537- gate_vma.vm_page_prot = __P101;
85538+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
85539
85540 return 0;
85541 }
85542@@ -4048,8 +4336,8 @@ out:
85543 return ret;
85544 }
85545
85546-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
85547- void *buf, int len, int write)
85548+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
85549+ void *buf, size_t len, int write)
85550 {
85551 resource_size_t phys_addr;
85552 unsigned long prot = 0;
85553@@ -4074,8 +4362,8 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
85554 * Access another process' address space as given in mm. If non-NULL, use the
85555 * given task for page fault accounting.
85556 */
85557-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
85558- unsigned long addr, void *buf, int len, int write)
85559+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
85560+ unsigned long addr, void *buf, size_t len, int write)
85561 {
85562 struct vm_area_struct *vma;
85563 void *old_buf = buf;
85564@@ -4083,7 +4371,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
85565 down_read(&mm->mmap_sem);
85566 /* ignore errors, just check how much was successfully transferred */
85567 while (len) {
85568- int bytes, ret, offset;
85569+ ssize_t bytes, ret, offset;
85570 void *maddr;
85571 struct page *page = NULL;
85572
85573@@ -4142,8 +4430,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
85574 *
85575 * The caller must hold a reference on @mm.
85576 */
85577-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
85578- void *buf, int len, int write)
85579+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
85580+ void *buf, size_t len, int write)
85581 {
85582 return __access_remote_vm(NULL, mm, addr, buf, len, write);
85583 }
85584@@ -4153,11 +4441,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
85585 * Source/target buffer must be kernel space,
85586 * Do not walk the page table directly, use get_user_pages
85587 */
85588-int access_process_vm(struct task_struct *tsk, unsigned long addr,
85589- void *buf, int len, int write)
85590+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr,
85591+ void *buf, size_t len, int write)
85592 {
85593 struct mm_struct *mm;
85594- int ret;
85595+ ssize_t ret;
85596
85597 mm = get_task_mm(tsk);
85598 if (!mm)
85599diff --git a/mm/mempolicy.c b/mm/mempolicy.c
85600index 4baf12e..5497066 100644
85601--- a/mm/mempolicy.c
85602+++ b/mm/mempolicy.c
85603@@ -708,6 +708,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
85604 unsigned long vmstart;
85605 unsigned long vmend;
85606
85607+#ifdef CONFIG_PAX_SEGMEXEC
85608+ struct vm_area_struct *vma_m;
85609+#endif
85610+
85611 vma = find_vma(mm, start);
85612 if (!vma || vma->vm_start > start)
85613 return -EFAULT;
85614@@ -751,6 +755,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
85615 err = vma_replace_policy(vma, new_pol);
85616 if (err)
85617 goto out;
85618+
85619+#ifdef CONFIG_PAX_SEGMEXEC
85620+ vma_m = pax_find_mirror_vma(vma);
85621+ if (vma_m) {
85622+ err = vma_replace_policy(vma_m, new_pol);
85623+ if (err)
85624+ goto out;
85625+ }
85626+#endif
85627+
85628 }
85629
85630 out:
85631@@ -1206,6 +1220,17 @@ static long do_mbind(unsigned long start, unsigned long len,
85632
85633 if (end < start)
85634 return -EINVAL;
85635+
85636+#ifdef CONFIG_PAX_SEGMEXEC
85637+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
85638+ if (end > SEGMEXEC_TASK_SIZE)
85639+ return -EINVAL;
85640+ } else
85641+#endif
85642+
85643+ if (end > TASK_SIZE)
85644+ return -EINVAL;
85645+
85646 if (end == start)
85647 return 0;
85648
85649@@ -1434,8 +1459,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
85650 */
85651 tcred = __task_cred(task);
85652 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
85653- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
85654- !capable(CAP_SYS_NICE)) {
85655+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
85656 rcu_read_unlock();
85657 err = -EPERM;
85658 goto out_put;
85659@@ -1466,6 +1490,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
85660 goto out;
85661 }
85662
85663+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
85664+ if (mm != current->mm &&
85665+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
85666+ mmput(mm);
85667+ err = -EPERM;
85668+ goto out;
85669+ }
85670+#endif
85671+
85672 err = do_migrate_pages(mm, old, new,
85673 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
85674
85675diff --git a/mm/migrate.c b/mm/migrate.c
85676index 6f0c244..6d1ae32 100644
85677--- a/mm/migrate.c
85678+++ b/mm/migrate.c
85679@@ -1399,8 +1399,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
85680 */
85681 tcred = __task_cred(task);
85682 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
85683- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
85684- !capable(CAP_SYS_NICE)) {
85685+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
85686 rcu_read_unlock();
85687 err = -EPERM;
85688 goto out;
85689diff --git a/mm/mlock.c b/mm/mlock.c
85690index 79b7cf7..9944291 100644
85691--- a/mm/mlock.c
85692+++ b/mm/mlock.c
85693@@ -13,6 +13,7 @@
85694 #include <linux/pagemap.h>
85695 #include <linux/mempolicy.h>
85696 #include <linux/syscalls.h>
85697+#include <linux/security.h>
85698 #include <linux/sched.h>
85699 #include <linux/export.h>
85700 #include <linux/rmap.h>
85701@@ -334,7 +335,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
85702 {
85703 unsigned long nstart, end, tmp;
85704 struct vm_area_struct * vma, * prev;
85705- int error;
85706+ int error = 0;
85707
85708 VM_BUG_ON(start & ~PAGE_MASK);
85709 VM_BUG_ON(len != PAGE_ALIGN(len));
85710@@ -343,6 +344,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
85711 return -EINVAL;
85712 if (end == start)
85713 return 0;
85714+ if (end > TASK_SIZE)
85715+ return -EINVAL;
85716+
85717 vma = find_vma(current->mm, start);
85718 if (!vma || vma->vm_start > start)
85719 return -ENOMEM;
85720@@ -354,6 +358,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
85721 for (nstart = start ; ; ) {
85722 vm_flags_t newflags;
85723
85724+#ifdef CONFIG_PAX_SEGMEXEC
85725+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
85726+ break;
85727+#endif
85728+
85729 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
85730
85731 newflags = vma->vm_flags & ~VM_LOCKED;
85732@@ -466,6 +475,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
85733 lock_limit >>= PAGE_SHIFT;
85734
85735 /* check against resource limits */
85736+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
85737 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
85738 error = do_mlock(start, len, 1);
85739 up_write(&current->mm->mmap_sem);
85740@@ -500,6 +510,11 @@ static int do_mlockall(int flags)
85741 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
85742 vm_flags_t newflags;
85743
85744+#ifdef CONFIG_PAX_SEGMEXEC
85745+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
85746+ break;
85747+#endif
85748+
85749 newflags = vma->vm_flags & ~VM_LOCKED;
85750 if (flags & MCL_CURRENT)
85751 newflags |= VM_LOCKED;
85752@@ -532,6 +547,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
85753 lock_limit >>= PAGE_SHIFT;
85754
85755 ret = -ENOMEM;
85756+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
85757 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
85758 capable(CAP_IPC_LOCK))
85759 ret = do_mlockall(flags);
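Both mlock paths now report the prospective locked size to gr_learn_resource() before the stock RLIMIT_MEMLOCK check, and vmas at or above the SEGMEXEC split are skipped so mirrors are never mlocked directly. The limit itself, observed from userspace (as an unprivileged process; CAP_IPC_LOCK bypasses it):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/resource.h>

int main(void)
{
    struct rlimit rl = { 4096, 4096 };
    setrlimit(RLIMIT_MEMLOCK, &rl);      /* one page of lockable memory */

    static char big[1 << 20];
    if (mlock(big, sizeof(big)) < 0)     /* 1 MB exceeds the limit */
        printf("mlock: %s\n", strerror(errno));  /* ENOMEM (or EPERM) */
    return 0;
}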
85760diff --git a/mm/mmap.c b/mm/mmap.c
85761index 7dbe397..bfb7626 100644
85762--- a/mm/mmap.c
85763+++ b/mm/mmap.c
85764@@ -36,6 +36,7 @@
85765 #include <linux/sched/sysctl.h>
85766 #include <linux/notifier.h>
85767 #include <linux/memory.h>
85768+#include <linux/random.h>
85769
85770 #include <asm/uaccess.h>
85771 #include <asm/cacheflush.h>
85772@@ -52,6 +53,16 @@
85773 #define arch_rebalance_pgtables(addr, len) (addr)
85774 #endif
85775
85776+static inline void verify_mm_writelocked(struct mm_struct *mm)
85777+{
85778+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
85779+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
85780+ up_read(&mm->mmap_sem);
85781+ BUG();
85782+ }
85783+#endif
85784+}
85785+
85786 static void unmap_region(struct mm_struct *mm,
85787 struct vm_area_struct *vma, struct vm_area_struct *prev,
85788 unsigned long start, unsigned long end);
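verify_mm_writelocked() asserts that the caller holds mmap_sem for writing by probing it: if a read-trylock succeeds, no writer holds the semaphore and that is a bug. The same trick with a pthread rwlock (semantics differ slightly, since POSIX may refuse the same-thread retry outright, which still fails the probe as intended; build with -lpthread):

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t mmap_sem = PTHREAD_RWLOCK_INITIALIZER;

static void verify_writelocked(void)
{
    if (pthread_rwlock_tryrdlock(&mmap_sem) == 0) {  /* must fail here */
        pthread_rwlock_unlock(&mmap_sem);
        assert(!"caller does not hold the write lock");
    }
}

int main(void)
{
    pthread_rwlock_wrlock(&mmap_sem);
    verify_writelocked();            /* passes: write lock is held */
    pthread_rwlock_unlock(&mmap_sem);
    puts("ok");
    return 0;
}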
85789@@ -71,16 +82,25 @@ static void unmap_region(struct mm_struct *mm,
85790 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
85791 *
85792 */
85793-pgprot_t protection_map[16] = {
85794+pgprot_t protection_map[16] __read_only = {
85795 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
85796 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
85797 };
85798
85799-pgprot_t vm_get_page_prot(unsigned long vm_flags)
85800+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
85801 {
85802- return __pgprot(pgprot_val(protection_map[vm_flags &
85803+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
85804 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
85805 pgprot_val(arch_vm_get_page_prot(vm_flags)));
85806+
85807+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
85808+ if (!(__supported_pte_mask & _PAGE_NX) &&
85809+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
85810+ (vm_flags & (VM_READ | VM_WRITE)))
85811+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
85812+#endif
85813+
85814+ return prot;
85815 }
85816 EXPORT_SYMBOL(vm_get_page_prot);
85817
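protection_map moves to __read_only and vm_get_page_prot() grows a PAGEEXEC fixup for 32-bit CPUs without hardware NX, but the core lookup is unchanged: the low four vm_flags bits index the 16-entry table. The indexing, using the kernel's flag values:

#include <stdio.h>

#define VM_READ   0x1
#define VM_WRITE  0x2
#define VM_EXEC   0x4
#define VM_SHARED 0x8

int main(void)
{
    const char *protection_map[16] = {
        "P000","P001","P010","P011","P100","P101","P110","P111",
        "S000","S001","S010","S011","S100","S101","S110","S111",
    };
    unsigned long vm_flags = VM_READ | VM_EXEC;           /* private r-x */
    printf("%s\n", protection_map[vm_flags &
           (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)]);  /* P101 */
    return 0;
}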
85818@@ -89,6 +109,7 @@ int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
85819 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
85820 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
85821 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
85822+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
85823 /*
85824 * Make sure vm_committed_as in one cacheline and not cacheline shared with
85825 * other variables. It can be updated by several CPUs frequently.
85826@@ -247,6 +268,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
85827 struct vm_area_struct *next = vma->vm_next;
85828
85829 might_sleep();
85830+ BUG_ON(vma->vm_mirror);
85831 if (vma->vm_ops && vma->vm_ops->close)
85832 vma->vm_ops->close(vma);
85833 if (vma->vm_file)
85834@@ -291,6 +313,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
85835 * not page aligned -Ram Gupta
85836 */
85837 rlim = rlimit(RLIMIT_DATA);
85838+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
85839 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
85840 (mm->end_data - mm->start_data) > rlim)
85841 goto out;
85842@@ -933,6 +956,12 @@ static int
85843 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
85844 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
85845 {
85846+
85847+#ifdef CONFIG_PAX_SEGMEXEC
85848+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
85849+ return 0;
85850+#endif
85851+
85852 if (is_mergeable_vma(vma, file, vm_flags) &&
85853 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
85854 if (vma->vm_pgoff == vm_pgoff)
85855@@ -952,6 +981,12 @@ static int
85856 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
85857 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
85858 {
85859+
85860+#ifdef CONFIG_PAX_SEGMEXEC
85861+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
85862+ return 0;
85863+#endif
85864+
85865 if (is_mergeable_vma(vma, file, vm_flags) &&
85866 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
85867 pgoff_t vm_pglen;
85868@@ -994,13 +1029,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
85869 struct vm_area_struct *vma_merge(struct mm_struct *mm,
85870 struct vm_area_struct *prev, unsigned long addr,
85871 unsigned long end, unsigned long vm_flags,
85872- struct anon_vma *anon_vma, struct file *file,
85873+ struct anon_vma *anon_vma, struct file *file,
85874 pgoff_t pgoff, struct mempolicy *policy)
85875 {
85876 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
85877 struct vm_area_struct *area, *next;
85878 int err;
85879
85880+#ifdef CONFIG_PAX_SEGMEXEC
85881+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
85882+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
85883+
85884+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
85885+#endif
85886+
85887 /*
85888 * We later require that vma->vm_flags == vm_flags,
85889 * so this tests vma->vm_flags & VM_SPECIAL, too.
85890@@ -1016,6 +1058,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
85891 if (next && next->vm_end == end) /* cases 6, 7, 8 */
85892 next = next->vm_next;
85893
85894+#ifdef CONFIG_PAX_SEGMEXEC
85895+ if (prev)
85896+ prev_m = pax_find_mirror_vma(prev);
85897+ if (area)
85898+ area_m = pax_find_mirror_vma(area);
85899+ if (next)
85900+ next_m = pax_find_mirror_vma(next);
85901+#endif
85902+
85903 /*
85904 * Can it merge with the predecessor?
85905 */
85906@@ -1035,9 +1086,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
85907 /* cases 1, 6 */
85908 err = vma_adjust(prev, prev->vm_start,
85909 next->vm_end, prev->vm_pgoff, NULL);
85910- } else /* cases 2, 5, 7 */
85911+
85912+#ifdef CONFIG_PAX_SEGMEXEC
85913+ if (!err && prev_m)
85914+ err = vma_adjust(prev_m, prev_m->vm_start,
85915+ next_m->vm_end, prev_m->vm_pgoff, NULL);
85916+#endif
85917+
85918+ } else { /* cases 2, 5, 7 */
85919 err = vma_adjust(prev, prev->vm_start,
85920 end, prev->vm_pgoff, NULL);
85921+
85922+#ifdef CONFIG_PAX_SEGMEXEC
85923+ if (!err && prev_m)
85924+ err = vma_adjust(prev_m, prev_m->vm_start,
85925+ end_m, prev_m->vm_pgoff, NULL);
85926+#endif
85927+
85928+ }
85929 if (err)
85930 return NULL;
85931 khugepaged_enter_vma_merge(prev);
85932@@ -1051,12 +1117,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
85933 mpol_equal(policy, vma_policy(next)) &&
85934 can_vma_merge_before(next, vm_flags,
85935 anon_vma, file, pgoff+pglen)) {
85936- if (prev && addr < prev->vm_end) /* case 4 */
85937+ if (prev && addr < prev->vm_end) { /* case 4 */
85938 err = vma_adjust(prev, prev->vm_start,
85939 addr, prev->vm_pgoff, NULL);
85940- else /* cases 3, 8 */
85941+
85942+#ifdef CONFIG_PAX_SEGMEXEC
85943+ if (!err && prev_m)
85944+ err = vma_adjust(prev_m, prev_m->vm_start,
85945+ addr_m, prev_m->vm_pgoff, NULL);
85946+#endif
85947+
85948+ } else { /* cases 3, 8 */
85949 err = vma_adjust(area, addr, next->vm_end,
85950 next->vm_pgoff - pglen, NULL);
85951+
85952+#ifdef CONFIG_PAX_SEGMEXEC
85953+ if (!err && area_m)
85954+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
85955+ next_m->vm_pgoff - pglen, NULL);
85956+#endif
85957+
85958+ }
85959 if (err)
85960 return NULL;
85961 khugepaged_enter_vma_merge(area);
85962@@ -1165,8 +1246,10 @@ none:
85963 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
85964 struct file *file, long pages)
85965 {
85966- const unsigned long stack_flags
85967- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
85968+
85969+#ifdef CONFIG_PAX_RANDMMAP
85970+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
85971+#endif
85972
85973 mm->total_vm += pages;
85974
85975@@ -1174,7 +1257,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
85976 mm->shared_vm += pages;
85977 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
85978 mm->exec_vm += pages;
85979- } else if (flags & stack_flags)
85980+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
85981 mm->stack_vm += pages;
85982 }
85983 #endif /* CONFIG_PROC_FS */
85984@@ -1213,7 +1296,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
85985 * (the exception is when the underlying filesystem is noexec
85986 * mounted, in which case we dont add PROT_EXEC.)
85987 */
85988- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
85989+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
85990 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
85991 prot |= PROT_EXEC;
85992
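
The widened test above means a personality carrying READ_IMPLIES_EXEC now
upgrades write-only mappings to executable as well, not just readable ones.
The personality bit itself is set from userspace; a sketch:

#include <stdio.h>
#include <sys/personality.h>

int main(void)
{
    /* read-modify-write the current persona */
    if (personality(personality(0xffffffff) | READ_IMPLIES_EXEC) < 0) {
        perror("personality");
        return 1;
    }
    printf("READ_IMPLIES_EXEC set; PROT_READ/PROT_WRITE mmaps "
           "now gain PROT_EXEC\n");
    return 0;
}
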
85993@@ -1239,7 +1322,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
85994 /* Obtain the address to map to. we verify (or select) it and ensure
85995 * that it represents a valid section of the address space.
85996 */
85997- addr = get_unmapped_area(file, addr, len, pgoff, flags);
85998+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
85999 if (addr & ~PAGE_MASK)
86000 return addr;
86001
86002@@ -1250,6 +1333,43 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
86003 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
86004 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
86005
86006+#ifdef CONFIG_PAX_MPROTECT
86007+ if (mm->pax_flags & MF_PAX_MPROTECT) {
86008+
86009+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
86010+ if (file && !pgoff && (vm_flags & VM_EXEC) && mm->binfmt &&
86011+ mm->binfmt->handle_mmap)
86012+ mm->binfmt->handle_mmap(file);
86013+#endif
86014+
86015+#ifndef CONFIG_PAX_MPROTECT_COMPAT
86016+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
86017+ gr_log_rwxmmap(file);
86018+
86019+#ifdef CONFIG_PAX_EMUPLT
86020+ vm_flags &= ~VM_EXEC;
86021+#else
86022+ return -EPERM;
86023+#endif
86024+
86025+ }
86026+
86027+ if (!(vm_flags & VM_EXEC))
86028+ vm_flags &= ~VM_MAYEXEC;
86029+#else
86030+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
86031+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
86032+#endif
86033+ else
86034+ vm_flags &= ~VM_MAYWRITE;
86035+ }
86036+#endif
86037+
86038+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
86039+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
86040+ vm_flags &= ~VM_PAGEEXEC;
86041+#endif
86042+
86043 if (flags & MAP_LOCKED)
86044 if (!can_do_mlock())
86045 return -EPERM;
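
The MPROTECT branch above is the mmap()-side half of PaX's W^X policy: a
simultaneously writable and executable request is logged and, without
CONFIG_PAX_EMUPLT, refused outright, and a non-executable mapping also loses
VM_MAYEXEC so it cannot be flipped executable later. A quick userspace probe,
a sketch whose outcome depends on which of the branches is configured and on
the task's PaX flags:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
    void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_EXEC,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    if (p == MAP_FAILED)
        printf("W|X mapping refused: %s\n", strerror(errno));
    else
        printf("W|X mapping allowed at %p (MPROTECT not active?)\n", p);
    return 0;
}
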
86046@@ -1261,6 +1381,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
86047 locked += mm->locked_vm;
86048 lock_limit = rlimit(RLIMIT_MEMLOCK);
86049 lock_limit >>= PAGE_SHIFT;
86050+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
86051 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
86052 return -EAGAIN;
86053 }
86054@@ -1341,6 +1462,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
86055 vm_flags |= VM_NORESERVE;
86056 }
86057
86058+ if (!gr_acl_handle_mmap(file, prot))
86059+ return -EACCES;
86060+
86061 addr = mmap_region(file, addr, len, vm_flags, pgoff);
86062 if (!IS_ERR_VALUE(addr) &&
86063 ((vm_flags & VM_LOCKED) ||
86064@@ -1432,7 +1556,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
86065 vm_flags_t vm_flags = vma->vm_flags;
86066
86067 /* If it was private or non-writable, the write bit is already clear */
86068- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
86069+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
86070 return 0;
86071
86072 /* The backer wishes to know when pages are first written to? */
86073@@ -1480,7 +1604,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
86074 unsigned long charged = 0;
86075 struct inode *inode = file ? file_inode(file) : NULL;
86076
86077+#ifdef CONFIG_PAX_SEGMEXEC
86078+ struct vm_area_struct *vma_m = NULL;
86079+#endif
86080+
86081+ /*
86082+ * mm->mmap_sem is required to protect against another thread
86083+ * changing the mappings in case we sleep.
86084+ */
86085+ verify_mm_writelocked(mm);
86086+
86087 /* Check against address space limit. */
86088+
86089+#ifdef CONFIG_PAX_RANDMMAP
86090+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (vm_flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
86091+#endif
86092+
86093 if (!may_expand_vm(mm, len >> PAGE_SHIFT)) {
86094 unsigned long nr_pages;
86095
86096@@ -1499,11 +1638,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
86097
86098 /* Clear old maps */
86099 error = -ENOMEM;
86100-munmap_back:
86101 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
86102 if (do_munmap(mm, addr, len))
86103 return -ENOMEM;
86104- goto munmap_back;
86105+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
86106 }
86107
86108 /*
86109@@ -1534,6 +1672,16 @@ munmap_back:
86110 goto unacct_error;
86111 }
86112
86113+#ifdef CONFIG_PAX_SEGMEXEC
86114+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
86115+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
86116+ if (!vma_m) {
86117+ error = -ENOMEM;
86118+ goto free_vma;
86119+ }
86120+ }
86121+#endif
86122+
86123 vma->vm_mm = mm;
86124 vma->vm_start = addr;
86125 vma->vm_end = addr + len;
86126@@ -1558,6 +1706,13 @@ munmap_back:
86127 if (error)
86128 goto unmap_and_free_vma;
86129
86130+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
86131+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
86132+ vma->vm_flags |= VM_PAGEEXEC;
86133+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
86134+ }
86135+#endif
86136+
86137 /* Can addr have changed??
86138 *
86139 * Answer: Yes, several device drivers can do it in their
86140@@ -1596,6 +1751,11 @@ munmap_back:
86141 vma_link(mm, vma, prev, rb_link, rb_parent);
86142 file = vma->vm_file;
86143
86144+#ifdef CONFIG_PAX_SEGMEXEC
86145+ if (vma_m)
86146+ BUG_ON(pax_mirror_vma(vma_m, vma));
86147+#endif
86148+
86149 /* Once vma denies write, undo our temporary denial count */
86150 if (correct_wcount)
86151 atomic_inc(&inode->i_writecount);
86152@@ -1603,6 +1763,7 @@ out:
86153 perf_event_mmap(vma);
86154
86155 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
86156+ track_exec_limit(mm, addr, addr + len, vm_flags);
86157 if (vm_flags & VM_LOCKED) {
86158 if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
86159 vma == get_gate_vma(current->mm)))
86160@@ -1626,6 +1787,12 @@ unmap_and_free_vma:
86161 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
86162 charged = 0;
86163 free_vma:
86164+
86165+#ifdef CONFIG_PAX_SEGMEXEC
86166+ if (vma_m)
86167+ kmem_cache_free(vm_area_cachep, vma_m);
86168+#endif
86169+
86170 kmem_cache_free(vm_area_cachep, vma);
86171 unacct_error:
86172 if (charged)
86173@@ -1633,7 +1800,63 @@ unacct_error:
86174 return error;
86175 }
86176
86177-unsigned long unmapped_area(struct vm_unmapped_area_info *info)
86178+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
86179+unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
86180+{
86181+ if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
86182+ return ((prandom_u32() & 0xFF) + 1) << PAGE_SHIFT;
86183+
86184+ return 0;
86185+}
86186+#endif
86187+
86188+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset)
86189+{
86190+ if (!vma) {
86191+#ifdef CONFIG_STACK_GROWSUP
86192+ if (addr > sysctl_heap_stack_gap)
86193+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
86194+ else
86195+ vma = find_vma(current->mm, 0);
86196+ if (vma && (vma->vm_flags & VM_GROWSUP))
86197+ return false;
86198+#endif
86199+ return true;
86200+ }
86201+
86202+ if (addr + len > vma->vm_start)
86203+ return false;
86204+
86205+ if (vma->vm_flags & VM_GROWSDOWN)
86206+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
86207+#ifdef CONFIG_STACK_GROWSUP
86208+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
86209+ return addr - vma->vm_prev->vm_end >= sysctl_heap_stack_gap;
86210+#endif
86211+ else if (offset)
86212+ return offset <= vma->vm_start - addr - len;
86213+
86214+ return true;
86215+}
86216+
86217+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
86218+{
86219+ if (vma->vm_start < len)
86220+ return -ENOMEM;
86221+
86222+ if (!(vma->vm_flags & VM_GROWSDOWN)) {
86223+ if (offset <= vma->vm_start - len)
86224+ return vma->vm_start - len - offset;
86225+ else
86226+ return -ENOMEM;
86227+ }
86228+
86229+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
86230+ return vma->vm_start - len - sysctl_heap_stack_gap;
86231+ return -ENOMEM;
86232+}
86233+
86234+unsigned long unmapped_area(const struct vm_unmapped_area_info *info)
86235 {
86236 /*
86237 * We implement the search by looking for an rbtree node that
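
check_heap_stack_gap() above boils down to interval arithmetic: the candidate
range may not touch the next vma, and when that vma is a downward-growing
stack, sysctl_heap_stack_gap bytes must remain free below it. The
VM_GROWSDOWN case, distilled into a standalone predicate with made-up
numbers (a sketch):

#include <stdbool.h>
#include <stdio.h>

static bool gap_ok(unsigned long addr, unsigned long len,
                   unsigned long vm_start, unsigned long gap)
{
    if (addr + len > vm_start)  /* overlaps the stack vma */
        return false;
    return gap <= vm_start - addr - len;
}

int main(void)
{
    unsigned long stack = 0x40000000UL, gap = 0x10000UL; /* 64 KiB */

    printf("%d\n", gap_ok(0x3ffd0000UL, 0x10000, stack, gap)); /* 1 */
    printf("%d\n", gap_ok(0x3fff0000UL, 0x08000, stack, gap)); /* 0 */
    return 0;
}
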
86238@@ -1681,11 +1904,29 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
86239 }
86240 }
86241
86242- gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
85843+ gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
86244 check_current:
86245 /* Check if current node has a suitable gap */
86246 if (gap_start > high_limit)
86247 return -ENOMEM;
86248+
86249+ if (gap_end - gap_start > info->threadstack_offset)
86250+ gap_start += info->threadstack_offset;
86251+ else
86252+ gap_start = gap_end;
86253+
86254+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
86255+ if (gap_end - gap_start > sysctl_heap_stack_gap)
86256+ gap_start += sysctl_heap_stack_gap;
86257+ else
86258+ gap_start = gap_end;
86259+ }
86260+ if (vma->vm_flags & VM_GROWSDOWN) {
86261+ if (gap_end - gap_start > sysctl_heap_stack_gap)
86262+ gap_end -= sysctl_heap_stack_gap;
86263+ else
86264+ gap_end = gap_start;
86265+ }
86266 if (gap_end >= low_limit && gap_end - gap_start >= length)
86267 goto found;
86268
86269@@ -1735,7 +1976,7 @@ found:
86270 return gap_start;
86271 }
86272
86273-unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
86274+unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info)
86275 {
86276 struct mm_struct *mm = current->mm;
86277 struct vm_area_struct *vma;
86278@@ -1789,6 +2030,24 @@ check_current:
86279 gap_end = vma->vm_start;
86280 if (gap_end < low_limit)
86281 return -ENOMEM;
86282+
86283+ if (gap_end - gap_start > info->threadstack_offset)
86284+ gap_end -= info->threadstack_offset;
86285+ else
86286+ gap_end = gap_start;
86287+
86288+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
86289+ if (gap_end - gap_start > sysctl_heap_stack_gap)
86290+ gap_start += sysctl_heap_stack_gap;
86291+ else
86292+ gap_start = gap_end;
86293+ }
86294+ if (vma->vm_flags & VM_GROWSDOWN) {
86295+ if (gap_end - gap_start > sysctl_heap_stack_gap)
86296+ gap_end -= sysctl_heap_stack_gap;
86297+ else
86298+ gap_end = gap_start;
86299+ }
86300 if (gap_start <= high_limit && gap_end - gap_start >= length)
86301 goto found;
86302
86303@@ -1852,6 +2111,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
86304 struct mm_struct *mm = current->mm;
86305 struct vm_area_struct *vma;
86306 struct vm_unmapped_area_info info;
86307+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
86308
86309 if (len > TASK_SIZE)
86310 return -ENOMEM;
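
gr_rand_threadstack_offset(), defined earlier in this file, enters these
checks as `offset`: for anonymous MAP_STACK requests under RANDMMAP it yields
a random pad of 1..256 pages, i.e. 4 KiB to 1 MiB with 4 KiB pages, so
sibling thread stacks land at unpredictable distances from their neighbours.
The distribution mimicked in userspace (a sketch; the kernel draws from
prandom_u32()):

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

int main(void)
{
    srand(time(NULL));
    /* ((prandom_u32() & 0xFF) + 1) << PAGE_SHIFT in the kernel */
    unsigned long off = (unsigned long)((rand() & 0xFF) + 1) << 12;

    printf("this thread stack would be padded by %lu KiB\n", off >> 10);
    return 0;
}
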
86311@@ -1859,29 +2119,45 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
86312 if (flags & MAP_FIXED)
86313 return addr;
86314
86315+#ifdef CONFIG_PAX_RANDMMAP
86316+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
86317+#endif
86318+
86319 if (addr) {
86320 addr = PAGE_ALIGN(addr);
86321 vma = find_vma(mm, addr);
86322- if (TASK_SIZE - len >= addr &&
86323- (!vma || addr + len <= vma->vm_start))
86324+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
86325 return addr;
86326 }
86327
86328 info.flags = 0;
86329 info.length = len;
86330 info.low_limit = TASK_UNMAPPED_BASE;
86331+
86332+#ifdef CONFIG_PAX_RANDMMAP
86333+ if (mm->pax_flags & MF_PAX_RANDMMAP)
86334+ info.low_limit += mm->delta_mmap;
86335+#endif
86336+
86337 info.high_limit = TASK_SIZE;
86338 info.align_mask = 0;
86339+ info.threadstack_offset = offset;
86340 return vm_unmapped_area(&info);
86341 }
86342 #endif
86343
86344 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
86345 {
86346+
86347+#ifdef CONFIG_PAX_SEGMEXEC
86348+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
86349+ return;
86350+#endif
86351+
86352 /*
86353 * Is this a new hole at the lowest possible address?
86354 */
86355- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache)
86356+ if (addr >= mm->mmap_base && addr < mm->free_area_cache)
86357 mm->free_area_cache = addr;
86358 }
86359
86360@@ -1899,6 +2175,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
86361 struct mm_struct *mm = current->mm;
86362 unsigned long addr = addr0;
86363 struct vm_unmapped_area_info info;
86364+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
86365
86366 /* requested length too big for entire address space */
86367 if (len > TASK_SIZE)
86368@@ -1907,12 +2184,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
86369 if (flags & MAP_FIXED)
86370 return addr;
86371
86372+#ifdef CONFIG_PAX_RANDMMAP
86373+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
86374+#endif
86375+
86376 /* requesting a specific address */
86377 if (addr) {
86378 addr = PAGE_ALIGN(addr);
86379 vma = find_vma(mm, addr);
86380- if (TASK_SIZE - len >= addr &&
86381- (!vma || addr + len <= vma->vm_start))
86382+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
86383 return addr;
86384 }
86385
86386@@ -1921,6 +2201,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
86387 info.low_limit = PAGE_SIZE;
86388 info.high_limit = mm->mmap_base;
86389 info.align_mask = 0;
86390+ info.threadstack_offset = offset;
86391 addr = vm_unmapped_area(&info);
86392
86393 /*
86394@@ -1933,6 +2214,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
86395 VM_BUG_ON(addr != -ENOMEM);
86396 info.flags = 0;
86397 info.low_limit = TASK_UNMAPPED_BASE;
86398+
86399+#ifdef CONFIG_PAX_RANDMMAP
86400+ if (mm->pax_flags & MF_PAX_RANDMMAP)
86401+ info.low_limit += mm->delta_mmap;
86402+#endif
86403+
86404 info.high_limit = TASK_SIZE;
86405 addr = vm_unmapped_area(&info);
86406 }
86407@@ -1943,6 +2230,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
86408
86409 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
86410 {
86411+
86412+#ifdef CONFIG_PAX_SEGMEXEC
86413+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
86414+ return;
86415+#endif
86416+
86417 /*
86418 * Is this a new hole at the highest possible address?
86419 */
86420@@ -1950,8 +2243,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
86421 mm->free_area_cache = addr;
86422
86423 /* dont allow allocations above current base */
86424- if (mm->free_area_cache > mm->mmap_base)
86425+ if (mm->free_area_cache > mm->mmap_base) {
86426 mm->free_area_cache = mm->mmap_base;
86427+ mm->cached_hole_size = ~0UL;
86428+ }
86429 }
86430
86431 unsigned long
86432@@ -2047,6 +2342,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
86433 return vma;
86434 }
86435
86436+#ifdef CONFIG_PAX_SEGMEXEC
86437+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
86438+{
86439+ struct vm_area_struct *vma_m;
86440+
86441+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
86442+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
86443+ BUG_ON(vma->vm_mirror);
86444+ return NULL;
86445+ }
86446+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
86447+ vma_m = vma->vm_mirror;
86448+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
86449+ BUG_ON(vma->vm_file != vma_m->vm_file);
86450+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
86451+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
86452+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
86453+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
86454+ return vma_m;
86455+}
86456+#endif
86457+
86458 /*
86459 * Verify that the stack growth is acceptable and
86460 * update accounting. This is shared with both the
86461@@ -2063,6 +2380,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
86462 return -ENOMEM;
86463
86464 /* Stack limit test */
86465+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
86466 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
86467 return -ENOMEM;
86468
86469@@ -2073,6 +2391,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
86470 locked = mm->locked_vm + grow;
86471 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
86472 limit >>= PAGE_SHIFT;
86473+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
86474 if (locked > limit && !capable(CAP_IPC_LOCK))
86475 return -ENOMEM;
86476 }
86477@@ -2102,37 +2421,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
86478 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
86479 * vma is the last one with address > vma->vm_end. Have to extend vma.
86480 */
86481+#ifndef CONFIG_IA64
86482+static
86483+#endif
86484 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
86485 {
86486 int error;
86487+ bool locknext;
86488
86489 if (!(vma->vm_flags & VM_GROWSUP))
86490 return -EFAULT;
86491
86492+ /* Also guard against wrapping around to address 0. */
86493+ if (address < PAGE_ALIGN(address+1))
86494+ address = PAGE_ALIGN(address+1);
86495+ else
86496+ return -ENOMEM;
86497+
86498 /*
86499 * We must make sure the anon_vma is allocated
86500 * so that the anon_vma locking is not a noop.
86501 */
86502 if (unlikely(anon_vma_prepare(vma)))
86503 return -ENOMEM;
86504+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
86505+ if (locknext && anon_vma_prepare(vma->vm_next))
86506+ return -ENOMEM;
86507 vma_lock_anon_vma(vma);
86508+ if (locknext)
86509+ vma_lock_anon_vma(vma->vm_next);
86510
86511 /*
86512 * vma->vm_start/vm_end cannot change under us because the caller
86513 * is required to hold the mmap_sem in read mode. We need the
86514- * anon_vma lock to serialize against concurrent expand_stacks.
86515- * Also guard against wrapping around to address 0.
86516+ * anon_vma locks to serialize against concurrent expand_stacks
86517+ * and expand_upwards.
86518 */
86519- if (address < PAGE_ALIGN(address+4))
86520- address = PAGE_ALIGN(address+4);
86521- else {
86522- vma_unlock_anon_vma(vma);
86523- return -ENOMEM;
86524- }
86525 error = 0;
86526
86527 /* Somebody else might have raced and expanded it already */
86528- if (address > vma->vm_end) {
86529+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
86530+ error = -ENOMEM;
86531+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
86532 unsigned long size, grow;
86533
86534 size = address - vma->vm_start;
86535@@ -2167,6 +2497,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
86536 }
86537 }
86538 }
86539+ if (locknext)
86540+ vma_unlock_anon_vma(vma->vm_next);
86541 vma_unlock_anon_vma(vma);
86542 khugepaged_enter_vma_merge(vma);
86543 validate_mm(vma->vm_mm);
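
The relocated guard in expand_upwards() above rounds the fault address up to
the next page boundary and catches wrap-around at the very top of the address
space in a single comparison: if address+1 wraps, PAGE_ALIGN() yields a value
no larger than the input and the expansion is refused. Standalone rendering
of the idiom (a sketch):

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
    unsigned long ok = 0x00007ffffff01234UL;
    unsigned long top = ~0UL;

    printf("%#lx -> %#lx\n", ok, PAGE_ALIGN(ok + 1));  /* rounds up */
    printf("wraps: %d\n", PAGE_ALIGN(top + 1) <= top); /* 1: rejected */
    return 0;
}
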
86544@@ -2181,6 +2513,8 @@ int expand_downwards(struct vm_area_struct *vma,
86545 unsigned long address)
86546 {
86547 int error;
86548+ bool lockprev = false;
86549+ struct vm_area_struct *prev;
86550
86551 /*
86552 * We must make sure the anon_vma is allocated
86553@@ -2194,6 +2528,15 @@ int expand_downwards(struct vm_area_struct *vma,
86554 if (error)
86555 return error;
86556
86557+ prev = vma->vm_prev;
86558+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
86559+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
86560+#endif
86561+ if (lockprev && anon_vma_prepare(prev))
86562+ return -ENOMEM;
86563+ if (lockprev)
86564+ vma_lock_anon_vma(prev);
86565+
86566 vma_lock_anon_vma(vma);
86567
86568 /*
86569@@ -2203,9 +2546,17 @@ int expand_downwards(struct vm_area_struct *vma,
86570 */
86571
86572 /* Somebody else might have raced and expanded it already */
86573- if (address < vma->vm_start) {
86574+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
86575+ error = -ENOMEM;
86576+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
86577 unsigned long size, grow;
86578
86579+#ifdef CONFIG_PAX_SEGMEXEC
86580+ struct vm_area_struct *vma_m;
86581+
86582+ vma_m = pax_find_mirror_vma(vma);
86583+#endif
86584+
86585 size = vma->vm_end - address;
86586 grow = (vma->vm_start - address) >> PAGE_SHIFT;
86587
86588@@ -2230,13 +2581,27 @@ int expand_downwards(struct vm_area_struct *vma,
86589 vma->vm_pgoff -= grow;
86590 anon_vma_interval_tree_post_update_vma(vma);
86591 vma_gap_update(vma);
86592+
86593+#ifdef CONFIG_PAX_SEGMEXEC
86594+ if (vma_m) {
86595+ anon_vma_interval_tree_pre_update_vma(vma_m);
86596+ vma_m->vm_start -= grow << PAGE_SHIFT;
86597+ vma_m->vm_pgoff -= grow;
86598+ anon_vma_interval_tree_post_update_vma(vma_m);
86599+ vma_gap_update(vma_m);
86600+ }
86601+#endif
86602+
86603 spin_unlock(&vma->vm_mm->page_table_lock);
86604
86605+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
86606 perf_event_mmap(vma);
86607 }
86608 }
86609 }
86610 vma_unlock_anon_vma(vma);
86611+ if (lockprev)
86612+ vma_unlock_anon_vma(prev);
86613 khugepaged_enter_vma_merge(vma);
86614 validate_mm(vma->vm_mm);
86615 return error;
86616@@ -2334,6 +2699,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
86617 do {
86618 long nrpages = vma_pages(vma);
86619
86620+#ifdef CONFIG_PAX_SEGMEXEC
86621+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
86622+ vma = remove_vma(vma);
86623+ continue;
86624+ }
86625+#endif
86626+
86627 if (vma->vm_flags & VM_ACCOUNT)
86628 nr_accounted += nrpages;
86629 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
86630@@ -2356,7 +2728,7 @@ static void unmap_region(struct mm_struct *mm,
86631 struct mmu_gather tlb;
86632
86633 lru_add_drain();
86634- tlb_gather_mmu(&tlb, mm, 0);
86635+ tlb_gather_mmu(&tlb, mm, start, end);
86636 update_hiwater_rss(mm);
86637 unmap_vmas(&tlb, vma, start, end);
86638 free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
86639@@ -2379,6 +2751,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
86640 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
86641 vma->vm_prev = NULL;
86642 do {
86643+
86644+#ifdef CONFIG_PAX_SEGMEXEC
86645+ if (vma->vm_mirror) {
86646+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
86647+ vma->vm_mirror->vm_mirror = NULL;
86648+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
86649+ vma->vm_mirror = NULL;
86650+ }
86651+#endif
86652+
86653 vma_rb_erase(vma, &mm->mm_rb);
86654 mm->map_count--;
86655 tail_vma = vma;
86656@@ -2410,14 +2792,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
86657 struct vm_area_struct *new;
86658 int err = -ENOMEM;
86659
86660+#ifdef CONFIG_PAX_SEGMEXEC
86661+ struct vm_area_struct *vma_m, *new_m = NULL;
86662+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
86663+#endif
86664+
86665 if (is_vm_hugetlb_page(vma) && (addr &
86666 ~(huge_page_mask(hstate_vma(vma)))))
86667 return -EINVAL;
86668
86669+#ifdef CONFIG_PAX_SEGMEXEC
86670+ vma_m = pax_find_mirror_vma(vma);
86671+#endif
86672+
86673 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
86674 if (!new)
86675 goto out_err;
86676
86677+#ifdef CONFIG_PAX_SEGMEXEC
86678+ if (vma_m) {
86679+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
86680+ if (!new_m) {
86681+ kmem_cache_free(vm_area_cachep, new);
86682+ goto out_err;
86683+ }
86684+ }
86685+#endif
86686+
86687 /* most fields are the same, copy all, and then fixup */
86688 *new = *vma;
86689
86690@@ -2430,6 +2831,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
86691 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
86692 }
86693
86694+#ifdef CONFIG_PAX_SEGMEXEC
86695+ if (vma_m) {
86696+ *new_m = *vma_m;
86697+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
86698+ new_m->vm_mirror = new;
86699+ new->vm_mirror = new_m;
86700+
86701+ if (new_below)
86702+ new_m->vm_end = addr_m;
86703+ else {
86704+ new_m->vm_start = addr_m;
86705+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
86706+ }
86707+ }
86708+#endif
86709+
86710 pol = mpol_dup(vma_policy(vma));
86711 if (IS_ERR(pol)) {
86712 err = PTR_ERR(pol);
86713@@ -2452,6 +2869,36 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
86714 else
86715 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
86716
86717+#ifdef CONFIG_PAX_SEGMEXEC
86718+ if (!err && vma_m) {
86719+ if (anon_vma_clone(new_m, vma_m))
86720+ goto out_free_mpol;
86721+
86722+ mpol_get(pol);
86723+ vma_set_policy(new_m, pol);
86724+
86725+ if (new_m->vm_file)
86726+ get_file(new_m->vm_file);
86727+
86728+ if (new_m->vm_ops && new_m->vm_ops->open)
86729+ new_m->vm_ops->open(new_m);
86730+
86731+ if (new_below)
86732+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
86733+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
86734+ else
86735+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
86736+
86737+ if (err) {
86738+ if (new_m->vm_ops && new_m->vm_ops->close)
86739+ new_m->vm_ops->close(new_m);
86740+ if (new_m->vm_file)
86741+ fput(new_m->vm_file);
86742+ mpol_put(pol);
86743+ }
86744+ }
86745+#endif
86746+
86747 /* Success. */
86748 if (!err)
86749 return 0;
86750@@ -2461,10 +2908,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
86751 new->vm_ops->close(new);
86752 if (new->vm_file)
86753 fput(new->vm_file);
86754- unlink_anon_vmas(new);
86755 out_free_mpol:
86756 mpol_put(pol);
86757 out_free_vma:
86758+
86759+#ifdef CONFIG_PAX_SEGMEXEC
86760+ if (new_m) {
86761+ unlink_anon_vmas(new_m);
86762+ kmem_cache_free(vm_area_cachep, new_m);
86763+ }
86764+#endif
86765+
86766+ unlink_anon_vmas(new);
86767 kmem_cache_free(vm_area_cachep, new);
86768 out_err:
86769 return err;
86770@@ -2477,6 +2932,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
86771 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
86772 unsigned long addr, int new_below)
86773 {
86774+
86775+#ifdef CONFIG_PAX_SEGMEXEC
86776+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
86777+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
86778+ if (mm->map_count >= sysctl_max_map_count-1)
86779+ return -ENOMEM;
86780+ } else
86781+#endif
86782+
86783 if (mm->map_count >= sysctl_max_map_count)
86784 return -ENOMEM;
86785
86786@@ -2488,11 +2952,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
86787 * work. This now handles partial unmappings.
86788 * Jeremy Fitzhardinge <jeremy@goop.org>
86789 */
86790+#ifdef CONFIG_PAX_SEGMEXEC
86791 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
86792 {
86793+ int ret = __do_munmap(mm, start, len);
86794+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
86795+ return ret;
86796+
86797+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
86798+}
86799+
86800+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
86801+#else
86802+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
86803+#endif
86804+{
86805 unsigned long end;
86806 struct vm_area_struct *vma, *prev, *last;
86807
86808+ /*
86809+ * mm->mmap_sem is required to protect against another thread
86810+ * changing the mappings in case we sleep.
86811+ */
86812+ verify_mm_writelocked(mm);
86813+
86814 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
86815 return -EINVAL;
86816
86817@@ -2567,6 +3050,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
86818 /* Fix up all other VM information */
86819 remove_vma_list(mm, vma);
86820
86821+ track_exec_limit(mm, start, end, 0UL);
86822+
86823 return 0;
86824 }
86825
86826@@ -2575,6 +3060,13 @@ int vm_munmap(unsigned long start, size_t len)
86827 int ret;
86828 struct mm_struct *mm = current->mm;
86829
86830+
86831+#ifdef CONFIG_PAX_SEGMEXEC
86832+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
86833+ (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
86834+ return -EINVAL;
86835+#endif
86836+
86837 down_write(&mm->mmap_sem);
86838 ret = do_munmap(mm, start, len);
86839 up_write(&mm->mmap_sem);
86840@@ -2588,16 +3080,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
86841 return vm_munmap(addr, len);
86842 }
86843
86844-static inline void verify_mm_writelocked(struct mm_struct *mm)
86845-{
86846-#ifdef CONFIG_DEBUG_VM
86847- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
86848- WARN_ON(1);
86849- up_read(&mm->mmap_sem);
86850- }
86851-#endif
86852-}
86853-
86854 /*
86855 * this is really a simplified "do_mmap". it only handles
86856 * anonymous maps. eventually we may be able to do some
86857@@ -2611,6 +3093,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
86858 struct rb_node ** rb_link, * rb_parent;
86859 pgoff_t pgoff = addr >> PAGE_SHIFT;
86860 int error;
86861+ unsigned long charged;
86862
86863 len = PAGE_ALIGN(len);
86864 if (!len)
86865@@ -2618,16 +3101,30 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
86866
86867 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
86868
86869+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
86870+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
86871+ flags &= ~VM_EXEC;
86872+
86873+#ifdef CONFIG_PAX_MPROTECT
86874+ if (mm->pax_flags & MF_PAX_MPROTECT)
86875+ flags &= ~VM_MAYEXEC;
86876+#endif
86877+
86878+ }
86879+#endif
86880+
86881 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
86882 if (error & ~PAGE_MASK)
86883 return error;
86884
86885+ charged = len >> PAGE_SHIFT;
86886+
86887 /*
86888 * mlock MCL_FUTURE?
86889 */
86890 if (mm->def_flags & VM_LOCKED) {
86891 unsigned long locked, lock_limit;
86892- locked = len >> PAGE_SHIFT;
86893+ locked = charged;
86894 locked += mm->locked_vm;
86895 lock_limit = rlimit(RLIMIT_MEMLOCK);
86896 lock_limit >>= PAGE_SHIFT;
86897@@ -2644,21 +3141,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
86898 /*
86899 * Clear old maps. this also does some error checking for us
86900 */
86901- munmap_back:
86902 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
86903 if (do_munmap(mm, addr, len))
86904 return -ENOMEM;
86905- goto munmap_back;
86906+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
86907 }
86908
86909 /* Check against address space limits *after* clearing old maps... */
86910- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
86911+ if (!may_expand_vm(mm, charged))
86912 return -ENOMEM;
86913
86914 if (mm->map_count > sysctl_max_map_count)
86915 return -ENOMEM;
86916
86917- if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
86918+ if (security_vm_enough_memory_mm(mm, charged))
86919 return -ENOMEM;
86920
86921 /* Can we just expand an old private anonymous mapping? */
86922@@ -2672,7 +3168,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
86923 */
86924 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
86925 if (!vma) {
86926- vm_unacct_memory(len >> PAGE_SHIFT);
86927+ vm_unacct_memory(charged);
86928 return -ENOMEM;
86929 }
86930
86931@@ -2686,9 +3182,10 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
86932 vma_link(mm, vma, prev, rb_link, rb_parent);
86933 out:
86934 perf_event_mmap(vma);
86935- mm->total_vm += len >> PAGE_SHIFT;
86936+ mm->total_vm += charged;
86937 if (flags & VM_LOCKED)
86938- mm->locked_vm += (len >> PAGE_SHIFT);
86939+ mm->locked_vm += charged;
86940+ track_exec_limit(mm, addr, addr + len, flags);
86941 return addr;
86942 }
86943
86944@@ -2735,7 +3232,7 @@ void exit_mmap(struct mm_struct *mm)
86945
86946 lru_add_drain();
86947 flush_cache_mm(mm);
86948- tlb_gather_mmu(&tlb, mm, 1);
86949+ tlb_gather_mmu(&tlb, mm, 0, -1);
86950 /* update_hiwater_rss(mm) here? but nobody should be looking */
86951 /* Use -1 here to ensure all VMAs in the mm are unmapped */
86952 unmap_vmas(&tlb, vma, 0, -1);
86953@@ -2750,6 +3247,7 @@ void exit_mmap(struct mm_struct *mm)
86954 while (vma) {
86955 if (vma->vm_flags & VM_ACCOUNT)
86956 nr_accounted += vma_pages(vma);
86957+ vma->vm_mirror = NULL;
86958 vma = remove_vma(vma);
86959 }
86960 vm_unacct_memory(nr_accounted);
86961@@ -2766,6 +3264,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
86962 struct vm_area_struct *prev;
86963 struct rb_node **rb_link, *rb_parent;
86964
86965+#ifdef CONFIG_PAX_SEGMEXEC
86966+ struct vm_area_struct *vma_m = NULL;
86967+#endif
86968+
86969+ if (security_mmap_addr(vma->vm_start))
86970+ return -EPERM;
86971+
86972 /*
86973 * The vm_pgoff of a purely anonymous vma should be irrelevant
86974 * until its first write fault, when page's anon_vma and index
86975@@ -2789,7 +3294,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
86976 security_vm_enough_memory_mm(mm, vma_pages(vma)))
86977 return -ENOMEM;
86978
86979+#ifdef CONFIG_PAX_SEGMEXEC
86980+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
86981+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
86982+ if (!vma_m)
86983+ return -ENOMEM;
86984+ }
86985+#endif
86986+
86987 vma_link(mm, vma, prev, rb_link, rb_parent);
86988+
86989+#ifdef CONFIG_PAX_SEGMEXEC
86990+ if (vma_m)
86991+ BUG_ON(pax_mirror_vma(vma_m, vma));
86992+#endif
86993+
86994 return 0;
86995 }
86996
86997@@ -2809,6 +3328,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
86998 struct mempolicy *pol;
86999 bool faulted_in_anon_vma = true;
87000
87001+ BUG_ON(vma->vm_mirror);
87002+
87003 /*
87004 * If anonymous vma has not yet been faulted, update new pgoff
87005 * to match new location, to increase its chance of merging.
87006@@ -2875,6 +3396,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
87007 return NULL;
87008 }
87009
87010+#ifdef CONFIG_PAX_SEGMEXEC
87011+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
87012+{
87013+ struct vm_area_struct *prev_m;
87014+ struct rb_node **rb_link_m, *rb_parent_m;
87015+ struct mempolicy *pol_m;
87016+
87017+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
87018+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
87019+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
87020+ *vma_m = *vma;
87021+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
87022+ if (anon_vma_clone(vma_m, vma))
87023+ return -ENOMEM;
87024+ pol_m = vma_policy(vma_m);
87025+ mpol_get(pol_m);
87026+ vma_set_policy(vma_m, pol_m);
87027+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
87028+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
87029+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
87030+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
87031+ if (vma_m->vm_file)
87032+ get_file(vma_m->vm_file);
87033+ if (vma_m->vm_ops && vma_m->vm_ops->open)
87034+ vma_m->vm_ops->open(vma_m);
87035+ BUG_ON(find_vma_links(vma->vm_mm, vma_m->vm_start, vma_m->vm_end, &prev_m, &rb_link_m, &rb_parent_m));
87036+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
87037+ vma_m->vm_mirror = vma;
87038+ vma->vm_mirror = vma_m;
87039+ return 0;
87040+}
87041+#endif
87042+
87043 /*
87044 * Return true if the calling process may expand its vm space by the passed
87045 * number of pages
87046@@ -2886,6 +3440,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
87047
87048 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
87049
87050+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
87051 if (cur + npages > lim)
87052 return 0;
87053 return 1;
87054@@ -2956,6 +3511,22 @@ int install_special_mapping(struct mm_struct *mm,
87055 vma->vm_start = addr;
87056 vma->vm_end = addr + len;
87057
87058+#ifdef CONFIG_PAX_MPROTECT
87059+ if (mm->pax_flags & MF_PAX_MPROTECT) {
87060+#ifndef CONFIG_PAX_MPROTECT_COMPAT
87061+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
87062+ return -EPERM;
87063+ if (!(vm_flags & VM_EXEC))
87064+ vm_flags &= ~VM_MAYEXEC;
87065+#else
87066+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
87067+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
87068+#endif
87069+ else
87070+ vm_flags &= ~VM_MAYWRITE;
87071+ }
87072+#endif
87073+
87074 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
87075 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
87076
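
pax_find_mirror_vma() earlier in this file asserts the pairing invariants
between a vma and its SEGMEXEC twin via BUG_ON(). Restated as a plain checker
over a stripped-down structure (a sketch; the field subset loosely mirrors
the kernel's vm_area_struct):

#include <stdbool.h>
#include <stdio.h>

struct vma {
    unsigned long vm_start, vm_end, vm_pgoff;
    struct vma *vm_mirror;
};

static bool mirror_consistent(const struct vma *v)
{
    const struct vma *m = v->vm_mirror;

    return m && m->vm_mirror == v &&
           v->vm_end - v->vm_start == m->vm_end - m->vm_start &&
           v->vm_pgoff == m->vm_pgoff;
}

int main(void)
{
    struct vma a = { 0x1000, 0x3000, 0, NULL };
    struct vma b = { 0x60001000, 0x60003000, 0, &a };

    a.vm_mirror = &b;
    printf("consistent: %d\n", mirror_consistent(&a)); /* 1 */
    return 0;
}
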
87077diff --git a/mm/mprotect.c b/mm/mprotect.c
87078index 94722a4..e661e29 100644
87079--- a/mm/mprotect.c
87080+++ b/mm/mprotect.c
87081@@ -23,10 +23,18 @@
87082 #include <linux/mmu_notifier.h>
87083 #include <linux/migrate.h>
87084 #include <linux/perf_event.h>
87085+#include <linux/sched/sysctl.h>
87086+
87087+#ifdef CONFIG_PAX_MPROTECT
87088+#include <linux/elf.h>
87089+#include <linux/binfmts.h>
87090+#endif
87091+
87092 #include <asm/uaccess.h>
87093 #include <asm/pgtable.h>
87094 #include <asm/cacheflush.h>
87095 #include <asm/tlbflush.h>
87096+#include <asm/mmu_context.h>
87097
87098 #ifndef pgprot_modify
87099 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
87100@@ -233,6 +241,48 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
87101 return pages;
87102 }
87103
87104+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
87105+/* called while holding the mmap semaphore for writing, except during stack expansion */

87106+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
87107+{
87108+ unsigned long oldlimit, newlimit = 0UL;
87109+
87110+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
87111+ return;
87112+
87113+ spin_lock(&mm->page_table_lock);
87114+ oldlimit = mm->context.user_cs_limit;
87115+ if ((prot & VM_EXEC) && oldlimit < end)
87116+ /* USER_CS limit moved up */
87117+ newlimit = end;
87118+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
87119+ /* USER_CS limit moved down */
87120+ newlimit = start;
87121+
87122+ if (newlimit) {
87123+ mm->context.user_cs_limit = newlimit;
87124+
87125+#ifdef CONFIG_SMP
87126+ wmb();
87127+ cpus_clear(mm->context.cpu_user_cs_mask);
87128+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
87129+#endif
87130+
87131+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
87132+ }
87133+ spin_unlock(&mm->page_table_lock);
87134+ if (newlimit == end) {
87135+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
87136+
87137+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
87138+ if (is_vm_hugetlb_page(vma))
87139+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
87140+ else
87141+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma), 0);
87142+ }
87143+}
87144+#endif
87145+
87146 int
87147 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
87148 unsigned long start, unsigned long end, unsigned long newflags)
87149@@ -245,11 +295,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
87150 int error;
87151 int dirty_accountable = 0;
87152
87153+#ifdef CONFIG_PAX_SEGMEXEC
87154+ struct vm_area_struct *vma_m = NULL;
87155+ unsigned long start_m, end_m;
87156+
87157+ start_m = start + SEGMEXEC_TASK_SIZE;
87158+ end_m = end + SEGMEXEC_TASK_SIZE;
87159+#endif
87160+
87161 if (newflags == oldflags) {
87162 *pprev = vma;
87163 return 0;
87164 }
87165
87166+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
87167+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
87168+
87169+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
87170+ return -ENOMEM;
87171+
87172+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
87173+ return -ENOMEM;
87174+ }
87175+
87176 /*
87177 * If we make a private mapping writable we increase our commit;
87178 * but (without finer accounting) cannot reduce our commit if we
87179@@ -266,6 +334,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
87180 }
87181 }
87182
87183+#ifdef CONFIG_PAX_SEGMEXEC
87184+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
87185+ if (start != vma->vm_start) {
87186+ error = split_vma(mm, vma, start, 1);
87187+ if (error)
87188+ goto fail;
87189+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
87190+ *pprev = (*pprev)->vm_next;
87191+ }
87192+
87193+ if (end != vma->vm_end) {
87194+ error = split_vma(mm, vma, end, 0);
87195+ if (error)
87196+ goto fail;
87197+ }
87198+
87199+ if (pax_find_mirror_vma(vma)) {
87200+ error = __do_munmap(mm, start_m, end_m - start_m);
87201+ if (error)
87202+ goto fail;
87203+ } else {
87204+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
87205+ if (!vma_m) {
87206+ error = -ENOMEM;
87207+ goto fail;
87208+ }
87209+ vma->vm_flags = newflags;
87210+ error = pax_mirror_vma(vma_m, vma);
87211+ if (error) {
87212+ vma->vm_flags = oldflags;
87213+ goto fail;
87214+ }
87215+ }
87216+ }
87217+#endif
87218+
87219 /*
87220 * First try to merge with previous and/or next vma.
87221 */
87222@@ -296,9 +400,21 @@ success:
87223 * vm_flags and vm_page_prot are protected by the mmap_sem
87224 * held in write mode.
87225 */
87226+
87227+#ifdef CONFIG_PAX_SEGMEXEC
87228+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
87229+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
87230+#endif
87231+
87232 vma->vm_flags = newflags;
87233+
87234+#ifdef CONFIG_PAX_MPROTECT
87235+ if (mm->binfmt && mm->binfmt->handle_mprotect)
87236+ mm->binfmt->handle_mprotect(vma, newflags);
87237+#endif
87238+
87239 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
87240- vm_get_page_prot(newflags));
87241+ vm_get_page_prot(vma->vm_flags));
87242
87243 if (vma_wants_writenotify(vma)) {
87244 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
87245@@ -337,6 +453,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
87246 end = start + len;
87247 if (end <= start)
87248 return -ENOMEM;
87249+
87250+#ifdef CONFIG_PAX_SEGMEXEC
87251+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
87252+ if (end > SEGMEXEC_TASK_SIZE)
87253+ return -EINVAL;
87254+ } else
87255+#endif
87256+
87257+ if (end > TASK_SIZE)
87258+ return -EINVAL;
87259+
87260 if (!arch_validate_prot(prot))
87261 return -EINVAL;
87262
87263@@ -344,7 +471,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
87264 /*
87265 * Does the application expect PROT_READ to imply PROT_EXEC:
87266 */
87267- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
87268+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
87269 prot |= PROT_EXEC;
87270
87271 vm_flags = calc_vm_prot_bits(prot);
87272@@ -376,6 +503,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
87273 if (start > vma->vm_start)
87274 prev = vma;
87275
87276+#ifdef CONFIG_PAX_MPROTECT
87277+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
87278+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
87279+#endif
87280+
87281 for (nstart = start ; ; ) {
87282 unsigned long newflags;
87283
87284@@ -386,6 +518,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
87285
87286 /* newflags >> 4 shift VM_MAY% in place of VM_% */
87287 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
87288+ if (prot & (PROT_WRITE | PROT_EXEC))
87289+ gr_log_rwxmprotect(vma);
87290+
87291+ error = -EACCES;
87292+ goto out;
87293+ }
87294+
87295+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
87296 error = -EACCES;
87297 goto out;
87298 }
87299@@ -400,6 +540,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
87300 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
87301 if (error)
87302 goto out;
87303+
87304+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
87305+
87306 nstart = tmp;
87307
87308 if (nstart < prev->vm_end)
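
Taken together, the mprotect() changes above close the usual W^X bypass:
because mmap() already stripped VM_MAYEXEC from non-executable mappings, the
`newflags & ~(newflags >> 4)` test rejects a later attempt to add PROT_EXEC
with EACCES (and logs RWX requests). A userspace probe, a sketch whose
outcome depends on the task's PaX flags:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
    void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    if (p == MAP_FAILED)
        return 1;
    if (mprotect(p, 4096, PROT_READ | PROT_EXEC) < 0)
        printf("w -> x flip refused: %s\n", strerror(errno));
    else
        printf("w -> x flip allowed (MPROTECT not active?)\n");
    munmap(p, 4096);
    return 0;
}
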
87309diff --git a/mm/mremap.c b/mm/mremap.c
87310index 463a257..c0c7a92 100644
87311--- a/mm/mremap.c
87312+++ b/mm/mremap.c
87313@@ -126,6 +126,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
87314 continue;
87315 pte = ptep_get_and_clear(mm, old_addr, old_pte);
87316 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
87317+
87318+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
87319+ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
87320+ pte = pte_exprotect(pte);
87321+#endif
87322+
87323 set_pte_at(mm, new_addr, new_pte, pte);
87324 }
87325
87326@@ -318,6 +324,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
87327 if (is_vm_hugetlb_page(vma))
87328 goto Einval;
87329
87330+#ifdef CONFIG_PAX_SEGMEXEC
87331+ if (pax_find_mirror_vma(vma))
87332+ goto Einval;
87333+#endif
87334+
87335 /* We can't remap across vm area boundaries */
87336 if (old_len > vma->vm_end - addr)
87337 goto Efault;
87338@@ -373,20 +384,25 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
87339 unsigned long ret = -EINVAL;
87340 unsigned long charged = 0;
87341 unsigned long map_flags;
87342+ unsigned long pax_task_size = TASK_SIZE;
87343
87344 if (new_addr & ~PAGE_MASK)
87345 goto out;
87346
87347- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
87348+#ifdef CONFIG_PAX_SEGMEXEC
87349+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
87350+ pax_task_size = SEGMEXEC_TASK_SIZE;
87351+#endif
87352+
87353+ pax_task_size -= PAGE_SIZE;
87354+
87355+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
87356 goto out;
87357
87358 /* Check if the location we're moving into overlaps the
87359 * old location at all, and fail if it does.
87360 */
87361- if ((new_addr <= addr) && (new_addr+new_len) > addr)
87362- goto out;
87363-
87364- if ((addr <= new_addr) && (addr+old_len) > new_addr)
87365+ if (addr + old_len > new_addr && new_addr + new_len > addr)
87366 goto out;
87367
87368 ret = do_munmap(mm, new_addr, new_len);
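
The collapsed condition above is the standard interval-intersection test; it
accepts and rejects exactly the same (addr, old_len, new_addr, new_len)
combinations as the two deleted branches. A brute-force equivalence check
over a small domain (a sketch):

#include <stdio.h>

int main(void)
{
    unsigned a, o, n, l, bad = 0;

    for (a = 0; a < 8; a++)
        for (o = 1; o < 8; o++)
            for (n = 0; n < 8; n++)
                for (l = 1; l < 8; l++) {
                    int two_branch = (n <= a && n + l > a) ||
                                     (a <= n && a + o > n);
                    int one_branch = (a + o > n) && (n + l > a);

                    bad += two_branch != one_branch;
                }
    printf("mismatches: %u\n", bad); /* 0 */
    return 0;
}
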
87369@@ -455,6 +471,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
87370 unsigned long ret = -EINVAL;
87371 unsigned long charged = 0;
87372 bool locked = false;
87373+ unsigned long pax_task_size = TASK_SIZE;
87374
87375 down_write(&current->mm->mmap_sem);
87376
87377@@ -475,6 +492,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
87378 if (!new_len)
87379 goto out;
87380
87381+#ifdef CONFIG_PAX_SEGMEXEC
87382+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
87383+ pax_task_size = SEGMEXEC_TASK_SIZE;
87384+#endif
87385+
87386+ pax_task_size -= PAGE_SIZE;
87387+
87388+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
87389+ old_len > pax_task_size || addr > pax_task_size-old_len)
87390+ goto out;
87391+
87392 if (flags & MREMAP_FIXED) {
87393 if (flags & MREMAP_MAYMOVE)
87394 ret = mremap_to(addr, old_len, new_addr, new_len,
87395@@ -524,6 +552,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
87396 new_addr = addr;
87397 }
87398 ret = addr;
87399+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
87400 goto out;
87401 }
87402 }
87403@@ -547,7 +576,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
87404 goto out;
87405 }
87406
87407+ map_flags = vma->vm_flags;
87408 ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
87409+ if (!(ret & ~PAGE_MASK)) {
87410+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
87411+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
87412+ }
87413 }
87414 out:
87415 if (ret & ~PAGE_MASK)
87416diff --git a/mm/nommu.c b/mm/nommu.c
87417index 298884d..5f74980 100644
87418--- a/mm/nommu.c
87419+++ b/mm/nommu.c
87420@@ -65,7 +65,6 @@ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
87421 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
87422 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
87423 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
87424-int heap_stack_gap = 0;
87425
87426 atomic_long_t mmap_pages_allocated;
87427
87428@@ -842,15 +841,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
87429 EXPORT_SYMBOL(find_vma);
87430
87431 /*
87432- * find a VMA
87433- * - we don't extend stack VMAs under NOMMU conditions
87434- */
87435-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
87436-{
87437- return find_vma(mm, addr);
87438-}
87439-
87440-/*
87441 * expand a stack to a given address
87442 * - not supported under NOMMU conditions
87443 */
87444@@ -1561,6 +1551,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
87445
87446 /* most fields are the same, copy all, and then fixup */
87447 *new = *vma;
87448+ INIT_LIST_HEAD(&new->anon_vma_chain);
87449 *region = *vma->vm_region;
87450 new->vm_region = region;
87451
87452@@ -1995,8 +1986,8 @@ int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
87453 }
87454 EXPORT_SYMBOL(generic_file_remap_pages);
87455
87456-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
87457- unsigned long addr, void *buf, int len, int write)
87458+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
87459+ unsigned long addr, void *buf, size_t len, int write)
87460 {
87461 struct vm_area_struct *vma;
87462
87463@@ -2037,8 +2028,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
87464 *
87465 * The caller must hold a reference on @mm.
87466 */
87467-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
87468- void *buf, int len, int write)
87469+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
87470+ void *buf, size_t len, int write)
87471 {
87472 return __access_remote_vm(NULL, mm, addr, buf, len, write);
87473 }
87474@@ -2047,7 +2038,7 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
87475 * Access another process' address space.
87476 * - source/target buffer must be kernel space
87477 */
87478-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
87479+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write)
87480 {
87481 struct mm_struct *mm;
87482
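
The int -> size_t/ssize_t widening in the nommu access helpers above matters
on LP64: a length that does not fit in int would otherwise truncate or turn
negative before any bounds check runs. A sketch of the failure mode being
removed:

#include <stdio.h>

int main(void)
{
    size_t len = 0x100000000UL; /* 4 GiB request */
    int truncated = (int)len;   /* becomes 0 on common LP64 ABIs */

    printf("requested %zu bytes, int sees %d\n", len, truncated);
    return 0;
}
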
87483diff --git a/mm/page-writeback.c b/mm/page-writeback.c
87484index 4514ad7..92eaa1c 100644
87485--- a/mm/page-writeback.c
87486+++ b/mm/page-writeback.c
87487@@ -659,7 +659,7 @@ unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty)
87488 * card's bdi_dirty may rush to many times higher than bdi_setpoint.
87489 * - the bdi dirty thresh drops quickly due to change of JBOD workload
87490 */
87491-static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
87492+static unsigned long __intentional_overflow(-1) bdi_position_ratio(struct backing_dev_info *bdi,
87493 unsigned long thresh,
87494 unsigned long bg_thresh,
87495 unsigned long dirty,
87496@@ -1634,7 +1634,7 @@ ratelimit_handler(struct notifier_block *self, unsigned long action,
87497 }
87498 }
87499
87500-static struct notifier_block __cpuinitdata ratelimit_nb = {
87501+static struct notifier_block ratelimit_nb = {
87502 .notifier_call = ratelimit_handler,
87503 .next = NULL,
87504 };
87505diff --git a/mm/page_alloc.c b/mm/page_alloc.c
87506index 2ee0fd3..6e2edfb 100644
87507--- a/mm/page_alloc.c
87508+++ b/mm/page_alloc.c
87509@@ -60,6 +60,7 @@
87510 #include <linux/page-debug-flags.h>
87511 #include <linux/hugetlb.h>
87512 #include <linux/sched/rt.h>
87513+#include <linux/random.h>
87514
87515 #include <asm/tlbflush.h>
87516 #include <asm/div64.h>
87517@@ -345,7 +346,7 @@ out:
87518 * This usage means that zero-order pages may not be compound.
87519 */
87520
87521-static void free_compound_page(struct page *page)
87522+void free_compound_page(struct page *page)
87523 {
87524 __free_pages_ok(page, compound_order(page));
87525 }
87526@@ -702,6 +703,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
87527 int i;
87528 int bad = 0;
87529
87530+#ifdef CONFIG_PAX_MEMORY_SANITIZE
87531+ unsigned long index = 1UL << order;
87532+#endif
87533+
87534 trace_mm_page_free(page, order);
87535 kmemcheck_free_shadow(page, order);
87536
87537@@ -717,6 +722,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
87538 debug_check_no_obj_freed(page_address(page),
87539 PAGE_SIZE << order);
87540 }
87541+
87542+#ifdef CONFIG_PAX_MEMORY_SANITIZE
87543+ for (; index; --index)
87544+ sanitize_highpage(page + index - 1);
87545+#endif
87546+
87547 arch_free_page(page, order);
87548 kernel_map_pages(page, 1 << order, 0);
87549
87550@@ -739,6 +750,19 @@ static void __free_pages_ok(struct page *page, unsigned int order)
87551 local_irq_restore(flags);
87552 }
87553
87554+#ifdef CONFIG_PAX_LATENT_ENTROPY
87555+bool __meminitdata extra_latent_entropy;
87556+
87557+static int __init setup_pax_extra_latent_entropy(char *str)
87558+{
87559+ extra_latent_entropy = true;
87560+ return 0;
87561+}
87562+early_param("pax_extra_latent_entropy", setup_pax_extra_latent_entropy);
87563+
87564+volatile u64 latent_entropy;
87565+#endif
87566+
87567 /*
87568 * Read access to zone->managed_pages is safe because it's unsigned long,
87569 * but we still need to serialize writers. Currently all callers of
87570@@ -761,6 +785,19 @@ void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
87571 set_page_count(p, 0);
87572 }
87573
87574+#ifdef CONFIG_PAX_LATENT_ENTROPY
87575+ if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) {
87576+ u64 hash = 0;
87577+ size_t index, end = PAGE_SIZE * nr_pages / sizeof hash;
87578+ const u64 *data = lowmem_page_address(page);
87579+
87580+ for (index = 0; index < end; index++)
87581+ hash ^= hash + data[index];
87582+ latent_entropy ^= hash;
87583+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
87584+ }
87585+#endif
87586+
87587 page_zone(page)->managed_pages += 1 << order;
87588 set_page_refcounted(page);
87589 __free_pages(page, order);
87590@@ -870,8 +907,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
87591 arch_alloc_page(page, order);
87592 kernel_map_pages(page, 1 << order, 1);
87593
87594+#ifndef CONFIG_PAX_MEMORY_SANITIZE
87595 if (gfp_flags & __GFP_ZERO)
87596 prep_zero_page(page, order, gfp_flags);
87597+#endif
87598
87599 if (order && (gfp_flags & __GFP_COMP))
87600 prep_compound_page(page, order);
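
The CONFIG_PAX_LATENT_ENTROPY hunk above folds the contents of early boot-freed low pages into a running accumulator via "hash ^= hash + data[index]" and feeds the result to add_device_randomness(). A standalone sketch of just the mixing step, with illustrative names:

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    static uint64_t latent_entropy;   /* models the per-boot accumulator */

    /* Fold one page-sized buffer into the accumulator, mirroring the
     * mixing loop added to __free_pages_bootmem(). */
    static void mix_page(const uint64_t *data, size_t nwords)
    {
        uint64_t hash = 0;
        size_t i;

        for (i = 0; i < nwords; i++)
            hash ^= hash + data[i];   /* cheap, non-cryptographic mixing */
        latent_entropy ^= hash;
    }

    int main(void)
    {
        uint64_t page[4096 / sizeof(uint64_t)] = { 0xdeadbeef };

        mix_page(page, sizeof(page) / sizeof(page[0]));
        printf("accumulated: %016llx\n", (unsigned long long)latent_entropy);
        return 0;
    }
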
87601diff --git a/mm/page_io.c b/mm/page_io.c
87602index a8a3ef4..7260a60 100644
87603--- a/mm/page_io.c
87604+++ b/mm/page_io.c
87605@@ -214,7 +214,7 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
87606 struct file *swap_file = sis->swap_file;
87607 struct address_space *mapping = swap_file->f_mapping;
87608 struct iovec iov = {
87609- .iov_base = kmap(page),
87610+ .iov_base = (void __force_user *)kmap(page),
87611 .iov_len = PAGE_SIZE,
87612 };
87613
87614diff --git a/mm/percpu.c b/mm/percpu.c
87615index 8c8e08f..73a5cda 100644
87616--- a/mm/percpu.c
87617+++ b/mm/percpu.c
87618@@ -122,7 +122,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
87619 static unsigned int pcpu_high_unit_cpu __read_mostly;
87620
87621 /* the address of the first chunk which starts with the kernel static area */
87622-void *pcpu_base_addr __read_mostly;
87623+void *pcpu_base_addr __read_only;
87624 EXPORT_SYMBOL_GPL(pcpu_base_addr);
87625
87626 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
87627diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
87628index fd26d04..0cea1b0 100644
87629--- a/mm/process_vm_access.c
87630+++ b/mm/process_vm_access.c
87631@@ -13,6 +13,7 @@
87632 #include <linux/uio.h>
87633 #include <linux/sched.h>
87634 #include <linux/highmem.h>
87635+#include <linux/security.h>
87636 #include <linux/ptrace.h>
87637 #include <linux/slab.h>
87638 #include <linux/syscalls.h>
87639@@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
87640 size_t iov_l_curr_offset = 0;
87641 ssize_t iov_len;
87642
87643+ return -ENOSYS; // PaX: until properly audited
87644+
87645 /*
87646 * Work out how many pages of struct pages we're going to need
87647 * when eventually calling get_user_pages
87648 */
87649 for (i = 0; i < riovcnt; i++) {
87650 iov_len = rvec[i].iov_len;
87651- if (iov_len > 0) {
87652- nr_pages_iov = ((unsigned long)rvec[i].iov_base
87653- + iov_len)
87654- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
87655- / PAGE_SIZE + 1;
87656- nr_pages = max(nr_pages, nr_pages_iov);
87657- }
87658+ if (iov_len <= 0)
87659+ continue;
87660+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
87661+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
87662+ nr_pages = max(nr_pages, nr_pages_iov);
87663 }
87664
87665 if (nr_pages == 0)
87666@@ -298,6 +299,11 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
87667 goto free_proc_pages;
87668 }
87669
87670+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
87671+ rc = -EPERM;
87672+ goto put_task_struct;
87673+ }
87674+
87675 mm = mm_access(task, PTRACE_MODE_ATTACH);
87676 if (!mm || IS_ERR(mm)) {
87677 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
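
The restructured loop above computes an upper bound on the pages spanned by each iovec entry: last page index minus first page index plus one, using base + len (one past the end), so it can overshoot by one page when a buffer ends exactly on a page boundary. The arithmetic checked in isolation (PAGE_SIZE fixed at 4096 for the sketch):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    /* Upper bound on pages spanned by [base, base + len), as computed in
     * process_vm_rw_core() above. */
    static unsigned long pages_spanned(unsigned long base, unsigned long len)
    {
        return (base + len) / PAGE_SIZE - base / PAGE_SIZE + 1;
    }

    int main(void)
    {
        printf("%lu\n", pages_spanned(100, 1));     /* 1: inside one page */
        printf("%lu\n", pages_spanned(4000, 200));  /* 2: straddles a boundary */
        printf("%lu\n", pages_spanned(0, 4096));    /* 2: overshoots by one */
        return 0;
    }
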
87678diff --git a/mm/rmap.c b/mm/rmap.c
87679index 6280da8..b5c090e 100644
87680--- a/mm/rmap.c
87681+++ b/mm/rmap.c
87682@@ -163,6 +163,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
87683 struct anon_vma *anon_vma = vma->anon_vma;
87684 struct anon_vma_chain *avc;
87685
87686+#ifdef CONFIG_PAX_SEGMEXEC
87687+ struct anon_vma_chain *avc_m = NULL;
87688+#endif
87689+
87690 might_sleep();
87691 if (unlikely(!anon_vma)) {
87692 struct mm_struct *mm = vma->vm_mm;
87693@@ -172,6 +176,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
87694 if (!avc)
87695 goto out_enomem;
87696
87697+#ifdef CONFIG_PAX_SEGMEXEC
87698+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
87699+ if (!avc_m)
87700+ goto out_enomem_free_avc;
87701+#endif
87702+
87703 anon_vma = find_mergeable_anon_vma(vma);
87704 allocated = NULL;
87705 if (!anon_vma) {
87706@@ -185,6 +195,18 @@ int anon_vma_prepare(struct vm_area_struct *vma)
87707 /* page_table_lock to protect against threads */
87708 spin_lock(&mm->page_table_lock);
87709 if (likely(!vma->anon_vma)) {
87710+
87711+#ifdef CONFIG_PAX_SEGMEXEC
87712+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
87713+
87714+ if (vma_m) {
87715+ BUG_ON(vma_m->anon_vma);
87716+ vma_m->anon_vma = anon_vma;
87717+ anon_vma_chain_link(vma_m, avc_m, anon_vma);
87718+ avc_m = NULL;
87719+ }
87720+#endif
87721+
87722 vma->anon_vma = anon_vma;
87723 anon_vma_chain_link(vma, avc, anon_vma);
87724 allocated = NULL;
87725@@ -195,12 +217,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
87726
87727 if (unlikely(allocated))
87728 put_anon_vma(allocated);
87729+
87730+#ifdef CONFIG_PAX_SEGMEXEC
87731+ if (unlikely(avc_m))
87732+ anon_vma_chain_free(avc_m);
87733+#endif
87734+
87735 if (unlikely(avc))
87736 anon_vma_chain_free(avc);
87737 }
87738 return 0;
87739
87740 out_enomem_free_avc:
87741+
87742+#ifdef CONFIG_PAX_SEGMEXEC
87743+ if (avc_m)
87744+ anon_vma_chain_free(avc_m);
87745+#endif
87746+
87747 anon_vma_chain_free(avc);
87748 out_enomem:
87749 return -ENOMEM;
87750@@ -236,7 +270,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
87751 * Attach the anon_vmas from src to dst.
87752 * Returns 0 on success, -ENOMEM on failure.
87753 */
87754-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
87755+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
87756 {
87757 struct anon_vma_chain *avc, *pavc;
87758 struct anon_vma *root = NULL;
87759@@ -269,7 +303,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
87760 * the corresponding VMA in the parent process is attached to.
87761 * Returns 0 on success, non-zero on failure.
87762 */
87763-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
87764+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
87765 {
87766 struct anon_vma_chain *avc;
87767 struct anon_vma *anon_vma;
87768@@ -373,8 +407,10 @@ static void anon_vma_ctor(void *data)
87769 void __init anon_vma_init(void)
87770 {
87771 anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
87772- 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
87773- anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
87774+ 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_NO_SANITIZE,
87775+ anon_vma_ctor);
87776+ anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
87777+ SLAB_PANIC|SLAB_NO_SANITIZE);
87778 }
87779
87780 /*
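
The SEGMEXEC additions to anon_vma_prepare() follow one rollback pattern throughout: the mirror vma's chain (avc_m) is allocated before any lock is taken, ownership transfer is recorded by NULLing the local pointer, and every exit path frees whatever the function still holds. A toy userspace model of that shape (the allocator and list are stand-ins, not kernel APIs):

    #include <stdio.h>
    #include <stdlib.h>

    struct chain { int dummy; };

    static struct chain *vma_list;   /* stand-in for the anon_vma chain list */

    static int prepare(int have_mirror)
    {
        struct chain *avc_m = malloc(sizeof(*avc_m));

        if (!avc_m)
            return -1;              /* the patch's out_enomem_free_avc path */

        if (have_mirror) {
            vma_list = avc_m;       /* anon_vma_chain_link() in the patch */
            avc_m = NULL;           /* ownership transferred */
        }

        free(avc_m);                /* no-op when ownership was transferred */
        free(vma_list);
        vma_list = NULL;
        return 0;
    }

    int main(void)
    {
        printf("%d\n", prepare(1));
        printf("%d\n", prepare(0));
        return 0;
    }
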
87781diff --git a/mm/shmem.c b/mm/shmem.c
87782index 5e6a842..b41916e 100644
87783--- a/mm/shmem.c
87784+++ b/mm/shmem.c
87785@@ -33,7 +33,7 @@
87786 #include <linux/swap.h>
87787 #include <linux/aio.h>
87788
87789-static struct vfsmount *shm_mnt;
87790+struct vfsmount *shm_mnt;
87791
87792 #ifdef CONFIG_SHMEM
87793 /*
87794@@ -77,7 +77,7 @@ static struct vfsmount *shm_mnt;
87795 #define BOGO_DIRENT_SIZE 20
87796
87797 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
87798-#define SHORT_SYMLINK_LEN 128
87799+#define SHORT_SYMLINK_LEN 64
87800
87801 /*
87802 * shmem_fallocate and shmem_writepage communicate via inode->i_private
87803@@ -2203,6 +2203,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
87804 static int shmem_xattr_validate(const char *name)
87805 {
87806 struct { const char *prefix; size_t len; } arr[] = {
87807+
87808+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
87809+ { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
87810+#endif
87811+
87812 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
87813 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
87814 };
87815@@ -2258,6 +2263,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
87816 if (err)
87817 return err;
87818
87819+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
87820+ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
87821+ if (strcmp(name, XATTR_NAME_PAX_FLAGS))
87822+ return -EOPNOTSUPP;
87823+ if (size > 8)
87824+ return -EINVAL;
87825+ }
87826+#endif
87827+
87828 return simple_xattr_set(&info->xattrs, name, value, size, flags);
87829 }
87830
87831@@ -2570,8 +2584,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
87832 int err = -ENOMEM;
87833
87834 /* Round up to L1_CACHE_BYTES to resist false sharing */
87835- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
87836- L1_CACHE_BYTES), GFP_KERNEL);
87837+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
87838 if (!sbinfo)
87839 return -ENOMEM;
87840
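
The shmem_setxattr() gate above admits exactly one user.* attribute, user.pax.flags, and caps its value at 8 bytes. The check in isolation, with the prefix macros spelled out for the sketch:

    #include <stdio.h>
    #include <string.h>
    #include <errno.h>

    #define XATTR_USER_PREFIX     "user."
    #define XATTR_USER_PREFIX_LEN (sizeof(XATTR_USER_PREFIX) - 1)
    #define XATTR_NAME_PAX_FLAGS  "user.pax.flags"

    /* Mirrors the CONFIG_PAX_XATTR_PAX_FLAGS gate in shmem_setxattr(). */
    static int pax_xattr_gate(const char *name, size_t size)
    {
        if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
            if (strcmp(name, XATTR_NAME_PAX_FLAGS))
                return -EOPNOTSUPP;   /* only user.pax.flags is allowed */
            if (size > 8)
                return -EINVAL;       /* flags value is at most 8 bytes */
        }
        return 0;
    }

    int main(void)
    {
        printf("%d\n", pax_xattr_gate("user.pax.flags", 4));  /* 0 */
        printf("%d\n", pax_xattr_gate("user.mime_type", 9));  /* -EOPNOTSUPP */
        printf("%d\n", pax_xattr_gate("user.pax.flags", 16)); /* -EINVAL */
        return 0;
    }
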
87841diff --git a/mm/slab.c b/mm/slab.c
87842index bd88411..2d46fd6 100644
87843--- a/mm/slab.c
87844+++ b/mm/slab.c
87845@@ -366,10 +366,12 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
87846 if ((x)->max_freeable < i) \
87847 (x)->max_freeable = i; \
87848 } while (0)
87849-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
87850-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
87851-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
87852-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
87853+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
87854+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
87855+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
87856+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
87857+#define STATS_INC_SANITIZED(x) atomic_inc_unchecked(&(x)->sanitized)
87858+#define STATS_INC_NOT_SANITIZED(x) atomic_inc_unchecked(&(x)->not_sanitized)
87859 #else
87860 #define STATS_INC_ACTIVE(x) do { } while (0)
87861 #define STATS_DEC_ACTIVE(x) do { } while (0)
87862@@ -386,6 +388,8 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
87863 #define STATS_INC_ALLOCMISS(x) do { } while (0)
87864 #define STATS_INC_FREEHIT(x) do { } while (0)
87865 #define STATS_INC_FREEMISS(x) do { } while (0)
87866+#define STATS_INC_SANITIZED(x) do { } while (0)
87867+#define STATS_INC_NOT_SANITIZED(x) do { } while (0)
87868 #endif
87869
87870 #if DEBUG
87871@@ -477,7 +481,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
87872 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
87873 */
87874 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
87875- const struct slab *slab, void *obj)
87876+ const struct slab *slab, const void *obj)
87877 {
87878 u32 offset = (obj - slab->s_mem);
87879 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
87880@@ -1384,7 +1388,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
87881 return notifier_from_errno(err);
87882 }
87883
87884-static struct notifier_block __cpuinitdata cpucache_notifier = {
87885+static struct notifier_block cpucache_notifier = {
87886 &cpuup_callback, NULL, 0
87887 };
87888
87889@@ -1565,12 +1569,12 @@ void __init kmem_cache_init(void)
87890 */
87891
87892 kmalloc_caches[INDEX_AC] = create_kmalloc_cache("kmalloc-ac",
87893- kmalloc_size(INDEX_AC), ARCH_KMALLOC_FLAGS);
87894+ kmalloc_size(INDEX_AC), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
87895
87896 if (INDEX_AC != INDEX_NODE)
87897 kmalloc_caches[INDEX_NODE] =
87898 create_kmalloc_cache("kmalloc-node",
87899- kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
87900+ kmalloc_size(INDEX_NODE), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
87901
87902 slab_early_init = 0;
87903
87904@@ -3583,6 +3587,21 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
87905 struct array_cache *ac = cpu_cache_get(cachep);
87906
87907 check_irq_off();
87908+
87909+#ifdef CONFIG_PAX_MEMORY_SANITIZE
87910+ if (pax_sanitize_slab) {
87911+ if (!(cachep->flags & (SLAB_POISON | SLAB_NO_SANITIZE))) {
87912+ memset(objp, PAX_MEMORY_SANITIZE_VALUE, cachep->object_size);
87913+
87914+ if (cachep->ctor)
87915+ cachep->ctor(objp);
87916+
87917+ STATS_INC_SANITIZED(cachep);
87918+ } else
87919+ STATS_INC_NOT_SANITIZED(cachep);
87920+ }
87921+#endif
87922+
87923 kmemleak_free_recursive(objp, cachep->flags);
87924 objp = cache_free_debugcheck(cachep, objp, caller);
87925
87926@@ -3800,6 +3819,7 @@ void kfree(const void *objp)
87927
87928 if (unlikely(ZERO_OR_NULL_PTR(objp)))
87929 return;
87930+ VM_BUG_ON(!virt_addr_valid(objp));
87931 local_irq_save(flags);
87932 kfree_debugcheck(objp);
87933 c = virt_to_cache(objp);
87934@@ -4241,14 +4261,22 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
87935 }
87936 /* cpu stats */
87937 {
87938- unsigned long allochit = atomic_read(&cachep->allochit);
87939- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
87940- unsigned long freehit = atomic_read(&cachep->freehit);
87941- unsigned long freemiss = atomic_read(&cachep->freemiss);
87942+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
87943+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
87944+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
87945+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
87946
87947 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
87948 allochit, allocmiss, freehit, freemiss);
87949 }
87950+#ifdef CONFIG_PAX_MEMORY_SANITIZE
87951+ {
87952+ unsigned long sanitized = atomic_read_unchecked(&cachep->sanitized);
87953+ unsigned long not_sanitized = atomic_read_unchecked(&cachep->not_sanitized);
87954+
87955+ seq_printf(m, " : pax %6lu %6lu", sanitized, not_sanitized);
87956+ }
87957+#endif
87958 #endif
87959 }
87960
87961@@ -4476,13 +4504,71 @@ static const struct file_operations proc_slabstats_operations = {
87962 static int __init slab_proc_init(void)
87963 {
87964 #ifdef CONFIG_DEBUG_SLAB_LEAK
87965- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
87966+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
87967 #endif
87968 return 0;
87969 }
87970 module_init(slab_proc_init);
87971 #endif
87972
87973+bool is_usercopy_object(const void *ptr)
87974+{
87975+ struct page *page;
87976+ struct kmem_cache *cachep;
87977+
87978+ if (ZERO_OR_NULL_PTR(ptr))
87979+ return false;
87980+
87981+ if (!slab_is_available())
87982+ return false;
87983+
87984+ if (!virt_addr_valid(ptr))
87985+ return false;
87986+
87987+ page = virt_to_head_page(ptr);
87988+
87989+ if (!PageSlab(page))
87990+ return false;
87991+
87992+ cachep = page->slab_cache;
87993+ return cachep->flags & SLAB_USERCOPY;
87994+}
87995+
87996+#ifdef CONFIG_PAX_USERCOPY
87997+const char *check_heap_object(const void *ptr, unsigned long n)
87998+{
87999+ struct page *page;
88000+ struct kmem_cache *cachep;
88001+ struct slab *slabp;
88002+ unsigned int objnr;
88003+ unsigned long offset;
88004+
88005+ if (ZERO_OR_NULL_PTR(ptr))
88006+ return "<null>";
88007+
88008+ if (!virt_addr_valid(ptr))
88009+ return NULL;
88010+
88011+ page = virt_to_head_page(ptr);
88012+
88013+ if (!PageSlab(page))
88014+ return NULL;
88015+
88016+ cachep = page->slab_cache;
88017+ if (!(cachep->flags & SLAB_USERCOPY))
88018+ return cachep->name;
88019+
88020+ slabp = page->slab_page;
88021+ objnr = obj_to_index(cachep, slabp, ptr);
88022+ BUG_ON(objnr >= cachep->num);
88023+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
88024+ if (offset <= cachep->object_size && n <= cachep->object_size - offset)
88025+ return NULL;
88026+
88027+ return cachep->name;
88028+}
88029+#endif
88030+
88031 /**
88032 * ksize - get the actual amount of memory allocated for a given object
88033 * @objp: Pointer to the object
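
The SLAB check_heap_object() above accepts a copy only if it stays inside one object: offset <= object_size && n <= object_size - offset. Written this way the test cannot be defeated by an overflowing offset + n. A self-contained version of the bounds test:

    #include <stdio.h>
    #include <stdbool.h>

    /* A copy of n bytes at object-relative offset must stay inside the
     * object.  Checking "n <= size - offset" after "offset <= size"
     * cannot wrap, unlike the naive "offset + n <= size". */
    static bool copy_fits(unsigned long offset, unsigned long n,
                          unsigned long object_size)
    {
        return offset <= object_size && n <= object_size - offset;
    }

    int main(void)
    {
        printf("%d\n", copy_fits(0, 64, 64));                 /* 1: exact fit */
        printf("%d\n", copy_fits(32, 33, 64));                /* 0: one past end */
        printf("%d\n", copy_fits(32, (unsigned long)-1, 64)); /* 0: no wrap */
        return 0;
    }
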
88034diff --git a/mm/slab.h b/mm/slab.h
88035index f96b49e..db1d204 100644
88036--- a/mm/slab.h
88037+++ b/mm/slab.h
88038@@ -32,6 +32,15 @@ extern struct list_head slab_caches;
88039 /* The slab cache that manages slab cache information */
88040 extern struct kmem_cache *kmem_cache;
88041
88042+#ifdef CONFIG_PAX_MEMORY_SANITIZE
88043+#ifdef CONFIG_X86_64
88044+#define PAX_MEMORY_SANITIZE_VALUE '\xfe'
88045+#else
88046+#define PAX_MEMORY_SANITIZE_VALUE '\xff'
88047+#endif
88048+extern bool pax_sanitize_slab;
88049+#endif
88050+
88051 unsigned long calculate_alignment(unsigned long flags,
88052 unsigned long align, unsigned long size);
88053
88054@@ -67,7 +76,8 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
88055
88056 /* Legal flag mask for kmem_cache_create(), for various configurations */
88057 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
88058- SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
88059+ SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS | \
88060+ SLAB_USERCOPY | SLAB_NO_SANITIZE)
88061
88062 #if defined(CONFIG_DEBUG_SLAB)
88063 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
88064@@ -229,6 +239,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
88065 return s;
88066
88067 page = virt_to_head_page(x);
88068+
88069+ BUG_ON(!PageSlab(page));
88070+
88071 cachep = page->slab_cache;
88072 if (slab_equal_or_root(cachep, s))
88073 return cachep;
88074diff --git a/mm/slab_common.c b/mm/slab_common.c
88075index 2d41450..4efe6ee 100644
88076--- a/mm/slab_common.c
88077+++ b/mm/slab_common.c
88078@@ -22,11 +22,22 @@
88079
88080 #include "slab.h"
88081
88082-enum slab_state slab_state;
88083+enum slab_state slab_state __read_only;
88084 LIST_HEAD(slab_caches);
88085 DEFINE_MUTEX(slab_mutex);
88086 struct kmem_cache *kmem_cache;
88087
88088+#ifdef CONFIG_PAX_MEMORY_SANITIZE
88089+bool pax_sanitize_slab __read_only = true;
88090+static int __init pax_sanitize_slab_setup(char *str)
88091+{
88092+ pax_sanitize_slab = !!simple_strtol(str, NULL, 0);
88093+ printk("%sabled PaX slab sanitization\n", pax_sanitize_slab ? "En" : "Dis");
88094+ return 1;
88095+}
88096+__setup("pax_sanitize_slab=", pax_sanitize_slab_setup);
88097+#endif
88098+
88099 #ifdef CONFIG_DEBUG_VM
88100 static int kmem_cache_sanity_check(struct mem_cgroup *memcg, const char *name,
88101 size_t size)
88102@@ -209,7 +220,7 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
88103
88104 err = __kmem_cache_create(s, flags);
88105 if (!err) {
88106- s->refcount = 1;
88107+ atomic_set(&s->refcount, 1);
88108 list_add(&s->list, &slab_caches);
88109 memcg_cache_list_add(memcg, s);
88110 } else {
88111@@ -255,8 +266,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
88112
88113 get_online_cpus();
88114 mutex_lock(&slab_mutex);
88115- s->refcount--;
88116- if (!s->refcount) {
88117+ if (atomic_dec_and_test(&s->refcount)) {
88118 list_del(&s->list);
88119
88120 if (!__kmem_cache_shutdown(s)) {
88121@@ -302,7 +312,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
88122 panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
88123 name, size, err);
88124
88125- s->refcount = -1; /* Exempt from merging for now */
88126+ atomic_set(&s->refcount, -1); /* Exempt from merging for now */
88127 }
88128
88129 struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
88130@@ -315,7 +325,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
88131
88132 create_boot_cache(s, name, size, flags);
88133 list_add(&s->list, &slab_caches);
88134- s->refcount = 1;
88135+ atomic_set(&s->refcount, 1);
88136 return s;
88137 }
88138
88139@@ -327,6 +337,11 @@ struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
88140 EXPORT_SYMBOL(kmalloc_dma_caches);
88141 #endif
88142
88143+#ifdef CONFIG_PAX_USERCOPY_SLABS
88144+struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
88145+EXPORT_SYMBOL(kmalloc_usercopy_caches);
88146+#endif
88147+
88148 /*
88149 * Conversion table for small slabs sizes / 8 to the index in the
88150 * kmalloc array. This is necessary for slabs < 192 since we have non power
88151@@ -391,6 +406,13 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
88152 return kmalloc_dma_caches[index];
88153
88154 #endif
88155+
88156+#ifdef CONFIG_PAX_USERCOPY_SLABS
88157+ if (unlikely((flags & GFP_USERCOPY)))
88158+ return kmalloc_usercopy_caches[index];
88159+
88160+#endif
88161+
88162 return kmalloc_caches[index];
88163 }
88164
88165@@ -447,7 +469,7 @@ void __init create_kmalloc_caches(unsigned long flags)
88166 for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
88167 if (!kmalloc_caches[i]) {
88168 kmalloc_caches[i] = create_kmalloc_cache(NULL,
88169- 1 << i, flags);
88170+ 1 << i, SLAB_USERCOPY | flags);
88171 }
88172
88173 /*
88174@@ -456,10 +478,10 @@ void __init create_kmalloc_caches(unsigned long flags)
88175 * earlier power of two caches
88176 */
88177 if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
88178- kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);
88179+ kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, SLAB_USERCOPY | flags);
88180
88181 if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
88182- kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags);
88183+ kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, SLAB_USERCOPY | flags);
88184 }
88185
88186 /* Kmalloc array is now usable */
88187@@ -492,6 +514,23 @@ void __init create_kmalloc_caches(unsigned long flags)
88188 }
88189 }
88190 #endif
88191+
88192+#ifdef CONFIG_PAX_USERCOPY_SLABS
88193+ for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
88194+ struct kmem_cache *s = kmalloc_caches[i];
88195+
88196+ if (s) {
88197+ int size = kmalloc_size(i);
88198+ char *n = kasprintf(GFP_NOWAIT,
88199+ "usercopy-kmalloc-%d", size);
88200+
88201+ BUG_ON(!n);
88202+ kmalloc_usercopy_caches[i] = create_kmalloc_cache(n,
88203+ size, SLAB_USERCOPY | flags);
88204+ }
88205+ }
88206+#endif
88207+
88208 }
88209 #endif /* !CONFIG_SLOB */
88210
88211@@ -516,6 +555,9 @@ void print_slabinfo_header(struct seq_file *m)
88212 seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
88213 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
88214 seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
88215+#ifdef CONFIG_PAX_MEMORY_SANITIZE
88216+ seq_puts(m, " : pax <sanitized> <not_sanitized>");
88217+#endif
88218 #endif
88219 seq_putc(m, '\n');
88220 }
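
pax_sanitize_slab_setup() above is a standard __setup() hook: the string after pax_sanitize_slab= is parsed with simple_strtol() and any nonzero value enables sanitization (the default). A userspace model using strtol():

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdbool.h>

    static bool pax_sanitize_slab = true;   /* default: enabled */

    /* Model of pax_sanitize_slab_setup(): any nonzero value after
     * "pax_sanitize_slab=" enables sanitization, zero disables it. */
    static int pax_sanitize_slab_setup(const char *str)
    {
        pax_sanitize_slab = !!strtol(str, NULL, 0);
        printf("%sabled PaX slab sanitization\n",
               pax_sanitize_slab ? "En" : "Dis");
        return 1;
    }

    int main(void)
    {
        pax_sanitize_slab_setup("0");   /* Disabled ... */
        pax_sanitize_slab_setup("1");   /* Enabled ... */
        return 0;
    }
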
88221diff --git a/mm/slob.c b/mm/slob.c
88222index eeed4a0..bb0e9ab 100644
88223--- a/mm/slob.c
88224+++ b/mm/slob.c
88225@@ -157,7 +157,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
88226 /*
88227 * Return the size of a slob block.
88228 */
88229-static slobidx_t slob_units(slob_t *s)
88230+static slobidx_t slob_units(const slob_t *s)
88231 {
88232 if (s->units > 0)
88233 return s->units;
88234@@ -167,7 +167,7 @@ static slobidx_t slob_units(slob_t *s)
88235 /*
88236 * Return the next free slob block pointer after this one.
88237 */
88238-static slob_t *slob_next(slob_t *s)
88239+static slob_t *slob_next(const slob_t *s)
88240 {
88241 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
88242 slobidx_t next;
88243@@ -182,14 +182,14 @@ static slob_t *slob_next(slob_t *s)
88244 /*
88245 * Returns true if s is the last free block in its page.
88246 */
88247-static int slob_last(slob_t *s)
88248+static int slob_last(const slob_t *s)
88249 {
88250 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
88251 }
88252
88253-static void *slob_new_pages(gfp_t gfp, int order, int node)
88254+static struct page *slob_new_pages(gfp_t gfp, unsigned int order, int node)
88255 {
88256- void *page;
88257+ struct page *page;
88258
88259 #ifdef CONFIG_NUMA
88260 if (node != NUMA_NO_NODE)
88261@@ -201,14 +201,18 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
88262 if (!page)
88263 return NULL;
88264
88265- return page_address(page);
88266+ __SetPageSlab(page);
88267+ return page;
88268 }
88269
88270-static void slob_free_pages(void *b, int order)
88271+static void slob_free_pages(struct page *sp, int order)
88272 {
88273 if (current->reclaim_state)
88274 current->reclaim_state->reclaimed_slab += 1 << order;
88275- free_pages((unsigned long)b, order);
88276+ __ClearPageSlab(sp);
88277+ page_mapcount_reset(sp);
88278+ sp->private = 0;
88279+ __free_pages(sp, order);
88280 }
88281
88282 /*
88283@@ -313,15 +317,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
88284
88285 /* Not enough space: must allocate a new page */
88286 if (!b) {
88287- b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
88288- if (!b)
88289+ sp = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
88290+ if (!sp)
88291 return NULL;
88292- sp = virt_to_page(b);
88293- __SetPageSlab(sp);
88294+ b = page_address(sp);
88295
88296 spin_lock_irqsave(&slob_lock, flags);
88297 sp->units = SLOB_UNITS(PAGE_SIZE);
88298 sp->freelist = b;
88299+ sp->private = 0;
88300 INIT_LIST_HEAD(&sp->list);
88301 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
88302 set_slob_page_free(sp, slob_list);
88303@@ -359,12 +363,15 @@ static void slob_free(void *block, int size)
88304 if (slob_page_free(sp))
88305 clear_slob_page_free(sp);
88306 spin_unlock_irqrestore(&slob_lock, flags);
88307- __ClearPageSlab(sp);
88308- page_mapcount_reset(sp);
88309- slob_free_pages(b, 0);
88310+ slob_free_pages(sp, 0);
88311 return;
88312 }
88313
88314+#ifdef CONFIG_PAX_MEMORY_SANITIZE
88315+ if (pax_sanitize_slab)
88316+ memset(block, PAX_MEMORY_SANITIZE_VALUE, size);
88317+#endif
88318+
88319 if (!slob_page_free(sp)) {
88320 /* This slob page is about to become partially free. Easy! */
88321 sp->units = units;
88322@@ -424,11 +431,10 @@ out:
88323 */
88324
88325 static __always_inline void *
88326-__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
88327+__do_kmalloc_node_align(size_t size, gfp_t gfp, int node, unsigned long caller, int align)
88328 {
88329- unsigned int *m;
88330- int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
88331- void *ret;
88332+ slob_t *m;
88333+ void *ret = NULL;
88334
88335 gfp &= gfp_allowed_mask;
88336
88337@@ -442,23 +448,41 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
88338
88339 if (!m)
88340 return NULL;
88341- *m = size;
88342+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
88343+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
88344+ m[0].units = size;
88345+ m[1].units = align;
88346 ret = (void *)m + align;
88347
88348 trace_kmalloc_node(caller, ret,
88349 size, size + align, gfp, node);
88350 } else {
88351 unsigned int order = get_order(size);
88352+ struct page *page;
88353
88354 if (likely(order))
88355 gfp |= __GFP_COMP;
88356- ret = slob_new_pages(gfp, order, node);
88357+ page = slob_new_pages(gfp, order, node);
88358+ if (page) {
88359+ ret = page_address(page);
88360+ page->private = size;
88361+ }
88362
88363 trace_kmalloc_node(caller, ret,
88364 size, PAGE_SIZE << order, gfp, node);
88365 }
88366
88367- kmemleak_alloc(ret, size, 1, gfp);
88368+ return ret;
88369+}
88370+
88371+static __always_inline void *
88372+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
88373+{
88374+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
88375+ void *ret = __do_kmalloc_node_align(size, gfp, node, caller, align);
88376+
88377+ if (!ZERO_OR_NULL_PTR(ret))
88378+ kmemleak_alloc(ret, size, 1, gfp);
88379 return ret;
88380 }
88381
88382@@ -493,34 +517,112 @@ void kfree(const void *block)
88383 return;
88384 kmemleak_free(block);
88385
88386+ VM_BUG_ON(!virt_addr_valid(block));
88387 sp = virt_to_page(block);
88388- if (PageSlab(sp)) {
88389+ VM_BUG_ON(!PageSlab(sp));
88390+ if (!sp->private) {
88391 int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
88392- unsigned int *m = (unsigned int *)(block - align);
88393- slob_free(m, *m + align);
88394- } else
88395+ slob_t *m = (slob_t *)(block - align);
88396+ slob_free(m, m[0].units + align);
88397+ } else {
88398+ __ClearPageSlab(sp);
88399+ page_mapcount_reset(sp);
88400+ sp->private = 0;
88401 __free_pages(sp, compound_order(sp));
88402+ }
88403 }
88404 EXPORT_SYMBOL(kfree);
88405
88406+bool is_usercopy_object(const void *ptr)
88407+{
88408+ if (!slab_is_available())
88409+ return false;
88410+
88411+ // PAX: TODO
88412+
88413+ return false;
88414+}
88415+
88416+#ifdef CONFIG_PAX_USERCOPY
88417+const char *check_heap_object(const void *ptr, unsigned long n)
88418+{
88419+ struct page *page;
88420+ const slob_t *free;
88421+ const void *base;
88422+ unsigned long flags;
88423+
88424+ if (ZERO_OR_NULL_PTR(ptr))
88425+ return "<null>";
88426+
88427+ if (!virt_addr_valid(ptr))
88428+ return NULL;
88429+
88430+ page = virt_to_head_page(ptr);
88431+ if (!PageSlab(page))
88432+ return NULL;
88433+
88434+ if (page->private) {
88435+ base = page;
88436+ if (base <= ptr && n <= page->private - (ptr - base))
88437+ return NULL;
88438+ return "<slob>";
88439+ }
88440+
88441+ /* some tricky double walking to find the chunk */
88442+ spin_lock_irqsave(&slob_lock, flags);
88443+ base = (void *)((unsigned long)ptr & PAGE_MASK);
88444+ free = page->freelist;
88445+
88446+ while (!slob_last(free) && (void *)free <= ptr) {
88447+ base = free + slob_units(free);
88448+ free = slob_next(free);
88449+ }
88450+
88451+ while (base < (void *)free) {
88452+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
88453+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
88454+ int offset;
88455+
88456+ if (ptr < base + align)
88457+ break;
88458+
88459+ offset = ptr - base - align;
88460+ if (offset >= m) {
88461+ base += size;
88462+ continue;
88463+ }
88464+
88465+ if (n > m - offset)
88466+ break;
88467+
88468+ spin_unlock_irqrestore(&slob_lock, flags);
88469+ return NULL;
88470+ }
88471+
88472+ spin_unlock_irqrestore(&slob_lock, flags);
88473+ return "<slob>";
88474+}
88475+#endif
88476+
88477 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
88478 size_t ksize(const void *block)
88479 {
88480 struct page *sp;
88481 int align;
88482- unsigned int *m;
88483+ slob_t *m;
88484
88485 BUG_ON(!block);
88486 if (unlikely(block == ZERO_SIZE_PTR))
88487 return 0;
88488
88489 sp = virt_to_page(block);
88490- if (unlikely(!PageSlab(sp)))
88491- return PAGE_SIZE << compound_order(sp);
88492+ VM_BUG_ON(!PageSlab(sp));
88493+ if (sp->private)
88494+ return sp->private;
88495
88496 align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
88497- m = (unsigned int *)(block - align);
88498- return SLOB_UNITS(*m) * SLOB_UNIT;
88499+ m = (slob_t *)(block - align);
88500+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
88501 }
88502 EXPORT_SYMBOL(ksize);
88503
88504@@ -536,23 +638,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
88505
88506 void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
88507 {
88508- void *b;
88509+ void *b = NULL;
88510
88511 flags &= gfp_allowed_mask;
88512
88513 lockdep_trace_alloc(flags);
88514
88515+#ifdef CONFIG_PAX_USERCOPY_SLABS
88516+ b = __do_kmalloc_node_align(c->size, flags, node, _RET_IP_, c->align);
88517+#else
88518 if (c->size < PAGE_SIZE) {
88519 b = slob_alloc(c->size, flags, c->align, node);
88520 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
88521 SLOB_UNITS(c->size) * SLOB_UNIT,
88522 flags, node);
88523 } else {
88524- b = slob_new_pages(flags, get_order(c->size), node);
88525+ struct page *sp;
88526+
88527+ sp = slob_new_pages(flags, get_order(c->size), node);
88528+ if (sp) {
88529+ b = page_address(sp);
88530+ sp->private = c->size;
88531+ }
88532 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
88533 PAGE_SIZE << get_order(c->size),
88534 flags, node);
88535 }
88536+#endif
88537
88538 if (c->ctor)
88539 c->ctor(b);
88540@@ -564,10 +676,14 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
88541
88542 static void __kmem_cache_free(void *b, int size)
88543 {
88544- if (size < PAGE_SIZE)
88545+ struct page *sp;
88546+
88547+ sp = virt_to_page(b);
88548+ BUG_ON(!PageSlab(sp));
88549+ if (!sp->private)
88550 slob_free(b, size);
88551 else
88552- slob_free_pages(b, get_order(size));
88553+ slob_free_pages(sp, get_order(size));
88554 }
88555
88556 static void kmem_rcu_free(struct rcu_head *head)
88557@@ -580,17 +696,31 @@ static void kmem_rcu_free(struct rcu_head *head)
88558
88559 void kmem_cache_free(struct kmem_cache *c, void *b)
88560 {
88561+ int size = c->size;
88562+
88563+#ifdef CONFIG_PAX_USERCOPY_SLABS
88564+ if (size + c->align < PAGE_SIZE) {
88565+ size += c->align;
88566+ b -= c->align;
88567+ }
88568+#endif
88569+
88570 kmemleak_free_recursive(b, c->flags);
88571 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
88572 struct slob_rcu *slob_rcu;
88573- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
88574- slob_rcu->size = c->size;
88575+ slob_rcu = b + (size - sizeof(struct slob_rcu));
88576+ slob_rcu->size = size;
88577 call_rcu(&slob_rcu->head, kmem_rcu_free);
88578 } else {
88579- __kmem_cache_free(b, c->size);
88580+ __kmem_cache_free(b, size);
88581 }
88582
88583+#ifdef CONFIG_PAX_USERCOPY_SLABS
88584+ trace_kfree(_RET_IP_, b);
88585+#else
88586 trace_kmem_cache_free(_RET_IP_, b);
88587+#endif
88588+
88589 }
88590 EXPORT_SYMBOL(kmem_cache_free);
88591
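
The SLOB rework above replaces the single unsigned int kmalloc header with two slob_t units: m[0].units records the requested size and m[1].units the alignment gap, which is why ksize() and kfree() now read m[0].units. A toy model of the header layout (the real ksize() additionally rounds up to SLOB_UNIT multiples, omitted here; ALIGNMENT is a stand-in constant):

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct { long units; } slob_t;

    #define ALIGNMENT 16   /* stand-in for max(ARCH_KMALLOC_MINALIGN, ...);
                            * must hold two slob_t headers, as the new
                            * BUILD_BUG_ONs in the patch enforce */

    static void *toy_kmalloc(size_t size)
    {
        char *m = malloc(ALIGNMENT + size);

        if (!m)
            return NULL;
        ((slob_t *)m)[0].units = (long)size;   /* requested size */
        ((slob_t *)m)[1].units = ALIGNMENT;    /* payload offset */
        return m + ALIGNMENT;
    }

    static size_t toy_ksize(const void *block)
    {
        const slob_t *m = (const slob_t *)((const char *)block - ALIGNMENT);

        return (size_t)m[0].units;
    }

    int main(void)
    {
        void *p = toy_kmalloc(100);

        if (!p)
            return 1;
        printf("ksize = %zu\n", toy_ksize(p));   /* 100 */
        free((char *)p - ALIGNMENT);
        return 0;
    }
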
88592diff --git a/mm/slub.c b/mm/slub.c
88593index 57707f0..7857bd3 100644
88594--- a/mm/slub.c
88595+++ b/mm/slub.c
88596@@ -198,7 +198,7 @@ struct track {
88597
88598 enum track_item { TRACK_ALLOC, TRACK_FREE };
88599
88600-#ifdef CONFIG_SYSFS
88601+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
88602 static int sysfs_slab_add(struct kmem_cache *);
88603 static int sysfs_slab_alias(struct kmem_cache *, const char *);
88604 static void sysfs_slab_remove(struct kmem_cache *);
88605@@ -519,7 +519,7 @@ static void print_track(const char *s, struct track *t)
88606 if (!t->addr)
88607 return;
88608
88609- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
88610+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
88611 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
88612 #ifdef CONFIG_STACKTRACE
88613 {
88614@@ -2594,6 +2594,14 @@ static __always_inline void slab_free(struct kmem_cache *s,
88615
88616 slab_free_hook(s, x);
88617
88618+#ifdef CONFIG_PAX_MEMORY_SANITIZE
88619+ if (pax_sanitize_slab && !(s->flags & SLAB_NO_SANITIZE)) {
88620+ memset(x, PAX_MEMORY_SANITIZE_VALUE, s->object_size);
88621+ if (s->ctor)
88622+ s->ctor(x);
88623+ }
88624+#endif
88625+
88626 redo:
88627 /*
88628 * Determine the currently cpus per cpu slab.
88629@@ -2661,7 +2669,7 @@ static int slub_min_objects;
88630 * Merge control. If this is set then no merging of slab caches will occur.
88631 * (Could be removed. This was introduced to pacify the merge skeptics.)
88632 */
88633-static int slub_nomerge;
88634+static int slub_nomerge = 1;
88635
88636 /*
88637 * Calculate the order of allocation given an slab object size.
88638@@ -2938,6 +2946,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
88639 s->inuse = size;
88640
88641 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
88642+#ifdef CONFIG_PAX_MEMORY_SANITIZE
88643+ (pax_sanitize_slab && !(flags & SLAB_NO_SANITIZE)) ||
88644+#endif
88645 s->ctor)) {
88646 /*
88647 * Relocate free pointer after the object if it is not
88648@@ -3283,6 +3294,59 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
88649 EXPORT_SYMBOL(__kmalloc_node);
88650 #endif
88651
88652+bool is_usercopy_object(const void *ptr)
88653+{
88654+ struct page *page;
88655+ struct kmem_cache *s;
88656+
88657+ if (ZERO_OR_NULL_PTR(ptr))
88658+ return false;
88659+
88660+ if (!slab_is_available())
88661+ return false;
88662+
88663+ if (!virt_addr_valid(ptr))
88664+ return false;
88665+
88666+ page = virt_to_head_page(ptr);
88667+
88668+ if (!PageSlab(page))
88669+ return false;
88670+
88671+ s = page->slab_cache;
88672+ return s->flags & SLAB_USERCOPY;
88673+}
88674+
88675+#ifdef CONFIG_PAX_USERCOPY
88676+const char *check_heap_object(const void *ptr, unsigned long n)
88677+{
88678+ struct page *page;
88679+ struct kmem_cache *s;
88680+ unsigned long offset;
88681+
88682+ if (ZERO_OR_NULL_PTR(ptr))
88683+ return "<null>";
88684+
88685+ if (!virt_addr_valid(ptr))
88686+ return NULL;
88687+
88688+ page = virt_to_head_page(ptr);
88689+
88690+ if (!PageSlab(page))
88691+ return NULL;
88692+
88693+ s = page->slab_cache;
88694+ if (!(s->flags & SLAB_USERCOPY))
88695+ return s->name;
88696+
88697+ offset = (ptr - page_address(page)) % s->size;
88698+ if (offset <= s->object_size && n <= s->object_size - offset)
88699+ return NULL;
88700+
88701+ return s->name;
88702+}
88703+#endif
88704+
88705 size_t ksize(const void *object)
88706 {
88707 struct page *page;
88708@@ -3347,6 +3411,7 @@ void kfree(const void *x)
88709 if (unlikely(ZERO_OR_NULL_PTR(x)))
88710 return;
88711
88712+ VM_BUG_ON(!virt_addr_valid(x));
88713 page = virt_to_head_page(x);
88714 if (unlikely(!PageSlab(page))) {
88715 BUG_ON(!PageCompound(page));
88716@@ -3652,7 +3717,7 @@ static int slab_unmergeable(struct kmem_cache *s)
88717 /*
88718 * We may have set a slab to be unmergeable during bootstrap.
88719 */
88720- if (s->refcount < 0)
88721+ if (atomic_read(&s->refcount) < 0)
88722 return 1;
88723
88724 return 0;
88725@@ -3710,7 +3775,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
88726
88727 s = find_mergeable(memcg, size, align, flags, name, ctor);
88728 if (s) {
88729- s->refcount++;
88730+ atomic_inc(&s->refcount);
88731 /*
88732 * Adjust the object sizes so that we clear
88733 * the complete object on kzalloc.
88734@@ -3719,7 +3784,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
88735 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
88736
88737 if (sysfs_slab_alias(s, name)) {
88738- s->refcount--;
88739+ atomic_dec(&s->refcount);
88740 s = NULL;
88741 }
88742 }
88743@@ -3781,7 +3846,7 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
88744 return NOTIFY_OK;
88745 }
88746
88747-static struct notifier_block __cpuinitdata slab_notifier = {
88748+static struct notifier_block slab_notifier = {
88749 .notifier_call = slab_cpuup_callback
88750 };
88751
88752@@ -3839,7 +3904,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
88753 }
88754 #endif
88755
88756-#ifdef CONFIG_SYSFS
88757+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
88758 static int count_inuse(struct page *page)
88759 {
88760 return page->inuse;
88761@@ -4226,12 +4291,12 @@ static void resiliency_test(void)
88762 validate_slab_cache(kmalloc_caches[9]);
88763 }
88764 #else
88765-#ifdef CONFIG_SYSFS
88766+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
88767 static void resiliency_test(void) {};
88768 #endif
88769 #endif
88770
88771-#ifdef CONFIG_SYSFS
88772+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
88773 enum slab_stat_type {
88774 SL_ALL, /* All slabs */
88775 SL_PARTIAL, /* Only partially allocated slabs */
88776@@ -4475,7 +4540,7 @@ SLAB_ATTR_RO(ctor);
88777
88778 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
88779 {
88780- return sprintf(buf, "%d\n", s->refcount - 1);
88781+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
88782 }
88783 SLAB_ATTR_RO(aliases);
88784
88785@@ -4563,6 +4628,14 @@ static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
88786 SLAB_ATTR_RO(cache_dma);
88787 #endif
88788
88789+#ifdef CONFIG_PAX_USERCOPY_SLABS
88790+static ssize_t usercopy_show(struct kmem_cache *s, char *buf)
88791+{
88792+ return sprintf(buf, "%d\n", !!(s->flags & SLAB_USERCOPY));
88793+}
88794+SLAB_ATTR_RO(usercopy);
88795+#endif
88796+
88797 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
88798 {
88799 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
88800@@ -4897,6 +4970,9 @@ static struct attribute *slab_attrs[] = {
88801 #ifdef CONFIG_ZONE_DMA
88802 &cache_dma_attr.attr,
88803 #endif
88804+#ifdef CONFIG_PAX_USERCOPY_SLABS
88805+ &usercopy_attr.attr,
88806+#endif
88807 #ifdef CONFIG_NUMA
88808 &remote_node_defrag_ratio_attr.attr,
88809 #endif
88810@@ -5128,6 +5204,7 @@ static char *create_unique_id(struct kmem_cache *s)
88811 return name;
88812 }
88813
88814+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
88815 static int sysfs_slab_add(struct kmem_cache *s)
88816 {
88817 int err;
88818@@ -5151,7 +5228,7 @@ static int sysfs_slab_add(struct kmem_cache *s)
88819 }
88820
88821 s->kobj.kset = slab_kset;
88822- err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
88823+ err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
88824 if (err) {
88825 kobject_put(&s->kobj);
88826 return err;
88827@@ -5185,6 +5262,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
88828 kobject_del(&s->kobj);
88829 kobject_put(&s->kobj);
88830 }
88831+#endif
88832
88833 /*
88834 * Need to buffer aliases during bootup until sysfs becomes
88835@@ -5198,6 +5276,7 @@ struct saved_alias {
88836
88837 static struct saved_alias *alias_list;
88838
88839+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
88840 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
88841 {
88842 struct saved_alias *al;
88843@@ -5220,6 +5299,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
88844 alias_list = al;
88845 return 0;
88846 }
88847+#endif
88848
88849 static int __init slab_sysfs_init(void)
88850 {
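
SLUB lays objects out at a fixed stride s->size inside a slab page, so its check_heap_object() above recovers the intra-object offset with a single modulo before applying the same overflow-safe bounds test as the SLAB version. The check in isolation:

    #include <stdio.h>
    #include <stdbool.h>

    /* offset = (ptr - page_base) % stride recovers the position inside
     * the enclosing object; the bounds test matches SLAB's. */
    static bool slub_copy_ok(unsigned long ptr, unsigned long page_base,
                             unsigned long stride, unsigned long object_size,
                             unsigned long n)
    {
        unsigned long offset = (ptr - page_base) % stride;

        return offset <= object_size && n <= object_size - offset;
    }

    int main(void)
    {
        /* 64-byte objects at a 96-byte stride (metadata between objects). */
        printf("%d\n", slub_copy_ok(0x1000 + 96 + 8, 0x1000, 96, 64, 32)); /* 1 */
        printf("%d\n", slub_copy_ok(0x1000 + 96 + 8, 0x1000, 96, 64, 60)); /* 0 */
        return 0;
    }
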
88851diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
88852index 27eeab3..7c3f7f2 100644
88853--- a/mm/sparse-vmemmap.c
88854+++ b/mm/sparse-vmemmap.c
88855@@ -130,7 +130,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
88856 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
88857 if (!p)
88858 return NULL;
88859- pud_populate(&init_mm, pud, p);
88860+ pud_populate_kernel(&init_mm, pud, p);
88861 }
88862 return pud;
88863 }
88864@@ -142,7 +142,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
88865 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
88866 if (!p)
88867 return NULL;
88868- pgd_populate(&init_mm, pgd, p);
88869+ pgd_populate_kernel(&init_mm, pgd, p);
88870 }
88871 return pgd;
88872 }
88873diff --git a/mm/sparse.c b/mm/sparse.c
88874index 1c91f0d3..485470a 100644
88875--- a/mm/sparse.c
88876+++ b/mm/sparse.c
88877@@ -761,7 +761,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
88878
88879 for (i = 0; i < PAGES_PER_SECTION; i++) {
88880 if (PageHWPoison(&memmap[i])) {
88881- atomic_long_sub(1, &num_poisoned_pages);
88882+ atomic_long_sub_unchecked(1, &num_poisoned_pages);
88883 ClearPageHWPoison(&memmap[i]);
88884 }
88885 }
88886diff --git a/mm/swap.c b/mm/swap.c
88887index dfd7d71..ccdf688 100644
88888--- a/mm/swap.c
88889+++ b/mm/swap.c
88890@@ -31,6 +31,7 @@
88891 #include <linux/memcontrol.h>
88892 #include <linux/gfp.h>
88893 #include <linux/uio.h>
88894+#include <linux/hugetlb.h>
88895
88896 #include "internal.h"
88897
88898@@ -73,6 +74,8 @@ static void __put_compound_page(struct page *page)
88899
88900 __page_cache_release(page);
88901 dtor = get_compound_page_dtor(page);
88902+ if (!PageHuge(page))
88903+ BUG_ON(dtor != free_compound_page);
88904 (*dtor)(page);
88905 }
88906
88907diff --git a/mm/swapfile.c b/mm/swapfile.c
88908index 746af55b..7ac94ae 100644
88909--- a/mm/swapfile.c
88910+++ b/mm/swapfile.c
88911@@ -66,7 +66,7 @@ static DEFINE_MUTEX(swapon_mutex);
88912
88913 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
88914 /* Activity counter to indicate that a swapon or swapoff has occurred */
88915-static atomic_t proc_poll_event = ATOMIC_INIT(0);
88916+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
88917
88918 static inline unsigned char swap_count(unsigned char ent)
88919 {
88920@@ -1684,7 +1684,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
88921 }
88922 filp_close(swap_file, NULL);
88923 err = 0;
88924- atomic_inc(&proc_poll_event);
88925+ atomic_inc_unchecked(&proc_poll_event);
88926 wake_up_interruptible(&proc_poll_wait);
88927
88928 out_dput:
88929@@ -1701,8 +1701,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
88930
88931 poll_wait(file, &proc_poll_wait, wait);
88932
88933- if (seq->poll_event != atomic_read(&proc_poll_event)) {
88934- seq->poll_event = atomic_read(&proc_poll_event);
88935+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
88936+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
88937 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
88938 }
88939
88940@@ -1800,7 +1800,7 @@ static int swaps_open(struct inode *inode, struct file *file)
88941 return ret;
88942
88943 seq = file->private_data;
88944- seq->poll_event = atomic_read(&proc_poll_event);
88945+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
88946 return 0;
88947 }
88948
88949@@ -2143,7 +2143,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
88950 (frontswap_map) ? "FS" : "");
88951
88952 mutex_unlock(&swapon_mutex);
88953- atomic_inc(&proc_poll_event);
88954+ atomic_inc_unchecked(&proc_poll_event);
88955 wake_up_interruptible(&proc_poll_wait);
88956
88957 if (S_ISREG(inode->i_mode))
88958diff --git a/mm/util.c b/mm/util.c
88959index ab1424d..7c5bd5a 100644
88960--- a/mm/util.c
88961+++ b/mm/util.c
88962@@ -294,6 +294,12 @@ done:
88963 void arch_pick_mmap_layout(struct mm_struct *mm)
88964 {
88965 mm->mmap_base = TASK_UNMAPPED_BASE;
88966+
88967+#ifdef CONFIG_PAX_RANDMMAP
88968+ if (mm->pax_flags & MF_PAX_RANDMMAP)
88969+ mm->mmap_base += mm->delta_mmap;
88970+#endif
88971+
88972 mm->get_unmapped_area = arch_get_unmapped_area;
88973 mm->unmap_area = arch_unmap_area;
88974 }
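
With PAX_RANDMMAP the arch_pick_mmap_layout() hunk above offsets the mmap base by a per-process delta_mmap chosen at exec time. A rough model of the base computation; the base address, delta width, and randomness source here are illustrative only:

    #include <stdio.h>
    #include <stdlib.h>

    #define TASK_UNMAPPED_BASE 0x155555000000UL   /* illustrative value */
    #define PAGE_SHIFT 12

    int main(void)
    {
        /* Toy randomness; the kernel's delta_mmap is set at exec time
         * from a proper entropy source, with an arch-defined width. */
        unsigned long delta_mmap =
            ((unsigned long)(rand() & 0xffff)) << PAGE_SHIFT;
        unsigned long mmap_base = TASK_UNMAPPED_BASE + delta_mmap;

        printf("mmap_base = %#lx (delta %#lx)\n", mmap_base, delta_mmap);
        return 0;
    }
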
88975diff --git a/mm/vmalloc.c b/mm/vmalloc.c
88976index d365724..6cae7c2 100644
88977--- a/mm/vmalloc.c
88978+++ b/mm/vmalloc.c
88979@@ -59,8 +59,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
88980
88981 pte = pte_offset_kernel(pmd, addr);
88982 do {
88983- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
88984- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
88985+
88986+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
88987+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
88988+ BUG_ON(!pte_exec(*pte));
88989+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
88990+ continue;
88991+ }
88992+#endif
88993+
88994+ {
88995+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
88996+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
88997+ }
88998 } while (pte++, addr += PAGE_SIZE, addr != end);
88999 }
89000
89001@@ -120,16 +131,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
89002 pte = pte_alloc_kernel(pmd, addr);
89003 if (!pte)
89004 return -ENOMEM;
89005+
89006+ pax_open_kernel();
89007 do {
89008 struct page *page = pages[*nr];
89009
89010- if (WARN_ON(!pte_none(*pte)))
89011+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
89012+ if (pgprot_val(prot) & _PAGE_NX)
89013+#endif
89014+
89015+ if (!pte_none(*pte)) {
89016+ pax_close_kernel();
89017+ WARN_ON(1);
89018 return -EBUSY;
89019- if (WARN_ON(!page))
89020+ }
89021+ if (!page) {
89022+ pax_close_kernel();
89023+ WARN_ON(1);
89024 return -ENOMEM;
89025+ }
89026 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
89027 (*nr)++;
89028 } while (pte++, addr += PAGE_SIZE, addr != end);
89029+ pax_close_kernel();
89030 return 0;
89031 }
89032
89033@@ -139,7 +163,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
89034 pmd_t *pmd;
89035 unsigned long next;
89036
89037- pmd = pmd_alloc(&init_mm, pud, addr);
89038+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
89039 if (!pmd)
89040 return -ENOMEM;
89041 do {
89042@@ -156,7 +180,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
89043 pud_t *pud;
89044 unsigned long next;
89045
89046- pud = pud_alloc(&init_mm, pgd, addr);
89047+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
89048 if (!pud)
89049 return -ENOMEM;
89050 do {
89051@@ -216,6 +240,12 @@ int is_vmalloc_or_module_addr(const void *x)
89052 if (addr >= MODULES_VADDR && addr < MODULES_END)
89053 return 1;
89054 #endif
89055+
89056+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
89057+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
89058+ return 1;
89059+#endif
89060+
89061 return is_vmalloc_addr(x);
89062 }
89063
89064@@ -236,8 +266,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
89065
89066 if (!pgd_none(*pgd)) {
89067 pud_t *pud = pud_offset(pgd, addr);
89068+#ifdef CONFIG_X86
89069+ if (!pud_large(*pud))
89070+#endif
89071 if (!pud_none(*pud)) {
89072 pmd_t *pmd = pmd_offset(pud, addr);
89073+#ifdef CONFIG_X86
89074+ if (!pmd_large(*pmd))
89075+#endif
89076 if (!pmd_none(*pmd)) {
89077 pte_t *ptep, pte;
89078
89079@@ -339,7 +375,7 @@ static void purge_vmap_area_lazy(void);
89080 * Allocate a region of KVA of the specified size and alignment, within the
89081 * vstart and vend.
89082 */
89083-static struct vmap_area *alloc_vmap_area(unsigned long size,
89084+static __size_overflow(1) struct vmap_area *alloc_vmap_area(unsigned long size,
89085 unsigned long align,
89086 unsigned long vstart, unsigned long vend,
89087 int node, gfp_t gfp_mask)
89088@@ -1337,6 +1373,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
89089 struct vm_struct *area;
89090
89091 BUG_ON(in_interrupt());
89092+
89093+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
89094+ if (flags & VM_KERNEXEC) {
89095+ if (start != VMALLOC_START || end != VMALLOC_END)
89096+ return NULL;
89097+ start = (unsigned long)MODULES_EXEC_VADDR;
89098+ end = (unsigned long)MODULES_EXEC_END;
89099+ }
89100+#endif
89101+
89102 if (flags & VM_IOREMAP) {
89103 int bit = fls(size);
89104
89105@@ -1581,6 +1627,11 @@ void *vmap(struct page **pages, unsigned int count,
89106 if (count > totalram_pages)
89107 return NULL;
89108
89109+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
89110+ if (!(pgprot_val(prot) & _PAGE_NX))
89111+ flags |= VM_KERNEXEC;
89112+#endif
89113+
89114 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
89115 __builtin_return_address(0));
89116 if (!area)
89117@@ -1682,6 +1733,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
89118 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
89119 goto fail;
89120
89121+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
89122+ if (!(pgprot_val(prot) & _PAGE_NX))
89123+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
89124+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
89125+ else
89126+#endif
89127+
89128 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
89129 start, end, node, gfp_mask, caller);
89130 if (!area)
89131@@ -1858,10 +1916,9 @@ EXPORT_SYMBOL(vzalloc_node);
89132 * For tight control over page level allocator and protection flags
89133 * use __vmalloc() instead.
89134 */
89135-
89136 void *vmalloc_exec(unsigned long size)
89137 {
89138- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
89139+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
89140 NUMA_NO_NODE, __builtin_return_address(0));
89141 }
89142
89143@@ -2168,6 +2225,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
89144 unsigned long uaddr = vma->vm_start;
89145 unsigned long usize = vma->vm_end - vma->vm_start;
89146
89147+ BUG_ON(vma->vm_mirror);
89148+
89149 if ((PAGE_SIZE-1) & (unsigned long)addr)
89150 return -EINVAL;
89151
89152@@ -2629,7 +2688,11 @@ static int s_show(struct seq_file *m, void *p)
89153 v->addr, v->addr + v->size, v->size);
89154
89155 if (v->caller)
89156+#ifdef CONFIG_GRKERNSEC_HIDESYM
89157+ seq_printf(m, " %pK", v->caller);
89158+#else
89159 seq_printf(m, " %pS", v->caller);
89160+#endif
89161
89162 if (v->nr_pages)
89163 seq_printf(m, " pages=%d", v->nr_pages);
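
On x86-32 with KERNEXEC the patch carves out a dedicated executable region for modules, so is_vmalloc_or_module_addr() above gains a second range test. The check in isolation, with illustrative bounds (the real MODULES_EXEC_VADDR/END are arch-defined):

    #include <stdio.h>
    #include <stdbool.h>

    #define MODULES_EXEC_VADDR 0xc8000000UL   /* illustrative bounds */
    #define MODULES_EXEC_END   0xcc000000UL

    static bool in_exec_module_range(unsigned long addr)
    {
        return addr >= MODULES_EXEC_VADDR && addr < MODULES_EXEC_END;
    }

    int main(void)
    {
        printf("%d\n", in_exec_module_range(0xc9000000UL)); /* 1 */
        printf("%d\n", in_exec_module_range(0xc0000000UL)); /* 0 */
        return 0;
    }
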
89164diff --git a/mm/vmstat.c b/mm/vmstat.c
89165index f42745e..62f8346 100644
89166--- a/mm/vmstat.c
89167+++ b/mm/vmstat.c
89168@@ -76,7 +76,7 @@ void vm_events_fold_cpu(int cpu)
89169 *
89170 * vm_stat contains the global counters
89171 */
89172-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
89173+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
89174 EXPORT_SYMBOL(vm_stat);
89175
89176 #ifdef CONFIG_SMP
89177@@ -452,7 +452,7 @@ void refresh_cpu_vm_stats(int cpu)
89178 v = p->vm_stat_diff[i];
89179 p->vm_stat_diff[i] = 0;
89180 local_irq_restore(flags);
89181- atomic_long_add(v, &zone->vm_stat[i]);
89182+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
89183 global_diff[i] += v;
89184 #ifdef CONFIG_NUMA
89185 /* 3 seconds idle till flush */
89186@@ -490,7 +490,7 @@ void refresh_cpu_vm_stats(int cpu)
89187
89188 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
89189 if (global_diff[i])
89190- atomic_long_add(global_diff[i], &vm_stat[i]);
89191+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
89192 }
89193
89194 /*
89195@@ -505,8 +505,8 @@ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
89196 if (pset->vm_stat_diff[i]) {
89197 int v = pset->vm_stat_diff[i];
89198 pset->vm_stat_diff[i] = 0;
89199- atomic_long_add(v, &zone->vm_stat[i]);
89200- atomic_long_add(v, &vm_stat[i]);
89201+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
89202+ atomic_long_add_unchecked(v, &vm_stat[i]);
89203 }
89204 }
89205 #endif
89206@@ -1226,7 +1226,7 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
89207 return NOTIFY_OK;
89208 }
89209
89210-static struct notifier_block __cpuinitdata vmstat_notifier =
89211+static struct notifier_block vmstat_notifier =
89212 { &vmstat_cpuup_callback, NULL, 0 };
89213 #endif
89214
89215@@ -1241,10 +1241,20 @@ static int __init setup_vmstat(void)
89216 start_cpu_timer(cpu);
89217 #endif
89218 #ifdef CONFIG_PROC_FS
89219- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
89220- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
89221- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
89222- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
89223+ {
89224+ mode_t gr_mode = S_IRUGO;
89225+#ifdef CONFIG_GRKERNSEC_PROC_ADD
89226+ gr_mode = S_IRUSR;
89227+#endif
89228+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
89229+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
89230+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
89231+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
89232+#else
89233+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
89234+#endif
89235+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
89236+ }
89237 #endif
89238 return 0;
89239 }
89240diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
89241index 9424f37..6aabf19 100644
89242--- a/net/8021q/vlan.c
89243+++ b/net/8021q/vlan.c
89244@@ -469,7 +469,7 @@ out:
89245 return NOTIFY_DONE;
89246 }
89247
89248-static struct notifier_block vlan_notifier_block __read_mostly = {
89249+static struct notifier_block vlan_notifier_block = {
89250 .notifier_call = vlan_device_event,
89251 };
89252
89253@@ -544,8 +544,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
89254 err = -EPERM;
89255 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
89256 break;
89257- if ((args.u.name_type >= 0) &&
89258- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
89259+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
89260 struct vlan_net *vn;
89261
89262 vn = net_generic(net, vlan_net_id);
89263diff --git a/net/9p/mod.c b/net/9p/mod.c
89264index 6ab36ae..6f1841b 100644
89265--- a/net/9p/mod.c
89266+++ b/net/9p/mod.c
89267@@ -84,7 +84,7 @@ static LIST_HEAD(v9fs_trans_list);
89268 void v9fs_register_trans(struct p9_trans_module *m)
89269 {
89270 spin_lock(&v9fs_trans_lock);
89271- list_add_tail(&m->list, &v9fs_trans_list);
89272+ pax_list_add_tail((struct list_head *)&m->list, &v9fs_trans_list);
89273 spin_unlock(&v9fs_trans_lock);
89274 }
89275 EXPORT_SYMBOL(v9fs_register_trans);
89276@@ -97,7 +97,7 @@ EXPORT_SYMBOL(v9fs_register_trans);
89277 void v9fs_unregister_trans(struct p9_trans_module *m)
89278 {
89279 spin_lock(&v9fs_trans_lock);
89280- list_del_init(&m->list);
89281+ pax_list_del_init((struct list_head *)&m->list);
89282 spin_unlock(&v9fs_trans_lock);
89283 }
89284 EXPORT_SYMBOL(v9fs_unregister_trans);
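
v9fs_register_trans() and v9fs_unregister_trans() now go through pax_list_add_tail()/pax_list_del_init(). With this patch's constification, struct p9_trans_module instances are const and, under KERNEXEC, live in read-only memory; the explicit (struct list_head *) casts strip the const, and the pax_list_* helpers perform the pointer updates inside a brief kernel write window (pax_open_kernel()/pax_close_kernel(), which appear verbatim in later hunks). A rough userspace analogue follows, with mprotect() standing in for the write-window primitives; this illustrates the idea, not the PaX implementation.

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

struct list_head { struct list_head *next, *prev; };

static void list_add_tail_model(struct list_head *new, struct list_head *head)
{
    new->prev = head->prev;
    new->next = head;
    head->prev->next = new;
    head->prev = new;
}

int main(void)
{
    long pg = sysconf(_SC_PAGESIZE);
    struct list_head head = { &head, &head };

    /* The transport's list node sits in memory we keep read-only, the
     * way the constified p9_trans_module does in the hardened kernel. */
    struct list_head *node = mmap(NULL, pg, PROT_READ | PROT_WRITE,
                                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (node == MAP_FAILED)
        return 1;
    node->next = node->prev = node;
    mprotect(node, pg, PROT_READ);                /* steady state: r/o  */

    mprotect(node, pg, PROT_READ | PROT_WRITE);   /* pax_open_kernel()  */
    list_add_tail_model(node, &head);
    mprotect(node, pg, PROT_READ);                /* pax_close_kernel() */

    printf("transport registered: %s\n", head.next == node ? "yes" : "no");
    munmap(node, pg);
    return 0;
}
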
89285diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
89286index 02efb25..41541a9 100644
89287--- a/net/9p/trans_fd.c
89288+++ b/net/9p/trans_fd.c
89289@@ -425,7 +425,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
89290 oldfs = get_fs();
89291 set_fs(get_ds());
89292 /* The cast to a user pointer is valid due to the set_fs() */
89293- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
89294+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
89295 set_fs(oldfs);
89296
89297 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
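
The p9_fd_write() change is purely about address-space annotation: the kernel buffer is deliberately handed to vfs_write() as a user pointer (legal here because of the surrounding set_fs(KERNEL_DS)), and the patch's preferred spelling for that deliberate crossing is __force_user rather than __force ... __user. The attributes only mean something to sparse; below is a compile-anywhere sketch of the convention, with the attributes stubbed out so it builds with a plain compiler.

#ifdef __CHECKER__
# define __user       __attribute__((noderef, address_space(1)))
# define __force      __attribute__((force))
# define __force_user __force __user
#else
# define __user
# define __force
# define __force_user
#endif

#include <stdio.h>
#include <string.h>

/* Accepts only pointers tagged as userspace, like vfs_write() does. */
static size_t write_from_user(char *dst, const char __user *src, size_t n)
{
    memcpy(dst, (const char __force *)src, n);   /* annotated crossing */
    return n;
}

int main(void)
{
    char kbuf[] = "ninep";          /* a kernel-side buffer, notionally */
    char out[8] = { 0 };

    /* The p9_fd_write() trick: cast the kernel pointer into the user
     * address space for the duration of the call. */
    write_from_user(out, (const char __force_user *)kbuf, sizeof(kbuf));
    printf("%s\n", out);
    return 0;
}

The same __force_user/__force_kernel substitutions recur throughout net/compat.c, net/core/iovec.c and net/core/scm.c below.
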
89298diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
89299index 876fbe8..8bbea9f 100644
89300--- a/net/atm/atm_misc.c
89301+++ b/net/atm/atm_misc.c
89302@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
89303 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
89304 return 1;
89305 atm_return(vcc, truesize);
89306- atomic_inc(&vcc->stats->rx_drop);
89307+ atomic_inc_unchecked(&vcc->stats->rx_drop);
89308 return 0;
89309 }
89310 EXPORT_SYMBOL(atm_charge);
89311@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
89312 }
89313 }
89314 atm_return(vcc, guess);
89315- atomic_inc(&vcc->stats->rx_drop);
89316+ atomic_inc_unchecked(&vcc->stats->rx_drop);
89317 return NULL;
89318 }
89319 EXPORT_SYMBOL(atm_alloc_charge);
89320@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
89321
89322 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
89323 {
89324-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
89325+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
89326 __SONET_ITEMS
89327 #undef __HANDLE_ITEM
89328 }
89329@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
89330
89331 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
89332 {
89333-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
89334+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
89335 __SONET_ITEMS
89336 #undef __HANDLE_ITEM
89337 }
89338diff --git a/net/atm/lec.h b/net/atm/lec.h
89339index 4149db1..f2ab682 100644
89340--- a/net/atm/lec.h
89341+++ b/net/atm/lec.h
89342@@ -48,7 +48,7 @@ struct lane2_ops {
89343 const u8 *tlvs, u32 sizeoftlvs);
89344 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
89345 const u8 *tlvs, u32 sizeoftlvs);
89346-};
89347+} __no_const;
89348
89349 /*
89350 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
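
The } __no_const; annotation on struct lane2_ops is the counterpart of the constification mentioned above: the plugin treats structures composed purely of function pointers as implicitly const, and __no_const opts a structure out when its hooks really are assigned at run time, as LANE2's are. A sketch of the effect; stock compilers do not know the attribute, so it is defined away here.

#include <stdio.h>

#define __no_const  /* under the constify plugin: an opt-out attribute */

struct lane2_ops_model {
    int  (*resolve)(int frame);
    void (*associate_indicator)(int frame);
} __no_const;

static int resolve_impl(int frame) { return frame + 1; }

int main(void)
{
    struct lane2_ops_model ops = { 0 };

    ops.resolve = resolve_impl;   /* runtime assignment is the reason
                                     this struct cannot be const */
    printf("%d\n", ops.resolve(41));
    return 0;
}
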
89351diff --git a/net/atm/proc.c b/net/atm/proc.c
89352index bbb6461..cf04016 100644
89353--- a/net/atm/proc.c
89354+++ b/net/atm/proc.c
89355@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
89356 const struct k_atm_aal_stats *stats)
89357 {
89358 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
89359- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
89360- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
89361- atomic_read(&stats->rx_drop));
89362+ atomic_read_unchecked(&stats->tx), atomic_read_unchecked(&stats->tx_err),
89363+ atomic_read_unchecked(&stats->rx), atomic_read_unchecked(&stats->rx_err),
89364+ atomic_read_unchecked(&stats->rx_drop));
89365 }
89366
89367 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
89368diff --git a/net/atm/resources.c b/net/atm/resources.c
89369index 0447d5d..3cf4728 100644
89370--- a/net/atm/resources.c
89371+++ b/net/atm/resources.c
89372@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
89373 static void copy_aal_stats(struct k_atm_aal_stats *from,
89374 struct atm_aal_stats *to)
89375 {
89376-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
89377+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
89378 __AAL_STAT_ITEMS
89379 #undef __HANDLE_ITEM
89380 }
89381@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
89382 static void subtract_aal_stats(struct k_atm_aal_stats *from,
89383 struct atm_aal_stats *to)
89384 {
89385-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
89386+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
89387 __AAL_STAT_ITEMS
89388 #undef __HANDLE_ITEM
89389 }
89390diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
89391index d5744b7..506bae3 100644
89392--- a/net/ax25/sysctl_net_ax25.c
89393+++ b/net/ax25/sysctl_net_ax25.c
89394@@ -152,7 +152,7 @@ int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
89395 {
89396 char path[sizeof("net/ax25/") + IFNAMSIZ];
89397 int k;
89398- struct ctl_table *table;
89399+ ctl_table_no_const *table;
89400
89401 table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
89402 if (!table)
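
ax25_register_dev_sysctl() duplicates a template table with kmemdup() and then patches the copy, so the local pointer becomes ctl_table_no_const: under constification the ctl_table typedef itself is const, and a separate non-const alias is needed for legitimately writable copies. A minimal model of the two-typedef trick follows; the struct fields are stand-ins, not the kernel's ctl_table layout.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct ctl_table_model { const char *procname; void *data; };
typedef const struct ctl_table_model ctl_table_model;      /* constified */
typedef struct ctl_table_model ctl_table_no_const_model;   /* writable   */

static ctl_table_model ax25_param_template[] = {
    { "ip_default_mode", NULL },
};

int main(void)
{
    static int per_device_value = 1;

    /* kmemdup() stand-in: duplicate the read-only template... */
    ctl_table_no_const_model *table = malloc(sizeof(ax25_param_template));
    if (!table)
        return 1;
    memcpy(table, ax25_param_template, sizeof(ax25_param_template));

    /* ...then patch the copy; only the non-const alias allows this. */
    table[0].data = &per_device_value;
    printf("%s -> %d\n", table[0].procname, *(int *)table[0].data);
    free(table);
    return 0;
}

The identical pattern shows up below in net/core/neighbour.c and net/core/sysctl_net_core.c.
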
89403diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
89404index f680ee1..97e3542 100644
89405--- a/net/batman-adv/bat_iv_ogm.c
89406+++ b/net/batman-adv/bat_iv_ogm.c
89407@@ -79,7 +79,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
89408
89409 /* randomize initial seqno to avoid collision */
89410 get_random_bytes(&random_seqno, sizeof(random_seqno));
89411- atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
89412+ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, random_seqno);
89413
89414 hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
89415 ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
89416@@ -627,9 +627,9 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
89417 batadv_ogm_packet = (struct batadv_ogm_packet *)(*ogm_buff);
89418
89419 /* change sequence number to network order */
89420- seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
89421+ seqno = (uint32_t)atomic_read_unchecked(&hard_iface->bat_iv.ogm_seqno);
89422 batadv_ogm_packet->seqno = htonl(seqno);
89423- atomic_inc(&hard_iface->bat_iv.ogm_seqno);
89424+ atomic_inc_unchecked(&hard_iface->bat_iv.ogm_seqno);
89425
89426 batadv_ogm_packet->ttvn = atomic_read(&bat_priv->tt.vn);
89427 batadv_ogm_packet->tt_crc = htons(bat_priv->tt.local_crc);
89428@@ -1037,7 +1037,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
89429 return;
89430
89431 /* could be changed by schedule_own_packet() */
89432- if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno);
89433+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->bat_iv.ogm_seqno);
89434
89435 if (batadv_ogm_packet->flags & BATADV_DIRECTLINK)
89436 has_directlink_flag = 1;
89437diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
89438index de27b31..7058bfe 100644
89439--- a/net/batman-adv/bridge_loop_avoidance.c
89440+++ b/net/batman-adv/bridge_loop_avoidance.c
89441@@ -1522,6 +1522,8 @@ out:
89442 * in these cases, the skb is further handled by this function and
89443 * returns 1, otherwise it returns 0 and the caller shall further
89444 * process the skb.
89445+ *
89446+ * This call might reallocate skb data.
89447 */
89448 int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid)
89449 {
89450diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
89451index f105219..7614af3 100644
89452--- a/net/batman-adv/gateway_client.c
89453+++ b/net/batman-adv/gateway_client.c
89454@@ -508,6 +508,7 @@ out:
89455 return 0;
89456 }
89457
89458+/* this call might reallocate skb data */
89459 static bool batadv_is_type_dhcprequest(struct sk_buff *skb, int header_len)
89460 {
89461 int ret = false;
89462@@ -568,6 +569,7 @@ out:
89463 return ret;
89464 }
89465
89466+/* this call might reallocate skb data */
89467 bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
89468 {
89469 struct ethhdr *ethhdr;
89470@@ -619,6 +621,12 @@ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
89471
89472 if (!pskb_may_pull(skb, *header_len + sizeof(*udphdr)))
89473 return false;
89474+
89475+ /* skb->data might have been reallocated by pskb_may_pull() */
89476+ ethhdr = (struct ethhdr *)skb->data;
89477+ if (ntohs(ethhdr->h_proto) == ETH_P_8021Q)
89478+ ethhdr = (struct ethhdr *)(skb->data + VLAN_HLEN);
89479+
89480 udphdr = (struct udphdr *)(skb->data + *header_len);
89481 *header_len += sizeof(*udphdr);
89482
89483@@ -634,12 +642,14 @@ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
89484 return true;
89485 }
89486
89487+/* this call might reallocate skb data */
89488 bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
89489- struct sk_buff *skb, struct ethhdr *ethhdr)
89490+ struct sk_buff *skb)
89491 {
89492 struct batadv_neigh_node *neigh_curr = NULL, *neigh_old = NULL;
89493 struct batadv_orig_node *orig_dst_node = NULL;
89494 struct batadv_gw_node *curr_gw = NULL;
89495+ struct ethhdr *ethhdr;
89496 bool ret, out_of_range = false;
89497 unsigned int header_len = 0;
89498 uint8_t curr_tq_avg;
89499@@ -648,6 +658,7 @@ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
89500 if (!ret)
89501 goto out;
89502
89503+ ethhdr = (struct ethhdr *)skb->data;
89504 orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
89505 ethhdr->h_dest);
89506 if (!orig_dst_node)
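
All of the batman-adv changes in this stretch chase a single hazard: pskb_may_pull(), and anything that calls into it such as batadv_bla_tx(), may reallocate skb->data, leaving any ethhdr pointer computed beforehand dangling. The fix is always the same, re-derive the header from skb->data after the call, which is also why batadv_gw_out_of_range() loses its ethhdr parameter in the next hunks. A small userspace model of the pattern; the fake_* names are inventions for this sketch.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct fake_skb { unsigned char *data; size_t len; };

/* Like pskb_may_pull(): ensure 'need' linear bytes, possibly by
 * reallocating (and therefore moving) the data buffer. */
static int fake_pskb_may_pull(struct fake_skb *skb, size_t need)
{
    unsigned char *bigger;

    if (skb->len >= need)
        return 1;
    bigger = realloc(skb->data, need);
    if (!bigger)
        return 0;
    memset(bigger + skb->len, 0, need - skb->len);
    skb->data = bigger;
    skb->len = need;
    return 1;
}

int main(void)
{
    struct fake_skb skb = { calloc(1, 14), 14 };
    unsigned char *ethhdr = skb.data;     /* cached before the pull */

    if (!skb.data || !fake_pskb_may_pull(&skb, 64))
        return 1;

    ethhdr = skb.data;   /* refresh: the cached pointer may now dangle */
    printf("eth header now at %p\n", (void *)ethhdr);
    free(skb.data);
    return 0;
}
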
89507diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h
89508index 039902d..1037d75 100644
89509--- a/net/batman-adv/gateway_client.h
89510+++ b/net/batman-adv/gateway_client.h
89511@@ -34,7 +34,6 @@ void batadv_gw_node_delete(struct batadv_priv *bat_priv,
89512 void batadv_gw_node_purge(struct batadv_priv *bat_priv);
89513 int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset);
89514 bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len);
89515-bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
89516- struct sk_buff *skb, struct ethhdr *ethhdr);
89517+bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, struct sk_buff *skb);
89518
89519 #endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */
89520diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
89521index 522243a..b48c0ef 100644
89522--- a/net/batman-adv/hard-interface.c
89523+++ b/net/batman-adv/hard-interface.c
89524@@ -401,7 +401,7 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
89525 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
89526 dev_add_pack(&hard_iface->batman_adv_ptype);
89527
89528- atomic_set(&hard_iface->frag_seqno, 1);
89529+ atomic_set_unchecked(&hard_iface->frag_seqno, 1);
89530 batadv_info(hard_iface->soft_iface, "Adding interface: %s\n",
89531 hard_iface->net_dev->name);
89532
89533@@ -550,7 +550,7 @@ batadv_hardif_add_interface(struct net_device *net_dev)
89534 /* This can't be called via a bat_priv callback because
89535 * we have no bat_priv yet.
89536 */
89537- atomic_set(&hard_iface->bat_iv.ogm_seqno, 1);
89538+ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, 1);
89539 hard_iface->bat_iv.ogm_buff = NULL;
89540
89541 return hard_iface;
89542diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
89543index 819dfb0..226bacd 100644
89544--- a/net/batman-adv/soft-interface.c
89545+++ b/net/batman-adv/soft-interface.c
89546@@ -180,6 +180,9 @@ static int batadv_interface_tx(struct sk_buff *skb,
89547 if (batadv_bla_tx(bat_priv, skb, vid))
89548 goto dropped;
89549
89550+ /* skb->data might have been reallocated by batadv_bla_tx() */
89551+ ethhdr = (struct ethhdr *)skb->data;
89552+
89553 /* Register the client MAC in the transtable */
89554 if (!is_multicast_ether_addr(ethhdr->h_source))
89555 batadv_tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif);
89556@@ -220,6 +223,10 @@ static int batadv_interface_tx(struct sk_buff *skb,
89557 default:
89558 break;
89559 }
89560+
89561+ /* reminder: ethhdr might have become unusable from here on
89562+ * (batadv_gw_is_dhcp_target() might have reallocated skb data)
89563+ */
89564 }
89565
89566 /* ethernet packet should be broadcasted */
89567@@ -253,7 +260,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
89568 primary_if->net_dev->dev_addr, ETH_ALEN);
89569
89570 /* set broadcast sequence number */
89571- seqno = atomic_inc_return(&bat_priv->bcast_seqno);
89572+ seqno = atomic_inc_return_unchecked(&bat_priv->bcast_seqno);
89573 bcast_packet->seqno = htonl(seqno);
89574
89575 batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);
89576@@ -266,7 +273,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
89577 /* unicast packet */
89578 } else {
89579 if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_OFF) {
89580- ret = batadv_gw_out_of_range(bat_priv, skb, ethhdr);
89581+ ret = batadv_gw_out_of_range(bat_priv, skb);
89582 if (ret)
89583 goto dropped;
89584 }
89585@@ -472,7 +479,7 @@ static int batadv_softif_init_late(struct net_device *dev)
89586 atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
89587
89588 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
89589- atomic_set(&bat_priv->bcast_seqno, 1);
89590+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
89591 atomic_set(&bat_priv->tt.vn, 0);
89592 atomic_set(&bat_priv->tt.local_changes, 0);
89593 atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
89594diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
89595index aba8364..50fcbb8 100644
89596--- a/net/batman-adv/types.h
89597+++ b/net/batman-adv/types.h
89598@@ -51,7 +51,7 @@
89599 struct batadv_hard_iface_bat_iv {
89600 unsigned char *ogm_buff;
89601 int ogm_buff_len;
89602- atomic_t ogm_seqno;
89603+ atomic_unchecked_t ogm_seqno;
89604 };
89605
89606 /**
89607@@ -75,7 +75,7 @@ struct batadv_hard_iface {
89608 int16_t if_num;
89609 char if_status;
89610 struct net_device *net_dev;
89611- atomic_t frag_seqno;
89612+ atomic_unchecked_t frag_seqno;
89613 struct kobject *hardif_obj;
89614 atomic_t refcount;
89615 struct packet_type batman_adv_ptype;
89616@@ -558,7 +558,7 @@ struct batadv_priv {
89617 #ifdef CONFIG_BATMAN_ADV_DEBUG
89618 atomic_t log_level;
89619 #endif
89620- atomic_t bcast_seqno;
89621+ atomic_unchecked_t bcast_seqno;
89622 atomic_t bcast_queue_left;
89623 atomic_t batman_queue_left;
89624 char num_ifaces;
89625diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
89626index 0bb3b59..0e3052e 100644
89627--- a/net/batman-adv/unicast.c
89628+++ b/net/batman-adv/unicast.c
89629@@ -270,7 +270,7 @@ int batadv_frag_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv,
89630 frag1->flags = BATADV_UNI_FRAG_HEAD | large_tail;
89631 frag2->flags = large_tail;
89632
89633- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
89634+ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
89635 frag1->seqno = htons(seqno - 1);
89636 frag2->seqno = htons(seqno);
89637
89638@@ -326,7 +326,9 @@ static bool batadv_unicast_push_and_fill_skb(struct sk_buff *skb, int hdr_size,
89639 * @skb: the skb containing the payload to encapsulate
89640 * @orig_node: the destination node
89641 *
89642- * Returns false if the payload could not be encapsulated or true otherwise
89643+ * Returns false if the payload could not be encapsulated or true otherwise.
89644+ *
89645+ * This call might reallocate skb data.
89646 */
89647 static bool batadv_unicast_prepare_skb(struct sk_buff *skb,
89648 struct batadv_orig_node *orig_node)
89649@@ -343,7 +345,9 @@ static bool batadv_unicast_prepare_skb(struct sk_buff *skb,
89650 * @orig_node: the destination node
89651 * @packet_subtype: the batman 4addr packet subtype to use
89652 *
89653- * Returns false if the payload could not be encapsulated or true otherwise
89654+ * Returns false if the payload could not be encapsulated or true otherwise.
89655+ *
89656+ * This call might reallocate skb data.
89657 */
89658 bool batadv_unicast_4addr_prepare_skb(struct batadv_priv *bat_priv,
89659 struct sk_buff *skb,
89660@@ -401,7 +405,7 @@ int batadv_unicast_generic_send_skb(struct batadv_priv *bat_priv,
89661 struct batadv_neigh_node *neigh_node;
89662 int data_len = skb->len;
89663 int ret = NET_RX_DROP;
89664- unsigned int dev_mtu;
89665+ unsigned int dev_mtu, header_len;
89666
89667 /* get routing information */
89668 if (is_multicast_ether_addr(ethhdr->h_dest)) {
89669@@ -429,10 +433,12 @@ find_router:
89670 switch (packet_type) {
89671 case BATADV_UNICAST:
89672 batadv_unicast_prepare_skb(skb, orig_node);
89673+ header_len = sizeof(struct batadv_unicast_packet);
89674 break;
89675 case BATADV_UNICAST_4ADDR:
89676 batadv_unicast_4addr_prepare_skb(bat_priv, skb, orig_node,
89677 packet_subtype);
89678+ header_len = sizeof(struct batadv_unicast_4addr_packet);
89679 break;
89680 default:
89681 /* this function supports UNICAST and UNICAST_4ADDR only. It
89682@@ -441,6 +447,7 @@ find_router:
89683 goto out;
89684 }
89685
89686+ ethhdr = (struct ethhdr *)(skb->data + header_len);
89687 unicast_packet = (struct batadv_unicast_packet *)skb->data;
89688
89689 /* inform the destination node that we are still missing a correct route
89690diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
89691index ace5e55..a65a1c0 100644
89692--- a/net/bluetooth/hci_core.c
89693+++ b/net/bluetooth/hci_core.c
89694@@ -2211,16 +2211,16 @@ int hci_register_dev(struct hci_dev *hdev)
89695 list_add(&hdev->list, &hci_dev_list);
89696 write_unlock(&hci_dev_list_lock);
89697
89698- hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
89699- WQ_MEM_RECLAIM, 1);
89700+ hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
89701+ WQ_MEM_RECLAIM, 1, hdev->name);
89702 if (!hdev->workqueue) {
89703 error = -ENOMEM;
89704 goto err;
89705 }
89706
89707- hdev->req_workqueue = alloc_workqueue(hdev->name,
89708+ hdev->req_workqueue = alloc_workqueue("%s",
89709 WQ_HIGHPRI | WQ_UNBOUND |
89710- WQ_MEM_RECLAIM, 1);
89711+ WQ_MEM_RECLAIM, 1, hdev->name);
89712 if (!hdev->req_workqueue) {
89713 destroy_workqueue(hdev->workqueue);
89714 error = -ENOMEM;
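
The hci_register_dev() hunk is a format-string fix: alloc_workqueue() treats its first argument as a printf-style format, so passing hdev->name directly would let a '%' in a device name be interpreted as a conversion. The canonical ("%s", name) form passes the name as data. The userspace analogue of the rule, shown with printf:

#include <stdio.h>

int main(void)
{
    /* Nothing guarantees a device name is conversion-free. */
    const char *hdev_name = "hci%s";

    /* printf(hdev_name) would consume a nonexistent argument, which is
     * undefined behaviour; the same applies to a name used directly as
     * a workqueue format. */
    printf("%s\n", hdev_name);   /* data passed as data: always safe */
    return 0;
}
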
89715diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
89716index 9bd7d95..6c4884f 100644
89717--- a/net/bluetooth/hci_sock.c
89718+++ b/net/bluetooth/hci_sock.c
89719@@ -934,7 +934,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
89720 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
89721 }
89722
89723- len = min_t(unsigned int, len, sizeof(uf));
89724+ len = min((size_t)len, sizeof(uf));
89725 if (copy_from_user(&uf, optval, len)) {
89726 err = -EFAULT;
89727 break;
89728diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
89729index 68843a2..30e9342 100644
89730--- a/net/bluetooth/l2cap_core.c
89731+++ b/net/bluetooth/l2cap_core.c
89732@@ -3507,8 +3507,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
89733 break;
89734
89735 case L2CAP_CONF_RFC:
89736- if (olen == sizeof(rfc))
89737- memcpy(&rfc, (void *)val, olen);
89738+ if (olen != sizeof(rfc))
89739+ break;
89740+
89741+ memcpy(&rfc, (void *)val, olen);
89742
89743 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
89744 rfc.mode != chan->mode)
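
The L2CAP_CONF_RFC hunk tightens option parsing: previously a response option with the wrong olen merely skipped the memcpy(), after which the code immediately below still consulted rfc, a stack variable the option had not filled in. Breaking out of the case makes a mis-sized option inert. A stripped-down model; the struct layout is invented for the sketch.

#include <stdio.h>
#include <string.h>

struct rfc_opt_model { unsigned char mode, txwin; };

static int parse_conf_rfc(const void *val, size_t olen)
{
    struct rfc_opt_model rfc;        /* deliberately uninitialised */

    if (olen != sizeof(rfc))         /* patched behaviour: reject...    */
        return -1;                   /* ...instead of skipping the copy
                                        and then acting on rfc anyway   */
    memcpy(&rfc, val, olen);
    printf("mode=%u txwin=%u\n", rfc.mode, rfc.txwin);
    return 0;
}

int main(void)
{
    unsigned char wire[1] = { 7 };   /* truncated option from a peer */

    printf("parse: %d\n", parse_conf_rfc(wire, sizeof(wire)));
    return 0;
}
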
89745diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
89746index 36fed40..be2eeb2 100644
89747--- a/net/bluetooth/l2cap_sock.c
89748+++ b/net/bluetooth/l2cap_sock.c
89749@@ -485,7 +485,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
89750 struct sock *sk = sock->sk;
89751 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
89752 struct l2cap_options opts;
89753- int len, err = 0;
89754+ int err = 0;
89755+ size_t len = optlen;
89756 u32 opt;
89757
89758 BT_DBG("sk %p", sk);
89759@@ -507,7 +508,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
89760 opts.max_tx = chan->max_tx;
89761 opts.txwin_size = chan->tx_win;
89762
89763- len = min_t(unsigned int, sizeof(opts), optlen);
89764+ len = min(sizeof(opts), len);
89765 if (copy_from_user((char *) &opts, optval, len)) {
89766 err = -EFAULT;
89767 break;
89768@@ -587,7 +588,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
89769 struct bt_security sec;
89770 struct bt_power pwr;
89771 struct l2cap_conn *conn;
89772- int len, err = 0;
89773+ int err = 0;
89774+ size_t len = optlen;
89775 u32 opt;
89776
89777 BT_DBG("sk %p", sk);
89778@@ -610,7 +612,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
89779
89780 sec.level = BT_SECURITY_LOW;
89781
89782- len = min_t(unsigned int, sizeof(sec), optlen);
89783+ len = min(sizeof(sec), len);
89784 if (copy_from_user((char *) &sec, optval, len)) {
89785 err = -EFAULT;
89786 break;
89787@@ -707,7 +709,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
89788
89789 pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
89790
89791- len = min_t(unsigned int, sizeof(pwr), optlen);
89792+ len = min(sizeof(pwr), len);
89793 if (copy_from_user((char *) &pwr, optval, len)) {
89794 err = -EFAULT;
89795 break;
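
The common thread in the hci_sock and l2cap_sock hunks (and the rfcomm one below) is length hygiene: option lengths are held in size_t from the start and clamped with min() against the kernel buffer, instead of round-tripping through a signed int and min_t(unsigned int, ...). The hazard being fenced off is sign extension at the copy boundary, since copy_from_user() takes an unsigned long count. A two-line demonstration of what a negative length becomes:

#include <stdio.h>

int main(void)
{
    int len = -1;            /* a length that went signed somewhere */
    unsigned long n = len;   /* implicit conversion at the
                                copy_from_user() boundary */

    /* On LP64 this prints 18446744073709551615: a sign-extended
     * negative length turns into an enormous copy size. */
    printf("requested copy of %lu bytes\n", n);
    return 0;
}
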
89796diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
89797index 30b3721..c1bd0a0 100644
89798--- a/net/bluetooth/rfcomm/sock.c
89799+++ b/net/bluetooth/rfcomm/sock.c
89800@@ -666,7 +666,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
89801 struct sock *sk = sock->sk;
89802 struct bt_security sec;
89803 int err = 0;
89804- size_t len;
89805+ size_t len = optlen;
89806 u32 opt;
89807
89808 BT_DBG("sk %p", sk);
89809@@ -688,7 +688,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
89810
89811 sec.level = BT_SECURITY_LOW;
89812
89813- len = min_t(unsigned int, sizeof(sec), optlen);
89814+ len = min(sizeof(sec), len);
89815 if (copy_from_user((char *) &sec, optval, len)) {
89816 err = -EFAULT;
89817 break;
89818diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
89819index b6e44ad..5b0d514 100644
89820--- a/net/bluetooth/rfcomm/tty.c
89821+++ b/net/bluetooth/rfcomm/tty.c
89822@@ -309,7 +309,7 @@ static void rfcomm_dev_del(struct rfcomm_dev *dev)
89823 BUG_ON(test_and_set_bit(RFCOMM_TTY_RELEASED, &dev->flags));
89824
89825 spin_lock_irqsave(&dev->port.lock, flags);
89826- if (dev->port.count > 0) {
89827+ if (atomic_read(&dev->port.count) > 0) {
89828 spin_unlock_irqrestore(&dev->port.lock, flags);
89829 return;
89830 }
89831@@ -659,10 +659,10 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
89832 return -ENODEV;
89833
89834 BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
89835- dev->channel, dev->port.count);
89836+ dev->channel, atomic_read(&dev->port.count));
89837
89838 spin_lock_irqsave(&dev->port.lock, flags);
89839- if (++dev->port.count > 1) {
89840+ if (atomic_inc_return(&dev->port.count) > 1) {
89841 spin_unlock_irqrestore(&dev->port.lock, flags);
89842 return 0;
89843 }
89844@@ -727,10 +727,10 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
89845 return;
89846
89847 BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
89848- dev->port.count);
89849+ atomic_read(&dev->port.count));
89850
89851 spin_lock_irqsave(&dev->port.lock, flags);
89852- if (!--dev->port.count) {
89853+ if (!atomic_dec_return(&dev->port.count)) {
89854 spin_unlock_irqrestore(&dev->port.lock, flags);
89855 if (dev->tty_dev->parent)
89856 device_move(dev->tty_dev, NULL, DPM_ORDER_DEV_LAST);
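
The rfcomm_tty hunks are part of this patch's tree-wide conversion of tty_port.count to an atomic_t: each increment or decrement and its test collapse into one indivisible read-modify-write (atomic_inc_return(), atomic_dec_return()), so the count stays coherent even where it is examined without the port lock. A C11-atomics model of the open/close logic; the function names are stand-ins.

#include <stdatomic.h>
#include <stdio.h>

static atomic_int port_count;   /* models dev->port.count as atomic_t */

static void tty_open_model(void)
{
    if (atomic_fetch_add(&port_count, 1) + 1 > 1)   /* inc_return() > 1 */
        puts("already open, reuse existing state");
    else
        puts("first open, set up the device");
}

static void tty_close_model(void)
{
    if (atomic_fetch_sub(&port_count, 1) - 1 == 0)  /* !dec_return() */
        puts("last close, release the device");
}

int main(void)
{
    tty_open_model();
    tty_open_model();
    tty_close_model();
    tty_close_model();
    return 0;
}
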
89857diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
89858index 3d110c4..4e1b2eb 100644
89859--- a/net/bridge/netfilter/ebtables.c
89860+++ b/net/bridge/netfilter/ebtables.c
89861@@ -1525,7 +1525,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
89862 tmp.valid_hooks = t->table->valid_hooks;
89863 }
89864 mutex_unlock(&ebt_mutex);
89865- if (copy_to_user(user, &tmp, *len) != 0){
89866+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
89867 BUGPRINT("c2u Didn't work\n");
89868 ret = -EFAULT;
89869 break;
89870@@ -2331,7 +2331,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
89871 goto out;
89872 tmp.valid_hooks = t->valid_hooks;
89873
89874- if (copy_to_user(user, &tmp, *len) != 0) {
89875+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
89876 ret = -EFAULT;
89877 break;
89878 }
89879@@ -2342,7 +2342,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
89880 tmp.entries_size = t->table->entries_size;
89881 tmp.valid_hooks = t->table->valid_hooks;
89882
89883- if (copy_to_user(user, &tmp, *len) != 0) {
89884+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
89885 ret = -EFAULT;
89886 break;
89887 }
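
All three ebtables hunks add the same guard: *len is fully user-controlled, and copy_to_user(user, &tmp, *len) with *len greater than sizeof(tmp) would copy kernel stack beyond tmp back to the caller, a classic infoleak. Rejecting oversized requests up front closes it. A sketch of the check with simplified types:

#include <stdio.h>
#include <string.h>

struct repl_info_model { char name[32]; unsigned int nentries; };

/* Models do_ebt_get_ctl(): tmp is a fixed-size kernel object, *len is
 * whatever userspace asked for. */
static int get_info_model(void *ubuf, size_t *len)
{
    struct repl_info_model tmp = { "filter", 3 };

    if (*len > sizeof(tmp))       /* the added bound */
        return -14;               /* -EFAULT, as in the patch */
    memcpy(ubuf, &tmp, *len);     /* copy_to_user() stand-in */
    return 0;
}

int main(void)
{
    char buf[256];
    size_t len = sizeof(buf);     /* deliberately oversized request */

    printf("oversized get_info: %d\n", get_info_model(buf, &len));
    return 0;
}
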
89888diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
89889index 2bd4b58..0dc30a1 100644
89890--- a/net/caif/cfctrl.c
89891+++ b/net/caif/cfctrl.c
89892@@ -10,6 +10,7 @@
89893 #include <linux/spinlock.h>
89894 #include <linux/slab.h>
89895 #include <linux/pkt_sched.h>
89896+#include <linux/sched.h>
89897 #include <net/caif/caif_layer.h>
89898 #include <net/caif/cfpkt.h>
89899 #include <net/caif/cfctrl.h>
89900@@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void)
89901 memset(&dev_info, 0, sizeof(dev_info));
89902 dev_info.id = 0xff;
89903 cfsrvl_init(&this->serv, 0, &dev_info, false);
89904- atomic_set(&this->req_seq_no, 1);
89905- atomic_set(&this->rsp_seq_no, 1);
89906+ atomic_set_unchecked(&this->req_seq_no, 1);
89907+ atomic_set_unchecked(&this->rsp_seq_no, 1);
89908 this->serv.layer.receive = cfctrl_recv;
89909 sprintf(this->serv.layer.name, "ctrl");
89910 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
89911@@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
89912 struct cfctrl_request_info *req)
89913 {
89914 spin_lock_bh(&ctrl->info_list_lock);
89915- atomic_inc(&ctrl->req_seq_no);
89916- req->sequence_no = atomic_read(&ctrl->req_seq_no);
89917+ atomic_inc_unchecked(&ctrl->req_seq_no);
89918+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
89919 list_add_tail(&req->list, &ctrl->list);
89920 spin_unlock_bh(&ctrl->info_list_lock);
89921 }
89922@@ -149,7 +150,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
89923 if (p != first)
89924 pr_warn("Requests are not received in order\n");
89925
89926- atomic_set(&ctrl->rsp_seq_no,
89927+ atomic_set_unchecked(&ctrl->rsp_seq_no,
89928 p->sequence_no);
89929 list_del(&p->list);
89930 goto out;
89931diff --git a/net/can/af_can.c b/net/can/af_can.c
89932index c4e5085..aa9efdf 100644
89933--- a/net/can/af_can.c
89934+++ b/net/can/af_can.c
89935@@ -862,7 +862,7 @@ static const struct net_proto_family can_family_ops = {
89936 };
89937
89938 /* notifier block for netdevice event */
89939-static struct notifier_block can_netdev_notifier __read_mostly = {
89940+static struct notifier_block can_netdev_notifier = {
89941 .notifier_call = can_notifier,
89942 };
89943
89944diff --git a/net/can/gw.c b/net/can/gw.c
89945index 3ee690e..00d581b 100644
89946--- a/net/can/gw.c
89947+++ b/net/can/gw.c
89948@@ -80,7 +80,6 @@ MODULE_PARM_DESC(max_hops,
89949 "default: " __stringify(CGW_DEFAULT_HOPS) ")");
89950
89951 static HLIST_HEAD(cgw_list);
89952-static struct notifier_block notifier;
89953
89954 static struct kmem_cache *cgw_cache __read_mostly;
89955
89956@@ -927,6 +926,10 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh)
89957 return err;
89958 }
89959
89960+static struct notifier_block notifier = {
89961+ .notifier_call = cgw_notifier
89962+};
89963+
89964 static __init int cgw_module_init(void)
89965 {
89966 /* sanitize given module parameter */
89967@@ -942,7 +945,6 @@ static __init int cgw_module_init(void)
89968 return -ENOMEM;
89969
89970 /* set notifier */
89971- notifier.notifier_call = cgw_notifier;
89972 register_netdevice_notifier(&notifier);
89973
89974 if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
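
The can/gw.c change is the mirror image of the __no_const cases: rather than assigning notifier.notifier_call at init time, the notifier_block is initialised statically, so the structure never needs a post-boot store and can be kept in read-only data by the constification machinery. A minimal illustration:

#include <stdio.h>

struct notifier_block_model {
    int (*notifier_call)(unsigned long event);
};

static int cgw_notifier_model(unsigned long event)
{
    return event == 0;
}

/* Build-time initialisation: no runtime write into the structure. */
static const struct notifier_block_model notifier = {
    .notifier_call = cgw_notifier_model,
};

int main(void)
{
    printf("notifier returned %d\n", notifier.notifier_call(0));
    return 0;
}
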
89975diff --git a/net/compat.c b/net/compat.c
89976index f0a1ba6..0541331 100644
89977--- a/net/compat.c
89978+++ b/net/compat.c
89979@@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
89980 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
89981 __get_user(kmsg->msg_flags, &umsg->msg_flags))
89982 return -EFAULT;
89983- kmsg->msg_name = compat_ptr(tmp1);
89984- kmsg->msg_iov = compat_ptr(tmp2);
89985- kmsg->msg_control = compat_ptr(tmp3);
89986+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
89987+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
89988+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
89989 return 0;
89990 }
89991
89992@@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
89993
89994 if (kern_msg->msg_namelen) {
89995 if (mode == VERIFY_READ) {
89996- int err = move_addr_to_kernel(kern_msg->msg_name,
89997+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
89998 kern_msg->msg_namelen,
89999 kern_address);
90000 if (err < 0)
90001@@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
90002 kern_msg->msg_name = NULL;
90003
90004 tot_len = iov_from_user_compat_to_kern(kern_iov,
90005- (struct compat_iovec __user *)kern_msg->msg_iov,
90006+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
90007 kern_msg->msg_iovlen);
90008 if (tot_len >= 0)
90009 kern_msg->msg_iov = kern_iov;
90010@@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
90011
90012 #define CMSG_COMPAT_FIRSTHDR(msg) \
90013 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
90014- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
90015+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
90016 (struct compat_cmsghdr __user *)NULL)
90017
90018 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
90019 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
90020 (ucmlen) <= (unsigned long) \
90021 ((mhdr)->msg_controllen - \
90022- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
90023+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
90024
90025 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
90026 struct compat_cmsghdr __user *cmsg, int cmsg_len)
90027 {
90028 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
90029- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
90030+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
90031 msg->msg_controllen)
90032 return NULL;
90033 return (struct compat_cmsghdr __user *)ptr;
90034@@ -219,7 +219,7 @@ Efault:
90035
90036 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
90037 {
90038- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
90039+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
90040 struct compat_cmsghdr cmhdr;
90041 struct compat_timeval ctv;
90042 struct compat_timespec cts[3];
90043@@ -275,7 +275,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
90044
90045 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
90046 {
90047- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
90048+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
90049 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
90050 int fdnum = scm->fp->count;
90051 struct file **fp = scm->fp->fp;
90052@@ -363,7 +363,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
90053 return -EFAULT;
90054 old_fs = get_fs();
90055 set_fs(KERNEL_DS);
90056- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
90057+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
90058 set_fs(old_fs);
90059
90060 return err;
90061@@ -424,7 +424,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
90062 len = sizeof(ktime);
90063 old_fs = get_fs();
90064 set_fs(KERNEL_DS);
90065- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
90066+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
90067 set_fs(old_fs);
90068
90069 if (!err) {
90070@@ -567,7 +567,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
90071 case MCAST_JOIN_GROUP:
90072 case MCAST_LEAVE_GROUP:
90073 {
90074- struct compat_group_req __user *gr32 = (void *)optval;
90075+ struct compat_group_req __user *gr32 = (void __user *)optval;
90076 struct group_req __user *kgr =
90077 compat_alloc_user_space(sizeof(struct group_req));
90078 u32 interface;
90079@@ -588,7 +588,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
90080 case MCAST_BLOCK_SOURCE:
90081 case MCAST_UNBLOCK_SOURCE:
90082 {
90083- struct compat_group_source_req __user *gsr32 = (void *)optval;
90084+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
90085 struct group_source_req __user *kgsr = compat_alloc_user_space(
90086 sizeof(struct group_source_req));
90087 u32 interface;
90088@@ -609,7 +609,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
90089 }
90090 case MCAST_MSFILTER:
90091 {
90092- struct compat_group_filter __user *gf32 = (void *)optval;
90093+ struct compat_group_filter __user *gf32 = (void __user *)optval;
90094 struct group_filter __user *kgf;
90095 u32 interface, fmode, numsrc;
90096
90097@@ -647,7 +647,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
90098 char __user *optval, int __user *optlen,
90099 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
90100 {
90101- struct compat_group_filter __user *gf32 = (void *)optval;
90102+ struct compat_group_filter __user *gf32 = (void __user *)optval;
90103 struct group_filter __user *kgf;
90104 int __user *koptlen;
90105 u32 interface, fmode, numsrc;
90106@@ -805,7 +805,7 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args)
90107
90108 if (call < SYS_SOCKET || call > SYS_SENDMMSG)
90109 return -EINVAL;
90110- if (copy_from_user(a, args, nas[call]))
90111+ if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
90112 return -EFAULT;
90113 a0 = a[0];
90114 a1 = a[1];
90115diff --git a/net/core/datagram.c b/net/core/datagram.c
90116index b71423d..0360434 100644
90117--- a/net/core/datagram.c
90118+++ b/net/core/datagram.c
90119@@ -295,7 +295,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
90120 }
90121
90122 kfree_skb(skb);
90123- atomic_inc(&sk->sk_drops);
90124+ atomic_inc_unchecked(&sk->sk_drops);
90125 sk_mem_reclaim_partial(sk);
90126
90127 return err;
90128diff --git a/net/core/dev.c b/net/core/dev.c
90129index 7ddbb31..3902452 100644
90130--- a/net/core/dev.c
90131+++ b/net/core/dev.c
90132@@ -1649,7 +1649,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
90133 {
90134 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
90135 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
90136- atomic_long_inc(&dev->rx_dropped);
90137+ atomic_long_inc_unchecked(&dev->rx_dropped);
90138 kfree_skb(skb);
90139 return NET_RX_DROP;
90140 }
90141@@ -1658,7 +1658,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
90142 skb_orphan(skb);
90143
90144 if (unlikely(!is_skb_forwardable(dev, skb))) {
90145- atomic_long_inc(&dev->rx_dropped);
90146+ atomic_long_inc_unchecked(&dev->rx_dropped);
90147 kfree_skb(skb);
90148 return NET_RX_DROP;
90149 }
90150@@ -2404,7 +2404,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
90151
90152 struct dev_gso_cb {
90153 void (*destructor)(struct sk_buff *skb);
90154-};
90155+} __no_const;
90156
90157 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
90158
90159@@ -3139,7 +3139,7 @@ enqueue:
90160
90161 local_irq_restore(flags);
90162
90163- atomic_long_inc(&skb->dev->rx_dropped);
90164+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
90165 kfree_skb(skb);
90166 return NET_RX_DROP;
90167 }
90168@@ -3211,7 +3211,7 @@ int netif_rx_ni(struct sk_buff *skb)
90169 }
90170 EXPORT_SYMBOL(netif_rx_ni);
90171
90172-static void net_tx_action(struct softirq_action *h)
90173+static void net_tx_action(void)
90174 {
90175 struct softnet_data *sd = &__get_cpu_var(softnet_data);
90176
90177@@ -3545,7 +3545,7 @@ ncls:
90178 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
90179 } else {
90180 drop:
90181- atomic_long_inc(&skb->dev->rx_dropped);
90182+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
90183 kfree_skb(skb);
90184 /* Jamal, now you will not able to escape explaining
90185 * me how you were going to use this. :-)
90186@@ -4153,7 +4153,7 @@ void netif_napi_del(struct napi_struct *napi)
90187 }
90188 EXPORT_SYMBOL(netif_napi_del);
90189
90190-static void net_rx_action(struct softirq_action *h)
90191+static void net_rx_action(void)
90192 {
90193 struct softnet_data *sd = &__get_cpu_var(softnet_data);
90194 unsigned long time_limit = jiffies + 2;
90195@@ -5590,7 +5590,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
90196 } else {
90197 netdev_stats_to_stats64(storage, &dev->stats);
90198 }
90199- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
90200+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
90201 return storage;
90202 }
90203 EXPORT_SYMBOL(dev_get_stats);
90204diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
90205index 5b7d0e1..cb960fc 100644
90206--- a/net/core/dev_ioctl.c
90207+++ b/net/core/dev_ioctl.c
90208@@ -365,9 +365,13 @@ void dev_load(struct net *net, const char *name)
90209 if (no_module && capable(CAP_NET_ADMIN))
90210 no_module = request_module("netdev-%s", name);
90211 if (no_module && capable(CAP_SYS_MODULE)) {
90212+#ifdef CONFIG_GRKERNSEC_MODHARDEN
90213+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
90214+#else
90215 if (!request_module("%s", name))
90216 pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
90217 name);
90218+#endif
90219 }
90220 }
90221 EXPORT_SYMBOL(dev_load);
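
dev_load() is where an ioctl on a nonexistent interface can trigger module auto-loading; under CONFIG_GRKERNSEC_MODHARDEN the request is routed through ___request_module() with a grsec_modharden_netdev tag so the loader can apply extra policy to these unprivileged-triggered loads. The exact ___request_module() signature is grsecurity-internal; the sketch below only models the routing.

#include <stdio.h>

#define CONFIG_GRKERNSEC_MODHARDEN 1   /* assumed on for the demo */

static int request_module_model(const char *tag, const char *name)
{
    /* stands in for request_module()/___request_module() */
    printf("would load module: %s%s%s\n", tag, *tag ? ":" : "", name);
    return 0;
}

static void dev_load_model(const char *name)
{
#ifdef CONFIG_GRKERNSEC_MODHARDEN
    /* tagged so the module loader can police netdev-triggered loads */
    request_module_model("grsec_modharden_netdev", name);
#else
    request_module_model("", name);
#endif
}

int main(void)
{
    dev_load_model("dummy0");
    return 0;
}
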
90222diff --git a/net/core/ethtool.c b/net/core/ethtool.c
90223index ce91766..3b71cdb 100644
90224--- a/net/core/ethtool.c
90225+++ b/net/core/ethtool.c
90226@@ -1319,10 +1319,19 @@ static int ethtool_get_dump_data(struct net_device *dev,
90227 if (ret)
90228 return ret;
90229
90230- len = (tmp.len > dump.len) ? dump.len : tmp.len;
90231+ len = min(tmp.len, dump.len);
90232 if (!len)
90233 return -EFAULT;
90234
90235+ /* Don't ever let the driver think there's more space available
90236+ * than it requested with .get_dump_flag().
90237+ */
90238+ dump.len = len;
90239+
90240+ /* Always allocate enough space to hold the whole thing so that the
90241+ * driver does not need to check the length and bother with partial
90242+ * dumping.
90243+ */
90244 data = vzalloc(tmp.len);
90245 if (!data)
90246 return -ENOMEM;
90247@@ -1330,6 +1339,16 @@ static int ethtool_get_dump_data(struct net_device *dev,
90248 if (ret)
90249 goto out;
90250
90251+ /* There are two sane possibilities:
90252+ * 1. The driver's .get_dump_data() does not touch dump.len.
90253+ * 2. Or it may set dump.len to how much it really writes, which
90254+ * should be tmp.len (or len if it can do a partial dump).
90255+ * In any case respond to userspace with the actual length of data
90256+ * it's receiving.
90257+ */
90258+ WARN_ON(dump.len != len && dump.len != tmp.len);
90259+ dump.len = len;
90260+
90261 if (copy_to_user(useraddr, &dump, sizeof(dump))) {
90262 ret = -EFAULT;
90263 goto out;
90264diff --git a/net/core/flow.c b/net/core/flow.c
90265index 7102f16..146b4bd 100644
90266--- a/net/core/flow.c
90267+++ b/net/core/flow.c
90268@@ -61,7 +61,7 @@ struct flow_cache {
90269 struct timer_list rnd_timer;
90270 };
90271
90272-atomic_t flow_cache_genid = ATOMIC_INIT(0);
90273+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
90274 EXPORT_SYMBOL(flow_cache_genid);
90275 static struct flow_cache flow_cache_global;
90276 static struct kmem_cache *flow_cachep __read_mostly;
90277@@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
90278
90279 static int flow_entry_valid(struct flow_cache_entry *fle)
90280 {
90281- if (atomic_read(&flow_cache_genid) != fle->genid)
90282+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
90283 return 0;
90284 if (fle->object && !fle->object->ops->check(fle->object))
90285 return 0;
90286@@ -258,7 +258,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
90287 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
90288 fcp->hash_count++;
90289 }
90290- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
90291+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
90292 flo = fle->object;
90293 if (!flo)
90294 goto ret_object;
90295@@ -279,7 +279,7 @@ nocache:
90296 }
90297 flo = resolver(net, key, family, dir, flo, ctx);
90298 if (fle) {
90299- fle->genid = atomic_read(&flow_cache_genid);
90300+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
90301 if (!IS_ERR(flo))
90302 fle->object = flo;
90303 else
90304diff --git a/net/core/iovec.c b/net/core/iovec.c
90305index de178e4..1dabd8b 100644
90306--- a/net/core/iovec.c
90307+++ b/net/core/iovec.c
90308@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
90309 if (m->msg_namelen) {
90310 if (mode == VERIFY_READ) {
90311 void __user *namep;
90312- namep = (void __user __force *) m->msg_name;
90313+ namep = (void __force_user *) m->msg_name;
90314 err = move_addr_to_kernel(namep, m->msg_namelen,
90315 address);
90316 if (err < 0)
90317@@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
90318 }
90319
90320 size = m->msg_iovlen * sizeof(struct iovec);
90321- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
90322+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
90323 return -EFAULT;
90324
90325 m->msg_iov = iov;
90326diff --git a/net/core/neighbour.c b/net/core/neighbour.c
90327index ce90b02..8752627 100644
90328--- a/net/core/neighbour.c
90329+++ b/net/core/neighbour.c
90330@@ -2771,7 +2771,7 @@ static int proc_unres_qlen(ctl_table *ctl, int write, void __user *buffer,
90331 size_t *lenp, loff_t *ppos)
90332 {
90333 int size, ret;
90334- ctl_table tmp = *ctl;
90335+ ctl_table_no_const tmp = *ctl;
90336
90337 tmp.extra1 = &zero;
90338 tmp.extra2 = &unres_qlen_max;
90339diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
90340index 569d355..79cf2d0 100644
90341--- a/net/core/net-procfs.c
90342+++ b/net/core/net-procfs.c
90343@@ -271,8 +271,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
90344 else
90345 seq_printf(seq, "%04x", ntohs(pt->type));
90346
90347+#ifdef CONFIG_GRKERNSEC_HIDESYM
90348+ seq_printf(seq, " %-8s %pf\n",
90349+ pt->dev ? pt->dev->name : "", NULL);
90350+#else
90351 seq_printf(seq, " %-8s %pf\n",
90352 pt->dev ? pt->dev->name : "", pt->func);
90353+#endif
90354 }
90355
90356 return 0;
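
ptype_seq_show() with CONFIG_GRKERNSEC_HIDESYM prints a null handler instead of the packet handler's address, since function addresses in a world-readable /proc/net/ptype undercut the secrecy of the kernel layout. A userspace model of the gating, with printf standing in for seq_printf():

#include <stdio.h>

#define CONFIG_GRKERNSEC_HIDESYM 1   /* assumed on for the demo */

static void show_ptype_model(const char *devname, void *handler)
{
#ifdef CONFIG_GRKERNSEC_HIDESYM
    printf(" %-8s %p\n", devname, (void *)NULL);   /* address withheld */
#else
    printf(" %-8s %p\n", devname, handler);
#endif
}

int main(void)
{
    static int handler_stub;   /* stands in for a packet handler */

    show_ptype_model("eth0", &handler_stub);
    return 0;
}

The sock_diag cookie hunks further down apply the same option to the socket addresses used as diag cookies.
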
90357diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
90358index 981fed3..536af34 100644
90359--- a/net/core/net-sysfs.c
90360+++ b/net/core/net-sysfs.c
90361@@ -1311,7 +1311,7 @@ void netdev_class_remove_file(struct class_attribute *class_attr)
90362 }
90363 EXPORT_SYMBOL(netdev_class_remove_file);
90364
90365-int netdev_kobject_init(void)
90366+int __init netdev_kobject_init(void)
90367 {
90368 kobj_ns_type_register(&net_ns_type_operations);
90369 return class_register(&net_class);
90370diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
90371index f9765203..9feaef8 100644
90372--- a/net/core/net_namespace.c
90373+++ b/net/core/net_namespace.c
90374@@ -443,7 +443,7 @@ static int __register_pernet_operations(struct list_head *list,
90375 int error;
90376 LIST_HEAD(net_exit_list);
90377
90378- list_add_tail(&ops->list, list);
90379+ pax_list_add_tail((struct list_head *)&ops->list, list);
90380 if (ops->init || (ops->id && ops->size)) {
90381 for_each_net(net) {
90382 error = ops_init(ops, net);
90383@@ -456,7 +456,7 @@ static int __register_pernet_operations(struct list_head *list,
90384
90385 out_undo:
90386 /* If I have an error cleanup all namespaces I initialized */
90387- list_del(&ops->list);
90388+ pax_list_del((struct list_head *)&ops->list);
90389 ops_exit_list(ops, &net_exit_list);
90390 ops_free_list(ops, &net_exit_list);
90391 return error;
90392@@ -467,7 +467,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
90393 struct net *net;
90394 LIST_HEAD(net_exit_list);
90395
90396- list_del(&ops->list);
90397+ pax_list_del((struct list_head *)&ops->list);
90398 for_each_net(net)
90399 list_add_tail(&net->exit_list, &net_exit_list);
90400 ops_exit_list(ops, &net_exit_list);
90401@@ -601,7 +601,7 @@ int register_pernet_device(struct pernet_operations *ops)
90402 mutex_lock(&net_mutex);
90403 error = register_pernet_operations(&pernet_list, ops);
90404 if (!error && (first_device == &pernet_list))
90405- first_device = &ops->list;
90406+ first_device = (struct list_head *)&ops->list;
90407 mutex_unlock(&net_mutex);
90408 return error;
90409 }
90410diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
90411index a08bd2b..c59bd7c 100644
90412--- a/net/core/rtnetlink.c
90413+++ b/net/core/rtnetlink.c
90414@@ -58,7 +58,7 @@ struct rtnl_link {
90415 rtnl_doit_func doit;
90416 rtnl_dumpit_func dumpit;
90417 rtnl_calcit_func calcit;
90418-};
90419+} __no_const;
90420
90421 static DEFINE_MUTEX(rtnl_mutex);
90422
90423@@ -299,10 +299,13 @@ int __rtnl_link_register(struct rtnl_link_ops *ops)
90424 if (rtnl_link_ops_get(ops->kind))
90425 return -EEXIST;
90426
90427- if (!ops->dellink)
90428- ops->dellink = unregister_netdevice_queue;
90429+ if (!ops->dellink) {
90430+ pax_open_kernel();
90431+ *(void **)&ops->dellink = unregister_netdevice_queue;
90432+ pax_close_kernel();
90433+ }
90434
90435- list_add_tail(&ops->list, &link_ops);
90436+ pax_list_add_tail((struct list_head *)&ops->list, &link_ops);
90437 return 0;
90438 }
90439 EXPORT_SYMBOL_GPL(__rtnl_link_register);
90440@@ -349,7 +352,7 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
90441 for_each_net(net) {
90442 __rtnl_kill_links(net, ops);
90443 }
90444- list_del(&ops->list);
90445+ pax_list_del((struct list_head *)&ops->list);
90446 }
90447 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
90448
90449@@ -2374,7 +2377,7 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
90450 struct nlattr *extfilt;
90451 u32 filter_mask = 0;
90452
90453- extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct rtgenmsg),
90454+ extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg),
90455 IFLA_EXT_MASK);
90456 if (extfilt)
90457 filter_mask = nla_get_u32(extfilt);
90458diff --git a/net/core/scm.c b/net/core/scm.c
90459index 03795d0..eaf7368 100644
90460--- a/net/core/scm.c
90461+++ b/net/core/scm.c
90462@@ -210,7 +210,7 @@ EXPORT_SYMBOL(__scm_send);
90463 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
90464 {
90465 struct cmsghdr __user *cm
90466- = (__force struct cmsghdr __user *)msg->msg_control;
90467+ = (struct cmsghdr __force_user *)msg->msg_control;
90468 struct cmsghdr cmhdr;
90469 int cmlen = CMSG_LEN(len);
90470 int err;
90471@@ -233,7 +233,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
90472 err = -EFAULT;
90473 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
90474 goto out;
90475- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
90476+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
90477 goto out;
90478 cmlen = CMSG_SPACE(len);
90479 if (msg->msg_controllen < cmlen)
90480@@ -249,7 +249,7 @@ EXPORT_SYMBOL(put_cmsg);
90481 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
90482 {
90483 struct cmsghdr __user *cm
90484- = (__force struct cmsghdr __user*)msg->msg_control;
90485+ = (struct cmsghdr __force_user *)msg->msg_control;
90486
90487 int fdmax = 0;
90488 int fdnum = scm->fp->count;
90489@@ -269,7 +269,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
90490 if (fdnum < fdmax)
90491 fdmax = fdnum;
90492
90493- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
90494+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
90495 i++, cmfptr++)
90496 {
90497 struct socket *sock;
90498diff --git a/net/core/skbuff.c b/net/core/skbuff.c
90499index 1c1738c..4cab7f0 100644
90500--- a/net/core/skbuff.c
90501+++ b/net/core/skbuff.c
90502@@ -3087,13 +3087,15 @@ void __init skb_init(void)
90503 skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
90504 sizeof(struct sk_buff),
90505 0,
90506- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
90507+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
90508+ SLAB_NO_SANITIZE,
90509 NULL);
90510 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
90511 (2*sizeof(struct sk_buff)) +
90512 sizeof(atomic_t),
90513 0,
90514- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
90515+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
90516+ SLAB_NO_SANITIZE,
90517 NULL);
90518 }
90519
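
skb_init() adds SLAB_NO_SANITIZE to the skb head caches: with this patch's slab sanitisation enabled, freed objects are normally wiped, and the flag exempts these extremely hot allocation paths from the wipe as a deliberate performance trade-off. A toy model of the flag's effect; the names and flag value are invented for the sketch.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define SLAB_NO_SANITIZE_MODEL 0x1u

static void cache_free_model(void *obj, size_t size, unsigned int flags)
{
    if (!(flags & SLAB_NO_SANITIZE_MODEL))
        memset(obj, 0, size);   /* sanitise: no stale data survives */
    free(obj);
}

int main(void)
{
    char *skb_head = malloc(64);

    if (!skb_head)
        return 1;
    strcpy(skb_head, "packet payload");
    /* Hot path: skip the wipe, exactly the trade-off the flag encodes. */
    cache_free_model(skb_head, 64, SLAB_NO_SANITIZE_MODEL);
    puts("skb head freed without sanitising (by design)");
    return 0;
}
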
90520diff --git a/net/core/sock.c b/net/core/sock.c
90521index d6d024c..6ea7ab4 100644
90522--- a/net/core/sock.c
90523+++ b/net/core/sock.c
90524@@ -390,7 +390,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
90525 struct sk_buff_head *list = &sk->sk_receive_queue;
90526
90527 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
90528- atomic_inc(&sk->sk_drops);
90529+ atomic_inc_unchecked(&sk->sk_drops);
90530 trace_sock_rcvqueue_full(sk, skb);
90531 return -ENOMEM;
90532 }
90533@@ -400,7 +400,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
90534 return err;
90535
90536 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
90537- atomic_inc(&sk->sk_drops);
90538+ atomic_inc_unchecked(&sk->sk_drops);
90539 return -ENOBUFS;
90540 }
90541
90542@@ -420,7 +420,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
90543 skb_dst_force(skb);
90544
90545 spin_lock_irqsave(&list->lock, flags);
90546- skb->dropcount = atomic_read(&sk->sk_drops);
90547+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
90548 __skb_queue_tail(list, skb);
90549 spin_unlock_irqrestore(&list->lock, flags);
90550
90551@@ -440,7 +440,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
90552 skb->dev = NULL;
90553
90554 if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
90555- atomic_inc(&sk->sk_drops);
90556+ atomic_inc_unchecked(&sk->sk_drops);
90557 goto discard_and_relse;
90558 }
90559 if (nested)
90560@@ -458,7 +458,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
90561 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
90562 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
90563 bh_unlock_sock(sk);
90564- atomic_inc(&sk->sk_drops);
90565+ atomic_inc_unchecked(&sk->sk_drops);
90566 goto discard_and_relse;
90567 }
90568
90569@@ -933,12 +933,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
90570 struct timeval tm;
90571 } v;
90572
90573- int lv = sizeof(int);
90574- int len;
90575+ unsigned int lv = sizeof(int);
90576+ unsigned int len;
90577
90578 if (get_user(len, optlen))
90579 return -EFAULT;
90580- if (len < 0)
90581+ if (len > INT_MAX)
90582 return -EINVAL;
90583
90584 memset(&v, 0, sizeof(v));
90585@@ -1090,11 +1090,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
90586
90587 case SO_PEERNAME:
90588 {
90589- char address[128];
90590+ char address[_K_SS_MAXSIZE];
90591
90592 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
90593 return -ENOTCONN;
90594- if (lv < len)
90595+ if (lv < len || sizeof address < len)
90596 return -EINVAL;
90597 if (copy_to_user(optval, address, len))
90598 return -EFAULT;
90599@@ -1161,7 +1161,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
90600
90601 if (len > lv)
90602 len = lv;
90603- if (copy_to_user(optval, &v, len))
90604+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
90605 return -EFAULT;
90606 lenout:
90607 if (put_user(len, optlen))
90608@@ -2277,7 +2277,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
90609 */
90610 smp_wmb();
90611 atomic_set(&sk->sk_refcnt, 1);
90612- atomic_set(&sk->sk_drops, 0);
90613+ atomic_set_unchecked(&sk->sk_drops, 0);
90614 }
90615 EXPORT_SYMBOL(sock_init_data);
90616
90617diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
90618index a0e9cf6..ef7f9ed 100644
90619--- a/net/core/sock_diag.c
90620+++ b/net/core/sock_diag.c
90621@@ -9,26 +9,33 @@
90622 #include <linux/inet_diag.h>
90623 #include <linux/sock_diag.h>
90624
90625-static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
90626+static const struct sock_diag_handler *sock_diag_handlers[AF_MAX] __read_only;
90627 static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
90628 static DEFINE_MUTEX(sock_diag_table_mutex);
90629
90630 int sock_diag_check_cookie(void *sk, __u32 *cookie)
90631 {
90632+#ifndef CONFIG_GRKERNSEC_HIDESYM
90633 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
90634 cookie[1] != INET_DIAG_NOCOOKIE) &&
90635 ((u32)(unsigned long)sk != cookie[0] ||
90636 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
90637 return -ESTALE;
90638 else
90639+#endif
90640 return 0;
90641 }
90642 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
90643
90644 void sock_diag_save_cookie(void *sk, __u32 *cookie)
90645 {
90646+#ifdef CONFIG_GRKERNSEC_HIDESYM
90647+ cookie[0] = 0;
90648+ cookie[1] = 0;
90649+#else
90650 cookie[0] = (u32)(unsigned long)sk;
90651 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
90652+#endif
90653 }
90654 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
90655
90656@@ -113,8 +120,11 @@ int sock_diag_register(const struct sock_diag_handler *hndl)
90657 mutex_lock(&sock_diag_table_mutex);
90658 if (sock_diag_handlers[hndl->family])
90659 err = -EBUSY;
90660- else
90661+ else {
90662+ pax_open_kernel();
90663 sock_diag_handlers[hndl->family] = hndl;
90664+ pax_close_kernel();
90665+ }
90666 mutex_unlock(&sock_diag_table_mutex);
90667
90668 return err;
90669@@ -130,7 +140,9 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
90670
90671 mutex_lock(&sock_diag_table_mutex);
90672 BUG_ON(sock_diag_handlers[family] != hnld);
90673+ pax_open_kernel();
90674 sock_diag_handlers[family] = NULL;
90675+ pax_close_kernel();
90676 mutex_unlock(&sock_diag_table_mutex);
90677 }
90678 EXPORT_SYMBOL_GPL(sock_diag_unregister);
90679diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
90680index cfdb46a..cef55e1 100644
90681--- a/net/core/sysctl_net_core.c
90682+++ b/net/core/sysctl_net_core.c
90683@@ -28,7 +28,7 @@ static int rps_sock_flow_sysctl(ctl_table *table, int write,
90684 {
90685 unsigned int orig_size, size;
90686 int ret, i;
90687- ctl_table tmp = {
90688+ ctl_table_no_const tmp = {
90689 .data = &size,
90690 .maxlen = sizeof(size),
90691 .mode = table->mode
90692@@ -211,13 +211,12 @@ static struct ctl_table netns_core_table[] = {
90693
90694 static __net_init int sysctl_core_net_init(struct net *net)
90695 {
90696- struct ctl_table *tbl;
90697+ ctl_table_no_const *tbl = NULL;
90698
90699 net->core.sysctl_somaxconn = SOMAXCONN;
90700
90701- tbl = netns_core_table;
90702 if (!net_eq(net, &init_net)) {
90703- tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
90704+ tbl = kmemdup(netns_core_table, sizeof(netns_core_table), GFP_KERNEL);
90705 if (tbl == NULL)
90706 goto err_dup;
90707
90708@@ -227,17 +226,16 @@ static __net_init int sysctl_core_net_init(struct net *net)
90709 if (net->user_ns != &init_user_ns) {
90710 tbl[0].procname = NULL;
90711 }
90712- }
90713-
90714- net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
90715+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
90716+ } else
90717+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", netns_core_table);
90718 if (net->core.sysctl_hdr == NULL)
90719 goto err_reg;
90720
90721 return 0;
90722
90723 err_reg:
90724- if (tbl != netns_core_table)
90725- kfree(tbl);
90726+ kfree(tbl);
90727 err_dup:
90728 return -ENOMEM;
90729 }
90730@@ -252,7 +250,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
90731 kfree(tbl);
90732 }
90733
90734-static __net_initdata struct pernet_operations sysctl_core_ops = {
90735+static __net_initconst struct pernet_operations sysctl_core_ops = {
90736 .init = sysctl_core_net_init,
90737 .exit = sysctl_core_net_exit,
90738 };
90739diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
90740index c21f200..bc4565b 100644
90741--- a/net/decnet/af_decnet.c
90742+++ b/net/decnet/af_decnet.c
90743@@ -465,6 +465,7 @@ static struct proto dn_proto = {
90744 .sysctl_rmem = sysctl_decnet_rmem,
90745 .max_header = DN_MAX_NSP_DATA_HEADER + 64,
90746 .obj_size = sizeof(struct dn_sock),
90747+ .slab_flags = SLAB_USERCOPY,
90748 };
90749
90750 static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
90751diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
90752index a55eecc..dd8428c 100644
90753--- a/net/decnet/sysctl_net_decnet.c
90754+++ b/net/decnet/sysctl_net_decnet.c
90755@@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
90756
90757 if (len > *lenp) len = *lenp;
90758
90759- if (copy_to_user(buffer, addr, len))
90760+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
90761 return -EFAULT;
90762
90763 *lenp = len;
90764@@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
90765
90766 if (len > *lenp) len = *lenp;
90767
90768- if (copy_to_user(buffer, devname, len))
90769+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
90770 return -EFAULT;
90771
90772 *lenp = len;
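Both DECnet handlers gain the same defensive guard: len was already clamped to *lenp (the caller's buffer), but nothing bounded it against the on-stack source, so any future change to how len is computed could turn the copy into a stack over-read. The added `len > sizeof x` test fails closed with -EFAULT. A self-contained illustration of the shape; DN_ASCBUF_LEN is taken from the DECnet headers, the rest is hypothetical:

    #include <stdio.h>
    #include <string.h>
    #include <errno.h>

    #define DN_ASCBUF_LEN 9   /* as in the kernel's DECnet headers */

    /* copy_to_user stand-in so the sketch runs in userspace */
    static int copy_out(void *dst, const void *src, size_t n)
    {
        memcpy(dst, src, n);
        return 0;
    }

    /* len is clamped to the caller's buffer, then additionally bounded by
     * the on-stack source before the copy ever happens. */
    static int read_address(void *buffer, size_t *lenp)
    {
        char addr[DN_ASCBUF_LEN];
        size_t len;

        len = (size_t)snprintf(addr, sizeof(addr), "%d.%d\n", 1, 2);
        if (len > *lenp)
            len = *lenp;
        if (len > sizeof(addr) || copy_out(buffer, addr, len))
            return -EFAULT;
        *lenp = len;
        return 0;
    }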
90773diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
90774index d01be2a..8976537 100644
90775--- a/net/ipv4/af_inet.c
90776+++ b/net/ipv4/af_inet.c
90777@@ -1703,13 +1703,9 @@ static int __init inet_init(void)
90778
90779 BUILD_BUG_ON(sizeof(struct inet_skb_parm) > FIELD_SIZEOF(struct sk_buff, cb));
90780
90781- sysctl_local_reserved_ports = kzalloc(65536 / 8, GFP_KERNEL);
90782- if (!sysctl_local_reserved_ports)
90783- goto out;
90784-
90785 rc = proto_register(&tcp_prot, 1);
90786 if (rc)
90787- goto out_free_reserved_ports;
90788+ goto out;
90789
90790 rc = proto_register(&udp_prot, 1);
90791 if (rc)
90792@@ -1818,8 +1814,6 @@ out_unregister_udp_proto:
90793 proto_unregister(&udp_prot);
90794 out_unregister_tcp_proto:
90795 proto_unregister(&tcp_prot);
90796-out_free_reserved_ports:
90797- kfree(sysctl_local_reserved_ports);
90798 goto out;
90799 }
90800
90801diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
90802index 2e7f194..0fa4d6d 100644
90803--- a/net/ipv4/ah4.c
90804+++ b/net/ipv4/ah4.c
90805@@ -420,7 +420,7 @@ static void ah4_err(struct sk_buff *skb, u32 info)
90806 return;
90807
90808 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
90809- atomic_inc(&flow_cache_genid);
90810+ atomic_inc_unchecked(&flow_cache_genid);
90811 rt_genid_bump(net);
90812
90813 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_AH, 0);
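flow_cache_genid here, and sk_drops, rid, ip_id_count, rt_genid and dev_addr_genid in later hunks, are switched to the *_unchecked atomic operations. Under PaX's REFCOUNT protection, plain atomic_t arithmetic traps on overflow to catch reference-count bugs; generation counters and drop statistics are expected to wrap, so they are retyped as atomic_unchecked_t, which opts out of the overflow check. A rough sketch of the distinction with a stand-in type (the real implementation is arch-specific assembly):

    /* Wrapping is fine on this type: no overflow detection. */
    typedef struct { volatile int counter; } atomic_unchecked_t;

    static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
    {
        __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
    }

    /* A plain atomic_t under PaX REFCOUNT would additionally trap when the
     * add overflows, so it stays reserved for true reference counts. */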
90814diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
90815index dfc39d4..0d4fa52 100644
90816--- a/net/ipv4/devinet.c
90817+++ b/net/ipv4/devinet.c
90818@@ -771,7 +771,7 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
90819 ci = nla_data(tb[IFA_CACHEINFO]);
90820 if (!ci->ifa_valid || ci->ifa_prefered > ci->ifa_valid) {
90821 err = -EINVAL;
90822- goto errout;
90823+ goto errout_free;
90824 }
90825 *pvalid_lft = ci->ifa_valid;
90826 *pprefered_lft = ci->ifa_prefered;
90827@@ -779,6 +779,8 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
90828
90829 return ifa;
90830
90831+errout_free:
90832+ inet_free_ifa(ifa);
90833 errout:
90834 return ERR_PTR(err);
90835 }
90836@@ -1529,7 +1531,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
90837 idx = 0;
90838 head = &net->dev_index_head[h];
90839 rcu_read_lock();
90840- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
90841+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
90842 net->dev_base_seq;
90843 hlist_for_each_entry_rcu(dev, head, index_hlist) {
90844 if (idx < s_idx)
90845@@ -1840,7 +1842,7 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
90846 idx = 0;
90847 head = &net->dev_index_head[h];
90848 rcu_read_lock();
90849- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
90850+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
90851 net->dev_base_seq;
90852 hlist_for_each_entry_rcu(dev, head, index_hlist) {
90853 if (idx < s_idx)
90854@@ -2065,7 +2067,7 @@ static int ipv4_doint_and_flush(ctl_table *ctl, int write,
90855 #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
90856 DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
90857
90858-static struct devinet_sysctl_table {
90859+static const struct devinet_sysctl_table {
90860 struct ctl_table_header *sysctl_header;
90861 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
90862 } devinet_sysctl = {
90863@@ -2183,7 +2185,7 @@ static __net_init int devinet_init_net(struct net *net)
90864 int err;
90865 struct ipv4_devconf *all, *dflt;
90866 #ifdef CONFIG_SYSCTL
90867- struct ctl_table *tbl = ctl_forward_entry;
90868+ ctl_table_no_const *tbl = NULL;
90869 struct ctl_table_header *forw_hdr;
90870 #endif
90871
90872@@ -2201,7 +2203,7 @@ static __net_init int devinet_init_net(struct net *net)
90873 goto err_alloc_dflt;
90874
90875 #ifdef CONFIG_SYSCTL
90876- tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
90877+ tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
90878 if (tbl == NULL)
90879 goto err_alloc_ctl;
90880
90881@@ -2221,7 +2223,10 @@ static __net_init int devinet_init_net(struct net *net)
90882 goto err_reg_dflt;
90883
90884 err = -ENOMEM;
90885- forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
90886+ if (!net_eq(net, &init_net))
90887+ forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
90888+ else
90889+ forw_hdr = register_net_sysctl(net, "net/ipv4", ctl_forward_entry);
90890 if (forw_hdr == NULL)
90891 goto err_reg_ctl;
90892 net->ipv4.forw_hdr = forw_hdr;
90893@@ -2237,8 +2242,7 @@ err_reg_ctl:
90894 err_reg_dflt:
90895 __devinet_sysctl_unregister(all);
90896 err_reg_all:
90897- if (tbl != ctl_forward_entry)
90898- kfree(tbl);
90899+ kfree(tbl);
90900 err_alloc_ctl:
90901 #endif
90902 if (dflt != &ipv4_devconf_dflt)
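Besides the usual sysctl duplication, the devinet.c hunk fixes a leak: rtm_to_ifaddr allocated ifa before validating IFA_CACHEINFO, but a validation failure jumped to errout without freeing it. The patch adds an errout_free label that calls inet_free_ifa(ifa) before falling through. The idiom reduced to its shape, with hypothetical helper names:

    static struct thing *parse_thing(const void *attr)
    {
        struct thing *t;
        int err;

        t = alloc_thing();                /* hypothetical allocator */
        if (t == NULL)
            return ERR_PTR(-ENOBUFS);

        if (!attr_is_valid(attr)) {       /* validation happens after alloc */
            err = -EINVAL;
            goto errout_free;             /* so failure must release t */
        }
        return t;

    errout_free:
        free_thing(t);
        return ERR_PTR(err);
    }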
90903diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
90904index 4cfe34d..d2fac8a 100644
90905--- a/net/ipv4/esp4.c
90906+++ b/net/ipv4/esp4.c
90907@@ -477,7 +477,7 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
90908 }
90909
90910 return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
90911- net_adj) & ~(align - 1)) + (net_adj - 2);
90912+ net_adj) & ~(align - 1)) + net_adj - 2;
90913 }
90914
90915 static void esp4_err(struct sk_buff *skb, u32 info)
90916@@ -503,7 +503,7 @@ static void esp4_err(struct sk_buff *skb, u32 info)
90917 return;
90918
90919 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
90920- atomic_inc(&flow_cache_genid);
90921+ atomic_inc_unchecked(&flow_cache_genid);
90922 rt_genid_bump(net);
90923
90924 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ESP, 0);
90925diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
90926index c7629a2..b62d139 100644
90927--- a/net/ipv4/fib_frontend.c
90928+++ b/net/ipv4/fib_frontend.c
90929@@ -1017,12 +1017,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
90930 #ifdef CONFIG_IP_ROUTE_MULTIPATH
90931 fib_sync_up(dev);
90932 #endif
90933- atomic_inc(&net->ipv4.dev_addr_genid);
90934+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
90935 rt_cache_flush(dev_net(dev));
90936 break;
90937 case NETDEV_DOWN:
90938 fib_del_ifaddr(ifa, NULL);
90939- atomic_inc(&net->ipv4.dev_addr_genid);
90940+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
90941 if (ifa->ifa_dev->ifa_list == NULL) {
90942 /* Last address was deleted from this interface.
90943 * Disable IP.
90944@@ -1058,7 +1058,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
90945 #ifdef CONFIG_IP_ROUTE_MULTIPATH
90946 fib_sync_up(dev);
90947 #endif
90948- atomic_inc(&net->ipv4.dev_addr_genid);
90949+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
90950 rt_cache_flush(net);
90951 break;
90952 case NETDEV_DOWN:
90953diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
90954index 8f6cb7a..34507f9 100644
90955--- a/net/ipv4/fib_semantics.c
90956+++ b/net/ipv4/fib_semantics.c
90957@@ -765,7 +765,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
90958 nh->nh_saddr = inet_select_addr(nh->nh_dev,
90959 nh->nh_gw,
90960 nh->nh_parent->fib_scope);
90961- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
90962+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
90963
90964 return nh->nh_saddr;
90965 }
90966diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
90967index 49616fe..6e8a13d 100644
90968--- a/net/ipv4/fib_trie.c
90969+++ b/net/ipv4/fib_trie.c
90970@@ -71,7 +71,6 @@
90971 #include <linux/init.h>
90972 #include <linux/list.h>
90973 #include <linux/slab.h>
90974-#include <linux/prefetch.h>
90975 #include <linux/export.h>
90976 #include <net/net_namespace.h>
90977 #include <net/ip.h>
90978@@ -1761,10 +1760,8 @@ static struct leaf *leaf_walk_rcu(struct tnode *p, struct rt_trie_node *c)
90979 if (!c)
90980 continue;
90981
90982- if (IS_LEAF(c)) {
90983- prefetch(rcu_dereference_rtnl(p->child[idx]));
90984+ if (IS_LEAF(c))
90985 return (struct leaf *) c;
90986- }
90987
90988 /* Rescan start scanning in new node */
90989 p = (struct tnode *) c;
90990diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
90991index 6acb541..9ea617d 100644
90992--- a/net/ipv4/inet_connection_sock.c
90993+++ b/net/ipv4/inet_connection_sock.c
90994@@ -37,7 +37,7 @@ struct local_ports sysctl_local_ports __read_mostly = {
90995 .range = { 32768, 61000 },
90996 };
90997
90998-unsigned long *sysctl_local_reserved_ports;
90999+unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)];
91000 EXPORT_SYMBOL(sysctl_local_reserved_ports);
91001
91002 void inet_get_local_port_range(int *low, int *high)
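This pairs with the af_inet.c hunk above: instead of kzalloc()ing a 65536-bit bitmap at boot (and threading a failure path through inet_init), the reserved-ports set becomes a static array sized to hold one bit per port, so the sysctl table can reference it at compile time (see the sysctl_net_ipv4.c hunk below). The sizing arithmetic, checked standalone:

    #include <stdio.h>

    /* One bit per 16-bit port number, expressed in unsigned longs. */
    static unsigned long reserved_ports[65536 / 8 / sizeof(unsigned long)];

    int main(void)
    {
        printf("%zu words, %zu bytes\n",
               sizeof(reserved_ports) / sizeof(reserved_ports[0]),
               sizeof(reserved_ports));   /* 8192 bytes either word size */
        return 0;
    }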
91003diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
91004index 6af375a..c493c74 100644
91005--- a/net/ipv4/inet_hashtables.c
91006+++ b/net/ipv4/inet_hashtables.c
91007@@ -18,12 +18,15 @@
91008 #include <linux/sched.h>
91009 #include <linux/slab.h>
91010 #include <linux/wait.h>
91011+#include <linux/security.h>
91012
91013 #include <net/inet_connection_sock.h>
91014 #include <net/inet_hashtables.h>
91015 #include <net/secure_seq.h>
91016 #include <net/ip.h>
91017
91018+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
91019+
91020 /*
91021 * Allocate and initialize a new local port bind bucket.
91022 * The bindhash mutex for snum's hash chain must be held here.
91023@@ -554,6 +557,8 @@ ok:
91024 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
91025 spin_unlock(&head->lock);
91026
91027+ gr_update_task_in_ip_table(current, inet_sk(sk));
91028+
91029 if (tw) {
91030 inet_twsk_deschedule(tw, death_row);
91031 while (twrefcnt) {
91032diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
91033index 000e3d2..5472da3 100644
91034--- a/net/ipv4/inetpeer.c
91035+++ b/net/ipv4/inetpeer.c
91036@@ -503,8 +503,8 @@ relookup:
91037 if (p) {
91038 p->daddr = *daddr;
91039 atomic_set(&p->refcnt, 1);
91040- atomic_set(&p->rid, 0);
91041- atomic_set(&p->ip_id_count,
91042+ atomic_set_unchecked(&p->rid, 0);
91043+ atomic_set_unchecked(&p->ip_id_count,
91044 (daddr->family == AF_INET) ?
91045 secure_ip_id(daddr->addr.a4) :
91046 secure_ipv6_id(daddr->addr.a6));
91047diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
91048index b66910a..cfe416e 100644
91049--- a/net/ipv4/ip_fragment.c
91050+++ b/net/ipv4/ip_fragment.c
91051@@ -282,7 +282,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
91052 return 0;
91053
91054 start = qp->rid;
91055- end = atomic_inc_return(&peer->rid);
91056+ end = atomic_inc_return_unchecked(&peer->rid);
91057 qp->rid = end;
91058
91059 rc = qp->q.fragments && (end - start) > max;
91060@@ -759,12 +759,11 @@ static struct ctl_table ip4_frags_ctl_table[] = {
91061
91062 static int __net_init ip4_frags_ns_ctl_register(struct net *net)
91063 {
91064- struct ctl_table *table;
91065+ ctl_table_no_const *table = NULL;
91066 struct ctl_table_header *hdr;
91067
91068- table = ip4_frags_ns_ctl_table;
91069 if (!net_eq(net, &init_net)) {
91070- table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
91071+ table = kmemdup(ip4_frags_ns_ctl_table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
91072 if (table == NULL)
91073 goto err_alloc;
91074
91075@@ -775,9 +774,10 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
91076 /* Don't export sysctls to unprivileged users */
91077 if (net->user_ns != &init_user_ns)
91078 table[0].procname = NULL;
91079- }
91080+ hdr = register_net_sysctl(net, "net/ipv4", table);
91081+ } else
91082+ hdr = register_net_sysctl(net, "net/ipv4", ip4_frags_ns_ctl_table);
91083
91084- hdr = register_net_sysctl(net, "net/ipv4", table);
91085 if (hdr == NULL)
91086 goto err_reg;
91087
91088@@ -785,8 +785,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
91089 return 0;
91090
91091 err_reg:
91092- if (!net_eq(net, &init_net))
91093- kfree(table);
91094+ kfree(table);
91095 err_alloc:
91096 return -ENOMEM;
91097 }
91098diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
91099index 855004f..9644112 100644
91100--- a/net/ipv4/ip_gre.c
91101+++ b/net/ipv4/ip_gre.c
91102@@ -115,7 +115,7 @@ static bool log_ecn_error = true;
91103 module_param(log_ecn_error, bool, 0644);
91104 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
91105
91106-static struct rtnl_link_ops ipgre_link_ops __read_mostly;
91107+static struct rtnl_link_ops ipgre_link_ops;
91108 static int ipgre_tunnel_init(struct net_device *dev);
91109
91110 static int ipgre_net_id __read_mostly;
91111@@ -572,7 +572,7 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
91112 if (daddr)
91113 memcpy(&iph->daddr, daddr, 4);
91114 if (iph->daddr)
91115- return t->hlen;
91116+ return t->hlen + sizeof(*iph);
91117
91118 return -(t->hlen + sizeof(*iph));
91119 }
91120@@ -919,7 +919,7 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
91121 [IFLA_GRE_PMTUDISC] = { .type = NLA_U8 },
91122 };
91123
91124-static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
91125+static struct rtnl_link_ops ipgre_link_ops = {
91126 .kind = "gre",
91127 .maxtype = IFLA_GRE_MAX,
91128 .policy = ipgre_policy,
91129@@ -933,7 +933,7 @@ static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
91130 .fill_info = ipgre_fill_info,
91131 };
91132
91133-static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
91134+static struct rtnl_link_ops ipgre_tap_ops = {
91135 .kind = "gretap",
91136 .maxtype = IFLA_GRE_MAX,
91137 .policy = ipgre_policy,
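Dropping __read_mostly from the rtnl_link_ops (here and for vti, ipip, sit and the ip6 tunnels below) looks like a constification side effect: grsecurity's plugin moves such ops structures into truly read-only memory, and the explicit .data..read_mostly placement would conflict with that; this is an inference from the surrounding pattern rather than something the patch states. A standalone illustration of why the two annotations cannot coexist:

    /* __read_mostly is just a section-placement attribute; an object the
     * constify plugin relocates to read-only memory cannot also request
     * .data..read_mostly, hence the annotation is dropped. */
    #define __read_mostly __attribute__((__section__(".data..read_mostly")))

    struct ops { int (*handler)(void); };

    static struct ops hot_ops __read_mostly = { 0 };   /* pre-patch form */
    static struct ops cold_ops = { 0 };                /* post-patch form */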
91138diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
91139index d9c4f11..02b82dbc 100644
91140--- a/net/ipv4/ip_sockglue.c
91141+++ b/net/ipv4/ip_sockglue.c
91142@@ -1152,7 +1152,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
91143 len = min_t(unsigned int, len, opt->optlen);
91144 if (put_user(len, optlen))
91145 return -EFAULT;
91146- if (copy_to_user(optval, opt->__data, len))
91147+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
91148+ copy_to_user(optval, opt->__data, len))
91149 return -EFAULT;
91150 return 0;
91151 }
91152@@ -1283,7 +1284,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
91153 if (sk->sk_type != SOCK_STREAM)
91154 return -ENOPROTOOPT;
91155
91156- msg.msg_control = optval;
91157+ msg.msg_control = (void __force_kernel *)optval;
91158 msg.msg_controllen = len;
91159 msg.msg_flags = flags;
91160
91161diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
91162index 17cc0ff..63856c4 100644
91163--- a/net/ipv4/ip_vti.c
91164+++ b/net/ipv4/ip_vti.c
91165@@ -47,7 +47,7 @@
91166 #define HASH_SIZE 16
91167 #define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&(HASH_SIZE-1))
91168
91169-static struct rtnl_link_ops vti_link_ops __read_mostly;
91170+static struct rtnl_link_ops vti_link_ops;
91171
91172 static int vti_net_id __read_mostly;
91173 struct vti_net {
91174@@ -840,7 +840,7 @@ static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
91175 [IFLA_VTI_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
91176 };
91177
91178-static struct rtnl_link_ops vti_link_ops __read_mostly = {
91179+static struct rtnl_link_ops vti_link_ops = {
91180 .kind = "vti",
91181 .maxtype = IFLA_VTI_MAX,
91182 .policy = vti_policy,
91183diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
91184index 59cb8c7..a72160c 100644
91185--- a/net/ipv4/ipcomp.c
91186+++ b/net/ipv4/ipcomp.c
91187@@ -48,7 +48,7 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
91188 return;
91189
91190 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
91191- atomic_inc(&flow_cache_genid);
91192+ atomic_inc_unchecked(&flow_cache_genid);
91193 rt_genid_bump(net);
91194
91195 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_COMP, 0);
91196diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
91197index efa1138..20dbba0 100644
91198--- a/net/ipv4/ipconfig.c
91199+++ b/net/ipv4/ipconfig.c
91200@@ -334,7 +334,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
91201
91202 mm_segment_t oldfs = get_fs();
91203 set_fs(get_ds());
91204- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
91205+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
91206 set_fs(oldfs);
91207 return res;
91208 }
91209@@ -345,7 +345,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
91210
91211 mm_segment_t oldfs = get_fs();
91212 set_fs(get_ds());
91213- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
91214+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
91215 set_fs(oldfs);
91216 return res;
91217 }
91218@@ -356,7 +356,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
91219
91220 mm_segment_t oldfs = get_fs();
91221 set_fs(get_ds());
91222- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
91223+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
91224 set_fs(oldfs);
91225 return res;
91226 }
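The ipconfig.c hunks change only an annotation: these early-boot helpers call ioctl implementations that take __user pointers, but pass kernel buffers under set_fs(get_ds()), which widens the "user" segment to cover kernel space for the duration. grsecurity tightens the user/kernel pointer split, so the cast becomes __force_user to tell sparse the crossing is deliberate. The idiom as a kernel-only fragment (set_fs has long since been removed upstream; this mirrors the patch, it is not a recommendation):

    mm_segment_t oldfs = get_fs();

    set_fs(get_ds());   /* let "user" accesses reach kernel memory */
    res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *)arg);
    set_fs(oldfs);      /* always restore the previous limit */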
91227diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
91228index 7cfc456..e726868 100644
91229--- a/net/ipv4/ipip.c
91230+++ b/net/ipv4/ipip.c
91231@@ -124,7 +124,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
91232 static int ipip_net_id __read_mostly;
91233
91234 static int ipip_tunnel_init(struct net_device *dev);
91235-static struct rtnl_link_ops ipip_link_ops __read_mostly;
91236+static struct rtnl_link_ops ipip_link_ops;
91237
91238 static int ipip_err(struct sk_buff *skb, u32 info)
91239 {
91240@@ -406,7 +406,7 @@ static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
91241 [IFLA_IPTUN_PMTUDISC] = { .type = NLA_U8 },
91242 };
91243
91244-static struct rtnl_link_ops ipip_link_ops __read_mostly = {
91245+static struct rtnl_link_ops ipip_link_ops = {
91246 .kind = "ipip",
91247 .maxtype = IFLA_IPTUN_MAX,
91248 .policy = ipip_policy,
91249diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
91250index 85a4f21..1beb1f5 100644
91251--- a/net/ipv4/netfilter/arp_tables.c
91252+++ b/net/ipv4/netfilter/arp_tables.c
91253@@ -880,14 +880,14 @@ static int compat_table_info(const struct xt_table_info *info,
91254 #endif
91255
91256 static int get_info(struct net *net, void __user *user,
91257- const int *len, int compat)
91258+ int len, int compat)
91259 {
91260 char name[XT_TABLE_MAXNAMELEN];
91261 struct xt_table *t;
91262 int ret;
91263
91264- if (*len != sizeof(struct arpt_getinfo)) {
91265- duprintf("length %u != %Zu\n", *len,
91266+ if (len != sizeof(struct arpt_getinfo)) {
91267+ duprintf("length %u != %Zu\n", len,
91268 sizeof(struct arpt_getinfo));
91269 return -EINVAL;
91270 }
91271@@ -924,7 +924,7 @@ static int get_info(struct net *net, void __user *user,
91272 info.size = private->size;
91273 strcpy(info.name, name);
91274
91275- if (copy_to_user(user, &info, *len) != 0)
91276+ if (copy_to_user(user, &info, len) != 0)
91277 ret = -EFAULT;
91278 else
91279 ret = 0;
91280@@ -1683,7 +1683,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
91281
91282 switch (cmd) {
91283 case ARPT_SO_GET_INFO:
91284- ret = get_info(sock_net(sk), user, len, 1);
91285+ ret = get_info(sock_net(sk), user, *len, 1);
91286 break;
91287 case ARPT_SO_GET_ENTRIES:
91288 ret = compat_get_entries(sock_net(sk), user, len);
91289@@ -1728,7 +1728,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
91290
91291 switch (cmd) {
91292 case ARPT_SO_GET_INFO:
91293- ret = get_info(sock_net(sk), user, len, 0);
91294+ ret = get_info(sock_net(sk), user, *len, 0);
91295 break;
91296
91297 case ARPT_SO_GET_ENTRIES:
91298diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
91299index d23118d..6ad7277 100644
91300--- a/net/ipv4/netfilter/ip_tables.c
91301+++ b/net/ipv4/netfilter/ip_tables.c
91302@@ -1068,14 +1068,14 @@ static int compat_table_info(const struct xt_table_info *info,
91303 #endif
91304
91305 static int get_info(struct net *net, void __user *user,
91306- const int *len, int compat)
91307+ int len, int compat)
91308 {
91309 char name[XT_TABLE_MAXNAMELEN];
91310 struct xt_table *t;
91311 int ret;
91312
91313- if (*len != sizeof(struct ipt_getinfo)) {
91314- duprintf("length %u != %zu\n", *len,
91315+ if (len != sizeof(struct ipt_getinfo)) {
91316+ duprintf("length %u != %zu\n", len,
91317 sizeof(struct ipt_getinfo));
91318 return -EINVAL;
91319 }
91320@@ -1112,7 +1112,7 @@ static int get_info(struct net *net, void __user *user,
91321 info.size = private->size;
91322 strcpy(info.name, name);
91323
91324- if (copy_to_user(user, &info, *len) != 0)
91325+ if (copy_to_user(user, &info, len) != 0)
91326 ret = -EFAULT;
91327 else
91328 ret = 0;
91329@@ -1966,7 +1966,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
91330
91331 switch (cmd) {
91332 case IPT_SO_GET_INFO:
91333- ret = get_info(sock_net(sk), user, len, 1);
91334+ ret = get_info(sock_net(sk), user, *len, 1);
91335 break;
91336 case IPT_SO_GET_ENTRIES:
91337 ret = compat_get_entries(sock_net(sk), user, len);
91338@@ -2013,7 +2013,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
91339
91340 switch (cmd) {
91341 case IPT_SO_GET_INFO:
91342- ret = get_info(sock_net(sk), user, len, 0);
91343+ ret = get_info(sock_net(sk), user, *len, 0);
91344 break;
91345
91346 case IPT_SO_GET_ENTRIES:
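The arptables/iptables (and later ip6tables) change converts get_info() from taking `const int *len` to a by-value int, hoisting the dereference into the callers. Reading the length twice through a pointer, once for the bounds check and once for copy_to_user, leaves a check/use gap; passing the value fixes both uses to a single snapshot. A standalone sketch of the by-value form, with memcpy standing in for copy_to_user:

    #include <string.h>
    #include <stdint.h>
    #include <errno.h>

    struct info { char name[32]; uint32_t size; };

    /* By-value length: the bounds check and the copy see the same value,
     * so no second read can disagree with the first. */
    static int get_info(void *user_buf, int len)
    {
        struct info info = { "filter", 128 };

        if (len != (int)sizeof(struct info))
            return -EINVAL;
        memcpy(user_buf, &info, len);   /* copy_to_user stand-in */
        return 0;
    }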
91347diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
91348index 7d93d62..cbbf2a3 100644
91349--- a/net/ipv4/ping.c
91350+++ b/net/ipv4/ping.c
91351@@ -843,7 +843,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
91352 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
91353 0, sock_i_ino(sp),
91354 atomic_read(&sp->sk_refcnt), sp,
91355- atomic_read(&sp->sk_drops), len);
91356+ atomic_read_unchecked(&sp->sk_drops), len);
91357 }
91358
91359 static int ping_seq_show(struct seq_file *seq, void *v)
91360diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
91361index dd44e0a..06dcca4 100644
91362--- a/net/ipv4/raw.c
91363+++ b/net/ipv4/raw.c
91364@@ -309,7 +309,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
91365 int raw_rcv(struct sock *sk, struct sk_buff *skb)
91366 {
91367 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
91368- atomic_inc(&sk->sk_drops);
91369+ atomic_inc_unchecked(&sk->sk_drops);
91370 kfree_skb(skb);
91371 return NET_RX_DROP;
91372 }
91373@@ -745,16 +745,20 @@ static int raw_init(struct sock *sk)
91374
91375 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
91376 {
91377+ struct icmp_filter filter;
91378+
91379 if (optlen > sizeof(struct icmp_filter))
91380 optlen = sizeof(struct icmp_filter);
91381- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
91382+ if (copy_from_user(&filter, optval, optlen))
91383 return -EFAULT;
91384+ raw_sk(sk)->filter = filter;
91385 return 0;
91386 }
91387
91388 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
91389 {
91390 int len, ret = -EFAULT;
91391+ struct icmp_filter filter;
91392
91393 if (get_user(len, optlen))
91394 goto out;
91395@@ -764,8 +768,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
91396 if (len > sizeof(struct icmp_filter))
91397 len = sizeof(struct icmp_filter);
91398 ret = -EFAULT;
91399- if (put_user(len, optlen) ||
91400- copy_to_user(optval, &raw_sk(sk)->filter, len))
91401+ filter = raw_sk(sk)->filter;
91402+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
91403 goto out;
91404 ret = 0;
91405 out: return ret;
91406@@ -994,7 +998,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
91407 0, 0L, 0,
91408 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
91409 0, sock_i_ino(sp),
91410- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
91411+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
91412 }
91413
91414 static int raw_seq_show(struct seq_file *seq, void *v)
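raw_seticmpfilter/raw_geticmpfilter stop copying directly between userland and the live raw_sk(sk)->filter. Staging through a stack copy means a faulting copy_from_user can never leave the in-socket filter half-written, and the get path snapshots the filter and re-checks len against the snapshot before copy_to_user. A sketch of the set side with a stand-in socket type:

    #include <string.h>
    #include <errno.h>

    struct icmp_filter { unsigned int data; };
    struct raw_sock    { struct icmp_filter filter; };

    /* copy_from_user stand-in; the real call can fail partway through */
    static int copy_in(void *dst, const void *src, size_t n)
    {
        memcpy(dst, src, n);
        return 0;
    }

    static int set_filter(struct raw_sock *rs, const void *optval, int optlen)
    {
        struct icmp_filter filter;   /* staging copy on the stack */

        if (optlen > (int)sizeof(struct icmp_filter))
            optlen = sizeof(struct icmp_filter);
        if (copy_in(&filter, optval, optlen))
            return -EFAULT;          /* live filter untouched on failure */
        rs->filter = filter;         /* commit only after a full copy */
        return 0;
    }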
91415diff --git a/net/ipv4/route.c b/net/ipv4/route.c
91416index d35bbf0..faa3ab8 100644
91417--- a/net/ipv4/route.c
91418+++ b/net/ipv4/route.c
91419@@ -2558,34 +2558,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
91420 .maxlen = sizeof(int),
91421 .mode = 0200,
91422 .proc_handler = ipv4_sysctl_rtcache_flush,
91423+ .extra1 = &init_net,
91424 },
91425 { },
91426 };
91427
91428 static __net_init int sysctl_route_net_init(struct net *net)
91429 {
91430- struct ctl_table *tbl;
91431+ ctl_table_no_const *tbl = NULL;
91432
91433- tbl = ipv4_route_flush_table;
91434 if (!net_eq(net, &init_net)) {
91435- tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
91436+ tbl = kmemdup(ipv4_route_flush_table, sizeof(ipv4_route_flush_table), GFP_KERNEL);
91437 if (tbl == NULL)
91438 goto err_dup;
91439
91440 /* Don't export sysctls to unprivileged users */
91441 if (net->user_ns != &init_user_ns)
91442 tbl[0].procname = NULL;
91443- }
91444- tbl[0].extra1 = net;
91445+ tbl[0].extra1 = net;
91446+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
91447+ } else
91448+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", ipv4_route_flush_table);
91449
91450- net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
91451 if (net->ipv4.route_hdr == NULL)
91452 goto err_reg;
91453 return 0;
91454
91455 err_reg:
91456- if (tbl != ipv4_route_flush_table)
91457- kfree(tbl);
91458+ kfree(tbl);
91459 err_dup:
91460 return -ENOMEM;
91461 }
91462@@ -2608,7 +2608,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
91463
91464 static __net_init int rt_genid_init(struct net *net)
91465 {
91466- atomic_set(&net->rt_genid, 0);
91467+ atomic_set_unchecked(&net->rt_genid, 0);
91468 get_random_bytes(&net->ipv4.dev_addr_genid,
91469 sizeof(net->ipv4.dev_addr_genid));
91470 return 0;
91471diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
91472index 3f25e75..3ae0f4d 100644
91473--- a/net/ipv4/sysctl_net_ipv4.c
91474+++ b/net/ipv4/sysctl_net_ipv4.c
91475@@ -57,7 +57,7 @@ static int ipv4_local_port_range(ctl_table *table, int write,
91476 {
91477 int ret;
91478 int range[2];
91479- ctl_table tmp = {
91480+ ctl_table_no_const tmp = {
91481 .data = &range,
91482 .maxlen = sizeof(range),
91483 .mode = table->mode,
91484@@ -110,7 +110,7 @@ static int ipv4_ping_group_range(ctl_table *table, int write,
91485 int ret;
91486 gid_t urange[2];
91487 kgid_t low, high;
91488- ctl_table tmp = {
91489+ ctl_table_no_const tmp = {
91490 .data = &urange,
91491 .maxlen = sizeof(urange),
91492 .mode = table->mode,
91493@@ -141,7 +141,7 @@ static int proc_tcp_congestion_control(ctl_table *ctl, int write,
91494 void __user *buffer, size_t *lenp, loff_t *ppos)
91495 {
91496 char val[TCP_CA_NAME_MAX];
91497- ctl_table tbl = {
91498+ ctl_table_no_const tbl = {
91499 .data = val,
91500 .maxlen = TCP_CA_NAME_MAX,
91501 };
91502@@ -160,7 +160,7 @@ static int proc_tcp_available_congestion_control(ctl_table *ctl,
91503 void __user *buffer, size_t *lenp,
91504 loff_t *ppos)
91505 {
91506- ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
91507+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX, };
91508 int ret;
91509
91510 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
91511@@ -177,7 +177,7 @@ static int proc_allowed_congestion_control(ctl_table *ctl,
91512 void __user *buffer, size_t *lenp,
91513 loff_t *ppos)
91514 {
91515- ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
91516+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX };
91517 int ret;
91518
91519 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
91520@@ -203,15 +203,17 @@ static int ipv4_tcp_mem(ctl_table *ctl, int write,
91521 struct mem_cgroup *memcg;
91522 #endif
91523
91524- ctl_table tmp = {
91525+ ctl_table_no_const tmp = {
91526 .data = &vec,
91527 .maxlen = sizeof(vec),
91528 .mode = ctl->mode,
91529 };
91530
91531 if (!write) {
91532- ctl->data = &net->ipv4.sysctl_tcp_mem;
91533- return proc_doulongvec_minmax(ctl, write, buffer, lenp, ppos);
91534+ ctl_table_no_const tcp_mem = *ctl;
91535+
91536+ tcp_mem.data = &net->ipv4.sysctl_tcp_mem;
91537+ return proc_doulongvec_minmax(&tcp_mem, write, buffer, lenp, ppos);
91538 }
91539
91540 ret = proc_doulongvec_minmax(&tmp, write, buffer, lenp, ppos);
91541@@ -238,7 +240,7 @@ static int ipv4_tcp_mem(ctl_table *ctl, int write,
91542 static int proc_tcp_fastopen_key(ctl_table *ctl, int write, void __user *buffer,
91543 size_t *lenp, loff_t *ppos)
91544 {
91545- ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
91546+ ctl_table_no_const tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
91547 struct tcp_fastopen_context *ctxt;
91548 int ret;
91549 u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
91550@@ -481,7 +483,7 @@ static struct ctl_table ipv4_table[] = {
91551 },
91552 {
91553 .procname = "ip_local_reserved_ports",
91554- .data = NULL, /* initialized in sysctl_ipv4_init */
91555+ .data = sysctl_local_reserved_ports,
91556 .maxlen = 65536,
91557 .mode = 0644,
91558 .proc_handler = proc_do_large_bitmap,
91559@@ -846,11 +848,10 @@ static struct ctl_table ipv4_net_table[] = {
91560
91561 static __net_init int ipv4_sysctl_init_net(struct net *net)
91562 {
91563- struct ctl_table *table;
91564+ ctl_table_no_const *table = NULL;
91565
91566- table = ipv4_net_table;
91567 if (!net_eq(net, &init_net)) {
91568- table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
91569+ table = kmemdup(ipv4_net_table, sizeof(ipv4_net_table), GFP_KERNEL);
91570 if (table == NULL)
91571 goto err_alloc;
91572
91573@@ -885,15 +886,17 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
91574
91575 tcp_init_mem(net);
91576
91577- net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
91578+ if (!net_eq(net, &init_net))
91579+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
91580+ else
91581+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", ipv4_net_table);
91582 if (net->ipv4.ipv4_hdr == NULL)
91583 goto err_reg;
91584
91585 return 0;
91586
91587 err_reg:
91588- if (!net_eq(net, &init_net))
91589- kfree(table);
91590+ kfree(table);
91591 err_alloc:
91592 return -ENOMEM;
91593 }
91594@@ -915,16 +918,6 @@ static __net_initdata struct pernet_operations ipv4_sysctl_ops = {
91595 static __init int sysctl_ipv4_init(void)
91596 {
91597 struct ctl_table_header *hdr;
91598- struct ctl_table *i;
91599-
91600- for (i = ipv4_table; i->procname; i++) {
91601- if (strcmp(i->procname, "ip_local_reserved_ports") == 0) {
91602- i->data = sysctl_local_reserved_ports;
91603- break;
91604- }
91605- }
91606- if (!i->procname)
91607- return -EINVAL;
91608
91609 hdr = register_net_sysctl(&init_net, "net/ipv4", ipv4_table);
91610 if (hdr == NULL)
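One detail in sysctl_net_ipv4.c beyond the usual duplication: the ipv4_tcp_mem read path used to redirect ctl->data at the per-namespace value in place. With the registered table shared across namespaces and now const, the patch copies the ctl_table to a local and points the copy's .data instead. A kernel-style fragment of the move; `net` is assumed in scope as in the real handler:

    static int tcp_mem_read(struct ctl_table *ctl, int write,
                            void __user *buffer, size_t *lenp, loff_t *ppos)
    {
        /* Never write the shared, read-only table; retarget a stack copy. */
        ctl_table_no_const tcp_mem = *ctl;

        tcp_mem.data = &net->ipv4.sysctl_tcp_mem;
        return proc_doulongvec_minmax(&tcp_mem, write, buffer, lenp, ppos);
    }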
91611diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
91612index 9c62257..651cc27 100644
91613--- a/net/ipv4/tcp_input.c
91614+++ b/net/ipv4/tcp_input.c
91615@@ -4436,7 +4436,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
91616 * simplifies code)
91617 */
91618 static void
91619-tcp_collapse(struct sock *sk, struct sk_buff_head *list,
91620+__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
91621 struct sk_buff *head, struct sk_buff *tail,
91622 u32 start, u32 end)
91623 {
91624@@ -5522,6 +5522,7 @@ discard:
91625 tcp_paws_reject(&tp->rx_opt, 0))
91626 goto discard_and_undo;
91627
91628+#ifndef CONFIG_GRKERNSEC_NO_SIMULT_CONNECT
91629 if (th->syn) {
91630 /* We see SYN without ACK. It is attempt of
91631 * simultaneous connect with crossed SYNs.
91632@@ -5572,6 +5573,7 @@ discard:
91633 goto discard;
91634 #endif
91635 }
91636+#endif
91637 /* "fifth, if neither of the SYN or RST bits is set then
91638 * drop the segment and return."
91639 */
91640@@ -5616,7 +5618,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
91641 goto discard;
91642
91643 if (th->syn) {
91644- if (th->fin)
91645+ if (th->fin || th->urg || th->psh)
91646 goto discard;
91647 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
91648 return 1;
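Two behavioral changes in tcp_input.c: with GRKERNSEC_NO_SIMULT_CONNECT the entire simultaneous-open (crossed SYN) branch is compiled out, closing the spoofing surface that RFC 793 simultaneous connect exposes; and in tcp_rcv_state_process a SYN carrying FIN, URG or PSH is now discarded outright rather than only SYN+FIN, on the view that legitimate stacks of this era do not send those combinations and scanners do. The added predicate, isolated:

    #include <stdbool.h>

    struct tcp_flags { bool syn, fin, urg, psh; };

    /* Drop a SYN that also carries FIN, URG or PSH. */
    static bool syn_should_be_dropped(const struct tcp_flags *th)
    {
        return th->syn && (th->fin || th->urg || th->psh);
    }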
91649diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
91650index 7999fc5..c812f42 100644
91651--- a/net/ipv4/tcp_ipv4.c
91652+++ b/net/ipv4/tcp_ipv4.c
91653@@ -90,6 +90,10 @@ int sysctl_tcp_low_latency __read_mostly;
91654 EXPORT_SYMBOL(sysctl_tcp_low_latency);
91655
91656
91657+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
91658+extern int grsec_enable_blackhole;
91659+#endif
91660+
91661 #ifdef CONFIG_TCP_MD5SIG
91662 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
91663 __be32 daddr, __be32 saddr, const struct tcphdr *th);
91664@@ -1855,6 +1859,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
91665 return 0;
91666
91667 reset:
91668+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
91669+ if (!grsec_enable_blackhole)
91670+#endif
91671 tcp_v4_send_reset(rsk, skb);
91672 discard:
91673 kfree_skb(skb);
91674@@ -2000,12 +2007,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
91675 TCP_SKB_CB(skb)->sacked = 0;
91676
91677 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
91678- if (!sk)
91679+ if (!sk) {
91680+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
91681+ ret = 1;
91682+#endif
91683 goto no_tcp_socket;
91684-
91685+ }
91686 process:
91687- if (sk->sk_state == TCP_TIME_WAIT)
91688+ if (sk->sk_state == TCP_TIME_WAIT) {
91689+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
91690+ ret = 2;
91691+#endif
91692 goto do_time_wait;
91693+ }
91694
91695 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
91696 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
91697@@ -2058,6 +2072,10 @@ csum_error:
91698 bad_packet:
91699 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
91700 } else {
91701+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
91702+ if (!grsec_enable_blackhole || (ret == 1 &&
91703+ (skb->dev->flags & IFF_LOOPBACK)))
91704+#endif
91705 tcp_v4_send_reset(NULL, skb);
91706 }
91707
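The GRKERNSEC_BLACKHOLE plumbing in tcp_ipv4.c: `ret` records why the packet fell through (1: no socket matched, 2: TIME-WAIT lookup path), and the error path then suppresses the RST unless blackhole mode is off, or the packet arrived on loopback so local diagnostics keep working. The decision condensed into one function, with the flag value taken from the kernel's IFF_LOOPBACK:

    #include <stdbool.h>

    #define IFF_LOOPBACK 0x8   /* matches the kernel's flag value */

    /* ret == 1: no socket matched; ret == 2: TIME-WAIT path. */
    static bool should_send_reset(int blackhole, int ret, unsigned dev_flags)
    {
        return !blackhole || (ret == 1 && (dev_flags & IFF_LOOPBACK));
    }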
91708diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
91709index 0f01788..d52a859 100644
91710--- a/net/ipv4/tcp_minisocks.c
91711+++ b/net/ipv4/tcp_minisocks.c
91712@@ -27,6 +27,10 @@
91713 #include <net/inet_common.h>
91714 #include <net/xfrm.h>
91715
91716+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
91717+extern int grsec_enable_blackhole;
91718+#endif
91719+
91720 int sysctl_tcp_syncookies __read_mostly = 1;
91721 EXPORT_SYMBOL(sysctl_tcp_syncookies);
91722
91723@@ -717,7 +721,10 @@ embryonic_reset:
91724 * avoid becoming vulnerable to outside attack aiming at
91725 * resetting legit local connections.
91726 */
91727- req->rsk_ops->send_reset(sk, skb);
91728+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
91729+ if (!grsec_enable_blackhole)
91730+#endif
91731+ req->rsk_ops->send_reset(sk, skb);
91732 } else if (fastopen) { /* received a valid RST pkt */
91733 reqsk_fastopen_remove(sk, req, true);
91734 tcp_reset(sk);
91735diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
91736index d4943f6..e7a74a5 100644
91737--- a/net/ipv4/tcp_probe.c
91738+++ b/net/ipv4/tcp_probe.c
91739@@ -204,7 +204,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
91740 if (cnt + width >= len)
91741 break;
91742
91743- if (copy_to_user(buf + cnt, tbuf, width))
91744+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
91745 return -EFAULT;
91746 cnt += width;
91747 }
91748diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
91749index 4b85e6f..22f9ac9 100644
91750--- a/net/ipv4/tcp_timer.c
91751+++ b/net/ipv4/tcp_timer.c
91752@@ -22,6 +22,10 @@
91753 #include <linux/gfp.h>
91754 #include <net/tcp.h>
91755
91756+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
91757+extern int grsec_lastack_retries;
91758+#endif
91759+
91760 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
91761 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
91762 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
91763@@ -185,6 +189,13 @@ static int tcp_write_timeout(struct sock *sk)
91764 }
91765 }
91766
91767+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
91768+ if ((sk->sk_state == TCP_LAST_ACK) &&
91769+ (grsec_lastack_retries > 0) &&
91770+ (grsec_lastack_retries < retry_until))
91771+ retry_until = grsec_lastack_retries;
91772+#endif
91773+
91774 if (retransmits_timed_out(sk, retry_until,
91775 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
91776 /* Has it gone just too far? */
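tcp_timer.c caps retransmissions in LAST-ACK: a peer that never ACKs the final FIN can otherwise pin sockets and their memory for the full retry budget, a known resource-exhaustion vector. When grsec_lastack_retries is positive and stricter than the computed retry_until, it wins. The clamp, standalone:

    /* Clamp the LAST-ACK retry budget when the sysctl is set and stricter. */
    static int effective_retries(int state_is_last_ack,
                                 int grsec_lastack_retries, int retry_until)
    {
        if (state_is_last_ack &&
            grsec_lastack_retries > 0 &&
            grsec_lastack_retries < retry_until)
            return grsec_lastack_retries;
        return retry_until;
    }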
91777diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
91778index 93b731d..5a2dd92 100644
91779--- a/net/ipv4/udp.c
91780+++ b/net/ipv4/udp.c
91781@@ -87,6 +87,7 @@
91782 #include <linux/types.h>
91783 #include <linux/fcntl.h>
91784 #include <linux/module.h>
91785+#include <linux/security.h>
91786 #include <linux/socket.h>
91787 #include <linux/sockios.h>
91788 #include <linux/igmp.h>
91789@@ -111,6 +112,10 @@
91790 #include <trace/events/skb.h>
91791 #include "udp_impl.h"
91792
91793+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
91794+extern int grsec_enable_blackhole;
91795+#endif
91796+
91797 struct udp_table udp_table __read_mostly;
91798 EXPORT_SYMBOL(udp_table);
91799
91800@@ -594,6 +599,9 @@ found:
91801 return s;
91802 }
91803
91804+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
91805+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
91806+
91807 /*
91808 * This routine is called by the ICMP module when it gets some
91809 * sort of error condition. If err < 0 then the socket should
91810@@ -890,9 +898,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
91811 dport = usin->sin_port;
91812 if (dport == 0)
91813 return -EINVAL;
91814+
91815+ err = gr_search_udp_sendmsg(sk, usin);
91816+ if (err)
91817+ return err;
91818 } else {
91819 if (sk->sk_state != TCP_ESTABLISHED)
91820 return -EDESTADDRREQ;
91821+
91822+ err = gr_search_udp_sendmsg(sk, NULL);
91823+ if (err)
91824+ return err;
91825+
91826 daddr = inet->inet_daddr;
91827 dport = inet->inet_dport;
91828 /* Open fast path for connected socket.
91829@@ -1136,7 +1153,7 @@ static unsigned int first_packet_length(struct sock *sk)
91830 IS_UDPLITE(sk));
91831 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
91832 IS_UDPLITE(sk));
91833- atomic_inc(&sk->sk_drops);
91834+ atomic_inc_unchecked(&sk->sk_drops);
91835 __skb_unlink(skb, rcvq);
91836 __skb_queue_tail(&list_kill, skb);
91837 }
91838@@ -1222,6 +1239,10 @@ try_again:
91839 if (!skb)
91840 goto out;
91841
91842+ err = gr_search_udp_recvmsg(sk, skb);
91843+ if (err)
91844+ goto out_free;
91845+
91846 ulen = skb->len - sizeof(struct udphdr);
91847 copied = len;
91848 if (copied > ulen)
91849@@ -1255,7 +1276,7 @@ try_again:
91850 if (unlikely(err)) {
91851 trace_kfree_skb(skb, udp_recvmsg);
91852 if (!peeked) {
91853- atomic_inc(&sk->sk_drops);
91854+ atomic_inc_unchecked(&sk->sk_drops);
91855 UDP_INC_STATS_USER(sock_net(sk),
91856 UDP_MIB_INERRORS, is_udplite);
91857 }
91858@@ -1542,7 +1563,7 @@ csum_error:
91859 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
91860 drop:
91861 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
91862- atomic_inc(&sk->sk_drops);
91863+ atomic_inc_unchecked(&sk->sk_drops);
91864 kfree_skb(skb);
91865 return -1;
91866 }
91867@@ -1561,7 +1582,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
91868 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
91869
91870 if (!skb1) {
91871- atomic_inc(&sk->sk_drops);
91872+ atomic_inc_unchecked(&sk->sk_drops);
91873 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
91874 IS_UDPLITE(sk));
91875 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
91876@@ -1730,6 +1751,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
91877 goto csum_error;
91878
91879 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
91880+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
91881+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
91882+#endif
91883 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
91884
91885 /*
91886@@ -2160,7 +2184,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
91887 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
91888 0, sock_i_ino(sp),
91889 atomic_read(&sp->sk_refcnt), sp,
91890- atomic_read(&sp->sk_drops), len);
91891+ atomic_read_unchecked(&sp->sk_drops), len);
91892 }
91893
91894 int udp4_seq_show(struct seq_file *seq, void *v)
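udp.c wires in grsecurity's socket mediation: gr_search_udp_sendmsg() is consulted before a datagram is routed (in both the explicit-address and connected cases, with NULL for the latter) and gr_search_udp_recvmsg() before a received datagram is surfaced, so RBAC policy can deny UDP traffic per socket. The hook bodies live in grsecurity proper; only the call shape is shown here, as a non-compilable kernel fragment using the declarations the patch adds:

    /* Deny-by-policy hook pattern: consult the policy engine first and
     * propagate its verdict before doing any work on the datagram. */
    static int udp_send_example(struct sock *sk, struct sockaddr_in *usin)
    {
        int err = gr_search_udp_sendmsg(sk, usin);  /* NULL when connected */
        if (err)
            return err;        /* policy denied: bail before routing */
        /* ... normal send path continues here ... */
        return 0;
    }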
91895diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
91896index 9a459be..086b866 100644
91897--- a/net/ipv4/xfrm4_policy.c
91898+++ b/net/ipv4/xfrm4_policy.c
91899@@ -264,19 +264,18 @@ static struct ctl_table xfrm4_policy_table[] = {
91900
91901 static int __net_init xfrm4_net_init(struct net *net)
91902 {
91903- struct ctl_table *table;
91904+ ctl_table_no_const *table = NULL;
91905 struct ctl_table_header *hdr;
91906
91907- table = xfrm4_policy_table;
91908 if (!net_eq(net, &init_net)) {
91909- table = kmemdup(table, sizeof(xfrm4_policy_table), GFP_KERNEL);
91910+ table = kmemdup(xfrm4_policy_table, sizeof(xfrm4_policy_table), GFP_KERNEL);
91911 if (!table)
91912 goto err_alloc;
91913
91914 table[0].data = &net->xfrm.xfrm4_dst_ops.gc_thresh;
91915- }
91916-
91917- hdr = register_net_sysctl(net, "net/ipv4", table);
91918+ hdr = register_net_sysctl(net, "net/ipv4", table);
91919+ } else
91920+ hdr = register_net_sysctl(net, "net/ipv4", xfrm4_policy_table);
91921 if (!hdr)
91922 goto err_reg;
91923
91924@@ -284,8 +283,7 @@ static int __net_init xfrm4_net_init(struct net *net)
91925 return 0;
91926
91927 err_reg:
91928- if (!net_eq(net, &init_net))
91929- kfree(table);
91930+ kfree(table);
91931 err_alloc:
91932 return -ENOMEM;
91933 }
91934diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
91935index fb8c94c..fb18024 100644
91936--- a/net/ipv6/addrconf.c
91937+++ b/net/ipv6/addrconf.c
91938@@ -621,7 +621,7 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
91939 idx = 0;
91940 head = &net->dev_index_head[h];
91941 rcu_read_lock();
91942- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
91943+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^
91944 net->dev_base_seq;
91945 hlist_for_each_entry_rcu(dev, head, index_hlist) {
91946 if (idx < s_idx)
91947@@ -2380,7 +2380,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
91948 p.iph.ihl = 5;
91949 p.iph.protocol = IPPROTO_IPV6;
91950 p.iph.ttl = 64;
91951- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
91952+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
91953
91954 if (ops->ndo_do_ioctl) {
91955 mm_segment_t oldfs = get_fs();
91956@@ -4002,7 +4002,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
91957 s_ip_idx = ip_idx = cb->args[2];
91958
91959 rcu_read_lock();
91960- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
91961+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
91962 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
91963 idx = 0;
91964 head = &net->dev_index_head[h];
91965@@ -4587,7 +4587,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
91966 dst_free(&ifp->rt->dst);
91967 break;
91968 }
91969- atomic_inc(&net->ipv6.dev_addr_genid);
91970+ atomic_inc_unchecked(&net->ipv6.dev_addr_genid);
91971 }
91972
91973 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
91974@@ -4607,7 +4607,7 @@ int addrconf_sysctl_forward(ctl_table *ctl, int write,
91975 int *valp = ctl->data;
91976 int val = *valp;
91977 loff_t pos = *ppos;
91978- ctl_table lctl;
91979+ ctl_table_no_const lctl;
91980 int ret;
91981
91982 /*
91983@@ -4689,7 +4689,7 @@ int addrconf_sysctl_disable(ctl_table *ctl, int write,
91984 int *valp = ctl->data;
91985 int val = *valp;
91986 loff_t pos = *ppos;
91987- ctl_table lctl;
91988+ ctl_table_no_const lctl;
91989 int ret;
91990
91991 /*
91992diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
91993index 40ffd72..aeac0dc 100644
91994--- a/net/ipv6/esp6.c
91995+++ b/net/ipv6/esp6.c
91996@@ -425,7 +425,7 @@ static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
91997 net_adj = 0;
91998
91999 return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
92000- net_adj) & ~(align - 1)) + (net_adj - 2);
92001+ net_adj) & ~(align - 1)) + net_adj - 2;
92002 }
92003
92004 static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
92005diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
92006index b4ff0a4..db9b764 100644
92007--- a/net/ipv6/icmp.c
92008+++ b/net/ipv6/icmp.c
92009@@ -980,7 +980,7 @@ ctl_table ipv6_icmp_table_template[] = {
92010
92011 struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
92012 {
92013- struct ctl_table *table;
92014+ ctl_table_no_const *table;
92015
92016 table = kmemdup(ipv6_icmp_table_template,
92017 sizeof(ipv6_icmp_table_template),
92018diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
92019index ecd6073..58162ae 100644
92020--- a/net/ipv6/ip6_gre.c
92021+++ b/net/ipv6/ip6_gre.c
92022@@ -74,7 +74,7 @@ struct ip6gre_net {
92023 struct net_device *fb_tunnel_dev;
92024 };
92025
92026-static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
92027+static struct rtnl_link_ops ip6gre_link_ops;
92028 static int ip6gre_tunnel_init(struct net_device *dev);
92029 static void ip6gre_tunnel_setup(struct net_device *dev);
92030 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
92031@@ -1283,7 +1283,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
92032 }
92033
92034
92035-static struct inet6_protocol ip6gre_protocol __read_mostly = {
92036+static struct inet6_protocol ip6gre_protocol = {
92037 .handler = ip6gre_rcv,
92038 .err_handler = ip6gre_err,
92039 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
92040@@ -1617,7 +1617,7 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
92041 [IFLA_GRE_FLAGS] = { .type = NLA_U32 },
92042 };
92043
92044-static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
92045+static struct rtnl_link_ops ip6gre_link_ops = {
92046 .kind = "ip6gre",
92047 .maxtype = IFLA_GRE_MAX,
92048 .policy = ip6gre_policy,
92049@@ -1630,7 +1630,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
92050 .fill_info = ip6gre_fill_info,
92051 };
92052
92053-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
92054+static struct rtnl_link_ops ip6gre_tap_ops = {
92055 .kind = "ip6gretap",
92056 .maxtype = IFLA_GRE_MAX,
92057 .policy = ip6gre_policy,
92058diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
92059index 1e55866..b398dab 100644
92060--- a/net/ipv6/ip6_tunnel.c
92061+++ b/net/ipv6/ip6_tunnel.c
92062@@ -88,7 +88,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
92063
92064 static int ip6_tnl_dev_init(struct net_device *dev);
92065 static void ip6_tnl_dev_setup(struct net_device *dev);
92066-static struct rtnl_link_ops ip6_link_ops __read_mostly;
92067+static struct rtnl_link_ops ip6_link_ops;
92068
92069 static int ip6_tnl_net_id __read_mostly;
92070 struct ip6_tnl_net {
92071@@ -1672,7 +1672,7 @@ static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
92072 [IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
92073 };
92074
92075-static struct rtnl_link_ops ip6_link_ops __read_mostly = {
92076+static struct rtnl_link_ops ip6_link_ops = {
92077 .kind = "ip6tnl",
92078 .maxtype = IFLA_IPTUN_MAX,
92079 .policy = ip6_tnl_policy,
92080diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
92081index d1e2e8e..51c19ae 100644
92082--- a/net/ipv6/ipv6_sockglue.c
92083+++ b/net/ipv6/ipv6_sockglue.c
92084@@ -991,7 +991,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
92085 if (sk->sk_type != SOCK_STREAM)
92086 return -ENOPROTOOPT;
92087
92088- msg.msg_control = optval;
92089+ msg.msg_control = (void __force_kernel *)optval;
92090 msg.msg_controllen = len;
92091 msg.msg_flags = flags;
92092
92093diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
92094index 44400c2..8e11f52 100644
92095--- a/net/ipv6/netfilter/ip6_tables.c
92096+++ b/net/ipv6/netfilter/ip6_tables.c
92097@@ -1078,14 +1078,14 @@ static int compat_table_info(const struct xt_table_info *info,
92098 #endif
92099
92100 static int get_info(struct net *net, void __user *user,
92101- const int *len, int compat)
92102+ int len, int compat)
92103 {
92104 char name[XT_TABLE_MAXNAMELEN];
92105 struct xt_table *t;
92106 int ret;
92107
92108- if (*len != sizeof(struct ip6t_getinfo)) {
92109- duprintf("length %u != %zu\n", *len,
92110+ if (len != sizeof(struct ip6t_getinfo)) {
92111+ duprintf("length %u != %zu\n", len,
92112 sizeof(struct ip6t_getinfo));
92113 return -EINVAL;
92114 }
92115@@ -1122,7 +1122,7 @@ static int get_info(struct net *net, void __user *user,
92116 info.size = private->size;
92117 strcpy(info.name, name);
92118
92119- if (copy_to_user(user, &info, *len) != 0)
92120+ if (copy_to_user(user, &info, len) != 0)
92121 ret = -EFAULT;
92122 else
92123 ret = 0;
92124@@ -1976,7 +1976,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
92125
92126 switch (cmd) {
92127 case IP6T_SO_GET_INFO:
92128- ret = get_info(sock_net(sk), user, len, 1);
92129+ ret = get_info(sock_net(sk), user, *len, 1);
92130 break;
92131 case IP6T_SO_GET_ENTRIES:
92132 ret = compat_get_entries(sock_net(sk), user, len);
92133@@ -2023,7 +2023,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
92134
92135 switch (cmd) {
92136 case IP6T_SO_GET_INFO:
92137- ret = get_info(sock_net(sk), user, len, 0);
92138+ ret = get_info(sock_net(sk), user, *len, 0);
92139 break;
92140
92141 case IP6T_SO_GET_ENTRIES:
92142diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
92143index dffdc1a..ccc6678 100644
92144--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
92145+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
92146@@ -90,12 +90,11 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
92147
92148 static int nf_ct_frag6_sysctl_register(struct net *net)
92149 {
92150- struct ctl_table *table;
92151+ ctl_table_no_const *table = NULL;
92152 struct ctl_table_header *hdr;
92153
92154- table = nf_ct_frag6_sysctl_table;
92155 if (!net_eq(net, &init_net)) {
92156- table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
92157+ table = kmemdup(nf_ct_frag6_sysctl_table, sizeof(nf_ct_frag6_sysctl_table),
92158 GFP_KERNEL);
92159 if (table == NULL)
92160 goto err_alloc;
92161@@ -103,9 +102,9 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
92162 table[0].data = &net->nf_frag.frags.timeout;
92163 table[1].data = &net->nf_frag.frags.low_thresh;
92164 table[2].data = &net->nf_frag.frags.high_thresh;
92165- }
92166-
92167- hdr = register_net_sysctl(net, "net/netfilter", table);
92168+ hdr = register_net_sysctl(net, "net/netfilter", table);
92169+ } else
92170+ hdr = register_net_sysctl(net, "net/netfilter", nf_ct_frag6_sysctl_table);
92171 if (hdr == NULL)
92172 goto err_reg;
92173
92174@@ -113,8 +112,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
92175 return 0;
92176
92177 err_reg:
92178- if (!net_eq(net, &init_net))
92179- kfree(table);
92180+ kfree(table);
92181 err_alloc:
92182 return -ENOMEM;
92183 }
92184diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
92185index eedff8c..6e13a47 100644
92186--- a/net/ipv6/raw.c
92187+++ b/net/ipv6/raw.c
92188@@ -378,7 +378,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
92189 {
92190 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
92191 skb_checksum_complete(skb)) {
92192- atomic_inc(&sk->sk_drops);
92193+ atomic_inc_unchecked(&sk->sk_drops);
92194 kfree_skb(skb);
92195 return NET_RX_DROP;
92196 }
92197@@ -406,7 +406,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
92198 struct raw6_sock *rp = raw6_sk(sk);
92199
92200 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
92201- atomic_inc(&sk->sk_drops);
92202+ atomic_inc_unchecked(&sk->sk_drops);
92203 kfree_skb(skb);
92204 return NET_RX_DROP;
92205 }
92206@@ -430,7 +430,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
92207
92208 if (inet->hdrincl) {
92209 if (skb_checksum_complete(skb)) {
92210- atomic_inc(&sk->sk_drops);
92211+ atomic_inc_unchecked(&sk->sk_drops);
92212 kfree_skb(skb);
92213 return NET_RX_DROP;
92214 }
92215@@ -602,7 +602,7 @@ out:
92216 return err;
92217 }
92218
92219-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
92220+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
92221 struct flowi6 *fl6, struct dst_entry **dstp,
92222 unsigned int flags)
92223 {
92224@@ -914,12 +914,15 @@ do_confirm:
92225 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
92226 char __user *optval, int optlen)
92227 {
92228+ struct icmp6_filter filter;
92229+
92230 switch (optname) {
92231 case ICMPV6_FILTER:
92232 if (optlen > sizeof(struct icmp6_filter))
92233 optlen = sizeof(struct icmp6_filter);
92234- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
92235+ if (copy_from_user(&filter, optval, optlen))
92236 return -EFAULT;
92237+ raw6_sk(sk)->filter = filter;
92238 return 0;
92239 default:
92240 return -ENOPROTOOPT;
92241@@ -932,6 +935,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
92242 char __user *optval, int __user *optlen)
92243 {
92244 int len;
92245+ struct icmp6_filter filter;
92246
92247 switch (optname) {
92248 case ICMPV6_FILTER:
92249@@ -943,7 +947,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
92250 len = sizeof(struct icmp6_filter);
92251 if (put_user(len, optlen))
92252 return -EFAULT;
92253- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
92254+ filter = raw6_sk(sk)->filter;
92255+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
92256 return -EFAULT;
92257 return 0;
92258 default:
92259@@ -1251,7 +1256,7 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
92260 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
92261 0,
92262 sock_i_ino(sp),
92263- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
92264+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
92265 }
92266
92267 static int raw6_seq_show(struct seq_file *seq, void *v)
92268diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
92269index 790d9f4..68ae078 100644
92270--- a/net/ipv6/reassembly.c
92271+++ b/net/ipv6/reassembly.c
92272@@ -621,12 +621,11 @@ static struct ctl_table ip6_frags_ctl_table[] = {
92273
92274 static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
92275 {
92276- struct ctl_table *table;
92277+ ctl_table_no_const *table = NULL;
92278 struct ctl_table_header *hdr;
92279
92280- table = ip6_frags_ns_ctl_table;
92281 if (!net_eq(net, &init_net)) {
92282- table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
92283+ table = kmemdup(ip6_frags_ns_ctl_table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
92284 if (table == NULL)
92285 goto err_alloc;
92286
92287@@ -637,9 +636,10 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
92288 /* Don't export sysctls to unprivileged users */
92289 if (net->user_ns != &init_user_ns)
92290 table[0].procname = NULL;
92291- }
92292+ hdr = register_net_sysctl(net, "net/ipv6", table);
92293+ } else
92294+ hdr = register_net_sysctl(net, "net/ipv6", ip6_frags_ns_ctl_table);
92295
92296- hdr = register_net_sysctl(net, "net/ipv6", table);
92297 if (hdr == NULL)
92298 goto err_reg;
92299
92300@@ -647,8 +647,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
92301 return 0;
92302
92303 err_reg:
92304- if (!net_eq(net, &init_net))
92305- kfree(table);
92306+ kfree(table);
92307 err_alloc:
92308 return -ENOMEM;
92309 }
92310diff --git a/net/ipv6/route.c b/net/ipv6/route.c
92311index bacce6c..9d1741a 100644
92312--- a/net/ipv6/route.c
92313+++ b/net/ipv6/route.c
92314@@ -2903,7 +2903,7 @@ ctl_table ipv6_route_table_template[] = {
92315
92316 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
92317 {
92318- struct ctl_table *table;
92319+ ctl_table_no_const *table;
92320
92321 table = kmemdup(ipv6_route_table_template,
92322 sizeof(ipv6_route_table_template),
92323diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
92324index 60df36d..f3ab7c8 100644
92325--- a/net/ipv6/sit.c
92326+++ b/net/ipv6/sit.c
92327@@ -74,7 +74,7 @@ static void ipip6_tunnel_setup(struct net_device *dev);
92328 static void ipip6_dev_free(struct net_device *dev);
92329 static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst,
92330 __be32 *v4dst);
92331-static struct rtnl_link_ops sit_link_ops __read_mostly;
92332+static struct rtnl_link_ops sit_link_ops;
92333
92334 static int sit_net_id __read_mostly;
92335 struct sit_net {
92336@@ -1453,7 +1453,7 @@ static const struct nla_policy ipip6_policy[IFLA_IPTUN_MAX + 1] = {
92337 #endif
92338 };
92339
92340-static struct rtnl_link_ops sit_link_ops __read_mostly = {
92341+static struct rtnl_link_ops sit_link_ops = {
92342 .kind = "sit",
92343 .maxtype = IFLA_IPTUN_MAX,
92344 .policy = ipip6_policy,
92345diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
92346index e85c48b..b8268d3 100644
92347--- a/net/ipv6/sysctl_net_ipv6.c
92348+++ b/net/ipv6/sysctl_net_ipv6.c
92349@@ -40,7 +40,7 @@ static ctl_table ipv6_rotable[] = {
92350
92351 static int __net_init ipv6_sysctl_net_init(struct net *net)
92352 {
92353- struct ctl_table *ipv6_table;
92354+ ctl_table_no_const *ipv6_table;
92355 struct ctl_table *ipv6_route_table;
92356 struct ctl_table *ipv6_icmp_table;
92357 int err;
92358diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
92359index 0a17ed9..2526cc3 100644
92360--- a/net/ipv6/tcp_ipv6.c
92361+++ b/net/ipv6/tcp_ipv6.c
92362@@ -103,6 +103,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
92363 inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
92364 }
92365
92366+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
92367+extern int grsec_enable_blackhole;
92368+#endif
92369+
92370 static void tcp_v6_hash(struct sock *sk)
92371 {
92372 if (sk->sk_state != TCP_CLOSE) {
92373@@ -1398,6 +1402,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
92374 return 0;
92375
92376 reset:
92377+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
92378+ if (!grsec_enable_blackhole)
92379+#endif
92380 tcp_v6_send_reset(sk, skb);
92381 discard:
92382 if (opt_skb)
92383@@ -1480,12 +1487,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
92384 TCP_SKB_CB(skb)->sacked = 0;
92385
92386 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
92387- if (!sk)
92388+ if (!sk) {
92389+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
92390+ ret = 1;
92391+#endif
92392 goto no_tcp_socket;
92393+ }
92394
92395 process:
92396- if (sk->sk_state == TCP_TIME_WAIT)
92397+ if (sk->sk_state == TCP_TIME_WAIT) {
92398+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
92399+ ret = 2;
92400+#endif
92401 goto do_time_wait;
92402+ }
92403
92404 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
92405 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
92406@@ -1536,6 +1551,10 @@ csum_error:
92407 bad_packet:
92408 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
92409 } else {
92410+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
92411+ if (!grsec_enable_blackhole || (ret == 1 &&
92412+ (skb->dev->flags & IFF_LOOPBACK)))
92413+#endif
92414 tcp_v6_send_reset(NULL, skb);
92415 }
92416
92417diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
92418index e7b28f9..d09c290 100644
92419--- a/net/ipv6/udp.c
92420+++ b/net/ipv6/udp.c
92421@@ -52,6 +52,10 @@
92422 #include <trace/events/skb.h>
92423 #include "udp_impl.h"
92424
92425+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
92426+extern int grsec_enable_blackhole;
92427+#endif
92428+
92429 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
92430 {
92431 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
92432@@ -419,7 +423,7 @@ try_again:
92433 if (unlikely(err)) {
92434 trace_kfree_skb(skb, udpv6_recvmsg);
92435 if (!peeked) {
92436- atomic_inc(&sk->sk_drops);
92437+ atomic_inc_unchecked(&sk->sk_drops);
92438 if (is_udp4)
92439 UDP_INC_STATS_USER(sock_net(sk),
92440 UDP_MIB_INERRORS,
92441@@ -665,7 +669,7 @@ csum_error:
92442 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
92443 drop:
92444 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
92445- atomic_inc(&sk->sk_drops);
92446+ atomic_inc_unchecked(&sk->sk_drops);
92447 kfree_skb(skb);
92448 return -1;
92449 }
92450@@ -723,7 +727,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
92451 if (likely(skb1 == NULL))
92452 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
92453 if (!skb1) {
92454- atomic_inc(&sk->sk_drops);
92455+ atomic_inc_unchecked(&sk->sk_drops);
92456 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
92457 IS_UDPLITE(sk));
92458 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
92459@@ -860,6 +864,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
92460 goto csum_error;
92461
92462 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
92463+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
92464+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
92465+#endif
92466 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
92467
92468 kfree_skb(skb);
92469@@ -1392,7 +1399,7 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
92470 0,
92471 sock_i_ino(sp),
92472 atomic_read(&sp->sk_refcnt), sp,
92473- atomic_read(&sp->sk_drops));
92474+ atomic_read_unchecked(&sp->sk_drops));
92475 }
92476
92477 int udp6_seq_show(struct seq_file *seq, void *v)
92478diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
92479index 23ed03d..465a71d 100644
92480--- a/net/ipv6/xfrm6_policy.c
92481+++ b/net/ipv6/xfrm6_policy.c
92482@@ -324,19 +324,19 @@ static struct ctl_table xfrm6_policy_table[] = {
92483
92484 static int __net_init xfrm6_net_init(struct net *net)
92485 {
92486- struct ctl_table *table;
92487+ ctl_table_no_const *table = NULL;
92488 struct ctl_table_header *hdr;
92489
92490- table = xfrm6_policy_table;
92491 if (!net_eq(net, &init_net)) {
92492- table = kmemdup(table, sizeof(xfrm6_policy_table), GFP_KERNEL);
92493+ table = kmemdup(xfrm6_policy_table, sizeof(xfrm6_policy_table), GFP_KERNEL);
92494 if (!table)
92495 goto err_alloc;
92496
92497 table[0].data = &net->xfrm.xfrm6_dst_ops.gc_thresh;
92498- }
92499+ hdr = register_net_sysctl(net, "net/ipv6", table);
92500+ } else
92501+ hdr = register_net_sysctl(net, "net/ipv6", xfrm6_policy_table);
92502
92503- hdr = register_net_sysctl(net, "net/ipv6", table);
92504 if (!hdr)
92505 goto err_reg;
92506
92507@@ -344,8 +344,7 @@ static int __net_init xfrm6_net_init(struct net *net)
92508 return 0;
92509
92510 err_reg:
92511- if (!net_eq(net, &init_net))
92512- kfree(table);
92513+ kfree(table);
92514 err_alloc:
92515 return -ENOMEM;
92516 }
92517diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
92518index 41ac7938..75e3bb1 100644
92519--- a/net/irda/ircomm/ircomm_tty.c
92520+++ b/net/irda/ircomm/ircomm_tty.c
92521@@ -319,11 +319,11 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
92522 add_wait_queue(&port->open_wait, &wait);
92523
92524 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
92525- __FILE__, __LINE__, tty->driver->name, port->count);
92526+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
92527
92528 spin_lock_irqsave(&port->lock, flags);
92529 if (!tty_hung_up_p(filp))
92530- port->count--;
92531+ atomic_dec(&port->count);
92532 port->blocked_open++;
92533 spin_unlock_irqrestore(&port->lock, flags);
92534
92535@@ -358,7 +358,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
92536 }
92537
92538 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
92539- __FILE__, __LINE__, tty->driver->name, port->count);
92540+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
92541
92542 schedule();
92543 }
92544@@ -368,12 +368,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
92545
92546 spin_lock_irqsave(&port->lock, flags);
92547 if (!tty_hung_up_p(filp))
92548- port->count++;
92549+ atomic_inc(&port->count);
92550 port->blocked_open--;
92551 spin_unlock_irqrestore(&port->lock, flags);
92552
92553 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
92554- __FILE__, __LINE__, tty->driver->name, port->count);
92555+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
92556
92557 if (!retval)
92558 port->flags |= ASYNC_NORMAL_ACTIVE;
92559@@ -447,12 +447,12 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
92560
92561 /* ++ is not atomic, so this should be protected - Jean II */
92562 spin_lock_irqsave(&self->port.lock, flags);
92563- self->port.count++;
92564+ atomic_inc(&self->port.count);
92565 spin_unlock_irqrestore(&self->port.lock, flags);
92566 tty_port_tty_set(&self->port, tty);
92567
92568 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
92569- self->line, self->port.count);
92570+ self->line, atomic_read(&self->port.count));
92571
92572 /* Not really used by us, but lets do it anyway */
92573 self->port.low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
92574@@ -989,7 +989,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
92575 tty_kref_put(port->tty);
92576 }
92577 port->tty = NULL;
92578- port->count = 0;
92579+ atomic_set(&port->count, 0);
92580 spin_unlock_irqrestore(&port->lock, flags);
92581
92582 wake_up_interruptible(&port->open_wait);
92583@@ -1346,7 +1346,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
92584 seq_putc(m, '\n');
92585
92586 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
92587- seq_printf(m, "Open count: %d\n", self->port.count);
92588+ seq_printf(m, "Open count: %d\n", atomic_read(&self->port.count));
92589 seq_printf(m, "Max data size: %d\n", self->max_data_size);
92590 seq_printf(m, "Max header size: %d\n", self->max_header_size);
92591
92592diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
92593index ae69165..c8b82d8 100644
92594--- a/net/iucv/af_iucv.c
92595+++ b/net/iucv/af_iucv.c
92596@@ -773,10 +773,10 @@ static int iucv_sock_autobind(struct sock *sk)
92597
92598 write_lock_bh(&iucv_sk_list.lock);
92599
92600- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
92601+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
92602 while (__iucv_get_sock_by_name(name)) {
92603 sprintf(name, "%08x",
92604- atomic_inc_return(&iucv_sk_list.autobind_name));
92605+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
92606 }
92607
92608 write_unlock_bh(&iucv_sk_list.lock);
92609diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
92610index 4fe76ff..426a904 100644
92611--- a/net/iucv/iucv.c
92612+++ b/net/iucv/iucv.c
92613@@ -690,7 +690,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
92614 return NOTIFY_OK;
92615 }
92616
92617-static struct notifier_block __refdata iucv_cpu_notifier = {
92618+static struct notifier_block iucv_cpu_notifier = {
92619 .notifier_call = iucv_cpu_notify,
92620 };
92621
92622diff --git a/net/key/af_key.c b/net/key/af_key.c
92623index ab8bd2c..cd2d641 100644
92624--- a/net/key/af_key.c
92625+++ b/net/key/af_key.c
92626@@ -3048,10 +3048,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
92627 static u32 get_acqseq(void)
92628 {
92629 u32 res;
92630- static atomic_t acqseq;
92631+ static atomic_unchecked_t acqseq;
92632
92633 do {
92634- res = atomic_inc_return(&acqseq);
92635+ res = atomic_inc_return_unchecked(&acqseq);
92636 } while (!res);
92637 return res;
92638 }
92639diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
92640index ae36f8e..09d42ac 100644
92641--- a/net/mac80211/cfg.c
92642+++ b/net/mac80211/cfg.c
92643@@ -806,7 +806,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
92644 ret = ieee80211_vif_use_channel(sdata, chandef,
92645 IEEE80211_CHANCTX_EXCLUSIVE);
92646 }
92647- } else if (local->open_count == local->monitors) {
92648+ } else if (local_read(&local->open_count) == local->monitors) {
92649 local->_oper_chandef = *chandef;
92650 ieee80211_hw_config(local, 0);
92651 }
92652@@ -2922,7 +2922,7 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
92653 else
92654 local->probe_req_reg--;
92655
92656- if (!local->open_count)
92657+ if (!local_read(&local->open_count))
92658 break;
92659
92660 ieee80211_queue_work(&local->hw, &local->reconfig_filter);
92661@@ -3385,8 +3385,8 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
92662 if (chanctx_conf) {
92663 *chandef = chanctx_conf->def;
92664 ret = 0;
92665- } else if (local->open_count > 0 &&
92666- local->open_count == local->monitors &&
92667+ } else if (local_read(&local->open_count) > 0 &&
92668+ local_read(&local->open_count) == local->monitors &&
92669 sdata->vif.type == NL80211_IFTYPE_MONITOR) {
92670 if (local->use_chanctx)
92671 *chandef = local->monitor_chandef;
92672diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
92673index 9ca8e32..48e4a9b 100644
92674--- a/net/mac80211/ieee80211_i.h
92675+++ b/net/mac80211/ieee80211_i.h
92676@@ -28,6 +28,7 @@
92677 #include <net/ieee80211_radiotap.h>
92678 #include <net/cfg80211.h>
92679 #include <net/mac80211.h>
92680+#include <asm/local.h>
92681 #include "key.h"
92682 #include "sta_info.h"
92683 #include "debug.h"
92684@@ -891,7 +892,7 @@ struct ieee80211_local {
92685 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
92686 spinlock_t queue_stop_reason_lock;
92687
92688- int open_count;
92689+ local_t open_count;
92690 int monitors, cooked_mntrs;
92691 /* number of interfaces with corresponding FIF_ flags */
92692 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
92693diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
92694index 514e90f..56f22bf 100644
92695--- a/net/mac80211/iface.c
92696+++ b/net/mac80211/iface.c
92697@@ -502,7 +502,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
92698 break;
92699 }
92700
92701- if (local->open_count == 0) {
92702+ if (local_read(&local->open_count) == 0) {
92703 res = drv_start(local);
92704 if (res)
92705 goto err_del_bss;
92706@@ -545,7 +545,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
92707 break;
92708 }
92709
92710- if (local->monitors == 0 && local->open_count == 0) {
92711+ if (local->monitors == 0 && local_read(&local->open_count) == 0) {
92712 res = ieee80211_add_virtual_monitor(local);
92713 if (res)
92714 goto err_stop;
92715@@ -653,7 +653,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
92716 atomic_inc(&local->iff_promiscs);
92717
92718 if (coming_up)
92719- local->open_count++;
92720+ local_inc(&local->open_count);
92721
92722 if (hw_reconf_flags)
92723 ieee80211_hw_config(local, hw_reconf_flags);
92724@@ -691,7 +691,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
92725 err_del_interface:
92726 drv_remove_interface(local, sdata);
92727 err_stop:
92728- if (!local->open_count)
92729+ if (!local_read(&local->open_count))
92730 drv_stop(local);
92731 err_del_bss:
92732 sdata->bss = NULL;
92733@@ -828,7 +828,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
92734 }
92735
92736 if (going_down)
92737- local->open_count--;
92738+ local_dec(&local->open_count);
92739
92740 switch (sdata->vif.type) {
92741 case NL80211_IFTYPE_AP_VLAN:
92742@@ -895,7 +895,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
92743 }
92744 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
92745
92746- if (local->open_count == 0)
92747+ if (local_read(&local->open_count) == 0)
92748 ieee80211_clear_tx_pending(local);
92749
92750 /*
92751@@ -931,7 +931,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
92752
92753 ieee80211_recalc_ps(local, -1);
92754
92755- if (local->open_count == 0) {
92756+ if (local_read(&local->open_count) == 0) {
92757 ieee80211_stop_device(local);
92758
92759 /* no reconfiguring after stop! */
92760@@ -942,7 +942,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
92761 ieee80211_configure_filter(local);
92762 ieee80211_hw_config(local, hw_reconf_flags);
92763
92764- if (local->monitors == local->open_count)
92765+ if (local->monitors == local_read(&local->open_count))
92766 ieee80211_add_virtual_monitor(local);
92767 }
92768
92769diff --git a/net/mac80211/main.c b/net/mac80211/main.c
92770index 8a7bfc4..4407cd0 100644
92771--- a/net/mac80211/main.c
92772+++ b/net/mac80211/main.c
92773@@ -181,7 +181,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
92774 changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
92775 IEEE80211_CONF_CHANGE_POWER);
92776
92777- if (changed && local->open_count) {
92778+ if (changed && local_read(&local->open_count)) {
92779 ret = drv_config(local, changed);
92780 /*
92781 * Goal:
92782diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
92783index 3401262..d5cd68d 100644
92784--- a/net/mac80211/pm.c
92785+++ b/net/mac80211/pm.c
92786@@ -12,7 +12,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
92787 struct ieee80211_sub_if_data *sdata;
92788 struct sta_info *sta;
92789
92790- if (!local->open_count)
92791+ if (!local_read(&local->open_count))
92792 goto suspend;
92793
92794 ieee80211_scan_cancel(local);
92795@@ -59,7 +59,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
92796 cancel_work_sync(&local->dynamic_ps_enable_work);
92797 del_timer_sync(&local->dynamic_ps_timer);
92798
92799- local->wowlan = wowlan && local->open_count;
92800+ local->wowlan = wowlan && local_read(&local->open_count);
92801 if (local->wowlan) {
92802 int err = drv_suspend(local, wowlan);
92803 if (err < 0) {
92804@@ -116,7 +116,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
92805 WARN_ON(!list_empty(&local->chanctx_list));
92806
92807 /* stop hardware - this must stop RX */
92808- if (local->open_count)
92809+ if (local_read(&local->open_count))
92810 ieee80211_stop_device(local);
92811
92812 suspend:
92813diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
92814index a02bef3..f2f38dd 100644
92815--- a/net/mac80211/rate.c
92816+++ b/net/mac80211/rate.c
92817@@ -712,7 +712,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
92818
92819 ASSERT_RTNL();
92820
92821- if (local->open_count)
92822+ if (local_read(&local->open_count))
92823 return -EBUSY;
92824
92825 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
92826diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
92827index c97a065..ff61928 100644
92828--- a/net/mac80211/rc80211_pid_debugfs.c
92829+++ b/net/mac80211/rc80211_pid_debugfs.c
92830@@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
92831
92832 spin_unlock_irqrestore(&events->lock, status);
92833
92834- if (copy_to_user(buf, pb, p))
92835+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
92836 return -EFAULT;
92837
92838 return p;
92839diff --git a/net/mac80211/util.c b/net/mac80211/util.c
92840index 72e6292..e6319eb 100644
92841--- a/net/mac80211/util.c
92842+++ b/net/mac80211/util.c
92843@@ -1472,7 +1472,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
92844 }
92845 #endif
92846 /* everything else happens only if HW was up & running */
92847- if (!local->open_count)
92848+ if (!local_read(&local->open_count))
92849 goto wake_up;
92850
92851 /*
92852@@ -1696,7 +1696,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
92853 local->in_reconfig = false;
92854 barrier();
92855
92856- if (local->monitors == local->open_count && local->monitors > 0)
92857+ if (local->monitors == local_read(&local->open_count) && local->monitors > 0)
92858 ieee80211_add_virtual_monitor(local);
92859
92860 /*
92861diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
92862index 56d22ca..87c778f 100644
92863--- a/net/netfilter/Kconfig
92864+++ b/net/netfilter/Kconfig
92865@@ -958,6 +958,16 @@ config NETFILTER_XT_MATCH_ESP
92866
92867 To compile it as a module, choose M here. If unsure, say N.
92868
92869+config NETFILTER_XT_MATCH_GRADM
92870+ tristate '"gradm" match support'
92871+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
92872+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
92873+ ---help---
92874+ The gradm match allows to match on grsecurity RBAC being enabled.
92875+ It is useful when iptables rules are applied early on bootup to
92876+ prevent connections to the machine (except from a trusted host)
92877+ while the RBAC system is disabled.
92878+
92879 config NETFILTER_XT_MATCH_HASHLIMIT
92880 tristate '"hashlimit" match support'
92881 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
92882diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
92883index a1abf87..dbcb7ee 100644
92884--- a/net/netfilter/Makefile
92885+++ b/net/netfilter/Makefile
92886@@ -112,6 +112,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
92887 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
92888 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
92889 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
92890+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
92891 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
92892 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
92893 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
92894diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
92895index f771390..145b765 100644
92896--- a/net/netfilter/ipset/ip_set_core.c
92897+++ b/net/netfilter/ipset/ip_set_core.c
92898@@ -1820,7 +1820,7 @@ done:
92899 return ret;
92900 }
92901
92902-static struct nf_sockopt_ops so_set __read_mostly = {
92903+static struct nf_sockopt_ops so_set = {
92904 .pf = PF_INET,
92905 .get_optmin = SO_IP_SET,
92906 .get_optmax = SO_IP_SET + 1,
92907diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
92908index a083bda..da661c3 100644
92909--- a/net/netfilter/ipvs/ip_vs_conn.c
92910+++ b/net/netfilter/ipvs/ip_vs_conn.c
92911@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
92912 /* Increase the refcnt counter of the dest */
92913 ip_vs_dest_hold(dest);
92914
92915- conn_flags = atomic_read(&dest->conn_flags);
92916+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
92917 if (cp->protocol != IPPROTO_UDP)
92918 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
92919 flags = cp->flags;
92920@@ -900,7 +900,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
92921
92922 cp->control = NULL;
92923 atomic_set(&cp->n_control, 0);
92924- atomic_set(&cp->in_pkts, 0);
92925+ atomic_set_unchecked(&cp->in_pkts, 0);
92926
92927 cp->packet_xmit = NULL;
92928 cp->app = NULL;
92929@@ -1190,7 +1190,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
92930
92931 /* Don't drop the entry if its number of incoming packets is not
92932 located in [0, 8] */
92933- i = atomic_read(&cp->in_pkts);
92934+ i = atomic_read_unchecked(&cp->in_pkts);
92935 if (i > 8 || i < 0) return 0;
92936
92937 if (!todrop_rate[i]) return 0;
92938diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
92939index 23b8eb5..48a8959 100644
92940--- a/net/netfilter/ipvs/ip_vs_core.c
92941+++ b/net/netfilter/ipvs/ip_vs_core.c
92942@@ -559,7 +559,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
92943 ret = cp->packet_xmit(skb, cp, pd->pp, iph);
92944 /* do not touch skb anymore */
92945
92946- atomic_inc(&cp->in_pkts);
92947+ atomic_inc_unchecked(&cp->in_pkts);
92948 ip_vs_conn_put(cp);
92949 return ret;
92950 }
92951@@ -1711,7 +1711,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
92952 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
92953 pkts = sysctl_sync_threshold(ipvs);
92954 else
92955- pkts = atomic_add_return(1, &cp->in_pkts);
92956+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
92957
92958 if (ipvs->sync_state & IP_VS_STATE_MASTER)
92959 ip_vs_sync_conn(net, cp, pkts);
92960diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
92961index 9e6c2a0..28552e2 100644
92962--- a/net/netfilter/ipvs/ip_vs_ctl.c
92963+++ b/net/netfilter/ipvs/ip_vs_ctl.c
92964@@ -789,7 +789,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
92965 */
92966 ip_vs_rs_hash(ipvs, dest);
92967 }
92968- atomic_set(&dest->conn_flags, conn_flags);
92969+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
92970
92971 /* bind the service */
92972 if (!dest->svc) {
92973@@ -1657,7 +1657,7 @@ proc_do_sync_ports(ctl_table *table, int write,
92974 * align with netns init in ip_vs_control_net_init()
92975 */
92976
92977-static struct ctl_table vs_vars[] = {
92978+static ctl_table_no_const vs_vars[] __read_only = {
92979 {
92980 .procname = "amemthresh",
92981 .maxlen = sizeof(int),
92982@@ -2060,7 +2060,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
92983 " %-7s %-6d %-10d %-10d\n",
92984 &dest->addr.in6,
92985 ntohs(dest->port),
92986- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
92987+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
92988 atomic_read(&dest->weight),
92989 atomic_read(&dest->activeconns),
92990 atomic_read(&dest->inactconns));
92991@@ -2071,7 +2071,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
92992 "%-7s %-6d %-10d %-10d\n",
92993 ntohl(dest->addr.ip),
92994 ntohs(dest->port),
92995- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
92996+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
92997 atomic_read(&dest->weight),
92998 atomic_read(&dest->activeconns),
92999 atomic_read(&dest->inactconns));
93000@@ -2549,7 +2549,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
93001
93002 entry.addr = dest->addr.ip;
93003 entry.port = dest->port;
93004- entry.conn_flags = atomic_read(&dest->conn_flags);
93005+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
93006 entry.weight = atomic_read(&dest->weight);
93007 entry.u_threshold = dest->u_threshold;
93008 entry.l_threshold = dest->l_threshold;
93009@@ -3092,7 +3092,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
93010 if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
93011 nla_put_be16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
93012 nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
93013- (atomic_read(&dest->conn_flags) &
93014+ (atomic_read_unchecked(&dest->conn_flags) &
93015 IP_VS_CONN_F_FWD_MASK)) ||
93016 nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
93017 atomic_read(&dest->weight)) ||
93018@@ -3682,7 +3682,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
93019 {
93020 int idx;
93021 struct netns_ipvs *ipvs = net_ipvs(net);
93022- struct ctl_table *tbl;
93023+ ctl_table_no_const *tbl;
93024
93025 atomic_set(&ipvs->dropentry, 0);
93026 spin_lock_init(&ipvs->dropentry_lock);
93027diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
93028index 5ea26bd..c9bc65f 100644
93029--- a/net/netfilter/ipvs/ip_vs_lblc.c
93030+++ b/net/netfilter/ipvs/ip_vs_lblc.c
93031@@ -118,7 +118,7 @@ struct ip_vs_lblc_table {
93032 * IPVS LBLC sysctl table
93033 */
93034 #ifdef CONFIG_SYSCTL
93035-static ctl_table vs_vars_table[] = {
93036+static ctl_table_no_const vs_vars_table[] __read_only = {
93037 {
93038 .procname = "lblc_expiration",
93039 .data = NULL,
93040diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
93041index 50123c2..067c773 100644
93042--- a/net/netfilter/ipvs/ip_vs_lblcr.c
93043+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
93044@@ -299,7 +299,7 @@ struct ip_vs_lblcr_table {
93045 * IPVS LBLCR sysctl table
93046 */
93047
93048-static ctl_table vs_vars_table[] = {
93049+static ctl_table_no_const vs_vars_table[] __read_only = {
93050 {
93051 .procname = "lblcr_expiration",
93052 .data = NULL,
93053diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
93054index f6046d9..4f10cfd 100644
93055--- a/net/netfilter/ipvs/ip_vs_sync.c
93056+++ b/net/netfilter/ipvs/ip_vs_sync.c
93057@@ -596,7 +596,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
93058 cp = cp->control;
93059 if (cp) {
93060 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
93061- pkts = atomic_add_return(1, &cp->in_pkts);
93062+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
93063 else
93064 pkts = sysctl_sync_threshold(ipvs);
93065 ip_vs_sync_conn(net, cp->control, pkts);
93066@@ -758,7 +758,7 @@ control:
93067 if (!cp)
93068 return;
93069 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
93070- pkts = atomic_add_return(1, &cp->in_pkts);
93071+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
93072 else
93073 pkts = sysctl_sync_threshold(ipvs);
93074 goto sloop;
93075@@ -882,7 +882,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
93076
93077 if (opt)
93078 memcpy(&cp->in_seq, opt, sizeof(*opt));
93079- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
93080+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
93081 cp->state = state;
93082 cp->old_state = cp->state;
93083 /*
93084diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
93085index b75ff64..0c51bbe 100644
93086--- a/net/netfilter/ipvs/ip_vs_xmit.c
93087+++ b/net/netfilter/ipvs/ip_vs_xmit.c
93088@@ -1102,7 +1102,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
93089 else
93090 rc = NF_ACCEPT;
93091 /* do not touch skb anymore */
93092- atomic_inc(&cp->in_pkts);
93093+ atomic_inc_unchecked(&cp->in_pkts);
93094 goto out;
93095 }
93096
93097@@ -1194,7 +1194,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
93098 else
93099 rc = NF_ACCEPT;
93100 /* do not touch skb anymore */
93101- atomic_inc(&cp->in_pkts);
93102+ atomic_inc_unchecked(&cp->in_pkts);
93103 goto out;
93104 }
93105
93106diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
93107index 2d3030a..7ba1c0a 100644
93108--- a/net/netfilter/nf_conntrack_acct.c
93109+++ b/net/netfilter/nf_conntrack_acct.c
93110@@ -60,7 +60,7 @@ static struct nf_ct_ext_type acct_extend __read_mostly = {
93111 #ifdef CONFIG_SYSCTL
93112 static int nf_conntrack_acct_init_sysctl(struct net *net)
93113 {
93114- struct ctl_table *table;
93115+ ctl_table_no_const *table;
93116
93117 table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table),
93118 GFP_KERNEL);
93119diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
93120index 0283bae..5febcb0 100644
93121--- a/net/netfilter/nf_conntrack_core.c
93122+++ b/net/netfilter/nf_conntrack_core.c
93123@@ -1614,6 +1614,10 @@ void nf_conntrack_init_end(void)
93124 #define DYING_NULLS_VAL ((1<<30)+1)
93125 #define TEMPLATE_NULLS_VAL ((1<<30)+2)
93126
93127+#ifdef CONFIG_GRKERNSEC_HIDESYM
93128+static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
93129+#endif
93130+
93131 int nf_conntrack_init_net(struct net *net)
93132 {
93133 int ret;
93134@@ -1628,7 +1632,11 @@ int nf_conntrack_init_net(struct net *net)
93135 goto err_stat;
93136 }
93137
93138+#ifdef CONFIG_GRKERNSEC_HIDESYM
93139+ net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08lx", atomic_inc_return_unchecked(&conntrack_cache_id));
93140+#else
93141 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
93142+#endif
93143 if (!net->ct.slabname) {
93144 ret = -ENOMEM;
93145 goto err_slabname;
93146diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
93147index 1df1761..ce8b88a 100644
93148--- a/net/netfilter/nf_conntrack_ecache.c
93149+++ b/net/netfilter/nf_conntrack_ecache.c
93150@@ -188,7 +188,7 @@ static struct nf_ct_ext_type event_extend __read_mostly = {
93151 #ifdef CONFIG_SYSCTL
93152 static int nf_conntrack_event_init_sysctl(struct net *net)
93153 {
93154- struct ctl_table *table;
93155+ ctl_table_no_const *table;
93156
93157 table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
93158 GFP_KERNEL);
93159diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
93160index 974a2a4..52cc6ff 100644
93161--- a/net/netfilter/nf_conntrack_helper.c
93162+++ b/net/netfilter/nf_conntrack_helper.c
93163@@ -57,7 +57,7 @@ static struct ctl_table helper_sysctl_table[] = {
93164
93165 static int nf_conntrack_helper_init_sysctl(struct net *net)
93166 {
93167- struct ctl_table *table;
93168+ ctl_table_no_const *table;
93169
93170 table = kmemdup(helper_sysctl_table, sizeof(helper_sysctl_table),
93171 GFP_KERNEL);
93172diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
93173index 0ab9636..cea3c6a 100644
93174--- a/net/netfilter/nf_conntrack_proto.c
93175+++ b/net/netfilter/nf_conntrack_proto.c
93176@@ -52,7 +52,7 @@ nf_ct_register_sysctl(struct net *net,
93177
93178 static void
93179 nf_ct_unregister_sysctl(struct ctl_table_header **header,
93180- struct ctl_table **table,
93181+ ctl_table_no_const **table,
93182 unsigned int users)
93183 {
93184 if (users > 0)
93185diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
93186index a99b6c3..3841268 100644
93187--- a/net/netfilter/nf_conntrack_proto_dccp.c
93188+++ b/net/netfilter/nf_conntrack_proto_dccp.c
93189@@ -457,7 +457,7 @@ static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
93190 out_invalid:
93191 if (LOG_INVALID(net, IPPROTO_DCCP))
93192 nf_log_packet(net, nf_ct_l3num(ct), 0, skb, NULL, NULL,
93193- NULL, msg);
93194+ NULL, "%s", msg);
93195 return false;
93196 }
93197
93198@@ -614,7 +614,7 @@ static int dccp_error(struct net *net, struct nf_conn *tmpl,
93199
93200 out_invalid:
93201 if (LOG_INVALID(net, IPPROTO_DCCP))
93202- nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, msg);
93203+ nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, "%s", msg);
93204 return -NF_ACCEPT;
93205 }
93206
93207diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
93208index 4d4d8f1..e0f9a32 100644
93209--- a/net/netfilter/nf_conntrack_proto_tcp.c
93210+++ b/net/netfilter/nf_conntrack_proto_tcp.c
93211@@ -526,7 +526,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
93212 const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
93213 __u32 seq, ack, sack, end, win, swin;
93214 s16 receiver_offset;
93215- bool res;
93216+ bool res, in_recv_win;
93217
93218 /*
93219 * Get the required data from the packet.
93220@@ -649,14 +649,18 @@ static bool tcp_in_window(const struct nf_conn *ct,
93221 receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
93222 receiver->td_scale);
93223
93224+ /* Is the ending sequence in the receive window (if available)? */
93225+ in_recv_win = !receiver->td_maxwin ||
93226+ after(end, sender->td_end - receiver->td_maxwin - 1);
93227+
93228 pr_debug("tcp_in_window: I=%i II=%i III=%i IV=%i\n",
93229 before(seq, sender->td_maxend + 1),
93230- after(end, sender->td_end - receiver->td_maxwin - 1),
93231+ (in_recv_win ? 1 : 0),
93232 before(sack, receiver->td_end + 1),
93233 after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1));
93234
93235 if (before(seq, sender->td_maxend + 1) &&
93236- after(end, sender->td_end - receiver->td_maxwin - 1) &&
93237+ in_recv_win &&
93238 before(sack, receiver->td_end + 1) &&
93239 after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1)) {
93240 /*
93241@@ -725,7 +729,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
93242 nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
93243 "nf_ct_tcp: %s ",
93244 before(seq, sender->td_maxend + 1) ?
93245- after(end, sender->td_end - receiver->td_maxwin - 1) ?
93246+ in_recv_win ?
93247 before(sack, receiver->td_end + 1) ?
93248 after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1) ? "BUG"
93249 : "ACK is under the lower bound (possible overly delayed ACK)"
93250diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
93251index bd700b4..4a3dc61 100644
93252--- a/net/netfilter/nf_conntrack_standalone.c
93253+++ b/net/netfilter/nf_conntrack_standalone.c
93254@@ -471,7 +471,7 @@ static ctl_table nf_ct_netfilter_table[] = {
93255
93256 static int nf_conntrack_standalone_init_sysctl(struct net *net)
93257 {
93258- struct ctl_table *table;
93259+ ctl_table_no_const *table;
93260
93261 table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table),
93262 GFP_KERNEL);
93263diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
93264index 902fb0a..87f7fdb 100644
93265--- a/net/netfilter/nf_conntrack_timestamp.c
93266+++ b/net/netfilter/nf_conntrack_timestamp.c
93267@@ -42,7 +42,7 @@ static struct nf_ct_ext_type tstamp_extend __read_mostly = {
93268 #ifdef CONFIG_SYSCTL
93269 static int nf_conntrack_tstamp_init_sysctl(struct net *net)
93270 {
93271- struct ctl_table *table;
93272+ ctl_table_no_const *table;
93273
93274 table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
93275 GFP_KERNEL);
93276diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
93277index 3b18dd1..f79e0ca 100644
93278--- a/net/netfilter/nf_log.c
93279+++ b/net/netfilter/nf_log.c
93280@@ -243,7 +243,7 @@ static const struct file_operations nflog_file_ops = {
93281
93282 #ifdef CONFIG_SYSCTL
93283 static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
93284-static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
93285+static ctl_table_no_const nf_log_sysctl_table[NFPROTO_NUMPROTO+1] __read_only;
93286
93287 static int nf_log_proc_dostring(ctl_table *table, int write,
93288 void __user *buffer, size_t *lenp, loff_t *ppos)
93289@@ -274,14 +274,16 @@ static int nf_log_proc_dostring(ctl_table *table, int write,
93290 rcu_assign_pointer(net->nf.nf_loggers[tindex], logger);
93291 mutex_unlock(&nf_log_mutex);
93292 } else {
93293+ ctl_table_no_const nf_log_table = *table;
93294+
93295 mutex_lock(&nf_log_mutex);
93296 logger = rcu_dereference_protected(net->nf.nf_loggers[tindex],
93297 lockdep_is_held(&nf_log_mutex));
93298 if (!logger)
93299- table->data = "NONE";
93300+ nf_log_table.data = "NONE";
93301 else
93302- table->data = logger->name;
93303- r = proc_dostring(table, write, buffer, lenp, ppos);
93304+ nf_log_table.data = logger->name;
93305+ r = proc_dostring(&nf_log_table, write, buffer, lenp, ppos);
93306 mutex_unlock(&nf_log_mutex);
93307 }
93308
93309diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
93310index f042ae5..30ea486 100644
93311--- a/net/netfilter/nf_sockopt.c
93312+++ b/net/netfilter/nf_sockopt.c
93313@@ -45,7 +45,7 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg)
93314 }
93315 }
93316
93317- list_add(&reg->list, &nf_sockopts);
93318+ pax_list_add((struct list_head *)&reg->list, &nf_sockopts);
93319 out:
93320 mutex_unlock(&nf_sockopt_mutex);
93321 return ret;
93322@@ -55,7 +55,7 @@ EXPORT_SYMBOL(nf_register_sockopt);
93323 void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
93324 {
93325 mutex_lock(&nf_sockopt_mutex);
93326- list_del(&reg->list);
93327+ pax_list_del((struct list_head *)&reg->list);
93328 mutex_unlock(&nf_sockopt_mutex);
93329 }
93330 EXPORT_SYMBOL(nf_unregister_sockopt);
93331diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
93332index 962e979..e46f350 100644
93333--- a/net/netfilter/nfnetlink_log.c
93334+++ b/net/netfilter/nfnetlink_log.c
93335@@ -82,7 +82,7 @@ static int nfnl_log_net_id __read_mostly;
93336 struct nfnl_log_net {
93337 spinlock_t instances_lock;
93338 struct hlist_head instance_table[INSTANCE_BUCKETS];
93339- atomic_t global_seq;
93340+ atomic_unchecked_t global_seq;
93341 };
93342
93343 static struct nfnl_log_net *nfnl_log_pernet(struct net *net)
93344@@ -419,6 +419,7 @@ __build_packet_message(struct nfnl_log_net *log,
93345 nfmsg->version = NFNETLINK_V0;
93346 nfmsg->res_id = htons(inst->group_num);
93347
93348+ memset(&pmsg, 0, sizeof(pmsg));
93349 pmsg.hw_protocol = skb->protocol;
93350 pmsg.hook = hooknum;
93351
93352@@ -498,7 +499,10 @@ __build_packet_message(struct nfnl_log_net *log,
93353 if (indev && skb->dev &&
93354 skb->mac_header != skb->network_header) {
93355 struct nfulnl_msg_packet_hw phw;
93356- int len = dev_parse_header(skb, phw.hw_addr);
93357+ int len;
93358+
93359+ memset(&phw, 0, sizeof(phw));
93360+ len = dev_parse_header(skb, phw.hw_addr);
93361 if (len > 0) {
93362 phw.hw_addrlen = htons(len);
93363 if (nla_put(inst->skb, NFULA_HWADDR, sizeof(phw), &phw))
93364@@ -559,7 +563,7 @@ __build_packet_message(struct nfnl_log_net *log,
93365 /* global sequence number */
93366 if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
93367 nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
93368- htonl(atomic_inc_return(&log->global_seq))))
93369+ htonl(atomic_inc_return_unchecked(&log->global_seq))))
93370 goto nla_put_failure;
93371
93372 if (data_len) {
93373diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
93374index 5352b2d..e0083ce 100644
93375--- a/net/netfilter/nfnetlink_queue_core.c
93376+++ b/net/netfilter/nfnetlink_queue_core.c
93377@@ -444,7 +444,10 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
93378 if (indev && entskb->dev &&
93379 entskb->mac_header != entskb->network_header) {
93380 struct nfqnl_msg_packet_hw phw;
93381- int len = dev_parse_header(entskb, phw.hw_addr);
93382+ int len;
93383+
93384+ memset(&phw, 0, sizeof(phw));
93385+ len = dev_parse_header(entskb, phw.hw_addr);
93386 if (len) {
93387 phw.hw_addrlen = htons(len);
93388 if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw))
93389diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
93390index 7011c71..6113cc7 100644
93391--- a/net/netfilter/xt_TCPMSS.c
93392+++ b/net/netfilter/xt_TCPMSS.c
93393@@ -52,7 +52,8 @@ tcpmss_mangle_packet(struct sk_buff *skb,
93394 {
93395 const struct xt_tcpmss_info *info = par->targinfo;
93396 struct tcphdr *tcph;
93397- unsigned int tcplen, i;
93398+ int len, tcp_hdrlen;
93399+ unsigned int i;
93400 __be16 oldval;
93401 u16 newmss;
93402 u8 *opt;
93403@@ -64,11 +65,14 @@ tcpmss_mangle_packet(struct sk_buff *skb,
93404 if (!skb_make_writable(skb, skb->len))
93405 return -1;
93406
93407- tcplen = skb->len - tcphoff;
93408+ len = skb->len - tcphoff;
93409+ if (len < (int)sizeof(struct tcphdr))
93410+ return -1;
93411+
93412 tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
93413+ tcp_hdrlen = tcph->doff * 4;
93414
93415- /* Header cannot be larger than the packet */
93416- if (tcplen < tcph->doff*4)
93417+ if (len < tcp_hdrlen)
93418 return -1;
93419
93420 if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
93421@@ -87,9 +91,8 @@ tcpmss_mangle_packet(struct sk_buff *skb,
93422 newmss = info->mss;
93423
93424 opt = (u_int8_t *)tcph;
93425- for (i = sizeof(struct tcphdr); i < tcph->doff*4; i += optlen(opt, i)) {
93426- if (opt[i] == TCPOPT_MSS && tcph->doff*4 - i >= TCPOLEN_MSS &&
93427- opt[i+1] == TCPOLEN_MSS) {
93428+ for (i = sizeof(struct tcphdr); i <= tcp_hdrlen - TCPOLEN_MSS; i += optlen(opt, i)) {
93429+ if (opt[i] == TCPOPT_MSS && opt[i+1] == TCPOLEN_MSS) {
93430 u_int16_t oldmss;
93431
93432 oldmss = (opt[i+2] << 8) | opt[i+3];
93433@@ -112,9 +115,10 @@ tcpmss_mangle_packet(struct sk_buff *skb,
93434 }
93435
93436 /* There is data after the header so the option can't be added
93437- without moving it, and doing so may make the SYN packet
93438- itself too large. Accept the packet unmodified instead. */
93439- if (tcplen > tcph->doff*4)
93440+ * without moving it, and doing so may make the SYN packet
93441+ * itself too large. Accept the packet unmodified instead.
93442+ */
93443+ if (len > tcp_hdrlen)
93444 return 0;
93445
93446 /*
93447@@ -143,10 +147,10 @@ tcpmss_mangle_packet(struct sk_buff *skb,
93448 newmss = min(newmss, (u16)1220);
93449
93450 opt = (u_int8_t *)tcph + sizeof(struct tcphdr);
93451- memmove(opt + TCPOLEN_MSS, opt, tcplen - sizeof(struct tcphdr));
93452+ memmove(opt + TCPOLEN_MSS, opt, len - sizeof(struct tcphdr));
93453
93454 inet_proto_csum_replace2(&tcph->check, skb,
93455- htons(tcplen), htons(tcplen + TCPOLEN_MSS), 1);
93456+ htons(len), htons(len + TCPOLEN_MSS), 1);
93457 opt[0] = TCPOPT_MSS;
93458 opt[1] = TCPOLEN_MSS;
93459 opt[2] = (newmss & 0xff00) >> 8;
93460diff --git a/net/netfilter/xt_TCPOPTSTRIP.c b/net/netfilter/xt_TCPOPTSTRIP.c
93461index b68fa19..625fa1d 100644
93462--- a/net/netfilter/xt_TCPOPTSTRIP.c
93463+++ b/net/netfilter/xt_TCPOPTSTRIP.c
93464@@ -38,7 +38,7 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
93465 struct tcphdr *tcph;
93466 u_int16_t n, o;
93467 u_int8_t *opt;
93468- int len;
93469+ int len, tcp_hdrlen;
93470
93471 /* This is a fragment, no TCP header is available */
93472 if (par->fragoff != 0)
93473@@ -52,7 +52,9 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
93474 return NF_DROP;
93475
93476 tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
93477- if (tcph->doff * 4 > len)
93478+ tcp_hdrlen = tcph->doff * 4;
93479+
93480+ if (len < tcp_hdrlen)
93481 return NF_DROP;
93482
93483 opt = (u_int8_t *)tcph;
93484@@ -61,10 +63,10 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
93485 * Walk through all TCP options - if we find some option to remove,
93486 * set all octets to %TCPOPT_NOP and adjust checksum.
93487 */
93488- for (i = sizeof(struct tcphdr); i < tcp_hdrlen(skb); i += optl) {
93489+ for (i = sizeof(struct tcphdr); i < tcp_hdrlen - 1; i += optl) {
93490 optl = optlen(opt, i);
93491
93492- if (i + optl > tcp_hdrlen(skb))
93493+ if (i + optl > tcp_hdrlen)
93494 break;
93495
93496 if (!tcpoptstrip_test_bit(info->strip_bmap, opt[i]))
93497diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
93498new file mode 100644
93499index 0000000..c566332
93500--- /dev/null
93501+++ b/net/netfilter/xt_gradm.c
93502@@ -0,0 +1,51 @@
93503+/*
93504+ * gradm match for netfilter
93505